/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#define CPU_NO_GLOBAL_REGS
#include "exec.h"
#include "disas.h"
#if !defined(TARGET_IA64)
#include "tcg.h"
#endif
#include "kvm.h"
#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#ifdef __linux__
#include <sys/ucontext.h>
#endif
#endif
#include "qemu-kvm.h"

#if defined(__sparc__) && !defined(HOST_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif

int tb_invalidated_flag;
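/* Set when the translation cache had to be flushed while generating
   code; the main loop checks it before chaining TBs so it never
   patches a jump into a block that may have been recycled (see the
   tb_invalidated_flag test in cpu_exec() below). */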
//#define DEBUG_EXEC
//#define DEBUG_SIGNAL
void cpu_loop_exit(void)
{
    /* NOTE: the registers at this point must be saved by hand because
       longjmp restores them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}
/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
#ifdef __linux__
    struct ucontext *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#endif
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
#ifdef __linux__
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
#elif defined(__OpenBSD__)
        sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
#endif
    }
#endif
    longjmp(env->jmp_env, 1);
}
/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);

    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if async event occurs before
           the TB starts executing.  */
        CPU_PC_FROM_TB(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
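/* TB lookup is two-level: tb_find_fast() first probes tb_jmp_cache,
   a direct-mapped cache hashed by virtual PC, and only on a miss
   falls back to tb_find_slow(), which hashes the *physical* PC into
   tb_phys_hash so translated code survives changes of the virtual
   address mapping. */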
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}
static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint64_t flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
#if defined(TARGET_I386)
    flags = env->hflags;
    flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
    cs_base = env->segs[R_CS].base;
    pc = cs_base + env->eip;
#elif defined(TARGET_ARM)
    flags = env->thumb | (env->vfp.vec_len << 1)
            | (env->vfp.vec_stride << 4);
    if ((env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR)
        flags |= (1 << 6);
    if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30))
        flags |= (1 << 7);
    flags |= (env->condexec_bits << 8);
    cs_base = 0;
    pc = env->regs[15];
#elif defined(TARGET_SPARC)
#ifdef TARGET_SPARC64
    // AM . Combined FPU enable bits . PRIV . DMMU enabled . IMMU enabled
    flags = ((env->pstate & PS_AM) << 2)
        | (((env->pstate & PS_PEF) >> 1) | ((env->fprs & FPRS_FEF) << 2))
        | (env->pstate & PS_PRIV) | ((env->lsu & (DMMU_E | IMMU_E)) >> 2);
#else
    // FPU enable . Supervisor
    flags = (env->psref << 4) | env->psrs;
#endif
    cs_base = env->npc;
    pc = env->pc;
#elif defined(TARGET_PPC)
    flags = env->hflags;
    cs_base = 0;
    pc = env->nip;
#elif defined(TARGET_MIPS)
    flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK);
    cs_base = 0;
    pc = env->active_tc.PC;
#elif defined(TARGET_M68K)
    flags = (env->fpcr & M68K_FPCR_PREC)  /* Bit  6 */
            | (env->sr & SR_S)            /* Bit  13 */
            | ((env->macsr >> 4) & 0xf);  /* Bits 0-3 */
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_SH4)
    flags = (env->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL
                    | DELAY_SLOT_TRUE | DELAY_SLOT_CLEARME))   /* Bits  0- 3 */
            | (env->fpscr & (FPSCR_FR | FPSCR_SZ | FPSCR_PR))  /* Bits 19-21 */
            | (env->sr & (SR_MD | SR_RB));                     /* Bits 29-30 */
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_ALPHA)
    flags = env->ps;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_CRIS)
    flags = env->pregs[PR_CCS] & (S_FLAG | P_FLAG | U_FLAG | X_FLAG);
    flags |= env->dslot;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_IA64)
    flags = 0;
    cs_base = 0;                    /* XXXXX */
    pc = 0;
#else
#error unsupported CPU
#endif
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}
/* main execution loop */

int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;

    env_to_regs();
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
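    /* The x86 condition codes are evaluated lazily: CC_OP records
       which operation last set them and CC_SRC holds its operands, so
       EFLAGS is only materialized on demand via cc_table[CC_OP].
       DF is likewise cached as +1/-1 for the string instructions. */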
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_IA64)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;
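    /* Helpers report guest exceptions by setting env->exception_index
       and calling cpu_loop_exit(), which longjmp()s back to the
       setjmp() below; the outer for(;;) then delivers the exception
       before resuming translated code. */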
    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    break;
                } else if (env->user_mode_only) {
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
                } else {
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#elif defined(TARGET_IA64)
                    do_interrupt(env);
#endif
                }
                env->exception_index = -1;
            }
#ifdef USE_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0) {
                int ret;
                env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif
            /* kvm vcpu threads */
            if (kvm_enabled()) {
                kvm_cpu_exec(env);
                longjmp(env->jmp_env, 1);
            }
            next_tb = 0; /* force lookup of first TB */
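            /* next_tb carries the address of the TB that just ran,
               with the index of the jump slot to patch encoded in its
               low two bits; a low-bit value of 2 instead marks an
               icount expiry (handled after tcg_qemu_tb_exec() below).
               Zeroing it suppresses TB chaining. */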
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                               CPU_INTERRUPT_FIQ |
                                               CPU_INTERRUPT_SMI |
                                               CPU_INTERRUPT_NMI);
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            if (loglevel & CPU_LOG_TB_IN_ASM) {
                                fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
                            }
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            if (loglevel & CPU_LOG_TB_IN_ASM)
                                fprintf(logfile, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psret != 0)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            env->exception_index = env->interrupt_index;
                            do_interrupt(env);
                            env->interrupt_index = 0;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
                            next_tb = 0;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_EXIT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
                        env->exception_index = EXCP_INTERRUPT;
                        cpu_loop_exit();
                    }
                }
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
                    regs_to_env();
#if defined(TARGET_I386)
                    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                    cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SPARC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_PPC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_MIPS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SH4)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_ALPHA)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_CRIS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_EXEC)) {
                    fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                            (long)tb->tc_ptr, tb->pc,
                            lookup_symbol(tb->pc));
                }
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                {
                    if (next_tb != 0 &&
#ifdef USE_KQEMU
                        (env->kqemu_enabled != 2) &&
#endif
                        tb->page_addr[1] == -1) {
                        tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                    }
                }
                spin_unlock(&tb_lock);
                env->current_tb = tb;

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                if (unlikely (env->interrupt_request & CPU_INTERRUPT_EXIT))
                    env->current_tb = NULL;

                while (env->current_tb) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
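                    /* tcg_qemu_tb_exec() runs translated code until the
                       TB chain ends or an exit is forced. With icount,
                       each TB decrements the 16-bit budget in
                       icount_decr.u16.low; when it expires the code
                       returns with (next_tb & 3) == 2 and the budget is
                       refilled below from icount_extra, which banks the
                       instructions that did not fit in 16 bits. */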
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
                    env->current_tb = NULL;
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC.  */
                        CPU_PC_FROM_TB(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
                }
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(USE_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* for(;;) */
        } else {
            env_to_regs();
        }
    } /* for(;;) */
#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?  */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_IA64)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
#include "hostregs_helper.h"

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}
/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}
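/* The user-mode helpers below temporarily install the caller's
   CPUX86State as the global 'env', because the helper_* routines
   implicitly operate on that global, then restore the previous
   pointer before returning. */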
#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */
#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0. 'old_set' is the
   signal set which should be restored */
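/* Returns 1 when the fault was handled here (the page was unprotected
   or a guest exception was raised); returns 0 when it was not an MMU
   fault, in which case the caller must treat it as a genuine host
   fault. */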
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined (TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_ALPHA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_CRIS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#else
#error unsupported target CPU
#endif
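/* Host-side handlers: each host architecture below extracts the
   faulting PC, the write flag and the saved signal mask from its
   signal context and forwards them to handle_cpu_signal(). */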
#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)  (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)    ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext->es.err)
#else
# define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}
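/* Note: trap 0x0e is the x86 page-fault vector, and bit 1 of the
   page-fault error code is set when the faulting access was a write;
   that is how is_write is derived above (the x86_64 variant below
   uses the same logic). */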
#elif defined(__x86_64__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;

    pc = uc->uc_mcontext.gregs[REG_RIP];
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
                             (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}
#elif defined(__powerpc__)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)     ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)      REG_sig(gpr[reg_num], context)
# define IAR_sig(context)               REG_sig(nip, context)  /* Program counter */
# define MSR_sig(context)               REG_sig(msr, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)               REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)               REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                REG_sig(link, context) /* Link register */
# define CR_sig(context)                REG_sig(ccr, context)  /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)    (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)             (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)               REG_sig(dar, context)
# define DSISR_sig(context)             REG_sig(dsisr, context)
# define TRAP_sig(context)              REG_sig(trap, context)
#endif /* linux */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)      ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)   ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)       REG_sig(r##reg_num, context)
# define IAR_sig(context)                REG_sig(srr0, context) /* Program counter */
# define MSR_sig(context)                REG_sig(srr1, context) /* Machine State Register (Supervisor) */
# define CTR_sig(context)                REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)                REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                 REG_sig(lr, context)   /* Link register */
# define CR_sig(context)                 REG_sig(cr, context)   /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)     FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)              ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)                EXCEPREG_sig(dar, context) /* Fault registers for coredump */
# define DSISR_sig(context)              EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)               EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = (uint32_t *)uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal((unsigned long)pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(HOST_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define ? */
    unsigned long pc = regs[1];
#else
#ifdef __linux__
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
    unsigned long pc = uc->sc_pc;
    void *sigmask = (void *)(long)uc->sc_mask;
#endif
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x06: // sth
        case 0x04: // st
        case 0x07: // std
        case 0x24: // stf
        case 0x27: // stdf
        case 0x25: // stfsr
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}
#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33:  */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.sc_iaoq[0];
    /* FIXME: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */