/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#include "exec.h"
#include "disas.h"

#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#include <sys/ucontext.h>
#endif

#ifdef USE_KVM
#include "qemu-kvm.h"
extern int kvm_allowed;
#endif

int tb_invalidated_flag;

//#define DEBUG_EXEC
//#define DEBUG_SIGNAL

#define SAVE_GLOBALS()
#define RESTORE_GLOBALS()
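
/* By default these are no-ops: on most hosts the globals (env, T0)
   simply stay in their reserved registers across setjmp/longjmp.  The
   sparc/glibc workaround below redefines both macros to spill and
   reload them around libc calls that would otherwise clobber them. */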

#if defined(__sparc__) && !defined(HOST_SOLARIS)
#include <features.h>
#if defined(__GLIBC__) && ((__GLIBC__ < 2) || \
                           ((__GLIBC__ == 2) && (__GLIBC_MINOR__ <= 90)))
// Work around ugly bugs in glibc that mangle global register contents

static volatile void *saved_env;
static volatile unsigned long saved_t0, saved_i7;
#undef SAVE_GLOBALS
#define SAVE_GLOBALS() do {                                     \
        saved_env = env;                                        \
        saved_t0 = T0;                                          \
        asm volatile ("st %%i7, [%0]" : : "r" (&saved_i7));     \
    } while(0)

#undef RESTORE_GLOBALS
#define RESTORE_GLOBALS() do {                                  \
        env = (void *)saved_env;                                \
        T0 = saved_t0;                                          \
        asm volatile ("ld [%0], %%i7" : : "r" (&saved_i7));     \
    } while(0)

static int sparc_setjmp(jmp_buf buf)
{
    int ret;

    SAVE_GLOBALS();
    ret = setjmp(buf);
    RESTORE_GLOBALS();
    return ret;
}
#undef setjmp
#define setjmp(jmp_buf) sparc_setjmp(jmp_buf)

static void sparc_longjmp(jmp_buf buf, int val)
{
    SAVE_GLOBALS();
    longjmp(buf, val);
}
#define longjmp(jmp_buf, val) sparc_longjmp(jmp_buf, val)
#endif
#endif
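
/* Generated code and helper routines leave the execution loop by
   longjmp()ing back to the setjmp() point in cpu_exec();
   cpu_loop_exit() below is the standard way to do that after flushing
   the register state back into *env. */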

void cpu_loop_exit(void)
{
    /* NOTE: the registers at this point must be saved by hand because
       longjmp restores them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}

#if !(defined(TARGET_SPARC) || defined(TARGET_SH4) || defined(TARGET_M68K))
#define reg_T2
#endif

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    struct ucontext *uc = puc;
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
    }
#endif
    longjmp(env->jmp_env, 1);
}
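
/* TB lookup is two-level: tb_find_fast() probes the per-CPU
   tb_jmp_cache hash keyed on the virtual PC, and on a miss
   tb_find_slow() searches the physical-address hash chain, generating
   a fresh translation if no matching block exists. */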

static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    int code_gen_size;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;
    uint8_t *tc_ptr;

    spin_lock(&tb_lock);

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* don't forget to invalidate previous TB info */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    SAVE_GLOBALS();
    cpu_gen_code(env, tb, &code_gen_size);
    RESTORE_GLOBALS();
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    spin_unlock(&tb_lock);
    return tb;
}
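
/* tb_find_fast() derives the lookup key from the current CPU state:
   'pc' and 'cs_base' identify the guest code, while 'flags' folds in
   every mode bit that changes how that code must be translated (on
   x86: hflags, the IOPL/TF/VM bits of eflags and the SVM intercept
   mask; on ARM: Thumb mode, VFP vector state and the privilege bit). */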

static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint64_t flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
#if defined(TARGET_I386)
    flags = env->hflags;
    flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
    flags |= env->intercept;
    cs_base = env->segs[R_CS].base;
    pc = cs_base + env->eip;
#elif defined(TARGET_ARM)
    flags = env->thumb | (env->vfp.vec_len << 1)
            | (env->vfp.vec_stride << 4);
    if ((env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR)
        flags |= (1 << 6);
    if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30))
        flags |= (1 << 7);
    flags |= (env->condexec_bits << 8);
    cs_base = 0;
    pc = env->regs[15];
#elif defined(TARGET_SPARC)
#ifdef TARGET_SPARC64
    // Combined FPU enable bits . PRIV . DMMU enabled . IMMU enabled
    flags = (((env->pstate & PS_PEF) >> 1) | ((env->fprs & FPRS_FEF) << 2))
            | (env->pstate & PS_PRIV) | ((env->lsu & (DMMU_E | IMMU_E)) >> 2);
#else
    // FPU enable . Supervisor
    flags = (env->psref << 4) | env->psrs;
#endif
    cs_base = env->npc;
    pc = env->pc;
#elif defined(TARGET_PPC)
    flags = env->hflags;
    cs_base = 0;
    pc = env->nip;
#elif defined(TARGET_MIPS)
    flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK);
    cs_base = 0;
    pc = env->PC[env->current_tc];
#elif defined(TARGET_M68K)
    flags = (env->fpcr & M68K_FPCR_PREC)  /* Bit  6 */
            | (env->sr & SR_S)            /* Bit  13 */
            | ((env->macsr >> 4) & 0xf);  /* Bits 0-3 */
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_SH4)
    flags = env->flags;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_ALPHA)
    flags = env->ps;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_CRIS)
    flags = 0;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_IA64)
    flags = 0;
    cs_base = 0;  /* XXXXX */
    pc = 0;
#else
#error unsupported CPU
#endif
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (__builtin_expect(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                         tb->flags != flags, 0)) {
        tb = tb_find_slow(pc, cs_base, flags);
        /* Note: we do it here to avoid a gcc bug on Mac OS X when
           doing it in tb_find_slow */
        if (tb_invalidated_flag) {
            /* as some TB could have been invalidated because
               of memory exceptions while generating the code, we
               must recompute the hash index here */
            T0 = 0;
        }
    }
    return tb;
}

#define BREAK_CHAIN T0 = 0
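
/* T0 doubles as the block-chaining cookie: its low two bits select
   which jump slot of the previous TB to patch and the remaining bits
   are the TB pointer itself (see the tb_add_jump() call in the main
   loop).  BREAK_CHAIN clears it whenever control flow changed behind
   the translator's back, forcing a full lookup for the next block
   instead of a direct jump. */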

/* main execution loop */

int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
#if defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    uint32_t *saved_regwptr;
#endif
#endif
    int ret, interrupt_request;
    void (*gen_func)(void);
    TranslationBlock *tb;
    uint8_t *tc_ptr;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;
    SAVE_GLOBALS();

    env_to_regs();
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    saved_regwptr = REGWPTR;
#endif
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_IA64)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;
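
    /* Exception protocol: helpers raise guest exceptions by setting
       exception_index and longjmp()ing to env->jmp_env (cpu_loop_exit).
       setjmp() then returns non-zero, the else branch below resyncs
       the register state, and the outer for(;;) loop services the
       pending exception before resuming execution. */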

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    break;
                } else if (env->user_mode_only) {
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
#endif
                    ret = env->exception_index;
                    break;
                } else {
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env->exception_index);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#elif defined(TARGET_IA64)
                    do_interrupt(env);
#endif
                }
                env->exception_index = -1;
            }
#ifdef USE_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0) {
                int ret;
                env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif

#ifdef USE_KVM
            if (kvm_allowed) {
                kvm_cpu_exec(env);
                longjmp(env->jmp_env, 1);
            }
#endif
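
            /* When KVM is active the guest executes inside the kernel
               module rather than through the translator: kvm_cpu_exec()
               runs the vcpu until it exits, and the longjmp() restarts
               the outer loop so pending exit conditions are serviced. */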
            T0 = 0; /* force lookup of first TB */
            for(;;) {
                SAVE_GLOBALS();
                interrupt_request = env->interrupt_request;
                if (__builtin_expect(interrupt_request, 0)
#if defined(TARGET_I386)
                    && env->hflags & HF_GIF_MASK
#endif
                    ) {
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                        !(env->hflags & HF_SMM_MASK)) {
                        svm_check_intercept(SVM_EXIT_SMI);
                        env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                        do_smm_enter();
                        BREAK_CHAIN;
                    } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                               (env->eflags & IF_MASK || env->hflags & HF_HIF_MASK) &&
                               !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                        int intno;
                        svm_check_intercept(SVM_EXIT_INTR);
                        env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                        intno = cpu_get_pic_interrupt(env);
                        if (loglevel & CPU_LOG_TB_IN_ASM) {
                            fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
                        }
                        do_interrupt(intno, 0, 0, 0, 1);
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        BREAK_CHAIN;
#if !defined(CONFIG_USER_ONLY)
                    } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                               (env->eflags & IF_MASK) && !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                        int intno;
                        /* FIXME: this should respect TPR */
                        env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                        svm_check_intercept(SVM_EXIT_VINTR);
                        intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                        if (loglevel & CPU_LOG_TB_IN_ASM)
                            fprintf(logfile, "Servicing virtual hardware INT=0x%02x\n", intno);
                        do_interrupt(intno, 0, 0, -1, 1);
                        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl),
                                 ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl)) & ~V_IRQ_MASK);
                        BREAK_CHAIN;
#endif
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        BREAK_CHAIN;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        BREAK_CHAIN;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psret != 0)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            do_interrupt(env->interrupt_index);
                            env->interrupt_index = 0;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
                            BREAK_CHAIN;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        BREAK_CHAIN;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        BREAK_CHAIN;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        BREAK_CHAIN;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        BREAK_CHAIN;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        BREAK_CHAIN;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                           < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        BREAK_CHAIN;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        BREAK_CHAIN;
                    }
                    if (interrupt_request & CPU_INTERRUPT_EXIT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
                        env->exception_index = EXCP_INTERRUPT;
                        cpu_loop_exit();
                    }
                }
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
                    regs_to_env();
#if defined(TARGET_I386)
                    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                    cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SPARC)
                    REGWPTR = env->regbase + (env->cwp * 16);
                    env->regwptr = REGWPTR;
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_PPC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_MIPS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SH4)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_ALPHA)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_CRIS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                tb = tb_find_fast();
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_EXEC)) {
                    fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                            (long)tb->tc_ptr, tb->pc,
                            lookup_symbol(tb->pc));
                }
#endif
                RESTORE_GLOBALS();
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                {
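                    /* T0 still holds the cookie left by the previous
                       TB's exit code; if it is set and the new block
                       lies within a single page, patch that exit to
                       branch here directly. */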
                    if (T0 != 0 &&
#if USE_KQEMU
                        (env->kqemu_enabled != 2) &&
#endif
                        tb->page_addr[1] == -1) {
                        spin_lock(&tb_lock);
                        tb_add_jump((TranslationBlock *)(long)(T0 & ~3), T0 & 3, tb);
                        spin_unlock(&tb_lock);
                    }
                }
                tc_ptr = tb->tc_ptr;
                env->current_tb = tb;
                /* execute the generated code */
                gen_func = (void *)tc_ptr;
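                /* Entering the translated code is host-specific: SPARC
                   needs an explicit call sequence marking the window
                   registers as clobbered, IA-64 must go through a
                   function descriptor (ip/gp pair), and every other
                   host can use a plain indirect call. */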
#if defined(__sparc__)
                __asm__ __volatile__("call %0\n\t"
                                     "mov %%o7,%%i0"
                                     : /* no outputs */
                                     : "r" (gen_func)
                                     : "i0", "i1", "i2", "i3", "i4", "i5",
                                       "o0", "o1", "o2", "o3", "o4", "o5",
                                       "l0", "l1", "l2", "l3", "l4", "l5",
                                       "l6", "l7");
#elif defined(__arm__)
                asm volatile ("mov pc, %0\n\t"
                              ".global exec_loop\n\t"
                              "exec_loop:\n\t"
                              : /* no outputs */
                              : "r" (gen_func)
                              : "r1", "r2", "r3", "r8", "r9", "r10", "r12", "r14");
#elif defined(__ia64)
                struct fptr {
                    void *ip;
                    void *gp;
                } fp;

                fp.ip = tc_ptr;
                fp.gp = code_gen_buffer + 2 * (1 << 20);
                (*(void (*)(void)) &fp)();
#else
                gen_func();
#endif
                env->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(TARGET_I386) && !defined(CONFIG_SOFTMMU)
                if (env->hflags & HF_SOFTMMU_MASK) {
                    env->hflags &= ~HF_SOFTMMU_MASK;
                    /* do not allow linking to another block */
                    T0 = 0;
                }
#endif
#if defined(USE_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* for(;;) */
        } else {
            env_to_regs();
        }
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    REGWPTR = saved_regwptr;
#endif
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_IA64)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
    RESTORE_GLOBALS();
#include "hostregs_helper.h"

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}

/* must only be called from the generated code as
   an exception can be generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}

#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */

#if !defined(CONFIG_SOFTMMU)
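
/* User-mode only: each target defines a handle_cpu_signal() variant
   that sorts host faults into three cases: a write to a page QEMU
   write-protected to catch self-modifying code (undone by
   page_unprotect), a guest MMU fault to be reflected into the
   emulated CPU, or a real fault inside translated code. */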

#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
}
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
}
#elif defined (TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined (TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined (TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined (TARGET_ALPHA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_CRIS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#else
#error unsupported target CPU
#endif
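
/* The host-side cpu_signal_handler() implementations below extract
   the faulting PC, the fault address and, where the host context
   provides one, a write flag from the platform-specific signal frame,
   then forward them with the saved signal mask to handle_cpu_signal(). */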

#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)  (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)    ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext->es.err)
#else
# define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
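    /* Trap 0xe is the x86 page-fault exception; bit 1 of its error
       code is set for write accesses, so it supplies the is_write
       argument below. */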
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}

#elif defined(__x86_64__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;

    pc = uc->uc_mcontext.gregs[REG_RIP];
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
                             (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}

#elif defined(__powerpc__)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)      ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)       REG_sig(gpr[reg_num], context)
# define IAR_sig(context)                REG_sig(nip, context)   /* Program counter */
# define MSR_sig(context)                REG_sig(msr, context)   /* Machine State Register (Supervisor) */
# define CTR_sig(context)                REG_sig(ctr, context)   /* Count register */
# define XER_sig(context)                REG_sig(xer, context)   /* User's integer exception register */
# define LR_sig(context)                 REG_sig(link, context)  /* Link register */
# define CR_sig(context)                 REG_sig(ccr, context)   /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)     (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)              (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)                REG_sig(dar, context)
# define DSISR_sig(context)              REG_sig(dsisr, context)
# define TRAP_sig(context)               REG_sig(trap, context)
#endif /* linux */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)      ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)   ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)       REG_sig(r##reg_num, context)
# define IAR_sig(context)                REG_sig(srr0, context)  /* Program counter */
# define MSR_sig(context)                REG_sig(srr1, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)                REG_sig(ctr, context)   /* Count register */
# define XER_sig(context)                REG_sig(xer, context)   /* User's integer exception register */
# define LR_sig(context)                 REG_sig(lr, context)    /* Link register */
# define CR_sig(context)                 REG_sig(cr, context)    /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)     FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)              ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)                EXCEPREG_sig(dar, context)       /* Fault registers for coredump */
# define DSISR_sig(context)              EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)               EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    unsigned long pc;
    int is_write;
    uint32_t insn;

    /* XXX: is there a standard glibc define ? */
    pc = regs[1];
    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x06: // sth
        case 0x04: // st
        case 0x07: // std
        case 0x24: // stf
        case 0x27: // stdf
        case 0x25: // stfsr
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}

#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[R15];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33: */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */