Proper vm_stop on debug events
[qemu-kvm/fedora.git] / cpu-exec.c
blob a7f8e135fe79a2bbb537a208a00d4f65f3ac66ff
/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#include "exec.h"
#include "disas.h"

#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#include <sys/ucontext.h>
#endif
#include "qemu-kvm.h"

int tb_invalidated_flag;
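/* Encodes the last executed TB: the TB pointer with the taken-jump slot
   index in the low 2 bits (see the tb_add_jump() call in cpu_exec()).
   A value of 0 forces a full TB lookup instead of direct chaining. */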
static unsigned long next_tb;

//#define DEBUG_EXEC
//#define DEBUG_SIGNAL
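/* SAVE_GLOBALS/RESTORE_GLOBALS default to no-ops; they are redefined
   below to work around the sparc glibc bug that clobbers global
   registers. */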
#define SAVE_GLOBALS()
#define RESTORE_GLOBALS()

#if defined(__sparc__) && !defined(HOST_SOLARIS)
#include <features.h>
#if defined(__GLIBC__) && ((__GLIBC__ < 2) || \
                           ((__GLIBC__ == 2) && (__GLIBC_MINOR__ <= 90)))
// Work around ugly bugs in glibc that mangle global register contents

static volatile void *saved_env;
static volatile unsigned long saved_t0, saved_i7;
#undef SAVE_GLOBALS
#define SAVE_GLOBALS() do {                                     \
        saved_env = env;                                        \
        saved_t0 = T0;                                          \
        asm volatile ("st %%i7, [%0]" : : "r" (&saved_i7));     \
    } while(0)

#undef RESTORE_GLOBALS
#define RESTORE_GLOBALS() do {                                  \
        env = (void *)saved_env;                                \
        T0 = saved_t0;                                          \
        asm volatile ("ld [%0], %%i7" : : "r" (&saved_i7));     \
    } while(0)
static int sparc_setjmp(jmp_buf buf)
{
    int ret;

    SAVE_GLOBALS();
    ret = setjmp(buf);
    RESTORE_GLOBALS();
    return ret;
}
#undef setjmp
#define setjmp(jmp_buf) sparc_setjmp(jmp_buf)

static void sparc_longjmp(jmp_buf buf, int val)
{
    SAVE_GLOBALS();
    longjmp(buf, val);
}
#define longjmp(jmp_buf, val) sparc_longjmp(jmp_buf, val)
#endif
#endif

void cpu_loop_exit(void)
{
    /* NOTE: the guest registers must be saved by hand here because
       longjmp will restore the host registers, discarding any guest
       state cached in them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}

#if !(defined(TARGET_SPARC) || defined(TARGET_SH4) || defined(TARGET_M68K))
#define reg_T2
#endif
/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    struct ucontext *uc = puc;
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
    }
#endif
    longjmp(env->jmp_env, 1);
}
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    int code_gen_size;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;
    uint8_t *tc_ptr;

    spin_lock(&tb_lock);

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* don't forget to invalidate previous TB info */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    SAVE_GLOBALS();
    cpu_gen_code(env, tb, &code_gen_size);
    RESTORE_GLOBALS();
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    spin_unlock(&tb_lock);
    return tb;
}
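/* Fast TB lookup: tb_jmp_cache is a direct-mapped cache indexed by the
   guest virtual PC; on a miss or a flags/cs_base mismatch we fall back
   to tb_find_slow(). */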
static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint64_t flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
#if defined(TARGET_I386)
    flags = env->hflags;
    flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
    flags |= env->intercept;
    cs_base = env->segs[R_CS].base;
    pc = cs_base + env->eip;
#elif defined(TARGET_ARM)
    flags = env->thumb | (env->vfp.vec_len << 1)
        | (env->vfp.vec_stride << 4);
    if ((env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR)
        flags |= (1 << 6);
    if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30))
        flags |= (1 << 7);
    flags |= (env->condexec_bits << 8);
    cs_base = 0;
    pc = env->regs[15];
#elif defined(TARGET_SPARC)
#ifdef TARGET_SPARC64
    // Combined FPU enable bits . PRIV . DMMU enabled . IMMU enabled
    flags = (((env->pstate & PS_PEF) >> 1) | ((env->fprs & FPRS_FEF) << 2))
        | (env->pstate & PS_PRIV) | ((env->lsu & (DMMU_E | IMMU_E)) >> 2);
#else
    // FPU enable . Supervisor
    flags = (env->psref << 4) | env->psrs;
#endif
    cs_base = env->npc;
    pc = env->pc;
#elif defined(TARGET_PPC)
    flags = env->hflags;
    cs_base = 0;
    pc = env->nip;
#elif defined(TARGET_MIPS)
    flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK);
    cs_base = 0;
    pc = env->PC[env->current_tc];
#elif defined(TARGET_M68K)
    flags = (env->fpcr & M68K_FPCR_PREC)  /* Bit  6 */
            | (env->sr & SR_S)            /* Bit  13 */
            | ((env->macsr >> 4) & 0xf);  /* Bits 0-3 */
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_SH4)
    flags = env->flags;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_ALPHA)
    flags = env->ps;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_CRIS)
    flags = env->pregs[PR_CCS] & U_FLAG;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_IA64)
    flags = 0;
    cs_base = 0;                    /* XXXXX */
    pc = 0;
#else
#error unsupported CPU
#endif
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (__builtin_expect(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                         tb->flags != flags, 0)) {
        tb = tb_find_slow(pc, cs_base, flags);
        /* Note: we do it here to avoid a gcc bug on Mac OS X when
           doing it in tb_find_slow */
        if (tb_invalidated_flag) {
            /* as some TB could have been invalidated because
               of memory exceptions while generating the code, we
               must recompute the hash index here */
            next_tb = 0;
        }
    }
    return tb;
}
/* main execution loop */

int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
#if defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    uint32_t *saved_regwptr;
#endif
#endif
    int ret, interrupt_request;
    unsigned long (*gen_func)(void);
    TranslationBlock *tb;
    uint8_t *tc_ptr;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;
    SAVE_GLOBALS();

    env_to_regs();
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
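    /* DF caches the direction flag as the increment used by string
       operations: +1 or -1 */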
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    saved_regwptr = REGWPTR;
#endif
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_IA64)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;
    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    break;
                } else if (env->user_mode_only) {
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
#endif
                    ret = env->exception_index;
                    break;
                } else {
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env->exception_index);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#elif defined(TARGET_IA64)
                    do_interrupt(env);
#endif
                }
                env->exception_index = -1;
            }
#ifdef USE_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0) {
                int ret;
                env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif
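            /* With KVM the guest executes in the kernel; afterwards we
               jump back to the top of the loop so any pending exception
               or exit request is handled there. */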
            if (kvm_enabled()) {
                kvm_cpu_exec(env);
                longjmp(env->jmp_env, 1);
            }
            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                SAVE_GLOBALS();
                interrupt_request = env->interrupt_request;
                if (__builtin_expect(interrupt_request, 0)
#if defined(TARGET_I386)
                    && env->hflags & HF_GIF_MASK
#endif
                    ) {
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                        !(env->hflags & HF_SMM_MASK)) {
                        svm_check_intercept(SVM_EXIT_SMI);
                        env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                        do_smm_enter();
                        next_tb = 0;
                    } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                        !(env->hflags & HF_NMI_MASK)) {
                        env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                        env->hflags |= HF_NMI_MASK;
                        do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                        next_tb = 0;
                    } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->eflags & IF_MASK || env->hflags & HF_HIF_MASK) &&
                        !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                        int intno;
                        svm_check_intercept(SVM_EXIT_INTR);
                        env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                        intno = cpu_get_pic_interrupt(env);
                        if (loglevel & CPU_LOG_TB_IN_ASM) {
                            fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
                        }
                        do_interrupt(intno, 0, 0, 0, 1);
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                    } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                               (env->eflags & IF_MASK) && !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                        int intno;
                        /* FIXME: this should respect TPR */
                        env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                        svm_check_intercept(SVM_EXIT_VINTR);
                        intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                        if (loglevel & CPU_LOG_TB_IN_ASM)
                            fprintf(logfile, "Servicing virtual hardware INT=0x%02x\n", intno);
                        do_interrupt(intno, 0, 0, -1, 1);
                        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl),
                                 ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl)) & ~V_IRQ_MASK);
                        next_tb = 0;
#endif
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psret != 0)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            do_interrupt(env->interrupt_index);
                            env->interrupt_index = 0;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
                            next_tb = 0;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_EXIT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
                        env->exception_index = EXCP_INTERRUPT;
                        cpu_loop_exit();
                    }
                }
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
                    regs_to_env();
#if defined(TARGET_I386)
                    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                    cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SPARC)
                    REGWPTR = env->regbase + (env->cwp * 16);
                    env->regwptr = REGWPTR;
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_PPC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_MIPS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SH4)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_ALPHA)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_CRIS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                tb = tb_find_fast();
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_EXEC)) {
                    fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                            (long)tb->tc_ptr, tb->pc,
                            lookup_symbol(tb->pc));
                }
#endif
                RESTORE_GLOBALS();
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */

                if (next_tb != 0 &&
#if USE_KQEMU
                    (env->kqemu_enabled != 2) &&
#endif
                    tb->page_addr[1] == -1) {
                    spin_lock(&tb_lock);
                    tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                    spin_unlock(&tb_lock);
                }
                tc_ptr = tb->tc_ptr;
                env->current_tb = tb;
                /* execute the generated code */
                gen_func = (void *)tc_ptr;
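                /* Each host below uses its own call sequence: the
                   generated code hands the TB-chaining word (next_tb)
                   back in a host-specific register, and the clobber
                   lists cover the registers that cache guest state. */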
#if defined(__sparc__)
                __asm__ __volatile__("call %0\n\t"
                                     "mov %%o7,%%i0"
                                     : /* no outputs */
                                     : "r" (gen_func)
                                     : "i0", "i1", "i2", "i3", "i4", "i5",
                                       "o0", "o1", "o2", "o3", "o4", "o5",
                                       "l0", "l1", "l2", "l3", "l4", "l5",
                                       "l6", "l7");
#elif defined(__hppa__)
                asm volatile ("ble  0(%%sr4,%1)\n"
                              "copy %%r31,%%r18\n"
                              "copy %%r28,%0\n"
                              : "=r" (next_tb)
                              : "r" (gen_func)
                              : "r1", "r2", "r3", "r4", "r5", "r6", "r7",
                                "r8", "r9", "r10", "r11", "r12", "r13",
                                "r18", "r19", "r20", "r21", "r22", "r23",
                                "r24", "r25", "r26", "r27", "r28", "r29",
                                "r30", "r31");
#elif defined(__arm__)
                asm volatile ("mov pc, %0\n\t"
                              ".global exec_loop\n\t"
                              "exec_loop:\n\t"
                              : /* no outputs */
                              : "r" (gen_func)
                              : "r1", "r2", "r3", "r8", "r9", "r10", "r12", "r14");
#elif defined(__ia64)
                struct fptr {
                    void *ip;
                    void *gp;
                } fp;

                fp.ip = tc_ptr;
                fp.gp = code_gen_buffer + 2 * (1 << 20);
                (*(void (*)(void)) &fp)();
#elif defined(__i386)
                asm volatile ("sub $12, %%esp\n\t"
                              "push %%ebp\n\t"
                              "call *%1\n\t"
                              "pop %%ebp\n\t"
                              "add $12, %%esp\n\t"
                              : "=a" (next_tb)
                              : "a" (gen_func)
                              : "ebx", "ecx", "edx", "esi", "edi", "cc",
                                "memory");
#elif defined(__x86_64__)
                asm volatile ("sub $8, %%rsp\n\t"
                              "push %%rbp\n\t"
                              "call *%1\n\t"
                              "pop %%rbp\n\t"
                              "add $8, %%rsp\n\t"
                              : "=a" (next_tb)
                              : "a" (gen_func)
                              : "rbx", "rcx", "rdx", "rsi", "rdi", "r8", "r9",
                                "r10", "r11", "r12", "r13", "r14", "r15", "cc",
                                "memory");
#else
                next_tb = gen_func();
#endif
                env->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(TARGET_I386) && !defined(CONFIG_SOFTMMU)
                if (env->hflags & HF_SOFTMMU_MASK) {
                    env->hflags &= ~HF_SOFTMMU_MASK;
                    /* do not allow linking to another block */
                    next_tb = 0;
                }
#endif
#if defined(USE_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* for(;;) */
        } else {
            env_to_regs();
        }
    } /* for(;;) */
#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    REGWPTR = saved_regwptr;
#endif
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_IA64)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
    RESTORE_GLOBALS();
#include "hostregs_helper.h"

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}
/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}
#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)
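/* These user-mode wrappers temporarily install the given CPU state as
   the global 'env' that the helper routines operate on, then restore
   the caller's value. */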
void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */
#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0. 'old_set' is the
   signal set which should be restored */
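/* Returns 1 if the fault was handled (the page was unprotected, or the
   fault is delivered to the guest, in which case this call does not
   return), and 0 if the host should handle the signal itself. */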
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined (TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_ALPHA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_CRIS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#else
#error unsupported target CPU
#endif
#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)   (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)  ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context) ((context)->uc_mcontext->es.err)
#else
# define EIP_sig(context)   ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)  ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
#endif
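/* Host-side signal handler for the i386 host: extract the faulting PC
   and, for page faults (trap 0xe), the write-access bit (bit 1 of the
   page-fault error code) from the signal context. */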
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}
#elif defined(__x86_64__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;

    pc = uc->uc_mcontext.gregs[REG_RIP];
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
                             (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}
#elif defined(__powerpc__)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)   ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)    REG_sig(gpr[reg_num], context)
# define IAR_sig(context)             REG_sig(nip, context)  /* Program counter */
# define MSR_sig(context)             REG_sig(msr, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)             REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)             REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)              REG_sig(link, context) /* Link register */
# define CR_sig(context)              REG_sig(ccr, context)  /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)  (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)           (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)             REG_sig(dar, context)
# define DSISR_sig(context)           REG_sig(dsisr, context)
# define TRAP_sig(context)            REG_sig(trap, context)
#endif /* linux */
#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)      ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)   ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)       REG_sig(r##reg_num, context)
# define IAR_sig(context)                REG_sig(srr0, context) /* Program counter */
# define MSR_sig(context)                REG_sig(srr1, context) /* Machine State Register (Supervisor) */
# define CTR_sig(context)                REG_sig(ctr, context)
# define XER_sig(context)                REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                 REG_sig(lr, context)   /* Link register */
# define CR_sig(context)                 REG_sig(cr, context)   /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)     FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)              ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)                EXCEPREG_sig(dar, context) /* Fault registers for coredump */
# define DSISR_sig(context)              EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)               EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    unsigned long pc;
    int is_write;
    uint32_t insn;

    /* XXX: is there a standard glibc define ? */
    pc = regs[1];
    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
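    /* SPARC format-3 (memory) instructions have the top two bits equal
       to 3; the op3 field in bits 19-24 selects the store variants
       matched below. */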
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x06: // sth
        case 0x04: // st
        case 0x07: // std
        case 0x24: // stf
        case 0x27: // stdf
        case 0x25: // stfsr
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}
#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.arm_pc;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33: */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.sc_iaoq[0];
    /* FIXME: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */