Fix timer rearm fallout from last qemu merge
[qemu-kvm/fedora.git] / cpu-exec.c
blob 9d05ef9f384ea9bf0fc32a2527823d3064b9a506
/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#include "exec.h"
#include "disas.h"
#include <string.h>

#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#include <sys/ucontext.h>
#endif

#include "qemu-kvm.h"
int tb_invalidated_flag;

//#define DEBUG_EXEC
//#define DEBUG_SIGNAL

/* translation settings */
int translation_settings = 0;

#define SAVE_GLOBALS()
#define RESTORE_GLOBALS()
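
/* SAVE_GLOBALS/RESTORE_GLOBALS are no-ops by default; they are only
   redefined below, for sparc hosts whose glibc versions mangle global
   register variables across setjmp/longjmp. */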
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#include <features.h>
#if defined(__GLIBC__) && ((__GLIBC__ < 2) || \
                           ((__GLIBC__ == 2) && (__GLIBC_MINOR__ <= 90)))
// Work around ugly bugs in glibc that mangle global register contents
static volatile void *saved_env;
static volatile unsigned long saved_t0, saved_i7;
#undef SAVE_GLOBALS
#define SAVE_GLOBALS() do {                                     \
        saved_env = env;                                        \
        saved_t0 = T0;                                          \
        asm volatile ("st %%i7, [%0]" : : "r" (&saved_i7));     \
    } while(0)

#undef RESTORE_GLOBALS
#define RESTORE_GLOBALS() do {                                  \
        env = (void *)saved_env;                                \
        T0 = saved_t0;                                          \
        asm volatile ("ld [%0], %%i7" : : "r" (&saved_i7));     \
    } while(0)

static int sparc_setjmp(jmp_buf buf)
{
    int ret;

    SAVE_GLOBALS();
    ret = setjmp(buf);
    RESTORE_GLOBALS();
    return ret;
}
#undef setjmp
#define setjmp(jmp_buf) sparc_setjmp(jmp_buf)

static void sparc_longjmp(jmp_buf buf, int val)
{
    SAVE_GLOBALS();
    longjmp(buf, val);
}
#define longjmp(jmp_buf, val) sparc_longjmp(jmp_buf, val)
#endif
#endif
void cpu_loop_exit(void)
{
    /* NOTE: the registers must be saved by hand at this point, because
       longjmp restores them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}
#if !(defined(TARGET_SPARC) || defined(TARGET_SH4) || defined(TARGET_M68K))
#define reg_T2
#endif

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    struct ucontext *uc = puc;
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
    }
#endif
    longjmp(env->jmp_env, 1);
}
CPUTranslationSetting cpu_translation_settings[] = {
    { CPU_SETTING_NO_CACHE, "no-cache",
      "Do not use translation blocks cache (very slow!)" },
    { 0, NULL, NULL },
};

void cpu_set_translation_settings(int translation_flags)
{
    translation_settings = translation_flags;
}

static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}
/* Takes a comma-separated list of translation settings. Returns 0 on
   error. */
int cpu_str_to_translation_mask(const char *str)
{
    CPUTranslationSetting *setting;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if (cmp1(p, p1 - p, "all")) {
            for(setting = cpu_translation_settings; setting->mask != 0; setting++) {
                mask |= setting->mask;
            }
        } else {
            for(setting = cpu_translation_settings; setting->mask != 0; setting++) {
                if (cmp1(p, p1 - p, setting->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= setting->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}
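
/* tb_find_slow: second-level TB lookup. TBs are hashed on the
   *physical* PC (together with cs_base and flags) so that a cached
   translation survives changes of virtual mappings; on a miss, the
   guest code is translated now and the new TB is linked into the
   physical hash chain. */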
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    int code_gen_size;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;
    uint8_t *tc_ptr;

    spin_lock(&tb_lock);

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if (translation_settings & CPU_SETTING_NO_CACHE)
        goto not_found;

    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* don't forget to invalidate previous TB info */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    SAVE_GLOBALS();
    cpu_gen_code(env, tb, &code_gen_size);
    RESTORE_GLOBALS();
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    spin_unlock(&tb_lock);
    return tb;
}
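
/* tb_find_fast: first-level TB lookup. Each target snapshots the bits
   of CPU state that translation depends on into 'flags', then probes
   the direct-mapped tb_jmp_cache indexed by virtual PC, falling back
   to tb_find_slow() on a miss. */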
static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint64_t flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
#if defined(TARGET_I386)
    flags = env->hflags;
    flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
    flags |= env->intercept;
    cs_base = env->segs[R_CS].base;
    pc = cs_base + env->eip;
#elif defined(TARGET_ARM)
    flags = env->thumb | (env->vfp.vec_len << 1)
            | (env->vfp.vec_stride << 4);
    if ((env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR)
        flags |= (1 << 6);
    if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30))
        flags |= (1 << 7);
    flags |= (env->condexec_bits << 8);
    cs_base = 0;
    pc = env->regs[15];
#elif defined(TARGET_SPARC)
#ifdef TARGET_SPARC64
    // Combined FPU enable bits . PRIV . DMMU enabled . IMMU enabled
    flags = (((env->pstate & PS_PEF) >> 1) | ((env->fprs & FPRS_FEF) << 2))
        | (env->pstate & PS_PRIV) | ((env->lsu & (DMMU_E | IMMU_E)) >> 2);
#else
    // FPU enable . Supervisor
    flags = (env->psref << 4) | env->psrs;
#endif
    cs_base = env->npc;
    pc = env->pc;
#elif defined(TARGET_PPC)
    flags = env->hflags;
    cs_base = 0;
    pc = env->nip;
#elif defined(TARGET_MIPS)
    flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK);
    cs_base = 0;
    pc = env->PC[env->current_tc];
#elif defined(TARGET_M68K)
    flags = (env->fpcr & M68K_FPCR_PREC)  /* Bit  6 */
            | (env->sr & SR_S)            /* Bit 13 */
            | ((env->macsr >> 4) & 0xf);  /* Bits 0-3 */
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_SH4)
    flags = env->flags;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_ALPHA)
    flags = env->ps;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_CRIS)
    flags = 0;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_IA64)
    flags = 0;
    cs_base = 0; /* XXXXX */
    pc = 0;
#else
#error unsupported CPU
#endif
    if (translation_settings & CPU_SETTING_NO_CACHE)
        tb = NULL;
    else
        tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (__builtin_expect(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                         tb->flags != flags, 0)) {
        tb = tb_find_slow(pc, cs_base, flags);
        /* Note: we do it here to avoid a gcc bug on Mac OS X when
           doing it in tb_find_slow */
        if (tb_invalidated_flag) {
            /* as some TB could have been invalidated because
               of memory exceptions while generating the code, we
               must recompute the hash index here */
            T0 = 0;
        }
    }
    return tb;
}
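
/* While chaining TBs, T0 holds a pointer to the previously executed TB,
   with the jump slot index in its low two bits (see tb_add_jump() in
   cpu_exec below). Clearing T0 therefore prevents the next TB from
   being patched into the previous one. */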
#define BREAK_CHAIN T0 = 0
/* main execution loop */

int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
#if defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    uint32_t *saved_regwptr;
#endif
#endif
    int ret, interrupt_request;
    void (*gen_func)(void);
    TranslationBlock *tb;
    uint8_t *tc_ptr;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;
    SAVE_GLOBALS();

    env_to_regs();
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    saved_regwptr = REGWPTR;
#endif
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_IA64)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;
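
    /* cpu_loop_exit() and the fault handlers longjmp back to the
       setjmp below; the outer for(;;) then re-dispatches, either
       delivering the pending exception or resuming execution. */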
    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    break;
                } else if (env->user_mode_only) {
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
#endif
                    ret = env->exception_index;
                    break;
                } else {
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env->exception_index);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#elif defined(TARGET_IA64)
                    do_interrupt(env);
#endif
                }
                env->exception_index = -1;
            }
#ifdef USE_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0) {
                int ret;
                env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif
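
            /* With KVM, guest code runs inside the kernel module rather
               than through the translator; the longjmp after
               kvm_cpu_exec() restarts the outer loop so that pending
               exceptions and interrupt requests are re-examined. */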
            if (kvm_enabled()) {
                kvm_cpu_exec(env);
                longjmp(env->jmp_env, 1);
            }

            T0 = 0; /* force lookup of first TB */
            for(;;) {
                SAVE_GLOBALS();
                interrupt_request = env->interrupt_request;
                if (__builtin_expect(interrupt_request, 0)
#if defined(TARGET_I386)
                    && env->hflags & HF_GIF_MASK
#endif
                    ) {
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                        !(env->hflags & HF_SMM_MASK)) {
                        svm_check_intercept(SVM_EXIT_SMI);
                        env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                        do_smm_enter();
                        BREAK_CHAIN;
                    } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                               (env->eflags & IF_MASK || env->hflags & HF_HIF_MASK) &&
                               !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                        int intno;
                        svm_check_intercept(SVM_EXIT_INTR);
                        env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                        intno = cpu_get_pic_interrupt(env);
                        if (loglevel & CPU_LOG_TB_IN_ASM) {
                            fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
                        }
                        do_interrupt(intno, 0, 0, 0, 1);
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        BREAK_CHAIN;
#if !defined(CONFIG_USER_ONLY)
                    } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                               (env->eflags & IF_MASK) && !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                        int intno;
                        /* FIXME: this should respect TPR */
                        env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                        svm_check_intercept(SVM_EXIT_VINTR);
                        intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                        if (loglevel & CPU_LOG_TB_IN_ASM)
                            fprintf(logfile, "Servicing virtual hardware INT=0x%02x\n", intno);
                        do_interrupt(intno, 0, 0, -1, 1);
                        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl),
                                 ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl)) & ~V_IRQ_MASK);
                        BREAK_CHAIN;
#endif
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        BREAK_CHAIN;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        BREAK_CHAIN;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psret != 0)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            do_interrupt(env->interrupt_index);
                            env->interrupt_index = 0;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
                            BREAK_CHAIN;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        BREAK_CHAIN;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC. On real hardware the load causes the
                       return to occur. The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address. */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        BREAK_CHAIN;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        BREAK_CHAIN;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        BREAK_CHAIN;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        BREAK_CHAIN;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point. Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled. */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        BREAK_CHAIN;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        BREAK_CHAIN;
                    }
                    if (interrupt_request & CPU_INTERRUPT_EXIT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
                        env->exception_index = EXCP_INTERRUPT;
                        cpu_loop_exit();
                    }
                }
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
                    regs_to_env();
#if defined(TARGET_I386)
                    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                    cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SPARC)
                    REGWPTR = env->regbase + (env->cwp * 16);
                    env->regwptr = REGWPTR;
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_PPC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_MIPS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SH4)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_ALPHA)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_CRIS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                tb = tb_find_fast();
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_EXEC)) {
                    fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                            (long)tb->tc_ptr, tb->pc,
                            lookup_symbol(tb->pc));
                }
#endif
                RESTORE_GLOBALS();
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
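                /* Direct block chaining: the exit jump of the previously
                   executed TB is rewritten to branch straight to this
                   TB's generated code, bypassing this lookup loop on the
                   next pass. */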
                if (T0 != 0 &&
#if USE_KQEMU
                    (env->kqemu_enabled != 2) &&
#endif
                    tb->page_addr[1] == -1) {
                    spin_lock(&tb_lock);
                    tb_add_jump((TranslationBlock *)(long)(T0 & ~3), T0 & 3, tb);
                    spin_unlock(&tb_lock);
                }
                tc_ptr = tb->tc_ptr;
                env->current_tb = tb;
                /* execute the generated code */
                gen_func = (void *)tc_ptr;
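                /* Most hosts enter the translated code with a plain
                   indirect call; sparc uses inline asm to make the
                   register clobbers explicit, and ia64 must build a
                   function descriptor (entry point + gp) before the
                   indirect call. */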
#if defined(__sparc__)
                __asm__ __volatile__("call %0\n\t"
                                     "mov %%o7,%%i0"
                                     : /* no outputs */
                                     : "r" (gen_func)
                                     : "i0", "i1", "i2", "i3", "i4", "i5",
                                       "o0", "o1", "o2", "o3", "o4", "o5",
                                       "l0", "l1", "l2", "l3", "l4", "l5",
                                       "l6", "l7");
#elif defined(__arm__)
                asm volatile ("mov pc, %0\n\t"
                              ".global exec_loop\n\t"
                              "exec_loop:\n\t"
                              : /* no outputs */
                              : "r" (gen_func)
                              : "r1", "r2", "r3", "r8", "r9", "r10", "r12", "r14");
#elif defined(__ia64)
                struct fptr {
                    void *ip;
                    void *gp;
                } fp;

                fp.ip = tc_ptr;
                fp.gp = code_gen_buffer + 2 * (1 << 20);
                (*(void (*)(void)) &fp)();
#else
                gen_func();
#endif
                env->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(TARGET_I386) && !defined(CONFIG_SOFTMMU)
                if (env->hflags & HF_SOFTMMU_MASK) {
                    env->hflags &= ~HF_SOFTMMU_MASK;
                    /* do not allow linking to another block */
                    T0 = 0;
                }
#endif
#if defined(USE_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* for(;;) */
        } else {
            env_to_regs();
        }
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    REGWPTR = saved_regwptr;
#endif
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_IA64)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
    RESTORE_GLOBALS();
#include "hostregs_helper.h"

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}
/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}
#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */
#if !defined(CONFIG_SOFTMMU)
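
/* User-mode emulation without a soft MMU: guest memory accesses are
   host memory accesses, so guest faults arrive as host signals. The
   per-target handle_cpu_signal() variants below decide whether a
   SIGSEGV belongs to the guest (a page to unprotect, or an MMU fault
   to deliver) or must be reported to the host (return 0). */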
#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception, 0 otherwise. 'old_set' is the signal set
   which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
}
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
}
#elif defined (TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined (TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_ALPHA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_CRIS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#else
#error unsupported target CPU
#endif
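
/* Host-side signal dispatch: each host architecture below extracts the
   faulting PC, the fault address and (where the host provides it) a
   write/read flag from its signal context, then forwards them to
   handle_cpu_signal(). */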
#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)   (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)  ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context) ((context)->uc_mcontext->es.err)
#else
# define EIP_sig(context)   ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)  ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}
#elif defined(__x86_64__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;

    pc = uc->uc_mcontext.gregs[REG_RIP];
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
                             (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}
#elif defined(__powerpc__)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)     ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)      REG_sig(gpr[reg_num], context)
# define IAR_sig(context)               REG_sig(nip, context)  /* Program counter */
# define MSR_sig(context)               REG_sig(msr, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)               REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)               REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                REG_sig(link, context) /* Link register */
# define CR_sig(context)                REG_sig(ccr, context)  /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)    (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)             (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)               REG_sig(dar, context)
# define DSISR_sig(context)             REG_sig(dsisr, context)
# define TRAP_sig(context)              REG_sig(trap, context)
#endif /* linux */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)      ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)   ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)      REG_sig(r##reg_num, context)
# define IAR_sig(context)               REG_sig(srr0, context) /* Program counter */
# define MSR_sig(context)               REG_sig(srr1, context) /* Machine State Register (Supervisor) */
# define CTR_sig(context)               REG_sig(ctr, context)
# define XER_sig(context)               REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                REG_sig(lr, context)   /* Link register */
# define CR_sig(context)                REG_sig(cr, context)   /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)    FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)             ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)               EXCEPREG_sig(dar, context) /* Fault registers for coredump */
# define DSISR_sig(context)             EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)              EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    unsigned long pc;
    int is_write;
    uint32_t insn;

    /* XXX: is there a standard glibc define ? */
    pc = regs[1];
    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x06: // sth
        case 0x04: // st
        case 0x07: // std
        case 0x24: // stf
        case 0x27: // stdf
        case 0x25: // stfsr
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}
#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[R15];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33: */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */