/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#include "exec.h"
#include "disas.h"
#include <string.h>

#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#include <sys/ucontext.h>
#endif

#ifdef USE_KVM
#include "qemu-kvm.h"
extern int kvm_allowed;
#endif

int tb_invalidated_flag;

//#define DEBUG_EXEC
//#define DEBUG_SIGNAL

/* translation settings */
int translation_settings = 0;

#define SAVE_GLOBALS()
#define RESTORE_GLOBALS()
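
/* On most hosts the two macros above stay empty: env, T0 and friends
   either live in ordinary memory or in registers that setjmp()/longjmp()
   already preserve.  The sparc/glibc special case below overrides them. */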
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#include <features.h>
#if defined(__GLIBC__) && ((__GLIBC__ < 2) || \
                           ((__GLIBC__ == 2) && (__GLIBC_MINOR__ <= 90)))
// Work around ugly bugs in glibc that mangle global register contents
static volatile void *saved_env;
static volatile unsigned long saved_t0, saved_i7;
#undef SAVE_GLOBALS
#define SAVE_GLOBALS() do {                                     \
        saved_env = env;                                        \
        saved_t0 = T0;                                          \
        asm volatile ("st %%i7, [%0]" : : "r" (&saved_i7));     \
    } while(0)

#undef RESTORE_GLOBALS
#define RESTORE_GLOBALS() do {                                  \
        env = (void *)saved_env;                                \
        T0 = saved_t0;                                          \
        asm volatile ("ld [%0], %%i7" : : "r" (&saved_i7));     \
    } while(0)

static int sparc_setjmp(jmp_buf buf)
{
    int ret;

    SAVE_GLOBALS();
    ret = setjmp(buf);
    RESTORE_GLOBALS();
    return ret;
}

#undef setjmp
#define setjmp(jmp_buf) sparc_setjmp(jmp_buf)

static void sparc_longjmp(jmp_buf buf, int val)
{
    SAVE_GLOBALS();
    longjmp(buf, val);
}
#define longjmp(jmp_buf, val) sparc_longjmp(jmp_buf, val)
#endif
#endif
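
/* From here on, every setjmp()/longjmp() in this file transparently goes
   through the wrappers above on affected sparc hosts (%i7 is the sparc
   return-address register), so the exception handling in cpu_exec()
   needs no host-specific code. */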
void cpu_loop_exit(void)
{
    /* NOTE: the registers at this point must be saved by hand because
       longjmp restores them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}

#if !(defined(TARGET_SPARC) || defined(TARGET_SH4) || defined(TARGET_M68K))
#define reg_T2
#endif
/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    struct ucontext *uc = puc;
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
    }
#endif
    longjmp(env->jmp_env, 1);
}
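
/* Both exit paths land on the setjmp(env->jmp_env) in cpu_exec():
   longjmp with value 1 makes that setjmp return non-zero, which falls
   through to env_to_regs() and restarts the outer for(;;) loop,
   re-checking pending exceptions and interrupt requests. */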
CPUTranslationSetting cpu_translation_settings[] = {
    { CPU_SETTING_NO_CACHE, "no-cache",
      "Do not use translation blocks cache (very slow!)" },
    { 0, NULL, NULL },
};

void cpu_set_translation_settings(int translation_flags)
{
    translation_settings = translation_flags;
}
static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}
/* takes a comma separated list of translation settings. Return 0 if error. */
int cpu_str_to_translation_mask(const char *str)
{
    CPUTranslationSetting *setting;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if(cmp1(p,p1-p,"all")) {
            for(setting = cpu_translation_settings; setting->mask != 0; setting++) {
                mask |= setting->mask;
            }
        } else {
            for(setting = cpu_translation_settings; setting->mask != 0; setting++) {
                if (cmp1(p, p1 - p, setting->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= setting->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}
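
/* Usage sketch (not called from this file; a hypothetical
   "-translation no-cache" command line option would do this):

       int mask = cpu_str_to_translation_mask("no-cache");
       if (!mask)
           error("invalid translation setting");
       cpu_set_translation_settings(mask);

   Setting CPU_SETTING_NO_CACHE makes tb_find_slow() below retranslate
   every block instead of hitting the caches. */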
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    int code_gen_size;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;
    uint8_t *tc_ptr;

    spin_lock(&tb_lock);

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if (translation_settings & CPU_SETTING_NO_CACHE)
        goto not_found;

    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* don't forget to invalidate previous TB info */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    SAVE_GLOBALS();
    cpu_gen_code(env, tb, &code_gen_size);
    RESTORE_GLOBALS();
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    spin_unlock(&tb_lock);
    return tb;
}
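
/* TB lookup is two-level: tb_find_fast() below first probes the small
   per-CPU tb_jmp_cache, indexed by virtual PC; only on a miss does it
   fall back to tb_find_slow(), which walks the physical-address hash
   chain (keyed on phys_pc so the same guest-physical code page is
   shared across virtual mappings) and translates the block if it is
   not found there either. */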
static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint64_t flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
#if defined(TARGET_I386)
    flags = env->hflags;
    flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
    flags |= env->intercept;
    cs_base = env->segs[R_CS].base;
    pc = cs_base + env->eip;
#elif defined(TARGET_ARM)
    flags = env->thumb | (env->vfp.vec_len << 1)
        | (env->vfp.vec_stride << 4);
    if ((env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR)
        flags |= (1 << 6);
    if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30))
        flags |= (1 << 7);
    flags |= (env->condexec_bits << 8);
    cs_base = 0;
    pc = env->regs[15];
#elif defined(TARGET_SPARC)
#ifdef TARGET_SPARC64
    // Combined FPU enable bits . PRIV . DMMU enabled . IMMU enabled
    flags = (((env->pstate & PS_PEF) >> 1) | ((env->fprs & FPRS_FEF) << 2))
        | (env->pstate & PS_PRIV) | ((env->lsu & (DMMU_E | IMMU_E)) >> 2);
#else
    // FPU enable . Supervisor
    flags = (env->psref << 4) | env->psrs;
#endif
    cs_base = env->npc;
    pc = env->pc;
#elif defined(TARGET_PPC)
    flags = env->hflags;
    cs_base = 0;
    pc = env->nip;
#elif defined(TARGET_MIPS)
    flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK);
    cs_base = 0;
    pc = env->PC[env->current_tc];
#elif defined(TARGET_M68K)
    flags = (env->fpcr & M68K_FPCR_PREC)  /* Bit  6 */
            | (env->sr & SR_S)            /* Bit  13 */
            | ((env->macsr >> 4) & 0xf);  /* Bits 0-3 */
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_SH4)
    flags = env->flags;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_ALPHA)
    flags = env->ps;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_CRIS)
    flags = 0;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_IA64)
    flags = 0;
    cs_base = 0;                    /* XXXXX */
    pc = 0;
#else
#error unsupported CPU
#endif
    if (translation_settings & CPU_SETTING_NO_CACHE)
        tb = NULL;
    else
        tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (__builtin_expect(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                         tb->flags != flags, 0)) {
        tb = tb_find_slow(pc, cs_base, flags);
        /* Note: we do it here to avoid a gcc bug on Mac OS X when
           doing it in tb_find_slow */
        if (tb_invalidated_flag) {
            /* as some TB could have been invalidated because
               of memory exceptions while generating the code, we
               must recompute the hash index here */
            T0 = 0;
        }
    }
    return tb;
}
#define BREAK_CHAIN T0 = 0
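
/* T0 doubles as the block-chaining handle: after a chained TB runs, it
   leaves its own TranslationBlock pointer in T0 with the taken jump
   slot index (0 or 1) encoded in the low two bits, which is what
   tb_add_jump() consumes in the dispatch loop below.  BREAK_CHAIN
   clears it whenever control flow changed behind the translator's back
   (interrupt, SMM entry, EXITTB), so the previous block is not patched
   to jump somewhere now invalid. */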
/* main execution loop */

int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
#if defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    uint32_t *saved_regwptr;
#endif
#endif
    int ret, interrupt_request;
    void (*gen_func)(void);
    TranslationBlock *tb;
    uint8_t *tc_ptr;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;
    SAVE_GLOBALS();

    env_to_regs();
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    saved_regwptr = REGWPTR;
#endif
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_IA64)
    /* XXXXX */
#endif
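    /* On i386 the condition codes are maintained lazily: CC_OP records
       how to recompute the arithmetic flags, and
       cc_table[CC_OP].compute_all() folds them back into eflags when a
       complete value is needed (see the dump and exit paths below).
       CC_OP_EFLAGS, as set above, means the flag bits are held
       verbatim in CC_SRC. */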
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    break;
                } else if (env->user_mode_only) {
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
#endif
                    ret = env->exception_index;
                    break;
                } else {
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env->exception_index);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#elif defined(TARGET_IA64)
                    do_interrupt(env);
#endif
                }
                env->exception_index = -1;
            }
#ifdef USE_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0) {
                int ret;
                env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif

#ifdef USE_KVM
            if (kvm_allowed) {
                kvm_cpu_exec(env);
                longjmp(env->jmp_env, 1);
            }
#endif
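
            /* With KVM enabled the TCG dispatch loop below is never
               reached: kvm_cpu_exec() runs the guest in the kernel
               until it needs userspace, and the unconditional longjmp
               then restarts the outer loop so pending exceptions and
               exit requests are re-examined. */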
            T0 = 0; /* force lookup of first TB */
            for(;;) {
                SAVE_GLOBALS();
                interrupt_request = env->interrupt_request;
                if (__builtin_expect(interrupt_request, 0)
#if defined(TARGET_I386)
                    && env->hflags & HF_GIF_MASK
#endif
                    ) {
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                        !(env->hflags & HF_SMM_MASK)) {
                        svm_check_intercept(SVM_EXIT_SMI);
                        env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                        do_smm_enter();
                        BREAK_CHAIN;
                    } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->eflags & IF_MASK || env->hflags & HF_HIF_MASK) &&
                        !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                        int intno;
                        svm_check_intercept(SVM_EXIT_INTR);
                        env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                        intno = cpu_get_pic_interrupt(env);
                        if (loglevel & CPU_LOG_TB_IN_ASM) {
                            fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
                        }
                        do_interrupt(intno, 0, 0, 0, 1);
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        BREAK_CHAIN;
#if !defined(CONFIG_USER_ONLY)
                    } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                        (env->eflags & IF_MASK) && !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                        int intno;
                        /* FIXME: this should respect TPR */
                        env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                        svm_check_intercept(SVM_EXIT_VINTR);
                        intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                        if (loglevel & CPU_LOG_TB_IN_ASM)
                            fprintf(logfile, "Servicing virtual hardware INT=0x%02x\n", intno);
                        do_interrupt(intno, 0, 0, -1, 1);
                        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl),
                                 ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl)) & ~V_IRQ_MASK);
                        BREAK_CHAIN;
#endif
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        BREAK_CHAIN;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        BREAK_CHAIN;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psret != 0)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            do_interrupt(env->interrupt_index);
                            env->interrupt_index = 0;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
                            BREAK_CHAIN;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        BREAK_CHAIN;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        BREAK_CHAIN;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        BREAK_CHAIN;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        BREAK_CHAIN;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        BREAK_CHAIN;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        BREAK_CHAIN;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        BREAK_CHAIN;
                    }
                    if (interrupt_request & CPU_INTERRUPT_EXIT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
                        env->exception_index = EXCP_INTERRUPT;
                        cpu_loop_exit();
                    }
                }
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
                    regs_to_env();
#if defined(TARGET_I386)
                    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                    cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SPARC)
                    REGWPTR = env->regbase + (env->cwp * 16);
                    env->regwptr = REGWPTR;
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_PPC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_MIPS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SH4)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_ALPHA)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_CRIS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                tb = tb_find_fast();
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_EXEC)) {
                    fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                            (long)tb->tc_ptr, tb->pc,
                            lookup_symbol(tb->pc));
                }
#endif
                RESTORE_GLOBALS();
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (T0 != 0 &&
#if USE_KQEMU
                    (env->kqemu_enabled != 2) &&
#endif
                    tb->page_addr[1] == -1) {
                    spin_lock(&tb_lock);
                    tb_add_jump((TranslationBlock *)(long)(T0 & ~3), T0 & 3, tb);
                    spin_unlock(&tb_lock);
                }
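                /* Direct block chaining: tb_add_jump() patches jump
                   slot (T0 & 3) of the previously executed TB
                   (T0 & ~3) to branch straight into tb's generated
                   code, so subsequent iterations skip this dispatch
                   loop entirely until the chain is broken. */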
                tc_ptr = tb->tc_ptr;
                env->current_tb = tb;
                /* execute the generated code */
                gen_func = (void *)tc_ptr;
#if defined(__sparc__)
                __asm__ __volatile__("call %0\n\t"
                                     "mov %%o7,%%i0"
                                     : /* no outputs */
                                     : "r" (gen_func)
                                     : "i0", "i1", "i2", "i3", "i4", "i5",
                                       "o0", "o1", "o2", "o3", "o4", "o5",
                                       "l0", "l1", "l2", "l3", "l4", "l5",
                                       "l6", "l7");
#elif defined(__arm__)
                asm volatile ("mov pc, %0\n\t"
                              ".global exec_loop\n\t"
                              "exec_loop:\n\t"
                              : /* no outputs */
                              : "r" (gen_func)
                              : "r1", "r2", "r3", "r8", "r9", "r10", "r12", "r14");
#elif defined(__ia64)
                struct fptr {
                    void *ip;
                    void *gp;
                } fp;

                fp.ip = tc_ptr;
                fp.gp = code_gen_buffer + 2 * (1 << 20);
                (*(void (*)(void)) &fp)();
#else
                gen_func();
#endif
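                /* The host-specific call sequences above exist because
                   a plain indirect call is not enough everywhere: the
                   sparc variant spells out the registers the generated
                   code may clobber, the arm variant branches via pc and
                   exposes an exec_loop label for the return, and on
                   ia64 a function pointer is really a descriptor pair
                   {entry ip, gp}, so one is built by hand around tc_ptr
                   before being called. */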
                env->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(TARGET_I386) && !defined(CONFIG_SOFTMMU)
                if (env->hflags & HF_SOFTMMU_MASK) {
                    env->hflags &= ~HF_SOFTMMU_MASK;
                    /* do not allow linking to another block */
                    T0 = 0;
                }
#endif
#if defined(USE_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* for(;;) */
        } else {
            env_to_regs();
        }
    } /* for(;;) */
#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    REGWPTR = saved_regwptr;
#endif
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_IA64)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
    RESTORE_GLOBALS();
#include "hostregs_helper.h"

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}
/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it leads to an MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}
#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */
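
/* The save/swap/restore dance around the global env in the helpers
   above is needed because the helper_* routines operate implicitly on
   the global CPU state pointer (often pinned in a host register),
   while these entry points take an explicit CPUX86State argument. */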
#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
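
/* Return-value contract shared by all the handle_cpu_signal variants:
   0 means the fault was not for the guest (the caller lets the host
   deliver the signal normally), 1 means it was fully handled and
   execution can resume; when a guest exception must be raised, the
   function does not actually return because it longjmps back into
   cpu_exec(). */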
#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
}
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
}
#elif defined (TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined (TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_ALPHA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_CRIS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#else
#error unsupported target CPU
#endif
#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)   (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)  ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context) ((context)->uc_mcontext->es.err)
#else
# define EIP_sig(context)   ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)  ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}
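
/* On x86 hosts, trap number 0xe is a page fault, and bit 1 of the
   hardware error code is set when the faulting access was a write,
   hence the (ERROR_sig(uc) >> 1) & 1 extraction above; any other trap
   is reported as a non-write access. */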
#elif defined(__x86_64__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;

    pc = uc->uc_mcontext.gregs[REG_RIP];
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
                             (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}
#elif defined(__powerpc__)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)   ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access  */
# define GPR_sig(reg_num, context)    REG_sig(gpr[reg_num], context)
# define IAR_sig(context)             REG_sig(nip, context)  /* Program counter */
# define MSR_sig(context)             REG_sig(msr, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)             REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)             REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)              REG_sig(link, context) /* Link register */
# define CR_sig(context)              REG_sig(ccr, context)  /* Condition register */
/* Float Registers access  */
# define FLOAT_sig(reg_num, context)  (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)           (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)             REG_sig(dar, context)
# define DSISR_sig(context)           REG_sig(dsisr, context)
# define TRAP_sig(context)            REG_sig(trap, context)
#endif /* linux */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)      ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)   ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)       REG_sig(r##reg_num, context)
# define IAR_sig(context)                REG_sig(srr0, context) /* Program counter */
# define MSR_sig(context)                REG_sig(srr1, context) /* Machine State Register (Supervisor) */
# define CTR_sig(context)                REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)                REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                 REG_sig(lr, context)   /* Link register */
# define CR_sig(context)                 REG_sig(cr, context)   /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)     FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)              ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)                EXCEPREG_sig(dar, context)       /* Fault registers for coredump */
# define DSISR_sig(context)              EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)               EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    unsigned long pc;
    int is_write;
    uint32_t insn;

    /* XXX: is there a standard glibc define ? */
    pc = regs[1];
    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x06: // sth
        case 0x04: // st
        case 0x07: // std
        case 0x24: // stf
        case 0x27: // stdf
        case 0x25: // stfsr
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}
#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[R15];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33:  */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */