scsi-generic implementation, missing in last commit.
[qemu/qemu_0_9_1_stable.git] / cpu-exec.c
/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#include "exec.h"
#include "disas.h"

#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#include <sys/ucontext.h>
#endif
int tb_invalidated_flag;

//#define DEBUG_EXEC
//#define DEBUG_SIGNAL

#define SAVE_GLOBALS()
#define RESTORE_GLOBALS()
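
/* By default SAVE_GLOBALS()/RESTORE_GLOBALS() are no-ops.  They are
   redefined just below for sparc hosts with old glibc versions, where
   the global register variables (env, T0, %i7) must be spilled and
   reloaded by hand around libc calls such as setjmp()/longjmp(). */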
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#include <features.h>
#if defined(__GLIBC__) && ((__GLIBC__ < 2) || \
                           ((__GLIBC__ == 2) && (__GLIBC_MINOR__ <= 90)))
// Work around ugly bugs in glibc that mangle global register contents
static volatile void *saved_env;
static volatile unsigned long saved_t0, saved_i7;
#undef SAVE_GLOBALS
#define SAVE_GLOBALS() do {                                     \
        saved_env = env;                                        \
        saved_t0 = T0;                                          \
        asm volatile ("st %%i7, [%0]" : : "r" (&saved_i7));     \
    } while(0)

#undef RESTORE_GLOBALS
#define RESTORE_GLOBALS() do {                                  \
        env = (void *)saved_env;                                \
        T0 = saved_t0;                                          \
        asm volatile ("ld [%0], %%i7" : : "r" (&saved_i7));     \
    } while(0)

static int sparc_setjmp(jmp_buf buf)
{
    int ret;

    SAVE_GLOBALS();
    ret = setjmp(buf);
    RESTORE_GLOBALS();
    return ret;
}
#undef setjmp
#define setjmp(jmp_buf) sparc_setjmp(jmp_buf)

static void sparc_longjmp(jmp_buf buf, int val)
{
    SAVE_GLOBALS();
    longjmp(buf, val);
}
#define longjmp(jmp_buf, val) sparc_longjmp(jmp_buf, val)
#endif
#endif
void cpu_loop_exit(void)
{
    /* NOTE: the registers at this point must be saved by hand because
       longjmp restores them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}
#if !(defined(TARGET_SPARC) || defined(TARGET_SH4) || defined(TARGET_M68K))
#define reg_T2
#endif
/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    struct ucontext *uc = puc;
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
    }
#endif
    longjmp(env->jmp_env, 1);
}
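
/* Translated blocks are looked up in two steps: tb_find_fast() probes
   the per-CPU tb_jmp_cache, a direct-mapped cache indexed by virtual
   PC, and falls back to tb_find_slow(), which walks the tb_phys_hash
   chain keyed by physical PC and translates the block if it is not
   found there. */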
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    int code_gen_size;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;
    uint8_t *tc_ptr;

    spin_lock(&tb_lock);

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* don't forget to invalidate previous TB info */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    SAVE_GLOBALS();
    cpu_gen_code(env, tb, &code_gen_size);
    RESTORE_GLOBALS();
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    spin_unlock(&tb_lock);
    return tb;
}
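
/* The 'flags' word computed below folds in every piece of CPU state
   that changes how code at a given PC must be translated (privilege
   level, FPU enable bits, Thumb mode, etc.); a cached TB is only
   reused when pc, cs_base and flags all match. */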
static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint64_t flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
#if defined(TARGET_I386)
    flags = env->hflags;
    flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
    flags |= env->intercept;
    cs_base = env->segs[R_CS].base;
    pc = cs_base + env->eip;
#elif defined(TARGET_ARM)
    flags = env->thumb | (env->vfp.vec_len << 1)
        | (env->vfp.vec_stride << 4);
    if ((env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR)
        flags |= (1 << 6);
    if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30))
        flags |= (1 << 7);
    flags |= (env->condexec_bits << 8);
    cs_base = 0;
    pc = env->regs[15];
#elif defined(TARGET_SPARC)
#ifdef TARGET_SPARC64
    // Combined FPU enable bits . PRIV . DMMU enabled . IMMU enabled
    flags = (((env->pstate & PS_PEF) >> 1) | ((env->fprs & FPRS_FEF) << 2))
        | (env->pstate & PS_PRIV) | ((env->lsu & (DMMU_E | IMMU_E)) >> 2);
#else
    // FPU enable . Supervisor
    flags = (env->psref << 4) | env->psrs;
#endif
    cs_base = env->npc;
    pc = env->pc;
#elif defined(TARGET_PPC)
    flags = env->hflags;
    cs_base = 0;
    pc = env->nip;
#elif defined(TARGET_MIPS)
    flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK);
    cs_base = 0;
    pc = env->PC[env->current_tc];
#elif defined(TARGET_M68K)
    flags = (env->fpcr & M68K_FPCR_PREC)  /* Bit  6   */
            | (env->sr & SR_S)            /* Bit  13  */
            | ((env->macsr >> 4) & 0xf);  /* Bits 0-3 */
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_SH4)
    flags = env->flags;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_ALPHA)
    flags = env->ps;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_CRIS)
    flags = 0;
    cs_base = 0;
    pc = env->pc;
#else
#error unsupported CPU
#endif
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (__builtin_expect(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                         tb->flags != flags, 0)) {
        tb = tb_find_slow(pc, cs_base, flags);
        /* Note: we do it here to avoid a gcc bug on Mac OS X when
           doing it in tb_find_slow */
        if (tb_invalidated_flag) {
            /* as some TB could have been invalidated because
               of memory exceptions while generating the code, we
               must recompute the hash index here */
            T0 = 0;
        }
    }
    return tb;
}
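
/* Clearing T0 breaks direct TB chaining: T0 normally holds the address
   of the TB that just executed, with the taken jump slot in its low
   two bits, and a zero value prevents the next block from being
   patched into the previous one. */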
#define BREAK_CHAIN T0 = 0
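
/* hostregs_helper.h is included several times in cpu_exec(): with
   DECLARE_HOST_REGS it declares storage for the host registers that
   are reserved as global register variables, with SAVE_HOST_REGS it
   saves them, and the final bare inclusion at the end of the function
   restores them. */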
/* main execution loop */

int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
#if defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    uint32_t *saved_regwptr;
#endif
#endif
    int ret, interrupt_request;
    void (*gen_func)(void);
    TranslationBlock *tb;
    uint8_t *tc_ptr;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;
    SAVE_GLOBALS();

    env_to_regs();
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    saved_regwptr = REGWPTR;
#endif
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;
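
    /* Everything below runs under the setjmp() in the outer loop:
       cpu_loop_exit() and the exception helpers longjmp() back here
       with env->exception_index set, so pending exceptions are
       dispatched at the top of the next iteration. */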
    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    break;
                } else if (env->user_mode_only) {
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
#endif
                    ret = env->exception_index;
                    break;
                } else {
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env->exception_index);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#endif
                }
                env->exception_index = -1;
            }
#ifdef USE_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0) {
                int ret;
                env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif
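
            /* The inner loop below executes one translated block per
               iteration; interrupt_request is sampled before each
               block so that asynchronous events are serviced at TB
               boundaries. */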
            T0 = 0; /* force lookup of first TB */
            for(;;) {
                SAVE_GLOBALS();
                interrupt_request = env->interrupt_request;
                if (__builtin_expect(interrupt_request, 0)
#if defined(TARGET_I386)
                    && env->hflags & HF_GIF_MASK
#endif
                    ) {
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                        !(env->hflags & HF_SMM_MASK)) {
                        svm_check_intercept(SVM_EXIT_SMI);
                        env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                        do_smm_enter();
                        BREAK_CHAIN;
                    } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                               (env->eflags & IF_MASK || env->hflags & HF_HIF_MASK) &&
                               !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                        int intno;
                        svm_check_intercept(SVM_EXIT_INTR);
                        env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                        intno = cpu_get_pic_interrupt(env);
                        if (loglevel & CPU_LOG_TB_IN_ASM) {
                            fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
                        }
                        do_interrupt(intno, 0, 0, 0, 1);
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        BREAK_CHAIN;
#if !defined(CONFIG_USER_ONLY)
                    } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                               (env->eflags & IF_MASK) && !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                        int intno;
                        /* FIXME: this should respect TPR */
                        env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                        svm_check_intercept(SVM_EXIT_VINTR);
                        intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                        if (loglevel & CPU_LOG_TB_IN_ASM)
                            fprintf(logfile, "Servicing virtual hardware INT=0x%02x\n", intno);
                        do_interrupt(intno, 0, 0, -1, 1);
                        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl),
                                 ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl)) & ~V_IRQ_MASK);
                        BREAK_CHAIN;
#endif
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        BREAK_CHAIN;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        BREAK_CHAIN;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psret != 0)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            do_interrupt(env->interrupt_index);
                            env->interrupt_index = 0;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
                            BREAK_CHAIN;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        BREAK_CHAIN;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        BREAK_CHAIN;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        BREAK_CHAIN;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        BREAK_CHAIN;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        BREAK_CHAIN;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                           < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        BREAK_CHAIN;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        BREAK_CHAIN;
                    }
                    if (interrupt_request & CPU_INTERRUPT_EXIT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
                        env->exception_index = EXCP_INTERRUPT;
                        cpu_loop_exit();
                    }
                }
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
                    regs_to_env();
#if defined(TARGET_I386)
                    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                    cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SPARC)
                    REGWPTR = env->regbase + (env->cwp * 16);
                    env->regwptr = REGWPTR;
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_PPC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_MIPS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SH4)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_ALPHA)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_CRIS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                tb = tb_find_fast();
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_EXEC)) {
                    fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                            (long)tb->tc_ptr, tb->pc,
                            lookup_symbol(tb->pc));
                }
#endif
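
                /* T0 is used below for direct block chaining: it holds
                   the previous TB's address in its upper bits and the
                   jump slot taken (0 or 1) in its low two bits, and
                   tb_add_jump() patches that slot to branch straight
                   to the new block. */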
                RESTORE_GLOBALS();
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (T0 != 0 &&
#if USE_KQEMU
                    (env->kqemu_enabled != 2) &&
#endif
                    tb->page_addr[1] == -1) {
                    spin_lock(&tb_lock);
                    tb_add_jump((TranslationBlock *)(long)(T0 & ~3), T0 & 3, tb);
                    spin_unlock(&tb_lock);
                }
                tc_ptr = tb->tc_ptr;
                env->current_tb = tb;
                /* execute the generated code */
                gen_func = (void *)tc_ptr;
#if defined(__sparc__)
                __asm__ __volatile__("call %0\n\t"
                                     "mov %%o7,%%i0"
                                     : /* no outputs */
                                     : "r" (gen_func)
                                     : "i0", "i1", "i2", "i3", "i4", "i5",
                                       "o0", "o1", "o2", "o3", "o4", "o5",
                                       "l0", "l1", "l2", "l3", "l4", "l5",
                                       "l6", "l7");
#elif defined(__arm__)
                asm volatile ("mov pc, %0\n\t"
                              ".global exec_loop\n\t"
                              "exec_loop:\n\t"
                              : /* no outputs */
                              : "r" (gen_func)
                              : "r1", "r2", "r3", "r8", "r9", "r10", "r12", "r14");
#elif defined(__ia64)
                struct fptr {
                    void *ip;
                    void *gp;
                } fp;

                fp.ip = tc_ptr;
                fp.gp = code_gen_buffer + 2 * (1 << 20);
                (*(void (*)(void)) &fp)();
#else
                gen_func();
#endif
                env->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(TARGET_I386) && !defined(CONFIG_SOFTMMU)
                if (env->hflags & HF_SOFTMMU_MASK) {
                    env->hflags &= ~HF_SOFTMMU_MASK;
                    /* do not allow linking to another block */
                    T0 = 0;
                }
#endif
#if defined(USE_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* for(;;) */
        } else {
            env_to_regs();
        }
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    REGWPTR = saved_regwptr;
#endif
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
    RESTORE_GLOBALS();
#include "hostregs_helper.h"

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}
/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}
#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */
#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and 0 otherwise. 'old_set' is the
   signal set which should be restored */
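/* Returns 0 if the signal was not caused by a guest MMU fault, in
   which case the caller must let the host handle it; on a guest fault
   the function either returns 1 or longjmp()s back into the emulator
   and never returns. */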
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
853 static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
854 int is_write, sigset_t *old_set,
855 void *puc)
857 TranslationBlock *tb;
858 int ret;
860 if (cpu_single_env)
861 env = cpu_single_env; /* XXX: find a correct solution for multithread */
862 #if defined(DEBUG_SIGNAL)
863 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
864 pc, address, is_write, *(unsigned long *)old_set);
865 #endif
866 /* XXX: locking issue */
867 if (is_write && page_unprotect(h2g(address), pc, puc)) {
868 return 1;
870 /* see if it is an MMU fault */
871 ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
872 if (ret < 0)
873 return 0; /* not an MMU fault */
874 if (ret == 0)
875 return 1; /* the MMU fault was handled without causing real CPU fault */
876 /* now we have a real cpu fault */
877 tb = tb_find_pc(pc);
878 if (tb) {
879 /* the PC is inside the translated code. It means that we have
880 a virtual CPU fault */
881 cpu_restore_state(tb, env, pc, puc);
883 /* we restore the process signal mask as the sigreturn should
884 do it (XXX: use sigsetjmp) */
885 sigprocmask(SIG_SETMASK, old_set, NULL);
886 cpu_loop_exit();
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
}
#elif defined(TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined(TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_ALPHA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_CRIS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#else
#error unsupported target CPU
#endif
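
/* Host-side signal handlers: each host architecture extracts the
   faulting instruction pointer and, where the signal context provides
   it, a write/read flag from the signal frame, then forwards both to
   handle_cpu_signal() above. */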
#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)   (*((unsigned long *)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)  ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context) ((context)->uc_mcontext->es.err)
#else
# define EIP_sig(context)   ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)  ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}
#elif defined(__x86_64__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;

    pc = uc->uc_mcontext.gregs[REG_RIP];
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
                             (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}
#elif defined(__powerpc__)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)    ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)     REG_sig(gpr[reg_num], context)
# define IAR_sig(context)              REG_sig(nip, context)  /* Program counter */
# define MSR_sig(context)              REG_sig(msr, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)              REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)              REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)               REG_sig(link, context) /* Link register */
# define CR_sig(context)               REG_sig(ccr, context)  /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)   (((double *)((char *)((context)->uc_mcontext.regs + 48 * 4)))[reg_num])
# define FPSCR_sig(context)            (*(int *)((char *)((context)->uc_mcontext.regs + (48 + 32 * 2) * 4)))
/* Exception Registers access */
# define DAR_sig(context)              REG_sig(dar, context)
# define DSISR_sig(context)            REG_sig(dsisr, context)
# define TRAP_sig(context)             REG_sig(trap, context)
#endif /* linux */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)      ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)   ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)       REG_sig(r##reg_num, context)
# define IAR_sig(context)                REG_sig(srr0, context) /* Program counter */
# define MSR_sig(context)                REG_sig(srr1, context) /* Machine State Register (Supervisor) */
# define CTR_sig(context)                REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)                REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                 REG_sig(lr, context)   /* Link register */
# define CR_sig(context)                 REG_sig(cr, context)   /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)     FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)              ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)                EXCEPREG_sig(dar, context) /* Fault registers for coredump */
# define DSISR_sig(context)              EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)               EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = (uint32_t *)uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal((unsigned long)pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    unsigned long pc;
    int is_write;
    uint32_t insn;

    /* XXX: is there a standard glibc define ? */
    pc = regs[1];
    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x06: // sth
        case 0x04: // st
        case 0x07: // std
        case 0x24: // stf
        case 0x27: // stdf
        case 0x25: // stfsr
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}
#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[R15];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33: */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */