/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#include "exec.h"
#include "disas.h"

#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#include <sys/ucontext.h>
#endif

#include "qemu-kvm.h"
int tb_invalidated_flag;

//#define DEBUG_EXEC
//#define DEBUG_SIGNAL

#define SAVE_GLOBALS()
#define RESTORE_GLOBALS()
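
/* SAVE_GLOBALS()/RESTORE_GLOBALS() are no-ops by default; buggy
   sparc/glibc combinations redefine them below to spill and reload the
   globals QEMU keeps in host registers around libc calls. */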
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#include <features.h>
#if defined(__GLIBC__) && ((__GLIBC__ < 2) || \
                           ((__GLIBC__ == 2) && (__GLIBC_MINOR__ <= 90)))
// Work around ugly bugs in glibc that mangle global register contents

static volatile void *saved_env;
static volatile unsigned long saved_t0, saved_i7;
#undef SAVE_GLOBALS
#define SAVE_GLOBALS() do {                                     \
        saved_env = env;                                        \
        saved_t0 = T0;                                          \
        asm volatile ("st %%i7, [%0]" : : "r" (&saved_i7));     \
    } while(0)

#undef RESTORE_GLOBALS
#define RESTORE_GLOBALS() do {                                  \
        env = (void *)saved_env;                                \
        T0 = saved_t0;                                          \
        asm volatile ("ld [%0], %%i7" : : "r" (&saved_i7));     \
    } while(0)

static int sparc_setjmp(jmp_buf buf)
{
    int ret;

    SAVE_GLOBALS();
    ret = setjmp(buf);
    RESTORE_GLOBALS();
    return ret;
}
#undef setjmp
#define setjmp(jmp_buf) sparc_setjmp(jmp_buf)

static void sparc_longjmp(jmp_buf buf, int val)
{
    SAVE_GLOBALS();
    longjmp(buf, val);
}
#define longjmp(jmp_buf, val) sparc_longjmp(jmp_buf, val)
#endif
#endif
void cpu_loop_exit(void)
{
    /* NOTE: the registers at this point must be saved by hand because
       longjmp restores them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}
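
/* Guest exceptions are dispatched with setjmp()/longjmp(): cpu_exec()
   arms env->jmp_env, and anything that must abort the current translated
   block (faults, interrupts, debug events) sets env->exception_index and
   longjmps back to that point. */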
#if !(defined(TARGET_SPARC) || defined(TARGET_SH4) || defined(TARGET_M68K))
#define reg_T2
#endif
/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    struct ucontext *uc = puc;
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
    }
#endif
    longjmp(env->jmp_env, 1);
}
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    int code_gen_size;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;
    uint8_t *tc_ptr;

    spin_lock(&tb_lock);

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* don't forget to invalidate previous TB info */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    SAVE_GLOBALS();
    cpu_gen_code(env, tb, &code_gen_size);
    RESTORE_GLOBALS();
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    spin_unlock(&tb_lock);
    return tb;
}
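
/* The lookup is two-level: tb_phys_hash is keyed on the *physical* PC so
   a translation stays valid across virtual-mapping changes, while the
   per-CPU tb_jmp_cache filled in at "found:" above serves as a fast
   virtual-PC front end for tb_find_fast() below. */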
static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint64_t flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
#if defined(TARGET_I386)
    flags = env->hflags;
    flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
    flags |= env->intercept;
    cs_base = env->segs[R_CS].base;
    pc = cs_base + env->eip;
#elif defined(TARGET_ARM)
    flags = env->thumb | (env->vfp.vec_len << 1)
            | (env->vfp.vec_stride << 4);
    if ((env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR)
        flags |= (1 << 6);
    if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30))
        flags |= (1 << 7);
    flags |= (env->condexec_bits << 8);
    cs_base = 0;
    pc = env->regs[15];
#elif defined(TARGET_SPARC)
#ifdef TARGET_SPARC64
    // Combined FPU enable bits . PRIV . DMMU enabled . IMMU enabled
    flags = (((env->pstate & PS_PEF) >> 1) | ((env->fprs & FPRS_FEF) << 2))
        | (env->pstate & PS_PRIV) | ((env->lsu & (DMMU_E | IMMU_E)) >> 2);
#else
    // FPU enable . Supervisor
    flags = (env->psref << 4) | env->psrs;
#endif
    cs_base = env->npc;
    pc = env->pc;
#elif defined(TARGET_PPC)
    flags = env->hflags;
    cs_base = 0;
    pc = env->nip;
#elif defined(TARGET_MIPS)
    flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK);
    cs_base = 0;
    pc = env->PC[env->current_tc];
#elif defined(TARGET_M68K)
    flags = (env->fpcr & M68K_FPCR_PREC)  /* Bit  6 */
            | (env->sr & SR_S)            /* Bit  13 */
            | ((env->macsr >> 4) & 0xf);  /* Bits 0-3 */
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_SH4)
    flags = env->flags;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_ALPHA)
    flags = env->ps;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_CRIS)
    flags = 0;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_IA64)
    flags = 0;
    cs_base = 0; /* XXXXX */
    pc = 0;
#else
#error unsupported CPU
#endif
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (__builtin_expect(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                         tb->flags != flags, 0)) {
        tb = tb_find_slow(pc, cs_base, flags);
        /* Note: we do it here to avoid a gcc bug on Mac OS X when
           doing it in tb_find_slow */
        if (tb_invalidated_flag) {
            /* as some TB could have been invalidated because
               of memory exceptions while generating the code, we
               must recompute the hash index here */
            T0 = 0;
        }
    }
    return tb;
}
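
/* A cached TB is reusable only if the whole triple (pc, cs_base, flags)
   matches: the same guest code is translated differently depending on
   mode bits (e.g. x86 IOPL/VM, ARM Thumb), so those bits are part of the
   lookup key. */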
#define BREAK_CHAIN T0 = 0
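
/* TB chaining: after a translated block runs, T0 holds the address of
   the block it exited from, with the low 2 bits selecting which of its
   outgoing jump slots was taken; cpu_exec() uses this to patch a direct
   jump to the next block.  BREAK_CHAIN forces the next iteration back
   through the main loop whenever program flow changed behind the
   translator's back. */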
/* main execution loop */

int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
#if defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    uint32_t *saved_regwptr;
#endif
#endif
    int ret, interrupt_request;
    long (*gen_func)(void);
    TranslationBlock *tb;
    uint8_t *tc_ptr;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;
    SAVE_GLOBALS();

    env_to_regs();
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    saved_regwptr = REGWPTR;
#endif
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_IA64)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    break;
                } else if (env->user_mode_only) {
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
#endif
                    ret = env->exception_index;
                    break;
                } else {
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env->exception_index);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#elif defined(TARGET_IA64)
                    do_interrupt(env);
#endif
                }
                env->exception_index = -1;
            }
#ifdef USE_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0) {
                int ret;
                env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif

            if (kvm_enabled()) {
                kvm_cpu_exec(env);
                longjmp(env->jmp_env, 1);
            }
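
            /* when KVM is enabled the guest runs in the kernel via
               kvm_cpu_exec(); the longjmp restarts this outer loop so that
               pending exceptions and exit requests are serviced, and the
               translated-code path below is never reached. */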
            T0 = 0; /* force lookup of first TB */
            for(;;) {
                SAVE_GLOBALS();
                interrupt_request = env->interrupt_request;
                if (__builtin_expect(interrupt_request, 0)
#if defined(TARGET_I386)
                    && env->hflags & HF_GIF_MASK
#endif
                    ) {
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                        !(env->hflags & HF_SMM_MASK)) {
                        svm_check_intercept(SVM_EXIT_SMI);
                        env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                        do_smm_enter();
                        BREAK_CHAIN;
                    } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                               !(env->hflags & HF_NMI_MASK)) {
                        env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                        env->hflags |= HF_NMI_MASK;
                        do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                        BREAK_CHAIN;
                    } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                               (env->eflags & IF_MASK || env->hflags & HF_HIF_MASK) &&
                               !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                        int intno;
                        svm_check_intercept(SVM_EXIT_INTR);
                        env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                        intno = cpu_get_pic_interrupt(env);
                        if (loglevel & CPU_LOG_TB_IN_ASM) {
                            fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
                        }
                        do_interrupt(intno, 0, 0, 0, 1);
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        BREAK_CHAIN;
#if !defined(CONFIG_USER_ONLY)
                    } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                               (env->eflags & IF_MASK) && !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                        int intno;
                        /* FIXME: this should respect TPR */
                        env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                        svm_check_intercept(SVM_EXIT_VINTR);
                        intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                        if (loglevel & CPU_LOG_TB_IN_ASM)
                            fprintf(logfile, "Servicing virtual hardware INT=0x%02x\n", intno);
                        do_interrupt(intno, 0, 0, -1, 1);
                        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl),
                                 ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl)) & ~V_IRQ_MASK);
                        BREAK_CHAIN;
#endif
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        BREAK_CHAIN;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        BREAK_CHAIN;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psret != 0)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            do_interrupt(env->interrupt_index);
                            env->interrupt_index = 0;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
                            BREAK_CHAIN;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        BREAK_CHAIN;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        BREAK_CHAIN;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        BREAK_CHAIN;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        BREAK_CHAIN;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        BREAK_CHAIN;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        BREAK_CHAIN;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        BREAK_CHAIN;
                    }
                    if (interrupt_request & CPU_INTERRUPT_EXIT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
                        env->exception_index = EXCP_INTERRUPT;
                        cpu_loop_exit();
                    }
                }
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
                    regs_to_env();
#if defined(TARGET_I386)
                    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                    cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SPARC)
                    REGWPTR = env->regbase + (env->cwp * 16);
                    env->regwptr = REGWPTR;
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_PPC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_MIPS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SH4)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_ALPHA)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_CRIS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                tb = tb_find_fast();
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_EXEC)) {
                    fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                            (long)tb->tc_ptr, tb->pc,
                            lookup_symbol(tb->pc));
                }
#endif
                RESTORE_GLOBALS();
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (T0 != 0 &&
#if USE_KQEMU
                    (env->kqemu_enabled != 2) &&
#endif
                    tb->page_addr[1] == -1) {
                    spin_lock(&tb_lock);
                    tb_add_jump((TranslationBlock *)(long)(T0 & ~3), T0 & 3, tb);
                    spin_unlock(&tb_lock);
                }
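                /* T0 & ~3 recovers the TranslationBlock that just ran and
                   T0 & 3 the jump slot it exited through; tb_add_jump()
                   rewrites that slot to branch straight into tb, so later
                   executions skip this loop entirely. */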
                tc_ptr = tb->tc_ptr;
                env->current_tb = tb;
                /* execute the generated code */
                gen_func = (void *)tc_ptr;
#if defined(__sparc__)
                __asm__ __volatile__("call %0\n\t"
                                     "mov %%o7,%%i0"
                                     : /* no outputs */
                                     : "r" (gen_func)
                                     : "i0", "i1", "i2", "i3", "i4", "i5",
                                       "o0", "o1", "o2", "o3", "o4", "o5",
                                       "l0", "l1", "l2", "l3", "l4", "l5",
                                       "l6", "l7");
#elif defined(__hppa__)
                asm volatile ("ble  0(%%sr4,%1)\n"
                              "copy %%r31,%%r18\n"
                              "copy %%r28,%0\n"
                              : "=r" (T0)
                              : "r" (gen_func)
                              : "r1", "r2", "r3", "r4", "r5", "r6", "r7",
                                "r8", "r9", "r10", "r11", "r12", "r13",
                                "r18", "r19", "r20", "r21", "r22", "r23",
                                "r24", "r25", "r26", "r27", "r28", "r29",
                                "r30", "r31");
#elif defined(__arm__)
                asm volatile ("mov pc, %0\n\t"
                              ".global exec_loop\n\t"
                              "exec_loop:\n\t"
                              : /* no outputs */
                              : "r" (gen_func)
                              : "r1", "r2", "r3", "r8", "r9", "r10", "r12", "r14");
#elif defined(__ia64)
                struct fptr {
                    void *ip;
                    void *gp;
                } fp;

                fp.ip = tc_ptr;
                fp.gp = code_gen_buffer + 2 * (1 << 20);
                (*(void (*)(void)) &fp)();
#else
                T0 = gen_func();
#endif
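                /* on most hosts the translated code is entered as a plain
                   function whose return value (the next-TB hint) lands in
                   T0; the asm variants above are only needed where the
                   host ABI requires hand-written glue, e.g. ia64's
                   two-word function descriptors (entry point plus gp). */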
                env->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(TARGET_I386) && !defined(CONFIG_SOFTMMU)
                if (env->hflags & HF_SOFTMMU_MASK) {
                    env->hflags &= ~HF_SOFTMMU_MASK;
                    /* do not allow linking to another block */
                    T0 = 0;
                }
#endif
#if defined(USE_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* for(;;) */
        } else {
            env_to_regs();
        }
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    REGWPTR = saved_regwptr;
#endif
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_IA64)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
    RESTORE_GLOBALS();
#include "hostregs_helper.h"

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}
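
/* For orientation, a rough sketch of how a caller drives this loop
 * (illustrative only, not the actual main-loop code):
 *
 *     for (;;) {
 *         int ret = cpu_exec(env);
 *         if (ret == EXCP_DEBUG)
 *             ... stop for the debugger ...
 *         else if (ret == EXCP_HLT || ret == EXCP_HALTED)
 *             ... wait for an interrupt source ...
 *         ... run timers and I/O between slices ...
 *     }
 */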
/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}
#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */
#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
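
/* All handle_cpu_signal() variants below follow the same contract: return
   0 if the fault does not belong to the guest (the caller then falls back
   to the host's default signal handling), return 1 if it was fixed up in
   place, or do not return at all because the guest exception is raised by
   longjmp'ing back into cpu_exec(). */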
#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined (TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_ALPHA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_CRIS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#else
#error unsupported target CPU
#endif
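
/* host CPU specific signal decoding follows: each cpu_signal_handler()
   extracts the faulting host PC and, where the hardware reports it,
   whether the access was a write, then hands off to handle_cpu_signal()
   above. */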
#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)  (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)    ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext->es.err)
#else
# define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}
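
/* trap 0xe is the x86 page fault; bit 1 of its error code is set when the
   faulting access was a write, which is exactly the is_write flag that
   handle_cpu_signal() expects. */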
#elif defined(__x86_64__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;

    pc = uc->uc_mcontext.gregs[REG_RIP];
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
                             (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}
#elif defined(__powerpc__)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)    ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access  */
# define GPR_sig(reg_num, context)     REG_sig(gpr[reg_num], context)
# define IAR_sig(context)              REG_sig(nip, context)  /* Program counter */
# define MSR_sig(context)              REG_sig(msr, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)              REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)              REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)               REG_sig(link, context) /* Link register */
# define CR_sig(context)               REG_sig(ccr, context)  /* Condition register */
/* Float Registers access  */
# define FLOAT_sig(reg_num, context)   (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)            (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)              REG_sig(dar, context)
# define DSISR_sig(context)            REG_sig(dsisr, context)
# define TRAP_sig(context)             REG_sig(trap, context)
#endif /* linux */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)      ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)   ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)       REG_sig(r##reg_num, context)
# define IAR_sig(context)                REG_sig(srr0, context) /* Program counter */
# define MSR_sig(context)                REG_sig(srr1, context) /* Machine State Register (Supervisor) */
# define CTR_sig(context)                REG_sig(ctr, context)
# define XER_sig(context)                REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                 REG_sig(lr, context)   /* Link register */
# define CR_sig(context)                 REG_sig(cr, context)   /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)     FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)              ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)                EXCEPREG_sig(dar, context)     /* Fault registers for coredump */
# define DSISR_sig(context)              EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)               EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    unsigned long pc;
    int is_write;
    uint32_t insn;

    /* XXX: is there a standard glibc define ? */
    pc = regs[1];
    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x06: // sth
        case 0x04: // st
        case 0x07: // std
        case 0x24: // stf
        case 0x27: // stdf
        case 0x25: // stfsr
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}
#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[R15];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33:  */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.sc_iaoq[0];
    /* FIXME: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */