[qemu-kvm/fedora.git] / cpu-exec.c
/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#include "exec.h"
#include "disas.h"

#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#include <sys/ucontext.h>
#endif

#include "qemu-kvm.h"

int tb_invalidated_flag;

//#define DEBUG_EXEC
//#define DEBUG_SIGNAL

#define SAVE_GLOBALS()
#define RESTORE_GLOBALS()
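
/* By default SAVE_GLOBALS/RESTORE_GLOBALS are no-ops.  On sparc hosts
   with an affected glibc they are redefined below to spill and reload
   the globals kept in host registers (env, T0 and %i7) around
   setjmp/longjmp, which would otherwise clobber them. */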

#if defined(__sparc__) && !defined(HOST_SOLARIS)
#include <features.h>
#if defined(__GLIBC__) && ((__GLIBC__ < 2) || \
                           ((__GLIBC__ == 2) && (__GLIBC_MINOR__ <= 90)))
// Work around ugly bugs in glibc that mangle global register contents

static volatile void *saved_env;
static volatile unsigned long saved_t0, saved_i7;
#undef SAVE_GLOBALS
#define SAVE_GLOBALS() do {                                     \
        saved_env = env;                                        \
        saved_t0 = T0;                                          \
        asm volatile ("st %%i7, [%0]" : : "r" (&saved_i7));     \
    } while(0)

#undef RESTORE_GLOBALS
#define RESTORE_GLOBALS() do {                                  \
        env = (void *)saved_env;                                \
        T0 = saved_t0;                                          \
        asm volatile ("ld [%0], %%i7" : : "r" (&saved_i7));     \
    } while(0)

static int sparc_setjmp(jmp_buf buf)
{
    int ret;

    SAVE_GLOBALS();
    ret = setjmp(buf);
    RESTORE_GLOBALS();
    return ret;
}

#undef setjmp
#define setjmp(jmp_buf) sparc_setjmp(jmp_buf)

static void sparc_longjmp(jmp_buf buf, int val)
{
    SAVE_GLOBALS();
    longjmp(buf, val);
}
#define longjmp(jmp_buf, val) sparc_longjmp(jmp_buf, val)
#endif
#endif
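
/* Unwind back to the setjmp() in cpu_exec().  Used wherever execution of
   the current translated block must be abandoned: a pending exception, a
   debug event, or an external exit request. */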
void cpu_loop_exit(void)
{
    /* NOTE: the registers at this point must be saved by hand because
       longjmp restores them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}

#if !(defined(TARGET_SPARC) || defined(TARGET_SH4) || defined(TARGET_M68K))
#define reg_T2
#endif

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    struct ucontext *uc = puc;
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
    }
#endif
    longjmp(env->jmp_env, 1);
}
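
/* Translated-block lookup is two-level.  tb_find_fast() below probes the
   per-CPU virtual-PC cache env->tb_jmp_cache first; on a miss,
   tb_find_slow() walks the global tb_phys_hash table, which is keyed on
   the physical PC so that cached blocks survive changes in the guest's
   virtual-to-physical mappings.  If no matching block exists, one is
   generated on the spot with cpu_gen_code(). */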

static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    int code_gen_size;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;
    uint8_t *tc_ptr;

    spin_lock(&tb_lock);

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* don't forget to invalidate previous TB info */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    SAVE_GLOBALS();
    cpu_gen_code(env, tb, &code_gen_size);
    RESTORE_GLOBALS();
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    spin_unlock(&tb_lock);
    return tb;
}

static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint64_t flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
#if defined(TARGET_I386)
    flags = env->hflags;
    flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
    flags |= env->intercept;
    cs_base = env->segs[R_CS].base;
    pc = cs_base + env->eip;
#elif defined(TARGET_ARM)
    flags = env->thumb | (env->vfp.vec_len << 1)
        | (env->vfp.vec_stride << 4);
    if ((env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR)
        flags |= (1 << 6);
    if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30))
        flags |= (1 << 7);
    flags |= (env->condexec_bits << 8);
    cs_base = 0;
    pc = env->regs[15];
#elif defined(TARGET_SPARC)
#ifdef TARGET_SPARC64
    // Combined FPU enable bits . PRIV . DMMU enabled . IMMU enabled
    flags = (((env->pstate & PS_PEF) >> 1) | ((env->fprs & FPRS_FEF) << 2))
        | (env->pstate & PS_PRIV) | ((env->lsu & (DMMU_E | IMMU_E)) >> 2);
#else
    // FPU enable . Supervisor
    flags = (env->psref << 4) | env->psrs;
#endif
    cs_base = env->npc;
    pc = env->pc;
#elif defined(TARGET_PPC)
    flags = env->hflags;
    cs_base = 0;
    pc = env->nip;
#elif defined(TARGET_MIPS)
    flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK);
    cs_base = 0;
    pc = env->PC[env->current_tc];
#elif defined(TARGET_M68K)
    flags = (env->fpcr & M68K_FPCR_PREC)  /* Bit  6 */
        | (env->sr & SR_S)                /* Bit 13 */
        | ((env->macsr >> 4) & 0xf);      /* Bits 0-3 */
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_SH4)
    flags = env->flags;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_ALPHA)
    flags = env->ps;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_CRIS)
    flags = 0;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_IA64)
    flags = 0;
    cs_base = 0; /* XXXXX */
    pc = 0;
#else
#error unsupported CPU
#endif
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (__builtin_expect(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                         tb->flags != flags, 0)) {
        tb = tb_find_slow(pc, cs_base, flags);
        /* Note: we do it here to avoid a gcc bug on Mac OS X when
           doing it in tb_find_slow */
        if (tb_invalidated_flag) {
            /* as some TB could have been invalidated because
               of memory exceptions while generating the code, we
               must recompute the hash index here */
            T0 = 0;
        }
    }
    return tb;
}
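
/* After a translated block returns, T0 identifies the jump it came from:
   the caller TB's address with the jump-slot number encoded in the low
   two bits (see the tb_add_jump() call in cpu_exec()).  Clearing T0
   therefore disables direct-jump patching for the next block; BREAK_CHAIN
   is used for this whenever the program flow has been diverted. */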
#define BREAK_CHAIN T0 = 0

/* main execution loop */

int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
#if defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    uint32_t *saved_regwptr;
#endif
#endif
    int ret, interrupt_request;
    long (*gen_func)(void);
    TranslationBlock *tb;
    uint8_t *tc_ptr;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;
    SAVE_GLOBALS();

    env_to_regs();
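    /* Note on the i386 block below: condition codes are handled lazily.
       Rather than keeping guest eflags current, the translator tracks the
       last flag-setting operation in CC_SRC/CC_OP and recomputes flags on
       demand via cc_table[CC_OP].compute_all(); eflags is split into that
       temporary format here and recombined before cpu_exec() returns. */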
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    saved_regwptr = REGWPTR;
#endif
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_IA64)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;
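
    /* Control structure: the outer for(;;) establishes a setjmp() unwind
       point and dispatches any pending exception; the inner for(;;) then
       executes one translated block per iteration.  Every
       longjmp(env->jmp_env, 1) (from cpu_loop_exit(), a host signal
       handler or the accelerator paths) lands back here and re-runs the
       dispatch. */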
    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    break;
                } else if (env->user_mode_only) {
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
#endif
                    ret = env->exception_index;
                    break;
                } else {
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env->interrupt_index);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#elif defined(TARGET_IA64)
                    do_interrupt(env);
#endif
                }
                env->exception_index = -1;
            }
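            /* Hardware acceleration, when available, is tried before
               falling back to the translator: kqemu runs guest code
               natively and returns a status saying why it stopped, while
               kvm_cpu_exec() executes the guest through the kernel module
               and the longjmp() afterwards simply restarts the dispatch
               loop around it. */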
#ifdef USE_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0) {
                int ret;
                env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif

            if (kvm_enabled()) {
                kvm_cpu_exec(env);
                longjmp(env->jmp_env, 1);
            }
            T0 = 0; /* force lookup of first TB */
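            /* Inner loop: each iteration delivers at most one pending
               interrupt, looks up the translated block for the current
               state, optionally chains it to its predecessor, and runs
               it.  interrupt_request is re-read on every pass so that
               requests raised by device emulation or signal handlers are
               noticed between blocks. */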
            for(;;) {
                SAVE_GLOBALS();
                interrupt_request = env->interrupt_request;
                if (__builtin_expect(interrupt_request, 0)
#if defined(TARGET_I386)
                    && env->hflags & HF_GIF_MASK
#endif
                    ) {
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                        !(env->hflags & HF_SMM_MASK)) {
                        svm_check_intercept(SVM_EXIT_SMI);
                        env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                        do_smm_enter();
                        BREAK_CHAIN;
                    } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->eflags & IF_MASK || env->hflags & HF_HIF_MASK) &&
                        !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                        int intno;
                        svm_check_intercept(SVM_EXIT_INTR);
                        env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                        intno = cpu_get_pic_interrupt(env);
                        if (loglevel & CPU_LOG_TB_IN_ASM) {
                            fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
                        }
                        do_interrupt(intno, 0, 0, 0, 1);
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        BREAK_CHAIN;
#if !defined(CONFIG_USER_ONLY)
                    } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                        (env->eflags & IF_MASK) && !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                        int intno;
                        /* FIXME: this should respect TPR */
                        env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                        svm_check_intercept(SVM_EXIT_VINTR);
                        intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                        if (loglevel & CPU_LOG_TB_IN_ASM)
                            fprintf(logfile, "Servicing virtual hardware INT=0x%02x\n", intno);
                        do_interrupt(intno, 0, 0, -1, 1);
                        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl),
                                 ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl)) & ~V_IRQ_MASK);
                        BREAK_CHAIN;
#endif
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        BREAK_CHAIN;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        BREAK_CHAIN;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psret != 0)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            do_interrupt(env->interrupt_index);
                            env->interrupt_index = 0;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
                            BREAK_CHAIN;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        BREAK_CHAIN;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        BREAK_CHAIN;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        BREAK_CHAIN;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        BREAK_CHAIN;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        BREAK_CHAIN;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        BREAK_CHAIN;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        BREAK_CHAIN;
                    }
                    if (interrupt_request & CPU_INTERRUPT_EXIT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
                        env->exception_index = EXCP_INTERRUPT;
                        cpu_loop_exit();
                    }
                }
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
                    regs_to_env();
#if defined(TARGET_I386)
                    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                    cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SPARC)
                    REGWPTR = env->regbase + (env->cwp * 16);
                    env->regwptr = REGWPTR;
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_PPC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                        | env->cc_dest | (env->cc_x << 4);
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_MIPS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SH4)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_ALPHA)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_CRIS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                tb = tb_find_fast();
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_EXEC)) {
                    fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                            (long)tb->tc_ptr, tb->pc,
                            lookup_symbol(tb->pc));
                }
#endif
                RESTORE_GLOBALS();
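                /* Direct block chaining: if the last executed block ended
                   in a direct jump, T0 still carries that block's address
                   with the jump-slot number in its low two bits, and
                   tb_add_jump() patches the slot to branch straight to
                   the new block's code, so later executions bypass this
                   lookup entirely. */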
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (T0 != 0 &&
#if USE_KQEMU
                    (env->kqemu_enabled != 2) &&
#endif
                    tb->page_addr[1] == -1) {
                    spin_lock(&tb_lock);
                    tb_add_jump((TranslationBlock *)(long)(T0 & ~3), T0 & 3, tb);
                    spin_unlock(&tb_lock);
                }
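                /* The generated code is entered as an ordinary host
                   function; on most hosts its return value becomes the
                   new chaining cookie in T0.  sparc, arm and ia64 need
                   the hand-written call sequences below, apparently
                   because of their calling conventions and global
                   register usage. */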
                tc_ptr = tb->tc_ptr;
                env->current_tb = tb;
                /* execute the generated code */
                gen_func = (void *)tc_ptr;
#if defined(__sparc__)
                __asm__ __volatile__("call %0\n\t"
                                     "mov %%o7,%%i0"
                                     : /* no outputs */
                                     : "r" (gen_func)
                                     : "i0", "i1", "i2", "i3", "i4", "i5",
                                       "o0", "o1", "o2", "o3", "o4", "o5",
                                       "l0", "l1", "l2", "l3", "l4", "l5",
                                       "l6", "l7");
#elif defined(__arm__)
                asm volatile ("mov pc, %0\n\t"
                              ".global exec_loop\n\t"
                              "exec_loop:\n\t"
                              : /* no outputs */
                              : "r" (gen_func)
                              : "r1", "r2", "r3", "r8", "r9", "r10", "r12", "r14");
#elif defined(__ia64)
                struct fptr {
                    void *ip;
                    void *gp;
                } fp;

                fp.ip = tc_ptr;
                fp.gp = code_gen_buffer + 2 * (1 << 20);
                (*(void (*)(void)) &fp)();
#else
                T0 = gen_func();
#endif
                env->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(TARGET_I386) && !defined(CONFIG_SOFTMMU)
                if (env->hflags & HF_SOFTMMU_MASK) {
                    env->hflags &= ~HF_SOFTMMU_MASK;
                    /* do not allow linking to another block */
                    T0 = 0;
                }
#endif
#if defined(USE_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* for(;;) */
        } else {
            env_to_regs();
        }
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    REGWPTR = saved_regwptr;
#endif
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
        | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_IA64)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
    RESTORE_GLOBALS();
#include "hostregs_helper.h"

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}

/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}

#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */

#if !defined(CONFIG_SOFTMMU)
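
/* User-mode memory fault handling.  A host SIGSEGV/SIGBUS is first offered
   to page_unprotect(), which re-enables writes to pages that were
   write-protected in order to catch self-modifying code.  Failing that,
   the fault goes to the target MMU emulation: a negative return means it
   was not a guest MMU fault at all (0 is returned and the caller delivers
   the signal to the application), 0 means the fault was fixed up
   transparently, and 1 means a guest exception must be raised after
   recovering precise CPU state with cpu_restore_state(). */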

#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
}
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
}
#elif defined (TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined (TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined (TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined (TARGET_ALPHA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined (TARGET_CRIS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#else
#error unsupported target CPU
#endif
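
/* Host-side dispatchers: each variant below digs the faulting instruction
   pointer, and where the host reports one, a write flag, out of the raw
   signal context before calling handle_cpu_signal().  On x86 hosts, for
   example, trap 0xe is the page fault and bit 1 of the error code
   distinguishes writes; hosts with no cheap way to tell just pass
   is_write = 0. */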

#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)     (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)    ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext->es.err)
#else
# define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}

#elif defined(__x86_64__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;

    pc = uc->uc_mcontext.gregs[REG_RIP];
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
                             (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}

#elif defined(__powerpc__)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)     ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)      REG_sig(gpr[reg_num], context)
# define IAR_sig(context)               REG_sig(nip, context)   /* Program counter */
# define MSR_sig(context)               REG_sig(msr, context)   /* Machine State Register (Supervisor) */
# define CTR_sig(context)               REG_sig(ctr, context)   /* Count register */
# define XER_sig(context)               REG_sig(xer, context)   /* User's integer exception register */
# define LR_sig(context)                REG_sig(link, context)  /* Link register */
# define CR_sig(context)                REG_sig(ccr, context)   /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)    (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)             (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)               REG_sig(dar, context)
# define DSISR_sig(context)             REG_sig(dsisr, context)
# define TRAP_sig(context)              REG_sig(trap, context)
#endif /* linux */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)     ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)  ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)      REG_sig(r##reg_num, context)
# define IAR_sig(context)               REG_sig(srr0, context)  /* Program counter */
# define MSR_sig(context)               REG_sig(srr1, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)               REG_sig(ctr, context)   /* Count register */
# define XER_sig(context)               REG_sig(xer, context)   /* User's integer exception register */
# define LR_sig(context)                REG_sig(lr, context)    /* Link register */
# define CR_sig(context)                REG_sig(cr, context)    /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)    FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)             ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)               EXCEPREG_sig(dar, context) /* Fault registers for coredump */
# define DSISR_sig(context)             EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)              EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    unsigned long pc;
    int is_write;
    uint32_t insn;

    /* XXX: is there a standard glibc define ? */
    pc = regs[1];
    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x06: // sth
        case 0x04: // st
        case 0x07: // std
        case 0x24: // stf
        case 0x27: // stdf
        case 0x25: // stfsr
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}

#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[R15];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33: */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */