/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#include "exec.h"
#include "disas.h"

#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#include <sys/ucontext.h>
#endif

#include "qemu-kvm.h"

int tb_invalidated_flag;
//#define DEBUG_EXEC
//#define DEBUG_SIGNAL

#define SAVE_GLOBALS()
#define RESTORE_GLOBALS()
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#include <features.h>
#if defined(__GLIBC__) && ((__GLIBC__ < 2) || \
                           ((__GLIBC__ == 2) && (__GLIBC_MINOR__ <= 90)))
// Work around ugly bugs in glibc that mangle global register contents

static volatile void *saved_env;
static volatile unsigned long saved_t0, saved_i7;
#undef SAVE_GLOBALS
#define SAVE_GLOBALS() do {                                     \
        saved_env = env;                                        \
        saved_t0 = T0;                                          \
        asm volatile ("st %%i7, [%0]" : : "r" (&saved_i7));     \
    } while(0)

#undef RESTORE_GLOBALS
#define RESTORE_GLOBALS() do {                                  \
        env = (void *)saved_env;                                \
        T0 = saved_t0;                                          \
        asm volatile ("ld [%0], %%i7" : : "r" (&saved_i7));     \
    } while(0)

static int sparc_setjmp(jmp_buf buf)
{
    int ret;

    SAVE_GLOBALS();
    ret = setjmp(buf);
    RESTORE_GLOBALS();
    return ret;
}

#undef setjmp
#define setjmp(jmp_buf) sparc_setjmp(jmp_buf)

static void sparc_longjmp(jmp_buf buf, int val)
{
    SAVE_GLOBALS();
    longjmp(buf, val);
}
#define longjmp(jmp_buf, val) sparc_longjmp(jmp_buf, val)
#endif
#endif
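
/* Translated code and helpers exit back to cpu_exec() by longjmp()ing
   to the setjmp() point stored in env->jmp_env; guest registers held in
   host registers must be flushed back into env by hand first, because
   longjmp clobbers them. */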
void cpu_loop_exit(void)
{
    /* NOTE: the registers at this point must be saved by hand because
       longjmp restores them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}
#if !(defined(TARGET_SPARC) || defined(TARGET_SH4) || defined(TARGET_M68K))
#define reg_T2
#endif
/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    struct ucontext *uc = puc;
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
    }
#endif
    longjmp(env->jmp_env, 1);
}
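
/* Slow path of the TB lookup: blocks are hashed by the physical address
   of their code, so a block translated once can be reused regardless of
   the virtual mapping; a block spanning two pages only matches when the
   second physical page agrees as well. */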
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    int code_gen_size;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;
    uint8_t *tc_ptr;

    spin_lock(&tb_lock);

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* don't forget to invalidate previous TB info */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    SAVE_GLOBALS();
    cpu_gen_code(env, tb, &code_gen_size);
    RESTORE_GLOBALS();
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    spin_unlock(&tb_lock);
    return tb;
}
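
/* Fast path of the TB lookup: a direct-mapped cache indexed by the
   virtual PC; the (pc, cs_base, flags) triple must match exactly, since
   the same guest address may require different translations depending
   on the CPU mode bits collected below. */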
static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint64_t flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
#if defined(TARGET_I386)
    flags = env->hflags;
    flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
    flags |= env->intercept;
    cs_base = env->segs[R_CS].base;
    pc = cs_base + env->eip;
#elif defined(TARGET_ARM)
    flags = env->thumb | (env->vfp.vec_len << 1)
            | (env->vfp.vec_stride << 4);
    if ((env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR)
        flags |= (1 << 6);
    if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30))
        flags |= (1 << 7);
    flags |= (env->condexec_bits << 8);
    cs_base = 0;
    pc = env->regs[15];
#elif defined(TARGET_SPARC)
#ifdef TARGET_SPARC64
    // Combined FPU enable bits . PRIV . DMMU enabled . IMMU enabled
    flags = (((env->pstate & PS_PEF) >> 1) | ((env->fprs & FPRS_FEF) << 2))
        | (env->pstate & PS_PRIV) | ((env->lsu & (DMMU_E | IMMU_E)) >> 2);
#else
    // FPU enable . Supervisor
    flags = (env->psref << 4) | env->psrs;
#endif
    cs_base = env->npc;
    pc = env->pc;
#elif defined(TARGET_PPC)
    flags = env->hflags;
    cs_base = 0;
    pc = env->nip;
#elif defined(TARGET_MIPS)
    flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK);
    cs_base = 0;
    pc = env->PC[env->current_tc];
#elif defined(TARGET_M68K)
    flags = (env->fpcr & M68K_FPCR_PREC)  /* Bit  6 */
            | (env->sr & SR_S)            /* Bit  13 */
            | ((env->macsr >> 4) & 0xf);  /* Bits 0-3 */
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_SH4)
    flags = env->flags;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_ALPHA)
    flags = env->ps;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_CRIS)
    flags = 0;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_IA64)
    flags = 0;
    cs_base = 0;                    /* XXXXX */
    pc = 0;
#else
#error unsupported CPU
#endif
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (__builtin_expect(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                         tb->flags != flags, 0)) {
        tb = tb_find_slow(pc, cs_base, flags);
        /* Note: we do it here to avoid a gcc bug on Mac OS X when
           doing it in tb_find_slow */
        if (tb_invalidated_flag) {
            /* as some TB could have been invalidated because
               of memory exceptions while generating the code, we
               must recompute the hash index here */
            T0 = 0;
        }
    }
    return tb;
}
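
/* After a translated block returns, T0 holds the address of the TB that
   just executed, with the index of the taken jump slot encoded in its
   two low bits (see the tb_add_jump() call below). Clearing T0 prevents
   the next block from being chained to its predecessor. */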
#define BREAK_CHAIN T0 = 0

/* main execution loop */
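
/* hostregs_helper.h is included three times: with DECLARE_HOST_REGS it
   declares save slots for the host registers that the code generator
   reserves for guest globals, with SAVE_HOST_REGS it fills them, and a
   final include at the end of cpu_exec() restores them. */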
int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
#if defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    uint32_t *saved_regwptr;
#endif
#endif
    int ret, interrupt_request;
    long (*gen_func)(void);
    TranslationBlock *tb;
    uint8_t *tc_ptr;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;
    SAVE_GLOBALS();

    env_to_regs();
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    saved_regwptr = REGWPTR;
#endif
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_IA64)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;
    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    break;
                } else if (env->user_mode_only) {
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
#endif
                    ret = env->exception_index;
                    break;
                } else {
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env->exception_index);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#elif defined(TARGET_IA64)
                    do_interrupt(env);
#endif
                }
                env->exception_index = -1;
            }
#ifdef USE_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0) {
                int ret;
                env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif
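
            /* when the KVM kernel module is active the guest runs
               natively under the hypervisor instead of through the
               translator; the longjmp below re-enters the setjmp above
               so exit requests and pending exceptions get serviced */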
            if (kvm_enabled()) {
                kvm_cpu_exec(env);
                longjmp(env->jmp_env, 1);
            }

            T0 = 0; /* force lookup of first TB */
            for(;;) {
                SAVE_GLOBALS();
                interrupt_request = env->interrupt_request;
                if (__builtin_expect(interrupt_request, 0)
#if defined(TARGET_I386)
                    && env->hflags & HF_GIF_MASK
#endif
                    ) {
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                        !(env->hflags & HF_SMM_MASK)) {
                        svm_check_intercept(SVM_EXIT_SMI);
                        env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                        do_smm_enter();
                        BREAK_CHAIN;
                    } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                               (env->eflags & IF_MASK || env->hflags & HF_HIF_MASK) &&
                               !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                        int intno;
                        svm_check_intercept(SVM_EXIT_INTR);
                        env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                        intno = cpu_get_pic_interrupt(env);
                        if (loglevel & CPU_LOG_TB_IN_ASM) {
                            fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
                        }
                        do_interrupt(intno, 0, 0, 0, 1);
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        BREAK_CHAIN;
#if !defined(CONFIG_USER_ONLY)
                    } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                               (env->eflags & IF_MASK) && !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                        int intno;
                        /* FIXME: this should respect TPR */
                        env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                        svm_check_intercept(SVM_EXIT_VINTR);
                        intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                        if (loglevel & CPU_LOG_TB_IN_ASM)
                            fprintf(logfile, "Servicing virtual hardware INT=0x%02x\n", intno);
                        do_interrupt(intno, 0, 0, -1, 1);
                        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl),
                                 ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl)) & ~V_IRQ_MASK);
                        BREAK_CHAIN;
#endif
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        BREAK_CHAIN;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        BREAK_CHAIN;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psret != 0)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            do_interrupt(env->interrupt_index);
                            env->interrupt_index = 0;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
                            BREAK_CHAIN;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        BREAK_CHAIN;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        BREAK_CHAIN;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        BREAK_CHAIN;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        BREAK_CHAIN;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        BREAK_CHAIN;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        BREAK_CHAIN;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        BREAK_CHAIN;
                    }
                    if (interrupt_request & CPU_INTERRUPT_EXIT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
                        env->exception_index = EXCP_INTERRUPT;
                        cpu_loop_exit();
                    }
                }
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
                    regs_to_env();
#if defined(TARGET_I386)
                    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                    cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SPARC)
                    REGWPTR = env->regbase + (env->cwp * 16);
                    env->regwptr = REGWPTR;
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_PPC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_MIPS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SH4)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_ALPHA)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_CRIS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                tb = tb_find_fast();
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_EXEC)) {
                    fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                            (long)tb->tc_ptr, tb->pc,
                            lookup_symbol(tb->pc));
                }
#endif
                RESTORE_GLOBALS();
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
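                /* T0 & ~3 is the TB we just left and T0 & 3 is the
                   jump slot that was taken; patch that slot to branch
                   directly to the new TB's generated code */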
                if (T0 != 0 &&
#if USE_KQEMU
                    (env->kqemu_enabled != 2) &&
#endif
                    tb->page_addr[1] == -1) {
                    spin_lock(&tb_lock);
                    tb_add_jump((TranslationBlock *)(long)(T0 & ~3), T0 & 3, tb);
                    spin_unlock(&tb_lock);
                }
                tc_ptr = tb->tc_ptr;
                env->current_tb = tb;
                /* execute the generated code */
                gen_func = (void *)tc_ptr;
#if defined(__sparc__)
                __asm__ __volatile__("call %0\n\t"
                                     "mov %%o7,%%i0"
                                     : /* no outputs */
                                     : "r" (gen_func)
                                     : "i0", "i1", "i2", "i3", "i4", "i5",
                                       "o0", "o1", "o2", "o3", "o4", "o5",
                                       "l0", "l1", "l2", "l3", "l4", "l5",
                                       "l6", "l7");
#elif defined(__arm__)
                asm volatile ("mov pc, %0\n\t"
                              ".global exec_loop\n\t"
                              "exec_loop:\n\t"
                              : /* no outputs */
                              : "r" (gen_func)
                              : "r1", "r2", "r3", "r8", "r9", "r10", "r12", "r14");
#elif defined(__ia64)
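                /* an ia64 function pointer is really a descriptor: a
                   pair of entry address (ip) and global pointer (gp),
                   so one is built by hand around the generated code */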
                struct fptr {
                    void *ip;
                    void *gp;
                } fp;

                fp.ip = tc_ptr;
                fp.gp = code_gen_buffer + 2 * (1 << 20);
                (*(void (*)(void)) &fp)();
#else
                T0 = gen_func();
#endif
                env->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(TARGET_I386) && !defined(CONFIG_SOFTMMU)
                if (env->hflags & HF_SOFTMMU_MASK) {
                    env->hflags &= ~HF_SOFTMMU_MASK;
                    /* do not allow linking to another block */
                    T0 = 0;
                }
#endif
#if defined(USE_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* for(;;) */
        } else {
            env_to_regs();
        }
    } /* for(;;) */
#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    REGWPTR = saved_regwptr;
#endif
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_IA64)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
    RESTORE_GLOBALS();
#include "hostregs_helper.h"

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}
/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}
#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */
#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and 0 otherwise. 'old_set' is the
   signal set which should be restored */
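/* Return value: 1 if the fault was resolved (page unprotected, soft TLB
   refilled, or a guest exception raised), 0 if it was not an MMU fault
   and the caller must treat it as a real host fault. */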
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
}
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
}
#elif defined (TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined (TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_ALPHA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_CRIS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#else
#error unsupported target CPU
#endif
#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)   (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)  ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context) ((context)->uc_mcontext->es.err)
#else
# define EIP_sig(context)   ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)  ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
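    /* trap 0xe is the x86 page-fault vector; bit 1 of the page-fault
       error code is set when the faulting access was a write */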
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}
#elif defined(__x86_64__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;

    pc = uc->uc_mcontext.gregs[REG_RIP];
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
                             (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}
#elif defined(__powerpc__)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)      ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)       REG_sig(gpr[reg_num], context)
# define IAR_sig(context)                REG_sig(nip, context)  /* Program counter */
# define MSR_sig(context)                REG_sig(msr, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)                REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)                REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                 REG_sig(link, context) /* Link register */
# define CR_sig(context)                 REG_sig(ccr, context)  /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)     (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)              (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)                REG_sig(dar, context)
# define DSISR_sig(context)              REG_sig(dsisr, context)
# define TRAP_sig(context)               REG_sig(trap, context)
#endif /* linux */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)      ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)   ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)       REG_sig(r##reg_num, context)
# define IAR_sig(context)                REG_sig(srr0, context) /* Program counter */
# define MSR_sig(context)                REG_sig(srr1, context) /* Machine State Register (Supervisor) */
# define CTR_sig(context)                REG_sig(ctr, context)
# define XER_sig(context)                REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                 REG_sig(lr, context)   /* Link register */
# define CR_sig(context)                 REG_sig(cr, context)   /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)     FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)              ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)                EXCEPREG_sig(dar, context)   /* Fault registers for coredump */
# define DSISR_sig(context)              EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)               EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */
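
/* on PowerPC the DSISR "store" bit (0x02000000) says whether the
   faulting data access was a write; instruction storage interrupts
   (trap 0x400) are never treated as writes */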
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    unsigned long pc;
    int is_write;
    uint32_t insn;

    /* XXX: is there a standard glibc define ? */
    pc = regs[1];
    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x06: // sth
        case 0x04: // st
        case 0x07: // std
        case 0x24: // stf
        case 0x27: // stdf
        case 0x25: // stfsr
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}
#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[R15];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33: */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */