kvm: bios: use preprocessor for pci link routing
[qemu-kvm/fedora.git] / cpu-exec.c
blob 252927fbeaec15528bac1db3351acb6ad1feba14
/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#define CPU_NO_GLOBAL_REGS
#include "exec.h"
#include "disas.h"
#include "tcg.h"

#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#include <sys/ucontext.h>
#endif

#include "qemu-kvm.h"

#if defined(__sparc__) && !defined(HOST_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif

int tb_invalidated_flag;
static unsigned long next_tb;

//#define DEBUG_EXEC
//#define DEBUG_SIGNAL

void cpu_loop_exit(void)
{
    /* NOTE: the registers at this point must be saved by hand because
       longjmp restores them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}

#if !(defined(TARGET_SPARC) || defined(TARGET_SH4) || defined(TARGET_M68K))
#define reg_T2
#endif

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    struct ucontext *uc = puc;
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
    }
#endif
    longjmp(env->jmp_env, 1);
}

static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    int code_gen_size;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;
    uint8_t *tc_ptr;

    spin_lock(&tb_lock);

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* don't forget to invalidate previous TB info */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    spin_unlock(&tb_lock);
    return tb;
}
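
/* Background on the slow path above: the hash chain walked here is keyed
   on the *physical* PC (tb_phys_hash), so a cached translation stays
   valid across virtual-address remapping, and writes to a physical page
   can find and invalidate every TB derived from it. */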

static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint64_t flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
#if defined(TARGET_I386)
    flags = env->hflags;
    flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
    cs_base = env->segs[R_CS].base;
    pc = cs_base + env->eip;
#elif defined(TARGET_ARM)
    flags = env->thumb | (env->vfp.vec_len << 1)
            | (env->vfp.vec_stride << 4);
    if ((env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR)
        flags |= (1 << 6);
    if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30))
        flags |= (1 << 7);
    flags |= (env->condexec_bits << 8);
    cs_base = 0;
    pc = env->regs[15];
#elif defined(TARGET_SPARC)
#ifdef TARGET_SPARC64
    // Combined FPU enable bits . PRIV . DMMU enabled . IMMU enabled
    flags = (((env->pstate & PS_PEF) >> 1) | ((env->fprs & FPRS_FEF) << 2))
        | (env->pstate & PS_PRIV) | ((env->lsu & (DMMU_E | IMMU_E)) >> 2);
#else
    // FPU enable . Supervisor
    flags = (env->psref << 4) | env->psrs;
#endif
    cs_base = env->npc;
    pc = env->pc;
#elif defined(TARGET_PPC)
    flags = env->hflags;
    cs_base = 0;
    pc = env->nip;
#elif defined(TARGET_MIPS)
    flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK);
    cs_base = 0;
    pc = env->PC[env->current_tc];
#elif defined(TARGET_M68K)
    flags = (env->fpcr & M68K_FPCR_PREC)  /* Bit  6 */
            | (env->sr & SR_S)            /* Bit  13 */
            | ((env->macsr >> 4) & 0xf);  /* Bits 0-3 */
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_SH4)
    flags = env->flags;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_ALPHA)
    flags = env->ps;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_CRIS)
    flags = env->pregs[PR_CCS] & (U_FLAG | X_FLAG);
    flags |= env->dslot;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_IA64)
    flags = 0;
    cs_base = 0; /* XXXXX */
    pc = 0;
#else
#error unsupported CPU
#endif
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (__builtin_expect(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                         tb->flags != flags, 0)) {
        tb = tb_find_slow(pc, cs_base, flags);
        /* Note: we do it here to avoid a gcc bug on Mac OS X when
           doing it in tb_find_slow */
        if (tb_invalidated_flag) {
            /* as some TB could have been invalidated because
               of memory exceptions while generating the code, we
               must recompute the hash index here */
            next_tb = 0;
        }
    }
    return tb;
}
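
/* Illustrative sketch, compiled out like the other #if 0 blocks in this
   file: how the main loop consumes the two-level lookup above.  The fast
   path is a direct-mapped cache indexed by virtual PC; any mismatch on
   the (pc, cs_base, flags) key falls back to tb_find_slow().  Only names
   already defined in this file are used. */
#if 0
static void example_execute_one_tb(void)
{
    TranslationBlock *tb = tb_find_fast(); /* jmp_cache hit or slow path */
    /* the TB matches the current (pc, cs_base, flags) triple, so its
       generated host code can be run directly, as cpu_exec() does: */
    tcg_qemu_tb_exec(tb->tc_ptr);
}
#endif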

/* main execution loop */

int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;

    env_to_regs();
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_IA64)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    break;
                } else if (env->user_mode_only) {
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
                } else {
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#elif defined(TARGET_IA64)
                    do_interrupt(env);
#endif
                }
                env->exception_index = -1;
            }
#ifdef USE_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0) {
                int ret;
                env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif

            if (kvm_enabled()) {
                kvm_cpu_exec(env);
                longjmp(env->jmp_env, 1);
            }
382 for(;;) {
383 interrupt_request = env->interrupt_request;
384 if (__builtin_expect(interrupt_request, 0) &&
385 likely(!(env->singlestep_enabled & SSTEP_NOIRQ))) {
386 if (interrupt_request & CPU_INTERRUPT_DEBUG) {
387 env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
388 env->exception_index = EXCP_DEBUG;
389 cpu_loop_exit();
391 #if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
392 defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
393 if (interrupt_request & CPU_INTERRUPT_HALT) {
394 env->interrupt_request &= ~CPU_INTERRUPT_HALT;
395 env->halted = 1;
396 env->exception_index = EXCP_HLT;
397 cpu_loop_exit();
399 #endif
400 #if defined(TARGET_I386)
401 if (env->hflags2 & HF2_GIF_MASK) {
402 if ((interrupt_request & CPU_INTERRUPT_SMI) &&
403 !(env->hflags & HF_SMM_MASK)) {
404 svm_check_intercept(SVM_EXIT_SMI);
405 env->interrupt_request &= ~CPU_INTERRUPT_SMI;
406 do_smm_enter();
407 next_tb = 0;
408 } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
409 !(env->hflags2 & HF2_NMI_MASK)) {
410 env->interrupt_request &= ~CPU_INTERRUPT_NMI;
411 env->hflags2 |= HF2_NMI_MASK;
412 do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
413 next_tb = 0;
414 } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
415 (((env->hflags2 & HF2_VINTR_MASK) &&
416 (env->hflags2 & HF2_HIF_MASK)) ||
417 (!(env->hflags2 & HF2_VINTR_MASK) &&
418 (env->eflags & IF_MASK &&
419 !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
420 int intno;
421 svm_check_intercept(SVM_EXIT_INTR);
422 env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
423 intno = cpu_get_pic_interrupt(env);
424 if (loglevel & CPU_LOG_TB_IN_ASM) {
425 fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
427 do_interrupt(intno, 0, 0, 0, 1);
428 /* ensure that no TB jump will be modified as
429 the program flow was changed */
430 next_tb = 0;
431 #if !defined(CONFIG_USER_ONLY)
432 } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
433 (env->eflags & IF_MASK) &&
434 !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
435 int intno;
436 /* FIXME: this should respect TPR */
437 svm_check_intercept(SVM_EXIT_VINTR);
438 env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
439 intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
440 if (loglevel & CPU_LOG_TB_IN_ASM)
441 fprintf(logfile, "Servicing virtual hardware INT=0x%02x\n", intno);
442 do_interrupt(intno, 0, 0, 0, 1);
443 next_tb = 0;
444 #endif
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psret != 0)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            env->exception_index = env->interrupt_index;
                            do_interrupt(env);
                            env->interrupt_index = 0;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
                            next_tb = 0;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_EXIT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
                        env->exception_index = EXCP_INTERRUPT;
                        cpu_loop_exit();
                    }
                }
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
                    regs_to_env();
#if defined(TARGET_I386)
                    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                    cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SPARC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_PPC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_MIPS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SH4)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_ALPHA)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_CRIS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                tb = tb_find_fast();
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_EXEC)) {
                    fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                            (long)tb->tc_ptr, tb->pc,
                            lookup_symbol(tb->pc));
                }
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                {
                    if (next_tb != 0 &&
#ifdef USE_KQEMU
                        (env->kqemu_enabled != 2) &&
#endif
                        tb->page_addr[1] == -1) {
                        spin_lock(&tb_lock);
                        tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                        spin_unlock(&tb_lock);
                    }
                }
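
                /* Note on the encoding used just above: next_tb packs the
                   address of the previously executed TB in its upper bits
                   and the index of the jump slot taken out of it in the
                   low two bits, hence the (next_tb & ~3) / (next_tb & 3)
                   split handed to tb_add_jump(). */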
                tc_ptr = tb->tc_ptr;
                env->current_tb = tb;
                /* execute the generated code */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
                env = cpu_single_env;
#define env cpu_single_env
#endif
                next_tb = tcg_qemu_tb_exec(tc_ptr);
                env->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(USE_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* for(;;) */
        } else {
            env_to_regs();
        }
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_IA64)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
#include "hostregs_helper.h"

    /* fail safe: never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}
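
/* A minimal sketch of the exit protocol used by cpu_exec(), guarded out
   like the other #if 0 examples in this file: code that wants to leave
   the translated-code loop sets env->exception_index and calls
   cpu_loop_exit(), whose longjmp lands back at the setjmp at the top of
   the for(;;) above. */
#if 0
static void example_request_exit(void)
{
    env->exception_index = EXCP_INTERRUPT; /* >= EXCP_INTERRUPT exits cpu_exec */
    cpu_loop_exit();                       /* longjmp; does not return */
}
#endif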

/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}

#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */
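
/* Usage sketch for the wrappers above (compiled out; the selector value
   is an arbitrary example): callers that do not own the global `env`,
   such as user-mode signal delivery code, go through these wrappers so
   the TCG helpers still see the right state in `env`. */
#if 0
static void example_load_seg(CPUX86State *s)
{
    cpu_x86_load_seg(s, R_DS, 0x23); /* any 16-bit selector */
}
#endif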

#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
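
/* All handle_cpu_signal() variants below follow the same contract:
   return 1 when the fault was consumed (a write to a protected code
   page undone by page_unprotect(), or an MMU fault serviced or raised
   as a guest exception) and 0 when the signal does not belong to the
   emulator and must be forwarded to the application's own handler. */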

#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined (TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined (TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined (TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined (TARGET_ALPHA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined (TARGET_CRIS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#else
#error unsupported target CPU
#endif
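
/* The remainder of the file dispatches on the *host* architecture: each
   cpu_signal_handler() digs the faulting PC out of the host-specific
   signal context and, where the host makes it cheap to recover, whether
   the access was a write, then forwards to handle_cpu_signal(). */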

#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)   (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)  ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context) ((context)->uc_mcontext->es.err)
#else
# define EIP_sig(context)   ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)  ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}

#elif defined(__x86_64__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;

    pc = uc->uc_mcontext.gregs[REG_RIP];
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
                             (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}

#elif defined(__powerpc__)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)   ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)    REG_sig(gpr[reg_num], context)
# define IAR_sig(context)             REG_sig(nip, context)  /* Program counter */
# define MSR_sig(context)             REG_sig(msr, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)             REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)             REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)              REG_sig(link, context) /* Link register */
# define CR_sig(context)              REG_sig(ccr, context)  /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)  (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)           (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)             REG_sig(dar, context)
# define DSISR_sig(context)           REG_sig(dsisr, context)
# define TRAP_sig(context)            REG_sig(trap, context)
#endif /* linux */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)      ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)   ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)       REG_sig(r##reg_num, context)
# define IAR_sig(context)                REG_sig(srr0, context) /* Program counter */
# define MSR_sig(context)                REG_sig(srr1, context) /* Machine State Register (Supervisor) */
# define CTR_sig(context)                REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)                REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                 REG_sig(lr, context)   /* Link register */
# define CR_sig(context)                 REG_sig(cr, context)   /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)     FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)              ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)                EXCEPREG_sig(dar, context)   /* Fault registers for coredump */
# define DSISR_sig(context)              EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)               EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(HOST_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define ? */
    unsigned long pc = regs[1];
#else
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x06: // sth
        case 0x04: // st
        case 0x07: // std
        case 0x24: // stf
        case 0x27: // stdf
        case 0x25: // stfsr
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}
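
/* In the SPARC handler above, is_write is recovered by decoding the
   faulting instruction itself: format-3 memory operations carry 3 in
   bits 31:30, and the op3 values matched in the switch are the store
   variants named in the comments (stb, sth, st, std, stf, stdf, stfsr). */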

#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33:  */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.sc_iaoq[0];
    /* FIXME: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */