/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#define CPU_NO_GLOBAL_REGS
#include "exec.h"
#include "disas.h"
#if !defined(TARGET_IA64)
#include "tcg.h"
#endif
#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#include <sys/ucontext.h>
#endif
#include "qemu-kvm.h"
#if defined(__sparc__) && !defined(HOST_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif
int tb_invalidated_flag;

//#define DEBUG_EXEC
//#define DEBUG_SIGNAL
void cpu_loop_exit(void)
{
    /* NOTE: the registers at this point must be saved by hand because
       longjmp restores them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}
#if !(defined(TARGET_SPARC) || defined(TARGET_SH4) || defined(TARGET_M68K))
#define reg_T2
#endif
/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    struct ucontext *uc = puc;
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
    }
#endif
    longjmp(env->jmp_env, 1);
}
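
/* Descriptive note: both cpu_loop_exit() above and this function unwind
   to the setjmp(env->jmp_env) established in cpu_exec() below, which is
   why the jump buffer lives in the per-CPU state rather than on the
   host stack of any one caller. */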
/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);

    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if an async event occurs before
           the TB starts executing.  */
        CPU_PC_FROM_TB(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
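
/* Note on the return value of tcg_qemu_tb_exec(): the low two bits of
   next_tb carry an exit status, and the value 2 checked above means the
   block stopped on the instruction counter, so the guest PC must be
   recovered from the TB itself.  The TB is invalidated and freed right
   away because this one-shot translation was sized to a specific
   instruction budget and must never be found again in the caches. */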
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}
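
/* Why the slow path is keyed by *physical* PC: the same guest code page
   can be mapped at several virtual addresses (or remapped over time),
   and translations are invalidated on writes by physical page, so the
   hash chain above must be indexed physically.  The virtual-PC
   tb_jmp_cache refilled at 'found:' is only a per-mapping front end. */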
static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint64_t flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
#if defined(TARGET_I386)
    flags = env->hflags;
    flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
    cs_base = env->segs[R_CS].base;
    pc = cs_base + env->eip;
#elif defined(TARGET_ARM)
    flags = env->thumb | (env->vfp.vec_len << 1)
            | (env->vfp.vec_stride << 4);
    if ((env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR)
        flags |= (1 << 6);
    if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30))
        flags |= (1 << 7);
    flags |= (env->condexec_bits << 8);
    cs_base = 0;
    pc = env->regs[15];
#elif defined(TARGET_SPARC)
#ifdef TARGET_SPARC64
    // AM . Combined FPU enable bits . PRIV . DMMU enabled . IMMU enabled
    flags = ((env->pstate & PS_AM) << 2)
        | (((env->pstate & PS_PEF) >> 1) | ((env->fprs & FPRS_FEF) << 2))
        | (env->pstate & PS_PRIV) | ((env->lsu & (DMMU_E | IMMU_E)) >> 2);
#else
    // FPU enable . Supervisor
    flags = (env->psref << 4) | env->psrs;
#endif
    cs_base = env->npc;
    pc = env->pc;
#elif defined(TARGET_PPC)
    flags = env->hflags;
    cs_base = 0;
    pc = env->nip;
#elif defined(TARGET_MIPS)
    flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK);
    cs_base = 0;
    pc = env->active_tc.PC;
#elif defined(TARGET_M68K)
    flags = (env->fpcr & M68K_FPCR_PREC)  /* Bit  6 */
            | (env->sr & SR_S)            /* Bit  13 */
            | ((env->macsr >> 4) & 0xf);  /* Bits 0-3 */
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_SH4)
    flags = env->flags;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_ALPHA)
    flags = env->ps;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_CRIS)
    flags = env->pregs[PR_CCS] & (P_FLAG | U_FLAG | X_FLAG);
    flags |= env->dslot;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_IA64)
    flags = 0;
    cs_base = 0;                    /* XXXXX */
    pc = 0;
#else
#error unsupported CPU
#endif
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}
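
/* The fast path above is a direct-mapped cache probe: tb_jmp_cache is
   indexed by a hash of the virtual PC and a single compare of
   (pc, cs_base, flags) validates the hit.  Any CPU state folded into
   'flags' becomes part of the translation key, so two blocks at the
   same PC but with different flags get distinct TBs. */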
/* main execution loop */

int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;

    env_to_regs();
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_IA64)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;
    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    break;
                } else if (env->user_mode_only) {
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
                } else {
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#elif defined(TARGET_IA64)
                    do_interrupt(env);
#endif
                }
                env->exception_index = -1;
            }
#ifdef USE_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0) {
                int ret;
                env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif

            if (kvm_enabled()) {
                kvm_cpu_exec(env);
                longjmp(env->jmp_env, 1);
            }

            next_tb = 0; /* force lookup of first TB */
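            /* next_tb packs a TranslationBlock pointer with a status in
               its low two bits: after a block runs, bits 1..0 name the
               outgoing jump slot taken (so tb_add_jump() below can patch
               that slot to chain TBs directly), while the value 2 doubles
               as the "instruction count expired" exit status.  Zeroing it
               here keeps the first block unchained. */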
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request) &&
                    likely(!(env->singlestep_enabled & SSTEP_NOIRQ))) {
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            if (loglevel & CPU_LOG_TB_IN_ASM) {
                                fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
                            }
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            if (loglevel & CPU_LOG_TB_IN_ASM)
                                fprintf(logfile, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psret != 0)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            env->exception_index = env->interrupt_index;
                            do_interrupt(env);
                            env->interrupt_index = 0;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
                            next_tb = 0;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_EXIT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
                        env->exception_index = EXCP_INTERRUPT;
                        cpu_loop_exit();
                    }
                }
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
                    regs_to_env();
#if defined(TARGET_I386)
                    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                    cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SPARC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_PPC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_MIPS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SH4)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_ALPHA)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_CRIS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_EXEC)) {
                    fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                            (long)tb->tc_ptr, tb->pc,
                            lookup_symbol(tb->pc));
                }
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 &&
#ifdef USE_KQEMU
                    (env->kqemu_enabled != 2) &&
#endif
                    tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                }
                spin_unlock(&tb_lock);
                env->current_tb = tb;
                while (env->current_tb) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
                    env->current_tb = NULL;
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC.  */
                        CPU_PC_FROM_TB(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
                }
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(USE_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* for(;;) */
        } else {
            env_to_regs();
        }
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_IA64)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
#include "hostregs_helper.h"

    /* fail safe: never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}
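
/* Hedged usage sketch (not built, hypothetical caller): cpu_exec()
   returns an EXCP_* code that the embedding main loop dispatches on.
   The real callers live in vl.c and the linux-user main.c; this only
   illustrates the contract of the return value. */
#if 0
static void run_vcpu_example(CPUState *env)
{
    for (;;) {
        int ret = cpu_exec(env);
        if (ret == EXCP_DEBUG) {
            break;          /* breakpoint/watchpoint: hand off to gdbstub */
        } else if (ret == EXCP_HALTED || ret == EXCP_HLT) {
            break;          /* CPU is idle: wait for the next interrupt */
        }
        /* EXCP_INTERRUPT: an async event was processed, just resume */
    }
}
#endif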
/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}
#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */
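
/* Hedged usage sketch (not built, hypothetical caller): in real mode or
   vm86 mode cpu_x86_load_seg() above derives the segment base as
   selector << 4, so loading DS with 0x0040 yields base 0x400.  R_DS is
   the segment index constant from the target headers. */
#if 0
static void example_load_realmode_ds(CPUX86State *s)
{
    cpu_x86_load_seg(s, R_DS, 0x0040);  /* DS.base = 0x0040 << 4 = 0x400 */
}
#endif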
#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined (TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_ALPHA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_CRIS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#else
#error unsupported target CPU
#endif
#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)   (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)  ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context) ((context)->uc_mcontext->es.err)
#else
# define EIP_sig(context)   ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)  ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}
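
/* Note: trap 0xe is the x86 page fault; bit 1 of the hardware error
   code distinguishes a write access from a read, which is what the
   "(ERROR_sig(uc) >> 1) & 1" expression above extracts.  All other
   traps are treated as reads. */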
#elif defined(__x86_64__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;

    pc = uc->uc_mcontext.gregs[REG_RIP];
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
                             (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}
#elif defined(__powerpc__)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)    ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)     REG_sig(gpr[reg_num], context)
# define IAR_sig(context)              REG_sig(nip, context)  /* Program counter */
# define MSR_sig(context)              REG_sig(msr, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)              REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)              REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)               REG_sig(link, context) /* Link register */
# define CR_sig(context)               REG_sig(ccr, context)  /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)   (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)            (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)              REG_sig(dar, context)
# define DSISR_sig(context)            REG_sig(dsisr, context)
# define TRAP_sig(context)             REG_sig(trap, context)
#endif /* linux */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)        ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context)   ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context)   ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)     ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)         REG_sig(r##reg_num, context)
# define IAR_sig(context)                  REG_sig(srr0, context) /* Program counter */
# define MSR_sig(context)                  REG_sig(srr1, context) /* Machine State Register (Supervisor) */
# define CTR_sig(context)                  REG_sig(ctr, context)
# define XER_sig(context)                  REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                   REG_sig(lr, context)   /* Link register */
# define CR_sig(context)                   REG_sig(cr, context)   /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)       FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)                ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)                  EXCEPREG_sig(dar, context) /* Fault registers for coredump */
# define DSISR_sig(context)                EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)                 EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(HOST_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define ? */
    unsigned long pc = regs[1];
#else
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x06: // sth
        case 0x04: // st
        case 0x07: // std
        case 0x24: // stf
        case 0x27: // stdf
        case 0x25: // stfsr
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}
#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33:  */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.sc_iaoq[0];
    /* FIXME: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */