[qemu/mini2440/sniper_sniper_test.git] / cpu-exec.c

/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include "config.h"
#define CPU_NO_GLOBAL_REGS
#include "exec.h"
#include "disas.h"
#include "tcg.h"

#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#include <sys/ucontext.h>
#endif

#if defined(__sparc__) && !defined(HOST_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif

int tb_invalidated_flag;

//#define DEBUG_EXEC
//#define DEBUG_SIGNAL
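
/* Define DEBUG_EXEC to enable the per-TB trace and CPU state dumps in
   cpu_exec(); DEBUG_SIGNAL enables the SIGSEGV diagnostics in the
   handle_cpu_signal() variants at the end of this file. */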

void cpu_loop_exit(void)
{
    /* NOTE: the registers at this point must be saved by hand because
       longjmp() restores them to their setjmp() values, discarding any
       guest state still held in host registers */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}

#if !(defined(TARGET_SPARC) || defined(TARGET_SH4) || defined(TARGET_M68K))
#define reg_T2
#endif

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    struct ucontext *uc = puc;
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
    }
#endif
    longjmp(env->jmp_env, 1);
}
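
/* TB lookup is two-level: tb_find_fast() first probes the per-CPU
   tb_jmp_cache indexed by a hash of the virtual PC; on a miss,
   tb_find_slow() below walks the physically indexed tb_phys_hash chain
   and, if no matching block exists at all, translates a new one with
   cpu_gen_code(). */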
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    int code_gen_size;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;
    uint8_t *tc_ptr;

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* don't forget to invalidate previous TB info */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}
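
/* A cached TB may only be reused for an exact (pc, cs_base, flags) match:
   the flags word encodes the mode bits the translator baked into the
   generated code, so blocks translated under different CPU modes are
   never confused with one another. */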
static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint64_t flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
#if defined(TARGET_I386)
    flags = env->hflags;
    flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
    cs_base = env->segs[R_CS].base;
    pc = cs_base + env->eip;
#elif defined(TARGET_ARM)
    flags = env->thumb | (env->vfp.vec_len << 1)
            | (env->vfp.vec_stride << 4);
    if ((env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR)
        flags |= (1 << 6);
    if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30))
        flags |= (1 << 7);
    flags |= (env->condexec_bits << 8);
    cs_base = 0;
    pc = env->regs[15];
#elif defined(TARGET_SPARC)
#ifdef TARGET_SPARC64
    // Combined FPU enable bits . PRIV . DMMU enabled . IMMU enabled
    flags = (((env->pstate & PS_PEF) >> 1) | ((env->fprs & FPRS_FEF) << 2))
        | (env->pstate & PS_PRIV) | ((env->lsu & (DMMU_E | IMMU_E)) >> 2);
#else
    // FPU enable . Supervisor
    flags = (env->psref << 4) | env->psrs;
#endif
    cs_base = env->npc;
    pc = env->pc;
#elif defined(TARGET_PPC)
    flags = env->hflags;
    cs_base = 0;
    pc = env->nip;
#elif defined(TARGET_MIPS)
    flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK);
    cs_base = 0;
    pc = env->PC[env->current_tc];
#elif defined(TARGET_M68K)
    flags = (env->fpcr & M68K_FPCR_PREC)  /* Bit  6 */
            | (env->sr & SR_S)            /* Bit  13 */
            | ((env->macsr >> 4) & 0xf);  /* Bits 0-3 */
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_SH4)
    flags = env->flags;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_ALPHA)
    flags = env->ps;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_CRIS)
    flags = env->pregs[PR_CCS] & (P_FLAG | U_FLAG | X_FLAG);
    flags |= env->dslot;
    cs_base = 0;
    pc = env->pc;
#else
#error unsupported CPU
#endif
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (__builtin_expect(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                         tb->flags != flags, 0)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}
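
/* Translated blocks are chained: tcg_qemu_tb_exec() returns the address
   of the TB that just ran with the index of the taken jump slot encoded
   in its two low bits, and tb_add_jump() patches that slot to branch
   straight into the next block. next_tb is reset to 0 whenever program
   flow may have changed, so stale direct jumps are never installed. */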

/* main execution loop */

int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;

    env_to_regs();
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;
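
    /* Exceptions raised in translated code or helpers come back to the
       setjmp() below via cpu_loop_exit()/longjmp(). Codes below
       EXCP_INTERRUPT are delivered to the guest; anything else ends
       cpu_exec() and is returned to the caller. */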

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    break;
                } else if (env->user_mode_only) {
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
                } else {
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#endif
                }
                env->exception_index = -1;
            }
#ifdef USE_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0) {
                int ret;
                env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif
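
            /* Inner loop: service pending interrupt_request flags first,
               then find (or translate) the next TB, optionally chain it to
               the previous one, and run it. */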
            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (__builtin_expect(interrupt_request, 0) &&
                    likely(!(env->singlestep_enabled & SSTEP_NOIRQ))) {
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            if (loglevel & CPU_LOG_TB_IN_ASM) {
                                fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
                            }
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            if (loglevel & CPU_LOG_TB_IN_ASM)
                                fprintf(logfile, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psret != 0)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            env->exception_index = env->interrupt_index;
                            do_interrupt(env);
                            env->interrupt_index = 0;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
                            next_tb = 0;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_EXIT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
                        env->exception_index = EXCP_INTERRUPT;
                        cpu_loop_exit();
                    }
                }
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
                    regs_to_env();
#if defined(TARGET_I386)
                    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                    cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SPARC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_PPC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_MIPS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SH4)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_ALPHA)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_CRIS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
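
                /* tb_lock is held across the lookup and the direct-jump
                   patching below so that chaining cannot race with TB
                   invalidation. */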
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                }
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_EXEC)) {
                    fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                            (long)tb->tc_ptr, tb->pc,
                            lookup_symbol(tb->pc));
                }
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                {
                    if (next_tb != 0 &&
#ifdef USE_KQEMU
                        (env->kqemu_enabled != 2) &&
#endif
                        tb->page_addr[1] == -1) {
                        tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                    }
                }
                spin_unlock(&tb_lock);
                tc_ptr = tb->tc_ptr;
                env->current_tb = tb;
                /* execute the generated code */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
                env = cpu_single_env;
#define env cpu_single_env
#endif
                next_tb = tcg_qemu_tb_exec(tc_ptr);
                env->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(USE_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* for(;;) */
        } else {
            env_to_regs();
        }
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?  */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
#include "hostregs_helper.h"

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}
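
#if 0
/* Illustrative sketch only (not part of this file): a typical embedding
   drives cpu_exec() in a loop and dispatches on the EXCP_* code it
   returns. wait_for_interrupt() is a hypothetical stand-in for the
   event/timer handling a real caller (e.g. the machine main loop)
   performs between execution slices. */
static void run_cpu_example(CPUState *env)
{
    for (;;) {
        int ret = cpu_exec(env);
        if (ret == EXCP_DEBUG) {
            /* breakpoint, watchpoint or single-step event */
            break;
        }
        if (ret == EXCP_HALTED || ret == EXCP_HLT) {
            /* CPU is idle: block until an interrupt is raised */
            wait_for_interrupt(env);
        }
        /* EXCP_INTERRUPT: an exit was requested; poll devices and loop */
    }
}
#endif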

/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}

#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */
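
/* Each handle_cpu_signal() variant below follows the same contract:
   return 1 when the fault was handled in place (a protected code page was
   unprotected, or the guest MMU resolved the access), return 0 to let the
   host deal with a genuine crash, and never return at all when a guest
   exception must be raised instead. */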

#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined (TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined (TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined (TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined (TARGET_ALPHA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined (TARGET_CRIS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#else
#error unsupported target CPU
#endif
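
/* The host-side cpu_signal_handler() implementations below recover the
   faulting host PC and, where the host signal context exposes it, a
   write/read flag, then hand off to handle_cpu_signal(). On hosts where
   the access type is not readily available (ARM, m68k, s390, MIPS, HPPA),
   is_write is conservatively left at 0 (see the XXX/FIXME comments). */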

#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)  (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)    ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext->es.err)
#else
# define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}

#elif defined(__x86_64__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;

    pc = uc->uc_mcontext.gregs[REG_RIP];
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
                             (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}

#elif defined(__powerpc__)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)    ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access  */
# define GPR_sig(reg_num, context)     REG_sig(gpr[reg_num], context)
# define IAR_sig(context)              REG_sig(nip, context) /* Program counter */
# define MSR_sig(context)              REG_sig(msr, context) /* Machine State Register (Supervisor) */
# define CTR_sig(context)              REG_sig(ctr, context) /* Count register */
# define XER_sig(context)              REG_sig(xer, context) /* User's integer exception register */
# define LR_sig(context)               REG_sig(link, context) /* Link register */
# define CR_sig(context)               REG_sig(ccr, context) /* Condition register */
/* Float Registers access  */
# define FLOAT_sig(reg_num, context)   (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)            (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)              REG_sig(dar, context)
# define DSISR_sig(context)            REG_sig(dsisr, context)
# define TRAP_sig(context)             REG_sig(trap, context)
#endif /* linux */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)      ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)   ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)       REG_sig(r##reg_num, context)
# define IAR_sig(context)                REG_sig(srr0, context) /* Program counter */
# define MSR_sig(context)                REG_sig(srr1, context) /* Machine State Register (Supervisor) */
# define CTR_sig(context)                REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)                REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                 REG_sig(lr, context)   /* Link register */
# define CR_sig(context)                 REG_sig(cr, context)   /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)     FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)              ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)                EXCEPREG_sig(dar, context)   /* Fault registers for coredump */
# define DSISR_sig(context)              EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)               EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */
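
/* is_write derivation for the handler below: on a PowerPC data storage
   fault, DSISR bit 6 in IBM numbering (mask 0x02000000) is set when the
   access was a store; instruction faults (TRAP 0x400) are excluded since
   DSISR is not meaningful for them. */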

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(HOST_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define ? */
    unsigned long pc = regs[1];
#else
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x06: // sth
        case 0x04: // st
        case 0x07: // std
        case 0x24: // stf
        case 0x27: // stdf
        case 0x25: // stfsr
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}

#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33:  */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.sc_iaoq[0];
    /* FIXME: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */