/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "exec.h"
#include "disas.h"
#include "tcg.h"
#include "kvm.h"

#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#ifdef __linux__
#include <sys/ucontext.h>
#endif
#endif

extern void ontb (CPUState *env);

#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif

int tb_invalidated_flag;

//#define CONFIG_DEBUG_EXEC
//#define DEBUG_SIGNAL

int qemu_cpu_has_work(CPUState *env)
{
    return cpu_has_work(env);
}

void cpu_loop_exit(void)
{
    /* NOTE: the registers must be saved by hand here because
       longjmp() restores them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
#ifdef __linux__
    struct ucontext *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#endif
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
#ifdef __linux__
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
#elif defined(__OpenBSD__)
        sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
#endif
    }
#endif
    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}
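
/* Descriptive note: both cpu_loop_exit() above and
   cpu_resume_from_signal() unwind the host stack with longjmp() back
   to the setjmp() in cpu_exec() below; the outer loop there then
   re-dispatches on env->exception_index. */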

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);

    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if async event occurs before
           the TB starts executing.  */
        cpu_pc_from_tb(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
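
/* Descriptive note: the throwaway TB above is generated with an
   explicit cycle budget (capped at CF_COUNT_MASK, the largest count
   representable in tb->cflags), executed exactly once, and then
   invalidated and freed, so it does not persist in the translation
   caches. */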

static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}

static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}
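
/* Descriptive note on the lookup above: tb_find_fast() probes the
   per-CPU virtual-PC hash table (env->tb_jmp_cache) and only falls
   back to tb_find_slow(), which searches the physically indexed
   tb_phys_hash chain and translates a fresh block on a complete miss.
   Roughly:

       tb_jmp_cache[h(pc)] hit?   -> return it
       tb_phys_hash chain hit?    -> refill tb_jmp_cache, return it
       otherwise                  -> tb_gen_code(), refill, return it
*/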

static CPUDebugExcpHandler *debug_excp_handler;

CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    CPUDebugExcpHandler *old_handler = debug_excp_handler;

    debug_excp_handler = handler;
    return old_handler;
}

static void cpu_handle_debug_exception(CPUState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit)
        QTAILQ_FOREACH(wp, &env->watchpoints, entry)
            wp->flags &= ~BP_WATCHPOINT_HIT;

    if (debug_excp_handler)
        debug_excp_handler(env);
}

/* main execution loop */

int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;

    env_to_regs();
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
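    /* Worked example of the DF mapping above: EFLAGS bit 10 is the x86
       direction flag, so ((eflags >> 10) & 1) is 0 or 1 and DF becomes
       +1 or -1, the stride used by the string instruction helpers
       (forward vs. backward). */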
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
            env = cpu_single_env;
#define env cpu_single_env
#endif
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG)
                        cpu_handle_debug_exception(env);
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
#else
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MICROBLAZE)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#endif
#endif
                }
                env->exception_index = -1;
            }

            if (kvm_enabled()) {
                kvm_cpu_exec(env);
                longjmp(env->jmp_env, 1);
            }
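
            /* Descriptive note: with KVM enabled the guest runs under
               hardware virtualization rather than TCG; kvm_cpu_exec()
               returns on a VM exit and the longjmp() restarts the outer
               loop so pending exceptions and exit requests are
               re-examined before the next run. */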

            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                               CPU_INTERRUPT_FIQ |
                                               CPU_INTERRUPT_SMI |
                                               CPU_INTERRUPT_NMI);
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        svm_check_intercept(SVM_EXIT_INIT);
                        do_cpu_init(env);
                        env->exception_index = EXCP_HALTED;
                        cpu_loop_exit();
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(env);
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            env->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt(EXCP12_MCHK, 0, 0, 0, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
                            env = cpu_single_env;
#define env cpu_single_env
#endif
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
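                    /* Descriptive note: the else-if chain above delivers
                       at most one pending event per loop iteration,
                       highest priority first: INIT/SIPI, then (with GIF
                       set) SMI, NMI, MCE, and finally maskable external
                       interrupts. */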
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_interrupts_enabled(env)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            env->exception_index = env->interrupt_index;
                            do_interrupt(env);
                            env->interrupt_index = 0;
                            next_tb = 0;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic
                       value into the PC.  On real hardware the load
                       causes the return to occur.  The qemu
                       implementation performs the jump normally, then
                       does the exception return when the CPU tries to
                       execute code at the magic address.  This will
                       cause the magic PC value to be pushed to the stack
                       if an interrupt occurred at the wrong time.  We
                       avoid this by disabling interrupts when pc
                       contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(env->exit_request)) {
                    env->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit();
                }
#ifdef CONFIG_DEBUG_EXEC
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
                    regs_to_env();
#if defined(TARGET_I386)
                    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                    log_cpu_state(env, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    log_cpu_state(env, 0);
#elif defined(TARGET_SPARC)
                    log_cpu_state(env, 0);
#elif defined(TARGET_PPC)
                    log_cpu_state(env, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#elif defined(TARGET_MICROBLAZE)
                    log_cpu_state(env, 0);
#elif defined(TARGET_MIPS)
                    log_cpu_state(env, 0);
#elif defined(TARGET_SH4)
                    log_cpu_state(env, 0);
#elif defined(TARGET_ALPHA)
                    log_cpu_state(env, 0);
#elif defined(TARGET_CRIS)
                    log_cpu_state(env, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef CONFIG_DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                              (long)tb->tc_ptr, tb->pc,
                              lookup_symbol(tb->pc));
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                }
                spin_unlock(&tb_lock);
                env->current_tb = tb;
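
                /* Descriptive note: tb_add_jump() patches jump slot
                   (next_tb & 3) of the previously executed TB so that
                   its generated code branches straight into this TB,
                   letting chained blocks run back to back without
                   re-entering this lookup/dispatch loop. */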

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                if (unlikely(env->exit_request))
                    env->current_tb = NULL;

                while (env->current_tb) {
                    ontb (env);
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
                    env->current_tb = NULL;
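                    /* Dispatch convention (descriptive note): the low
                       two bits of next_tb name the jump slot taken out
                       of the TB that just ran (0 or 1, consumed by
                       tb_add_jump() above), while the value 2 means
                       execution stopped early, e.g. the icount
                       decrementer expired or an async event arrived
                       before the block started.  On expiry the 16-bit
                       decrementer is refilled from icount_extra in
                       chunks of at most 0xffff; for example,
                       icount_extra == 0x12345 refills u16.low with
                       0xffff and leaves 0x2346 for later chunks. */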
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC.  */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
                }
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        } else {
            env_to_regs();
        }
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
#include "hostregs_helper.h"

    /* fail safe: never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}
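
/* A minimal sketch (illustrative only, not part of this file) of how a
   caller typically drives cpu_exec(); the EXCP_* constants are the real
   ones used above, but the surrounding loop is hypothetical:

       for (;;) {
           int r = cpu_exec(env);
           if (r == EXCP_DEBUG) {
               // a breakpoint or watchpoint fired: hand control to the
               // debugger stub
           } else if (r == EXCP_HLT || r == EXCP_HALTED) {
               // cpu is idle: wait for an interrupt source
           } else if (r == EXCP_INTERRUPT) {
               // an exit was requested: process pending host events
           }
       }
*/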

/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot be enabled yet: it triggers an MMU exception where
       NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}

#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */

#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)
#define EXCEPTION_ACTION raise_exception_err(env->exception_index, env->error_code)
#else
#define EXCEPTION_ACTION cpu_loop_exit()
#endif

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and 0 otherwise. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }

    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    EXCEPTION_ACTION;

    /* never comes here */
    return 1;
}
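
/* Contract of handle_cpu_signal(), as relied upon by the
   cpu_signal_handler() implementations below: returning 1 means the
   fault was consumed here (the page was unprotected or a guest
   exception was raised), so the host handler just returns and the
   faulting instruction is retried; returning 0 means the fault does
   not belong to the emulator and must be forwarded to the
   application's own signal handling. */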

#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)   (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)  ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context) ((context)->uc_mcontext->es.err)
# define MASK_sig(context)  ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
# define EIP_sig(context)   ((context)->sc_eip)
# define TRAP_sig(context)  ((context)->sc_trapno)
# define ERROR_sig(context) ((context)->sc_err)
# define MASK_sig(context)  ((context)->sc_mask)
#else
# define EIP_sig(context)   ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)  ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
# define MASK_sig(context)  ((context)->uc_sigmask)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
#if defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}
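
/* Background for the is_write computation above (x86 fault semantics,
   also used by the x86_64 variant below): trap number 0xe is the page
   fault exception, and bit 1 of its error code is the W/R bit, so
   (ERROR_sig(uc) >> 1) & 1 is 1 exactly when the faulting access was a
   write. */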

#elif defined(__x86_64__)

#ifdef __NetBSD__
#define PC_sig(context)    _UC_MACHINE_PC(context)
#define TRAP_sig(context)  ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
#define ERROR_sig(context) ((context)->uc_mcontext.__gregs[_REG_ERR])
#define MASK_sig(context)  ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
#define PC_sig(context)    ((context)->sc_rip)
#define TRAP_sig(context)  ((context)->sc_trapno)
#define ERROR_sig(context) ((context)->sc_err)
#define MASK_sig(context)  ((context)->sc_mask)
#else
#define PC_sig(context)    ((context)->uc_mcontext.gregs[REG_RIP])
#define TRAP_sig(context)  ((context)->uc_mcontext.gregs[REG_TRAPNO])
#define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
#define MASK_sig(context)  ((context)->uc_sigmask)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    unsigned long pc;
#ifdef __NetBSD__
    ucontext_t *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif

    pc = PC_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             TRAP_sig(uc) == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}

#elif defined(_ARCH_PPC)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)   ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)    REG_sig(gpr[reg_num], context)
# define IAR_sig(context)             REG_sig(nip, context)  /* Program counter */
# define MSR_sig(context)             REG_sig(msr, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)             REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)             REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)              REG_sig(link, context) /* Link register */
# define CR_sig(context)              REG_sig(ccr, context)  /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)  (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)           (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)             REG_sig(dar, context)
# define DSISR_sig(context)           REG_sig(dsisr, context)
# define TRAP_sig(context)            REG_sig(trap, context)
#endif /* linux */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)      ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)   ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)       REG_sig(r##reg_num, context)
# define IAR_sig(context)                REG_sig(srr0, context) /* Program counter */
# define MSR_sig(context)                REG_sig(srr1, context) /* Machine State Register (Supervisor) */
# define CTR_sig(context)                REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)                REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                 REG_sig(lr, context)   /* Link register */
# define CR_sig(context)                 REG_sig(cr, context)   /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)     FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)              ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)                EXCEPREG_sig(dar, context) /* Fault registers for coredump */
# define DSISR_sig(context)              EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)               EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(CONFIG_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define ? */
    unsigned long pc = regs[1];
#else
#ifdef __linux__
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
    unsigned long pc = uc->sc_pc;
    void *sigmask = (void *)(long)uc->sc_mask;
#endif
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x15: // stba
        case 0x06: // sth
        case 0x16: // stha
        case 0x04: // st
        case 0x14: // sta
        case 0x07: // std
        case 0x17: // stda
        case 0x0e: // stx
        case 0x1e: // stxa
        case 0x24: // stf
        case 0x34: // stfa
        case 0x27: // stdf
        case 0x37: // stdfa
        case 0x26: // stqf
        case 0x36: // stqfa
        case 0x25: // stfsr
        case 0x3c: // casa
        case 0x3e: // casxa
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}
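
/* Decoding note for the SPARC case above: instructions whose op field
   (bits 31..30) equals 3 use the load/store format, with the op3
   opcode in bits 24..19; the op3 values listed are the store and
   atomic (casa/casxa) variants, which fault as writes. */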

#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33:  */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.sc_iaoq[0];
    /* FIXME: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */