/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "exec.h"
#include "disas.h"
#if !defined(TARGET_IA64)
#include "tcg.h"
#endif
#include "kvm.h"

#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#ifdef __linux__
#include <sys/ucontext.h>
#endif
#endif

#include "qemu-kvm.h"

#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif
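
/* Set while a new TB is generated if other TBs may have been invalidated
   in the process (e.g. on a translation-buffer flush); cpu_exec() tests
   it after each lookup and clears its cached chaining pointer, which
   could otherwise reference a freed translation. */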
int tb_invalidated_flag;

//#define CONFIG_DEBUG_EXEC
//#define DEBUG_SIGNAL

int qemu_cpu_has_work(CPUState *env)
{
    return cpu_has_work(env);
}

void cpu_loop_exit(void)
{
    /* NOTE: the registers at this point must be saved by hand because
       longjmp restores them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
#ifdef __linux__
    struct ucontext *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#endif
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
#ifdef __linux__
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
#elif defined(__OpenBSD__)
        sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
#endif
    }
#endif
    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);

    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if async event occurs before
           the TB starts executing.  */
        cpu_pc_from_tb(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
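
/* TB lookup is two-level: tb_find_fast() probes the per-CPU tb_jmp_cache
   hash, indexed by virtual PC; on a miss, tb_find_slow() below walks the
   physically-indexed tb_phys_hash chain and, if no block exists yet,
   translates one with tb_gen_code(). */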
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}

static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}

static CPUDebugExcpHandler *debug_excp_handler;

CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    CPUDebugExcpHandler *old_handler = debug_excp_handler;

    debug_excp_handler = handler;
    return old_handler;
}

static void cpu_handle_debug_exception(CPUState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit)
        QTAILQ_FOREACH(wp, &env->watchpoints, entry)
            wp->flags &= ~BP_WATCHPOINT_HIT;

    if (debug_excp_handler)
        debug_excp_handler(env);
}

/* main execution loop */
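
/* Two nested loops: the body of the outer for(;;) runs under setjmp() so
   that cpu_loop_exit() and delivered exceptions restart it; the inner
   for(;;) services interrupt requests, finds the next TB and runs it.
   tcg_qemu_tb_exec() returns the address of the TB that exited, tagged in
   its two low bits: 0 or 1 name the jump slot taken (used below to chain
   blocks with tb_add_jump()), while 2 means the instruction counter
   expired. */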
int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;

    env_to_regs();
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_IA64)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
            env = cpu_single_env;
#define env cpu_single_env
#endif
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG)
                        cpu_handle_debug_exception(env);
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
#else
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MICROBLAZE)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#elif defined(TARGET_IA64)
                    do_interrupt(env);
#endif
#endif
                }
                env->exception_index = -1;
            }

            if (kvm_enabled()) {
                kvm_cpu_exec(env);
                longjmp(env->jmp_env, 1);
            }

            next_tb = 0; /* force lookup of first TB */
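            /* inner loop: handle pending interrupt work, look up (or
               translate) the TB for the current PC, try to chain it to
               the previous block, then execute it */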
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                               CPU_INTERRUPT_FIQ |
                                               CPU_INTERRUPT_SMI |
                                               CPU_INTERRUPT_NMI);
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        svm_check_intercept(SVM_EXIT_INIT);
                        do_cpu_init(env);
                        env->exception_index = EXCP_HALTED;
                        cpu_loop_exit();
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(env);
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            env->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt(EXCP12_MCHK, 0, 0, 0, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
                            env = cpu_single_env;
#define env cpu_single_env
#endif
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_interrupts_enabled(env)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            env->exception_index = env->interrupt_index;
                            do_interrupt(env);
                            env->interrupt_index = 0;
                            next_tb = 0;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(env->exit_request)) {
                    env->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit();
                }
#ifdef CONFIG_DEBUG_EXEC
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
                    regs_to_env();
#if defined(TARGET_I386)
                    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                    log_cpu_state(env, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    log_cpu_state(env, 0);
#elif defined(TARGET_SPARC)
                    log_cpu_state(env, 0);
#elif defined(TARGET_PPC)
                    log_cpu_state(env, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#elif defined(TARGET_MICROBLAZE)
                    log_cpu_state(env, 0);
#elif defined(TARGET_MIPS)
                    log_cpu_state(env, 0);
#elif defined(TARGET_SH4)
                    log_cpu_state(env, 0);
#elif defined(TARGET_ALPHA)
                    log_cpu_state(env, 0);
#elif defined(TARGET_CRIS)
                    log_cpu_state(env, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef CONFIG_DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                             (long)tb->tc_ptr, tb->pc,
                             lookup_symbol(tb->pc));
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                }
                spin_unlock(&tb_lock);
                env->current_tb = tb;

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                if (unlikely (env->exit_request))
                    env->current_tb = NULL;

                while (env->current_tb) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
                    env->current_tb = NULL;
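                    /* With icount enabled, the generated code decrements
                       env->icount_decr and exits early (tag 2) once the
                       budget is spent; the branch below either refills
                       the 16-bit decrementer from icount_extra or runs
                       the leftover instructions via cpu_exec_nocache()
                       before leaving the loop. */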
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC.  */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
                }
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        } else {
            env_to_regs();
        }
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_IA64)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
#include "hostregs_helper.h"

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}

/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}

#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)
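
/* These wrappers run target helpers on behalf of user-mode (linux-user)
   callers: the helpers operate on the global 'env', so it is temporarily
   switched to the caller's CPUX86State and restored afterwards. */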
void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */

#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)
#define EXCEPTION_ACTION raise_exception_err(env->exception_index, env->error_code)
#else
#define EXCEPTION_ACTION cpu_loop_exit()
#endif

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and 0 otherwise. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }

    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    EXCEPTION_ACTION;

    /* never comes here */
    return 1;
}
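
/* Host-specific cpu_signal_handler() implementations follow: each one
   extracts the faulting PC (and a write flag, where the host signal
   context provides one) and hands over to handle_cpu_signal(). */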

#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)  (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)    ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext->es.err)
# define MASK_sig(context)    ((context)->uc_sigmask)
#elif defined (__NetBSD__)
# include <ucontext.h>

# define EIP_sig(context)     ((context)->uc_mcontext.__gregs[_REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.__gregs[_REG_ERR])
# define MASK_sig(context)    ((context)->uc_sigmask)
#elif defined (__FreeBSD__) || defined(__DragonFly__)
# include <ucontext.h>

# define EIP_sig(context)  (*((unsigned long*)&(context)->uc_mcontext.mc_eip))
# define TRAP_sig(context)    ((context)->uc_mcontext.mc_trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext.mc_err)
# define MASK_sig(context)    ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
# define EIP_sig(context)     ((context)->sc_eip)
# define TRAP_sig(context)    ((context)->sc_trapno)
# define ERROR_sig(context)   ((context)->sc_err)
# define MASK_sig(context)    ((context)->sc_mask)
#else
# define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
# define MASK_sig(context)    ((context)->uc_sigmask)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
#if defined(__NetBSD__) || defined (__FreeBSD__) || defined(__DragonFly__)
    ucontext_t *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}

#elif defined(__x86_64__)

#ifdef __NetBSD__
#define PC_sig(context)       _UC_MACHINE_PC(context)
#define TRAP_sig(context)     ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
#define ERROR_sig(context)    ((context)->uc_mcontext.__gregs[_REG_ERR])
#define MASK_sig(context)     ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
#define PC_sig(context)       ((context)->sc_rip)
#define TRAP_sig(context)     ((context)->sc_trapno)
#define ERROR_sig(context)    ((context)->sc_err)
#define MASK_sig(context)     ((context)->sc_mask)
#elif defined (__FreeBSD__) || defined(__DragonFly__)
#include <ucontext.h>

#define PC_sig(context)  (*((unsigned long*)&(context)->uc_mcontext.mc_rip))
#define TRAP_sig(context)     ((context)->uc_mcontext.mc_trapno)
#define ERROR_sig(context)    ((context)->uc_mcontext.mc_err)
#define MASK_sig(context)     ((context)->uc_sigmask)
#else
#define PC_sig(context)       ((context)->uc_mcontext.gregs[REG_RIP])
#define TRAP_sig(context)     ((context)->uc_mcontext.gregs[REG_TRAPNO])
#define ERROR_sig(context)    ((context)->uc_mcontext.gregs[REG_ERR])
#define MASK_sig(context)     ((context)->uc_sigmask)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    unsigned long pc;
#if defined(__NetBSD__) || defined (__FreeBSD__) || defined(__DragonFly__)
    ucontext_t *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif

    pc = PC_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             TRAP_sig(uc) == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}

#elif defined(_ARCH_PPC)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)       ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access  */
# define GPR_sig(reg_num, context)        REG_sig(gpr[reg_num], context)
# define IAR_sig(context)                 REG_sig(nip, context)  /* Program counter */
# define MSR_sig(context)                 REG_sig(msr, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)                 REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)                 REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                  REG_sig(link, context) /* Link register */
# define CR_sig(context)                  REG_sig(ccr, context)  /* Condition register */
/* Float Registers access  */
# define FLOAT_sig(reg_num, context)      (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)               (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)                 REG_sig(dar, context)
# define DSISR_sig(context)               REG_sig(dsisr, context)
# define TRAP_sig(context)                REG_sig(trap, context)
#endif /* linux */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)       ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context)  ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context)  ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)    ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)        REG_sig(r##reg_num, context)
# define IAR_sig(context)                 REG_sig(srr0, context) /* Program counter */
# define MSR_sig(context)                 REG_sig(srr1, context) /* Machine State Register (Supervisor) */
# define CTR_sig(context)                 REG_sig(ctr, context)
# define XER_sig(context)                 REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                  REG_sig(lr, context)   /* Link register */
# define CR_sig(context)                  REG_sig(cr, context)   /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)      FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)               ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)                 EXCEPREG_sig(dar, context) /* Fault registers for coredump */
# define DSISR_sig(context)               EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)                EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(CONFIG_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define ? */
    unsigned long pc = regs[1];
#else
#ifdef __linux__
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
    unsigned long pc = uc->sc_pc;
    void *sigmask = (void *)(long)uc->sc_mask;
#endif
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x15: // stba
        case 0x06: // sth
        case 0x16: // stha
        case 0x04: // st
        case 0x14: // sta
        case 0x07: // std
        case 0x17: // stda
        case 0x0e: // stx
        case 0x1e: // stxa
        case 0x24: // stf
        case 0x34: // stfa
        case 0x27: // stdf
        case 0x37: // stdfa
        case 0x26: // stqf
        case 0x36: // stqfa
        case 0x25: // stfsr
        case 0x3c: // casa
        case 0x3e: // casxa
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}

#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33:  */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.sc_iaoq[0];
    /* FIXME: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */