/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "cpu.h"
#include "disas.h"
#include "tcg.h"
#include "qemu-barrier.h"
int tb_invalidated_flag;

//#define CONFIG_DEBUG_EXEC

bool qemu_cpu_has_work(CPUState *env)
{
    return cpu_has_work(env);
}
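
/* Unwind back to the setjmp() in cpu_exec().  Callers use this when the
   current TB must stop executing (pending exception, exit request, etc.). */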
void cpu_loop_exit(CPUState *env)
{
    env->current_tb = NULL;
    longjmp(env->jmp_env, 1);
}

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
#if defined(CONFIG_SOFTMMU)
void cpu_resume_from_signal(CPUState *env, void *puc)
{
    /* XXX: restore cpu registers saved in host registers */

    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}
#endif

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(CPUState *env, int max_cycles,
                             TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(env, tb->tc_ptr);
    env->current_tb = NULL;

    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if async event occurs before
           the TB starts executing.  */
        cpu_pc_from_tb(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
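
/* Note: TB lookup is two-level.  tb_find_fast() first probes
   env->tb_jmp_cache, a small direct-mapped cache indexed by a hash of the
   virtual PC.  On a miss, tb_find_slow() walks the tb_phys_hash chain,
   which is keyed by the code's physical address, so blocks can be found
   and invalidated per physical page regardless of the virtual mapping. */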
static TranslationBlock *tb_find_slow(CPUState *env,
                                      target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1;
    target_ulong virt_page2;

    tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                tb_page_addr_t phys_page2;

                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* Move the last found TB to the head of the list */
    if (likely(*ptb1)) {
        *ptb1 = tb->phys_hash_next;
        tb->phys_hash_next = tb_phys_hash[h];
        tb_phys_hash[h] = tb;
    }
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}

static inline TranslationBlock *tb_find_fast(CPUState *env)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(env, pc, cs_base, flags);
    }
    return tb;
}

static CPUDebugExcpHandler *debug_excp_handler;

CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    CPUDebugExcpHandler *old_handler = debug_excp_handler;

    debug_excp_handler = handler;
    return old_handler;
}

static void cpu_handle_debug_exception(CPUState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
    if (debug_excp_handler) {
        debug_excp_handler(env);
    }
}

/* main execution loop */

volatile sig_atomic_t exit_request;
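
/* Note on structure: cpu_exec() runs translated code for one CPU until it
   has a reason to return (halt, exit request, debug event, ...).  The outer
   for(;;) establishes the setjmp() target that cpu_loop_exit() longjmps back
   to; the inner for(;;) services pending interrupts, looks up (and possibly
   chains) the next TB, and executes it. */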
int cpu_exec(CPUState *env)
{
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (env->halted) {
        if (!cpu_has_work(env)) {
            return EXCP_HALTED;
        }

        env->halted = 0;
    }

    cpu_single_env = env;

    if (unlikely(exit_request)) {
        env->exit_request = 1;
    }

#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_PPC)
    env->reserve_addr = -1;
#elif defined(TARGET_LM32)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG) {
                        cpu_handle_debug_exception(env);
                    }
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt(env);
#endif
                    ret = env->exception_index;
                    break;
#else
                    do_interrupt(env);
                    env->exception_index = -1;
#endif
                }
            }

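            /* Note: next_tb carries the host pointer of the TB that just ran,
               with the low two bits encoding which of its jump slots should
               be patched to chain directly to the next TB (see tb_add_jump()
               below).  A value of 0 disables chaining; the special low-bit
               value 2 marks an exit taken before or inside the TB, e.g. when
               the instruction counter expires. */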
            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit(env);
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit(env);
                    }
#endif
#if defined(TARGET_I386)
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        svm_check_intercept(env, SVM_EXIT_INIT);
                        do_cpu_init(env);
                        env->exception_index = EXCP_HALTED;
                        cpu_loop_exit(env);
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(env);
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(env, SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter(env);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            env->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(env, SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(env, SVM_EXIT_VINTR);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_LM32)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->ie & IE_IE)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_mips_hw_interrupts_pending(env)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                 cpu_pil_allowed(env, pil)) ||
                                type != TT_EXTINT) {
                                env->exception_index = env->interrupt_index;
                                do_interrupt(env);
                                next_tb = 0;
                            }
                        }
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->uncached_asr & ASR_I)) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    {
                        int idx = -1;
                        /* ??? This hard-codes the OSF/1 interrupt levels.  */
                        switch (env->pal_mode ? 7 : env->ps & PS_INT_MASK) {
                        case 0 ... 3:
                            if (interrupt_request & CPU_INTERRUPT_HARD) {
                                idx = EXCP_DEV_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 4:
                            if (interrupt_request & CPU_INTERRUPT_TIMER) {
                                idx = EXCP_CLK_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 5:
                            if (interrupt_request & CPU_INTERRUPT_SMP) {
                                idx = EXCP_SMP_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 6:
                            if (interrupt_request & CPU_INTERRUPT_MCHK) {
                                idx = EXCP_MCHK;
                            }
                        }
                        if (idx >= 0) {
                            env->exception_index = idx;
                            env->error_code = 0;
                            do_interrupt(env);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt_m68k_hardirq(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psw.mask & PSW_MASK_EXT)) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_XTENSA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        env->exception_index = EXC_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(env->exit_request)) {
                    env->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit(env);
                }
#if defined(DEBUG_DISAS) || defined(CONFIG_DEBUG_EXEC)
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
#if defined(TARGET_I386)
                    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
                        | (DF & DF_MASK);
                    log_cpu_state(env, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#else
                    log_cpu_state(env, 0);
#endif
                }
#endif /* DEBUG_DISAS || CONFIG_DEBUG_EXEC */
                spin_lock(&tb_lock);
                tb = tb_find_fast(env);
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef CONFIG_DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                              (long)tb->tc_ptr, tb->pc,
                              lookup_symbol(tb->pc));
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                }
                spin_unlock(&tb_lock);

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                env->current_tb = tb;
                barrier();
                if (likely(!env->exit_request)) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
                    next_tb = tcg_qemu_tb_exec(env, tc_ptr);
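                    /* Note: a return value whose low two bits are 2 means the
                       TB stopped early, typically because the icount budget
                       held in env->icount_decr ran out.  The code below
                       either refills the 16-bit decrementer from
                       icount_extra, or executes the few remaining
                       instructions uncached and exits the loop so pending
                       timer/IO work can run. */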
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC.  */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(env, insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit(env);
                        }
                    }
                }
                env->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        } else {
            /* Reload env after longjmp - the compiler may have smashed all
             * local variables as longjmp is marked 'noreturn'. */
            env = cpu_single_env;
        }
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
        | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?  */
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}