/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "exec.h"
#include "disas.h"
#if !defined(TARGET_IA64)
#include "tcg.h"
#endif
#include "kvm.h"
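
/* In user-mode emulation the target register accessors from exec.h
   (EAX, ECX, ...) would collide with the greg index constants defined
   by the host's <sys/ucontext.h>, so drop them before pulling in the
   signal headers used by the fault handlers below. */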
#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#ifdef __linux__
#include <sys/ucontext.h>
#endif
#endif

#include "qemu-kvm.h"

#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif
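
/* Set by tb_gen_code() when the whole translation cache had to be
   flushed to make room for a new block; the execution loop must then
   drop any cached TB pointers and must not chain into freed code. */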
int tb_invalidated_flag;

//#define CONFIG_DEBUG_EXEC
//#define DEBUG_SIGNAL

int qemu_cpu_has_work(CPUState *env)
{
    return cpu_has_work(env);
}

void cpu_loop_exit(void)
{
    env->current_tb = NULL;
    longjmp(env->jmp_env, 1);
}

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
#ifdef __linux__
    struct ucontext *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#endif
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
#ifdef __linux__
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
#elif defined(__OpenBSD__)
        sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
#endif
    }
#endif
    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}
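
/* tcg_qemu_tb_exec() returns the address of the TB that executed last,
   with its two low bits encoding the goto_tb jump slot taken (0 or 1),
   or the value 2 when the block exited before running, e.g. because
   the instruction counter expired; cpu_exec() uses this to patch TB
   jumps. */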
/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);
    env->current_tb = NULL;

    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if async event occurs before
           the TB starts executing.  */
        cpu_pc_from_tb(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
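
/* TB lookup is two-level: tb_find_fast() probes a direct-mapped cache
   indexed by the virtual PC (env->tb_jmp_cache); on a miss,
   tb_find_slow() hashes the *physical* PC into tb_phys_hash, so that
   different virtual mappings of the same code share one translation,
   generating a new block only if none exists. */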
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1, phys_page2;
    target_ulong virt_page2;

    tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}
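
/* The (pc, cs_base, flags) triple must capture every piece of CPU
   state that influenced the translation, otherwise a cache hit here
   could run code that was generated for a different mode. */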
static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}

static CPUDebugExcpHandler *debug_excp_handler;

CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    CPUDebugExcpHandler *old_handler = debug_excp_handler;

    debug_excp_handler = handler;
    return old_handler;
}

static void cpu_handle_debug_exception(CPUState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit)
        QTAILQ_FOREACH(wp, &env->watchpoints, entry)
            wp->flags &= ~BP_WATCHPOINT_HIT;

    if (debug_excp_handler)
        debug_excp_handler(env);
}
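
/* Each iteration of the outer loop below re-enters a setjmp context:
   guest exceptions and host signals longjmp back to it.  The inner
   loop delivers pending interrupts, looks up (or translates) the TB
   for the current CPU state, chains it to the previously executed TB
   when that is safe, and runs it.  When KVM is active the TCG
   machinery is bypassed and kvm_cpu_exec() runs the guest instead. */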
/* main execution loop */

int cpu_exec(CPUState *env1)
{
    volatile host_reg_t saved_env_reg;
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* the access to env below is actually saving the global register's
       value, so that files not including target-xyz/exec.h are free to
       use it.  */
    QEMU_BUILD_BUG_ON (sizeof (saved_env_reg) != sizeof (env));
    saved_env_reg = (host_reg_t) env;
    asm("");
    env = env1;

#if defined(TARGET_I386)
    if (!kvm_enabled()) {
        /* put eflags in CPU temporary format */
        CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
        DF = 1 - (2 * ((env->eflags >> 10) & 1));
        CC_OP = CC_OP_EFLAGS;
        env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    }
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_IA64)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
            env = cpu_single_env;
#define env cpu_single_env
#endif
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG)
                        cpu_handle_debug_exception(env);
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
#else
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MICROBLAZE)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#elif defined(TARGET_IA64)
                    do_interrupt(env);
#endif
                    env->exception_index = -1;
#endif
                }
            }

            if (kvm_enabled()) {
                kvm_cpu_exec(env);
                longjmp(env->jmp_env, 1);
            }
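
            /* next_tb holds the previously executed TB together with
               its jump-slot bits; zeroing it forces a fresh lookup and
               suppresses jump patching after any control-flow change. */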
            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                               CPU_INTERRUPT_FIQ |
                                               CPU_INTERRUPT_SMI |
                                               CPU_INTERRUPT_NMI);
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        svm_check_intercept(SVM_EXIT_INIT);
                        do_cpu_init(env);
                        env->exception_index = EXCP_HALTED;
                        cpu_loop_exit();
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(env);
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            env->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt(EXCP12_MCHK, 0, 0, 0, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                  cpu_pil_allowed(env, pil)) ||
                                  type != TT_EXTINT) {
                                env->exception_index = env->interrupt_index;
                                do_interrupt(env);
                                next_tb = 0;
                            }
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(env->exit_request)) {
                    env->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit();
                }
#ifdef CONFIG_DEBUG_EXEC
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
#if defined(TARGET_I386)
                    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                    log_cpu_state(env, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    log_cpu_state(env, 0);
#elif defined(TARGET_SPARC)
                    log_cpu_state(env, 0);
#elif defined(TARGET_PPC)
                    log_cpu_state(env, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#elif defined(TARGET_MICROBLAZE)
                    log_cpu_state(env, 0);
#elif defined(TARGET_MIPS)
                    log_cpu_state(env, 0);
#elif defined(TARGET_SH4)
                    log_cpu_state(env, 0);
#elif defined(TARGET_ALPHA)
                    log_cpu_state(env, 0);
#elif defined(TARGET_CRIS)
                    log_cpu_state(env, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef CONFIG_DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                              (long)tb->tc_ptr, tb->pc,
                              lookup_symbol(tb->pc));
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                }
                spin_unlock(&tb_lock);

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                if (!unlikely (env->exit_request)) {
                    env->current_tb = tb;
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
                    env->current_tb = NULL;
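                    /* The generated code decrements icount_decr.u16.low
                       on entry to each TB; when the 16-bit budget runs
                       out the TB returns early with low bits == 2.
                       icount_extra holds the instructions beyond that
                       budget, refilled in chunks of at most 0xffff. */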
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC.  */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
                }
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        }
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?  */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_IA64)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
    asm("");
    env = (void *) saved_env_reg;

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}

/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}
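
/* The wrappers below run outside translated code but call TCG helper_*
   routines that operate on the implicit global env, so each one
   temporarily installs the caller's CPUX86State as env. */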
#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */

#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)
#define EXCEPTION_ACTION raise_exception_err(env->exception_index, env->error_code)
#else
#define EXCEPTION_ACTION cpu_loop_exit()
#endif
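
/* Recovery order matters here: a write fault may simply hit a page
   that was write-protected to detect self-modifying code
   (page_unprotect), then the target MMU may resolve the access
   silently, and only if both fail is a guest exception raised, after
   rolling the CPU state back to the faulting instruction via
   cpu_restore_state(). */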
/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }

    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    EXCEPTION_ACTION;

    /* never comes here */
    return 1;
}
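
/* Each host variant below recovers the faulting PC (and, where the
   hardware reports it, whether the access was a write) from the signal
   context, then funnels into handle_cpu_signal().  On x86 hosts, trap
   0xe is a page fault and bit 1 of the error code indicates a write;
   hosts without that information conservatively pass is_write = 0. */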
#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)  (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)    ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext->es.err)
# define MASK_sig(context)    ((context)->uc_sigmask)
#elif defined (__NetBSD__)
# include <ucontext.h>

# define EIP_sig(context)     ((context)->uc_mcontext.__gregs[_REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.__gregs[_REG_ERR])
# define MASK_sig(context)    ((context)->uc_sigmask)
#elif defined (__FreeBSD__) || defined(__DragonFly__)
# include <ucontext.h>

# define EIP_sig(context)  (*((unsigned long*)&(context)->uc_mcontext.mc_eip))
# define TRAP_sig(context)    ((context)->uc_mcontext.mc_trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext.mc_err)
# define MASK_sig(context)    ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
# define EIP_sig(context)     ((context)->sc_eip)
# define TRAP_sig(context)    ((context)->sc_trapno)
# define ERROR_sig(context)   ((context)->sc_err)
# define MASK_sig(context)    ((context)->sc_mask)
#else
# define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
# define MASK_sig(context)    ((context)->uc_sigmask)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
#if defined(__NetBSD__) || defined (__FreeBSD__) || defined(__DragonFly__)
    ucontext_t *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}

#elif defined(__x86_64__)

#ifdef __NetBSD__
#define PC_sig(context)       _UC_MACHINE_PC(context)
#define TRAP_sig(context)     ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
#define ERROR_sig(context)    ((context)->uc_mcontext.__gregs[_REG_ERR])
#define MASK_sig(context)     ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
#define PC_sig(context)       ((context)->sc_rip)
#define TRAP_sig(context)     ((context)->sc_trapno)
#define ERROR_sig(context)    ((context)->sc_err)
#define MASK_sig(context)     ((context)->sc_mask)
#elif defined (__FreeBSD__) || defined(__DragonFly__)
#include <ucontext.h>

#define PC_sig(context)  (*((unsigned long*)&(context)->uc_mcontext.mc_rip))
#define TRAP_sig(context)     ((context)->uc_mcontext.mc_trapno)
#define ERROR_sig(context)    ((context)->uc_mcontext.mc_err)
#define MASK_sig(context)     ((context)->uc_sigmask)
#else
#define PC_sig(context)       ((context)->uc_mcontext.gregs[REG_RIP])
#define TRAP_sig(context)     ((context)->uc_mcontext.gregs[REG_TRAPNO])
#define ERROR_sig(context)    ((context)->uc_mcontext.gregs[REG_ERR])
#define MASK_sig(context)     ((context)->uc_sigmask)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    unsigned long pc;
#if defined(__NetBSD__) || defined (__FreeBSD__) || defined(__DragonFly__)
    ucontext_t *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif

    pc = PC_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             TRAP_sig(uc) == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}

#elif defined(_ARCH_PPC)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)     ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)      REG_sig(gpr[reg_num], context)
# define IAR_sig(context)               REG_sig(nip, context)  /* Program counter */
# define MSR_sig(context)               REG_sig(msr, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)               REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)               REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                REG_sig(link, context) /* Link register */
# define CR_sig(context)                REG_sig(ccr, context)  /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)    (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)             (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)               REG_sig(dar, context)
# define DSISR_sig(context)             REG_sig(dsisr, context)
# define TRAP_sig(context)              REG_sig(trap, context)
#endif /* linux */

#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <ucontext.h>
# define IAR_sig(context)               ((context)->uc_mcontext.mc_srr0)
# define MSR_sig(context)               ((context)->uc_mcontext.mc_srr1)
# define CTR_sig(context)               ((context)->uc_mcontext.mc_ctr)
# define XER_sig(context)               ((context)->uc_mcontext.mc_xer)
# define LR_sig(context)                ((context)->uc_mcontext.mc_lr)
# define CR_sig(context)                ((context)->uc_mcontext.mc_cr)
/* Exception Registers access */
# define DAR_sig(context)               ((context)->uc_mcontext.mc_dar)
# define DSISR_sig(context)             ((context)->uc_mcontext.mc_dsisr)
# define TRAP_sig(context)              ((context)->uc_mcontext.mc_exc)
#endif /* __FreeBSD__|| __FreeBSD_kernel__ */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)      ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)   ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)       REG_sig(r##reg_num, context)
# define IAR_sig(context)                REG_sig(srr0, context) /* Program counter */
# define MSR_sig(context)                REG_sig(srr1, context) /* Machine State Register (Supervisor) */
# define CTR_sig(context)                REG_sig(ctr, context)
# define XER_sig(context)                REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                 REG_sig(lr, context)   /* Link register */
# define CR_sig(context)                 REG_sig(cr, context)   /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)     FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)              ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)                EXCEPREG_sig(dar, context) /* Fault registers for coredump */
# define DSISR_sig(context)              EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)               EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
    ucontext_t *uc = puc;
#else
    struct ucontext *uc = puc;
#endif
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(CONFIG_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define ? */
    unsigned long pc = regs[1];
#else
#ifdef __linux__
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
    unsigned long pc = uc->sc_pc;
    void *sigmask = (void *)(long)uc->sc_mask;
#endif
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x15: // stba
        case 0x06: // sth
        case 0x16: // stha
        case 0x04: // st
        case 0x14: // sta
        case 0x07: // std
        case 0x17: // stda
        case 0x0e: // stx
        case 0x1e: // stxa
        case 0x24: // stf
        case 0x34: // stfa
        case 0x27: // stdf
        case 0x37: // stdfa
        case 0x26: // stqf
        case 0x36: // stqfa
        case 0x25: // stfsr
        case 0x3c: // casa
        case 0x3e: // casxa
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}

#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33:  */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.sc_iaoq[0];
    /* FIXME: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */