Initialize apic before vcpu main loop
qemu-kvm/amd-iommu.git / cpu-exec.c
blob 0f085eab305cc82518e5757a9a36ac3b3984b7cf
/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "exec.h"
#include "disas.h"
#if !defined(TARGET_IA64)
#include "tcg.h"
#endif
#include "kvm.h"

#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#ifdef __linux__
#include <sys/ucontext.h>
#endif
#endif

#include "qemu-kvm.h"

#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif

int tb_invalidated_flag;

//#define CONFIG_DEBUG_EXEC
//#define DEBUG_SIGNAL
int qemu_cpu_has_work(CPUState *env)
{
    return cpu_has_work(env);
}

void cpu_loop_exit(void)
{
    /* NOTE: the registers must be saved by hand here because
       longjmp restores them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
#ifdef __linux__
    struct ucontext *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#endif
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
#ifdef __linux__
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
#elif defined(__OpenBSD__)
        sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
#endif
    }
#endif
    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);

    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if async event occurs before
           the TB starts executing.  */
        cpu_pc_from_tb(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}

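/* Note on the next_tb encoding used above and throughout cpu_exec():
   tcg_qemu_tb_exec() returns the address of the TB it left, with the
   low two bits used as a tag.  Values 0 and 1 name the jump slot the
   TB was exited through (used below to chain blocks together), while
   2 means execution stopped for an asynchronous reason such as an
   expired instruction counter, so the PC has to be restored from the
   TB itself. */
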
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}

static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}

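/* Summary of the TB lookup path: tb_find_fast() probes tb_jmp_cache,
   a direct-mapped cache hashed on the virtual PC.  On a miss, or when
   the cached entry's pc/cs_base/flags no longer match the current CPU
   state, tb_find_slow() walks the physically indexed tb_phys_hash
   chain, and only if that also fails is a new TB translated via
   tb_gen_code(). */
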
static CPUDebugExcpHandler *debug_excp_handler;

CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    CPUDebugExcpHandler *old_handler = debug_excp_handler;

    debug_excp_handler = handler;
    return old_handler;
}

static void cpu_handle_debug_exception(CPUState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit)
        QTAILQ_FOREACH(wp, &env->watchpoints, entry)
            wp->flags &= ~BP_WATCHPOINT_HIT;

    if (debug_excp_handler)
        debug_excp_handler(env);
}

/* main execution loop */

int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;

    env_to_regs();
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_IA64)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;
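
    /* Note: the i386 setup above keeps EFLAGS in a decomposed form:
       the arithmetic flags live in the lazy CC_SRC/CC_OP fields, and
       DF is stored as +1 or -1 instead of as bit 10 of EFLAGS.
       1 - 2*((eflags >> 10) & 1) is +1 when DF=0 and -1 when DF=1, so
       translated string instructions can simply add DF to the index
       registers. */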
    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
            env = cpu_single_env;
#define env cpu_single_env
#endif
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG)
                        cpu_handle_debug_exception(env);
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
#else
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MICROBLAZE)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#elif defined(TARGET_IA64)
                    do_interrupt(env);
#endif
#endif
                }
                env->exception_index = -1;
            }

            if (kvm_enabled()) {
                kvm_cpu_exec(env);
                longjmp(env->jmp_env, 1);
            }
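            /* Note: with KVM enabled the TCG loop below is never
               reached; kvm_cpu_exec() runs the vcpu in the kernel and
               the longjmp() above returns to the setjmp() at the top,
               where pending exceptions and exit requests are
               re-examined. */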

            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                               CPU_INTERRUPT_FIQ |
                                               CPU_INTERRUPT_SMI |
                                               CPU_INTERRUPT_NMI);
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
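                    /* The i386 checks below are ordered roughly by
                       event priority: INIT/SIPI first, then (only with
                       GIF set) SMI, NMI, MCE, maskable external
                       interrupts, and finally SVM virtual interrupts.
                       Every delivered event clears next_tb so that no
                       stale TB chaining survives the change in control
                       flow. */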
#if defined(TARGET_I386)
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        svm_check_intercept(SVM_EXIT_INIT);
                        do_cpu_init(env);
                        env->exception_index = EXCP_HALTED;
                        cpu_loop_exit();
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(env);
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            env->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt(EXCP12_MCHK, 0, 0, 0, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
                            env = cpu_single_env;
#define env cpu_single_env
#endif
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_interrupts_enabled(env)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            env->exception_index = env->interrupt_index;
                            do_interrupt(env);
                            env->interrupt_index = 0;
                            next_tb = 0;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(env->exit_request)) {
                    env->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit();
                }
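                /* env->exit_request is the asynchronous "leave the TCG
                   loop" flag (set e.g. by cpu_exit() from another
                   thread or from a signal handler).  Raising
                   EXCP_INTERRUPT and calling cpu_loop_exit() unwinds to
                   the setjmp() above, and cpu_exec() then returns with
                   ret == EXCP_INTERRUPT. */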
#ifdef CONFIG_DEBUG_EXEC
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
                    regs_to_env();
#if defined(TARGET_I386)
                    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                    log_cpu_state(env, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    log_cpu_state(env, 0);
#elif defined(TARGET_SPARC)
                    log_cpu_state(env, 0);
#elif defined(TARGET_PPC)
                    log_cpu_state(env, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#elif defined(TARGET_MICROBLAZE)
                    log_cpu_state(env, 0);
#elif defined(TARGET_MIPS)
                    log_cpu_state(env, 0);
#elif defined(TARGET_SH4)
                    log_cpu_state(env, 0);
#elif defined(TARGET_ALPHA)
                    log_cpu_state(env, 0);
#elif defined(TARGET_CRIS)
                    log_cpu_state(env, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef CONFIG_DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                             (long)tb->tc_ptr, tb->pc,
                             lookup_symbol(tb->pc));
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                }
                spin_unlock(&tb_lock);
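                /* Block chaining note: tb_add_jump() above patches jump
                   slot (next_tb & 3) of the previously executed TB so
                   that its generated code branches directly to the new
                   TB.  Once chained, execution flows from block to
                   block without returning to this loop until an
                   unchained exit occurs. */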
                env->current_tb = tb;

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                if (unlikely (env->exit_request))
                    env->current_tb = NULL;
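                /* Execute the TB.  A return value whose low bits equal
                   2 means the instruction counter expired mid-block:
                   the 16-bit decrementer in icount_decr is refilled
                   from icount_extra in chunks of at most 0xffff
                   instructions, and a short remainder is run via
                   cpu_exec_nocache() so that the deadline is hit
                   exactly. */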
                while (env->current_tb) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
                    env->current_tb = NULL;
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC.  */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
                }
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        } else {
            env_to_regs();
        }
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_IA64)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
#include "hostregs_helper.h"

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}

/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}

#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)
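
/* The helpers below follow the usual pattern for calling target
   helper functions from outside the execution loop: the helpers
   implicitly operate on the global 'env', so it is saved, pointed at
   the caller's CPUX86State, and restored afterwards. */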

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */

#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)
#define EXCEPTION_ACTION raise_exception_err(env->exception_index, env->error_code)
#else
#define EXCEPTION_ACTION cpu_loop_exit()
#endif
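
/* handle_cpu_signal() returns 1 when the fault was handled (the page
   was unprotected or the soft MMU resolved it) and 0 when it was not
   an MMU fault at all, in which case the caller falls back to the
   host's default signal handling.  A real guest fault never returns
   here: EXCEPTION_ACTION longjmps back into cpu_exec(), either via
   raise_exception_err() or cpu_loop_exit(). */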

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }

    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    EXCEPTION_ACTION;

    /* never comes here */
    return 1;
}

#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)     (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)    ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext->es.err)
# define MASK_sig(context)    ((context)->uc_sigmask)
#elif defined (__NetBSD__)
# include <ucontext.h>

# define EIP_sig(context)     ((context)->uc_mcontext.__gregs[_REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.__gregs[_REG_ERR])
# define MASK_sig(context)    ((context)->uc_sigmask)
#elif defined (__FreeBSD__) || defined(__DragonFly__)
# include <ucontext.h>

# define EIP_sig(context)     (*((unsigned long*)&(context)->uc_mcontext.mc_eip))
# define TRAP_sig(context)    ((context)->uc_mcontext.mc_trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext.mc_err)
# define MASK_sig(context)    ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
# define EIP_sig(context)     ((context)->sc_eip)
# define TRAP_sig(context)    ((context)->sc_trapno)
# define ERROR_sig(context)   ((context)->sc_err)
# define MASK_sig(context)    ((context)->sc_mask)
#else
# define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
# define MASK_sig(context)    ((context)->uc_sigmask)
#endif
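
/* In the handler below, trap number 0xe is the x86 page fault (#PF).
   Bit 1 of the page-fault error code is the W/R bit (1 means the
   faulting access was a write), hence (ERROR_sig(uc) >> 1) & 1 as the
   is_write argument; for any other trap, is_write is 0. */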
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
#if defined(__NetBSD__) || defined (__FreeBSD__) || defined(__DragonFly__)
    ucontext_t *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}

#elif defined(__x86_64__)

#ifdef __NetBSD__
#define PC_sig(context)       _UC_MACHINE_PC(context)
#define TRAP_sig(context)     ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
#define ERROR_sig(context)    ((context)->uc_mcontext.__gregs[_REG_ERR])
#define MASK_sig(context)     ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
#define PC_sig(context)       ((context)->sc_rip)
#define TRAP_sig(context)     ((context)->sc_trapno)
#define ERROR_sig(context)    ((context)->sc_err)
#define MASK_sig(context)     ((context)->sc_mask)
#elif defined (__FreeBSD__) || defined(__DragonFly__)
#include <ucontext.h>

#define PC_sig(context)       (*((unsigned long*)&(context)->uc_mcontext.mc_rip))
#define TRAP_sig(context)     ((context)->uc_mcontext.mc_trapno)
#define ERROR_sig(context)    ((context)->uc_mcontext.mc_err)
#define MASK_sig(context)     ((context)->uc_sigmask)
#else
#define PC_sig(context)       ((context)->uc_mcontext.gregs[REG_RIP])
#define TRAP_sig(context)     ((context)->uc_mcontext.gregs[REG_TRAPNO])
#define ERROR_sig(context)    ((context)->uc_mcontext.gregs[REG_ERR])
#define MASK_sig(context)     ((context)->uc_sigmask)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    unsigned long pc;
#if defined(__NetBSD__) || defined (__FreeBSD__) || defined(__DragonFly__)
    ucontext_t *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif

    pc = PC_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             TRAP_sig(uc) == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}

#elif defined(_ARCH_PPC)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)    ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access  */
# define GPR_sig(reg_num, context)     REG_sig(gpr[reg_num], context)
# define IAR_sig(context)              REG_sig(nip, context)   /* Program counter */
# define MSR_sig(context)              REG_sig(msr, context)   /* Machine State Register (Supervisor) */
# define CTR_sig(context)              REG_sig(ctr, context)   /* Count register */
# define XER_sig(context)              REG_sig(xer, context)   /* User's integer exception register */
# define LR_sig(context)               REG_sig(link, context)  /* Link register */
# define CR_sig(context)               REG_sig(ccr, context)   /* Condition register */
/* Float Registers access  */
# define FLOAT_sig(reg_num, context)   (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)            (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)              REG_sig(dar, context)
# define DSISR_sig(context)            REG_sig(dsisr, context)
# define TRAP_sig(context)             REG_sig(trap, context)
#endif /* linux */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)       ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context)  ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context)  ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)    ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)        REG_sig(r##reg_num, context)
# define IAR_sig(context)                 REG_sig(srr0, context)  /* Program counter */
# define MSR_sig(context)                 REG_sig(srr1, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)                 REG_sig(ctr, context)   /* Count register */
# define XER_sig(context)                 REG_sig(xer, context)   /* User's integer exception register */
# define LR_sig(context)                  REG_sig(lr, context)    /* Link register */
# define CR_sig(context)                  REG_sig(cr, context)    /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)      FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)               ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)                 EXCEPREG_sig(dar, context) /* Fault registers for coredump */
# define DSISR_sig(context)               EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)                EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    /* cast: sc_pc is an unsigned long holding the faulting host PC */
    uint32_t *pc = (uint32_t *)uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal((unsigned long)pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

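/* Like the Alpha decoding above, the SPARC handler below derives
   is_write by decoding the faulting instruction: format-3 memory ops
   have (insn >> 30) == 3 and select the operation via the op3 field
   in bits 19..24, so the cases listed cover the store and
   atomic-store variants. */
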
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(CONFIG_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define ? */
    unsigned long pc = regs[1];
#else
#ifdef __linux__
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
    unsigned long pc = uc->sc_pc;
    void *sigmask = (void *)(long)uc->sc_mask;
#endif
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x15: // stba
        case 0x06: // sth
        case 0x16: // stha
        case 0x04: // st
        case 0x14: // sta
        case 0x07: // std
        case 0x17: // stda
        case 0x0e: // stx
        case 0x1e: // stxa
        case 0x24: // stf
        case 0x34: // stfa
        case 0x27: // stdf
        case 0x37: // stdfa
        case 0x26: // stqf
        case 0x36: // stqfa
        case 0x25: // stfsr
        case 0x3c: // casa
        case 0x3e: // casxa
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}

#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33:  */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.sc_iaoq[0];
    /* FIXME: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */