/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "config.h"
#include "exec.h"
#include "disas.h"
#include "tcg.h"
#include "kvm.h"

#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#ifdef __linux__
#include <sys/ucontext.h>
#endif
#endif

#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif
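
/* Set by tb_gen_code when the translation cache is flushed while
   generating code: it warns cpu_exec that cached TB pointers (and any
   pending TB chaining) may be stale and must not be reused. */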
int tb_invalidated_flag;

//#define CONFIG_DEBUG_EXEC
//#define DEBUG_SIGNAL

int qemu_cpu_has_work(CPUState *env)
{
    return cpu_has_work(env);
}

void cpu_loop_exit(void)
{
    env->current_tb = NULL;
    longjmp(env->jmp_env, 1);
}

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
#ifdef __linux__
    struct ucontext *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#endif
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
#ifdef __linux__
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
#elif defined(__OpenBSD__)
        sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
#endif
    }
#endif
    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);
    env->current_tb = NULL;

    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if async event occurs before
           the TB starts executing.  */
        cpu_pc_from_tb(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
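
/* TB lookup is two-level: tb_find_fast() first probes tb_jmp_cache,
   hashed by virtual PC; on a miss, tb_find_slow() searches the physical
   hash table and, failing that, translates a new block. */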
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;

    tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}

static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}

static CPUDebugExcpHandler *debug_excp_handler;

CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    CPUDebugExcpHandler *old_handler = debug_excp_handler;

    debug_excp_handler = handler;
    return old_handler;
}

static void cpu_handle_debug_exception(CPUState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit)
        QTAILQ_FOREACH(wp, &env->watchpoints, entry)
            wp->flags &= ~BP_WATCHPOINT_HIT;

    if (debug_excp_handler)
        debug_excp_handler(env);
}

/* main execution loop */
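/* The outer for(;;) is re-entered via setjmp/longjmp whenever a guest
   exception or exit request is raised; the inner for(;;) looks up,
   chains and executes translated blocks until something breaks out. */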
int cpu_exec(CPUState *env1)
{
    volatile host_reg_t saved_env_reg;
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* the access to env below is actually saving the global register's
       value, so that files not including target-xyz/exec.h are free to
       use it.  */
    QEMU_BUILD_BUG_ON (sizeof (saved_env_reg) != sizeof (env));
    saved_env_reg = (host_reg_t) env;
    asm("");
    env = env1;
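    /* note: the empty asm() above is a compiler barrier; it appears to
       be there to keep the save of the old env value from being
       reordered past the assignment to the global register variable */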

#if defined(TARGET_I386)
    if (!kvm_enabled()) {
        /* put eflags in CPU temporary format */
        CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
        DF = 1 - (2 * ((env->eflags >> 10) & 1));
        CC_OP = CC_OP_EFLAGS;
        env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    }
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
            env = cpu_single_env;
#define env cpu_single_env
#endif
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG)
                        cpu_handle_debug_exception(env);
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
#else
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MICROBLAZE)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#endif
                    env->exception_index = -1;
#endif
                }
            }
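
            /* with KVM the guest runs in the kernel: execute it here,
               then longjmp back to the setjmp above; the TCG loop below
               is never reached */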
            if (kvm_enabled()) {
                kvm_cpu_exec(env);
                longjmp(env->jmp_env, 1);
            }

            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                               CPU_INTERRUPT_FIQ |
                                               CPU_INTERRUPT_SMI |
                                               CPU_INTERRUPT_NMI);
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        svm_check_intercept(SVM_EXIT_INIT);
                        do_cpu_init(env);
                        env->exception_index = EXCP_HALTED;
                        cpu_loop_exit();
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(env);
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            env->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt(EXCP12_MCHK, 0, 0, 0, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
                            env = cpu_single_env;
#define env cpu_single_env
#endif
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                 cpu_pil_allowed(env, pil)) ||
                                type != TT_EXTINT) {
                                env->exception_index = env->interrupt_index;
                                do_interrupt(env);
                                next_tb = 0;
                            }
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(env->exit_request)) {
                    env->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit();
                }
#ifdef CONFIG_DEBUG_EXEC
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
#if defined(TARGET_I386)
                    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                    log_cpu_state(env, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    log_cpu_state(env, 0);
#elif defined(TARGET_SPARC)
                    log_cpu_state(env, 0);
#elif defined(TARGET_PPC)
                    log_cpu_state(env, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#elif defined(TARGET_MICROBLAZE)
                    log_cpu_state(env, 0);
#elif defined(TARGET_MIPS)
                    log_cpu_state(env, 0);
#elif defined(TARGET_SH4)
                    log_cpu_state(env, 0);
#elif defined(TARGET_ALPHA)
                    log_cpu_state(env, 0);
#elif defined(TARGET_CRIS)
                    log_cpu_state(env, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef CONFIG_DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                              (long)tb->tc_ptr, tb->pc,
                              lookup_symbol(tb->pc));
#endif
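                /* the low two bits of next_tb carry the index of the
                   jump slot through which the previous TB exited
                   (consumed by tb_add_jump below); the value 2 is
                   reserved for exits handled by the icount code
                   further down */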
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                }
                spin_unlock(&tb_lock);

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                if (!unlikely (env->exit_request)) {
                    env->current_tb = tb;
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
                    env->current_tb = NULL;
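                    /* with icount, the instruction-count decrementer may
                       have expired mid-block: refill it and continue, or
                       run the remaining instructions via cpu_exec_nocache
                       before exiting with EXCP_INTERRUPT */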
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC.  */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
                }
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        }
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?  */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
    asm("");
    env = (void *) saved_env_reg;

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}

/* must only be called from the generated code as
   an exception can be generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}

#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)
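
/* In real or VM86 mode, a segment load just sets base = selector << 4;
   in protected mode it must go through the full descriptor load in
   helper_load_seg(). */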
void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}
#endif /* TARGET_I386 */

#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)
#define EXCEPTION_ACTION raise_exception_err(env->exception_index, env->error_code)
#else
#define EXCEPTION_ACTION cpu_loop_exit()
#endif

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }

    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    EXCEPTION_ACTION;

    /* never comes here */
    return 1;
}

#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)   (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)  ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context) ((context)->uc_mcontext->es.err)
# define MASK_sig(context)  ((context)->uc_sigmask)
#elif defined (__NetBSD__)
# include <ucontext.h>

# define EIP_sig(context)   ((context)->uc_mcontext.__gregs[_REG_EIP])
# define TRAP_sig(context)  ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
# define ERROR_sig(context) ((context)->uc_mcontext.__gregs[_REG_ERR])
# define MASK_sig(context)  ((context)->uc_sigmask)
#elif defined (__FreeBSD__) || defined(__DragonFly__)
# include <ucontext.h>

# define EIP_sig(context)   (*((unsigned long*)&(context)->uc_mcontext.mc_eip))
# define TRAP_sig(context)  ((context)->uc_mcontext.mc_trapno)
# define ERROR_sig(context) ((context)->uc_mcontext.mc_err)
# define MASK_sig(context)  ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
# define EIP_sig(context)   ((context)->sc_eip)
# define TRAP_sig(context)  ((context)->sc_trapno)
# define ERROR_sig(context) ((context)->sc_err)
# define MASK_sig(context)  ((context)->sc_mask)
#else
# define EIP_sig(context)   ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)  ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
# define MASK_sig(context)  ((context)->uc_sigmask)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
#if defined(__NetBSD__) || defined (__FreeBSD__) || defined(__DragonFly__)
    ucontext_t *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
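    /* trap 0xe is the x86 page-fault vector; bit 1 of its error code is
       set when the faulting access was a write */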
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}

#elif defined(__x86_64__)

#ifdef __NetBSD__
#define PC_sig(context)    _UC_MACHINE_PC(context)
#define TRAP_sig(context)  ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
#define ERROR_sig(context) ((context)->uc_mcontext.__gregs[_REG_ERR])
#define MASK_sig(context)  ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
#define PC_sig(context)    ((context)->sc_rip)
#define TRAP_sig(context)  ((context)->sc_trapno)
#define ERROR_sig(context) ((context)->sc_err)
#define MASK_sig(context)  ((context)->sc_mask)
#elif defined (__FreeBSD__) || defined(__DragonFly__)
#include <ucontext.h>

#define PC_sig(context)    (*((unsigned long*)&(context)->uc_mcontext.mc_rip))
#define TRAP_sig(context)  ((context)->uc_mcontext.mc_trapno)
#define ERROR_sig(context) ((context)->uc_mcontext.mc_err)
#define MASK_sig(context)  ((context)->uc_sigmask)
#else
#define PC_sig(context)    ((context)->uc_mcontext.gregs[REG_RIP])
#define TRAP_sig(context)  ((context)->uc_mcontext.gregs[REG_TRAPNO])
#define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
#define MASK_sig(context)  ((context)->uc_sigmask)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    unsigned long pc;
#if defined(__NetBSD__) || defined (__FreeBSD__) || defined(__DragonFly__)
    ucontext_t *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif

    pc = PC_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             TRAP_sig(uc) == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}

#elif defined(_ARCH_PPC)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)    ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access  */
# define GPR_sig(reg_num, context)     REG_sig(gpr[reg_num], context)
# define IAR_sig(context)              REG_sig(nip, context)  /* Program counter */
# define MSR_sig(context)              REG_sig(msr, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)              REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)              REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)               REG_sig(link, context) /* Link register */
# define CR_sig(context)               REG_sig(ccr, context)  /* Condition register */
/* Float Registers access  */
# define FLOAT_sig(reg_num, context)   (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)            (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)              REG_sig(dar, context)
# define DSISR_sig(context)            REG_sig(dsisr, context)
# define TRAP_sig(context)             REG_sig(trap, context)
#endif /* linux */

#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <ucontext.h>
# define IAR_sig(context)              ((context)->uc_mcontext.mc_srr0)
# define MSR_sig(context)              ((context)->uc_mcontext.mc_srr1)
# define CTR_sig(context)              ((context)->uc_mcontext.mc_ctr)
# define XER_sig(context)              ((context)->uc_mcontext.mc_xer)
# define LR_sig(context)               ((context)->uc_mcontext.mc_lr)
# define CR_sig(context)               ((context)->uc_mcontext.mc_cr)
/* Exception Registers access */
# define DAR_sig(context)              ((context)->uc_mcontext.mc_dar)
# define DSISR_sig(context)            ((context)->uc_mcontext.mc_dsisr)
# define TRAP_sig(context)             ((context)->uc_mcontext.mc_exc)
#endif /* __FreeBSD__|| __FreeBSD_kernel__ */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)      ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)   ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)       REG_sig(r##reg_num, context)
# define IAR_sig(context)                REG_sig(srr0, context) /* Program counter */
# define MSR_sig(context)                REG_sig(srr1, context) /* Machine State Register (Supervisor) */
# define CTR_sig(context)                REG_sig(ctr, context)
# define XER_sig(context)                REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                 REG_sig(lr, context)   /* Link register */
# define CR_sig(context)                 REG_sig(cr, context)   /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)     FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)              ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)                EXCEPREG_sig(dar, context) /* Fault registers for coredump */
# define DSISR_sig(context)              EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)               EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
    ucontext_t *uc = puc;
#else
    struct ucontext *uc = puc;
#endif
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
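    /* DSISR indicates whether the faulting access was a store: bit
       0x02000000 on classic PPC (0x00800000 on ppc 4xx, disabled below);
       DSISR is not valid for trap 0x400, the instruction storage
       interrupt */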
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(CONFIG_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define ? */
    unsigned long pc = regs[1];
#else
#ifdef __linux__
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
    unsigned long pc = uc->sc_pc;
    void *sigmask = (void *)(long)uc->sc_mask;
#endif
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x15: // stba
        case 0x06: // sth
        case 0x16: // stha
        case 0x04: // st
        case 0x14: // sta
        case 0x07: // std
        case 0x17: // stda
        case 0x0e: // stx
        case 0x1e: // stxa
        case 0x24: // stf
        case 0x34: // stfa
        case 0x27: // stdf
        case 0x37: // stdfa
        case 0x26: // stqf
        case 0x36: // stqfa
        case 0x25: // stfsr
        case 0x3c: // casa
        case 0x3e: // casxa
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}

#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33:  */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.sc_iaoq[0];
    /* FIXME: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */