/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "exec.h"
#include "disas.h"
#include "tcg.h"
#include "kvm.h"
#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#ifdef __linux__
#include <sys/ucontext.h>
#endif
#endif
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif

int tb_invalidated_flag;

//#define CONFIG_DEBUG_EXEC
//#define DEBUG_SIGNAL
int qemu_cpu_has_work(CPUState *env)
{
    return cpu_has_work(env);
}
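
/* Unwind back to the setjmp() in cpu_exec(); any pending exception is
   left in env->exception_index for the main loop to dispatch. */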
void cpu_loop_exit(void)
{
    env->current_tb = NULL;
    longjmp(env->jmp_env, 1);
}
/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
#ifdef __linux__
    struct ucontext *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#endif
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
#ifdef __linux__
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
#elif defined(__OpenBSD__)
        sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
#endif
    }
#endif
    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}
/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);
    env->current_tb = NULL;
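
    /* The low 2 bits of the value returned by tcg_qemu_tb_exec() encode
       why execution stopped: the value 2 means the instruction counter
       expired before the TB started, so the PC must be restored from the
       TB itself (see the icount handling in cpu_exec() below). */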
    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if async event occurs before
           the TB starts executing.  */
        cpu_pc_from_tb(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;

    tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}
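
/* env->tb_jmp_cache is a direct-mapped cache indexed by a hash of the
   virtual PC; on a miss (or a cs_base/flags mismatch) we fall back to
   the physical-address lookup in tb_find_slow() above. */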
static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}
static CPUDebugExcpHandler *debug_excp_handler;

CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    CPUDebugExcpHandler *old_handler = debug_excp_handler;

    debug_excp_handler = handler;
    return old_handler;
}

static void cpu_handle_debug_exception(CPUState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit)
        QTAILQ_FOREACH(wp, &env->watchpoints, entry)
            wp->flags &= ~BP_WATCHPOINT_HIT;

    if (debug_excp_handler)
        debug_excp_handler(env);
}
/* main execution loop */

int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;
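
    /* Note: "hostregs_helper.h" is included twice above (first with
       DECLARE_HOST_REGS, then with SAVE_HOST_REGS) and once more at the
       end of this function to restore the globals that are kept in
       fixed host registers. */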
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;
    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
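            /* On hosts where env lives in a fixed global register (see
               the sparc glibc workaround above), longjmp() may have
               clobbered it, so reload it before anything else. */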
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
            env = cpu_single_env;
#define env cpu_single_env
#endif
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG)
                        cpu_handle_debug_exception(env);
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
#else
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MICROBLAZE)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#endif
                    env->exception_index = -1;
#endif
                }
            }
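
            /* With KVM the guest code runs inside the kernel rather than
               through TCG, so we make a single call into kvm_cpu_exec()
               and then longjmp() back to the top of the outer loop to
               dispatch whatever it left pending. */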
            if (kvm_enabled()) {
                kvm_cpu_exec(env);
                longjmp(env->jmp_env, 1);
            }
            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                               CPU_INTERRUPT_FIQ |
                                               CPU_INTERRUPT_SMI |
                                               CPU_INTERRUPT_NMI);
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        svm_check_intercept(SVM_EXIT_INIT);
                        do_cpu_init(env);
                        env->exception_index = EXCP_HALTED;
                        cpu_loop_exit();
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(env);
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            env->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt(EXCP12_MCHK, 0, 0, 0, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
                            env = cpu_single_env;
#define env cpu_single_env
#endif
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                  cpu_pil_allowed(env, pil)) ||
                                  type != TT_EXTINT) {
                                env->exception_index = env->interrupt_index;
                                do_interrupt(env);
                                next_tb = 0;
                            }
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(env->exit_request)) {
                    env->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit();
                }
#ifdef CONFIG_DEBUG_EXEC
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
#if defined(TARGET_I386)
                    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                    log_cpu_state(env, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    log_cpu_state(env, 0);
#elif defined(TARGET_SPARC)
                    log_cpu_state(env, 0);
#elif defined(TARGET_PPC)
                    log_cpu_state(env, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#elif defined(TARGET_MICROBLAZE)
                    log_cpu_state(env, 0);
#elif defined(TARGET_MIPS)
                    log_cpu_state(env, 0);
#elif defined(TARGET_SH4)
                    log_cpu_state(env, 0);
#elif defined(TARGET_ALPHA)
                    log_cpu_state(env, 0);
#elif defined(TARGET_CRIS)
                    log_cpu_state(env, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef CONFIG_DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                             (long)tb->tc_ptr, tb->pc,
                             lookup_symbol(tb->pc));
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                }
                spin_unlock(&tb_lock);
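
                /* Once patched, chained TBs branch directly into one
                   another, so control returns to this loop only when no
                   direct jump could be installed or an exit was
                   requested. */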
                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                if (!unlikely (env->exit_request)) {
                    env->current_tb = tb;
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
                    env->current_tb = NULL;
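                    /* icount: the 16-bit decrementer in icount_decr
                       bounds how many guest instructions a TB may
                       execute; icount_extra holds the part of the
                       budget that does not fit in 16 bits. */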
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC.  */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
                }
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        }
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
#include "hostregs_helper.h"

    /* fail safe: never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}
/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}
#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */
#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)
#define EXCEPTION_ACTION raise_exception_err(env->exception_index, env->error_code)
#else
#define EXCEPTION_ACTION cpu_loop_exit()
#endif
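
/* handle_cpu_signal() returns 1 when the fault was handled (the
   faulting instruction can simply be restarted) and 0 when the signal
   was not caused by emulated guest memory access and should be passed
   on to the host. */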
/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }

    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    EXCEPTION_ACTION;

    /* never comes here */
    return 1;
}
#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)   (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)  ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context) ((context)->uc_mcontext->es.err)
# define MASK_sig(context)  ((context)->uc_sigmask)
#elif defined (__NetBSD__)
# include <ucontext.h>

# define EIP_sig(context)   ((context)->uc_mcontext.__gregs[_REG_EIP])
# define TRAP_sig(context)  ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
# define ERROR_sig(context) ((context)->uc_mcontext.__gregs[_REG_ERR])
# define MASK_sig(context)  ((context)->uc_sigmask)
#elif defined (__FreeBSD__) || defined(__DragonFly__)
# include <ucontext.h>

# define EIP_sig(context)   (*((unsigned long*)&(context)->uc_mcontext.mc_eip))
# define TRAP_sig(context)  ((context)->uc_mcontext.mc_trapno)
# define ERROR_sig(context) ((context)->uc_mcontext.mc_err)
# define MASK_sig(context)  ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
# define EIP_sig(context)   ((context)->sc_eip)
# define TRAP_sig(context)  ((context)->sc_trapno)
# define ERROR_sig(context) ((context)->sc_err)
# define MASK_sig(context)  ((context)->sc_mask)
#else
# define EIP_sig(context)   ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)  ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
# define MASK_sig(context)  ((context)->uc_sigmask)
#endif
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
#if defined(__NetBSD__) || defined (__FreeBSD__) || defined(__DragonFly__)
    ucontext_t *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
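    /* Trap number 0xe is the x86 page fault; bit 1 of its error code is
       set when the faulting access was a write, which is what the
       (ERROR_sig(uc) >> 1) & 1 expression below extracts. */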
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}
#elif defined(__x86_64__)

#ifdef __NetBSD__
#define PC_sig(context)    _UC_MACHINE_PC(context)
#define TRAP_sig(context)  ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
#define ERROR_sig(context) ((context)->uc_mcontext.__gregs[_REG_ERR])
#define MASK_sig(context)  ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
#define PC_sig(context)    ((context)->sc_rip)
#define TRAP_sig(context)  ((context)->sc_trapno)
#define ERROR_sig(context) ((context)->sc_err)
#define MASK_sig(context)  ((context)->sc_mask)
#elif defined (__FreeBSD__) || defined(__DragonFly__)
#include <ucontext.h>

#define PC_sig(context)    (*((unsigned long*)&(context)->uc_mcontext.mc_rip))
#define TRAP_sig(context)  ((context)->uc_mcontext.mc_trapno)
#define ERROR_sig(context) ((context)->uc_mcontext.mc_err)
#define MASK_sig(context)  ((context)->uc_sigmask)
#else
#define PC_sig(context)    ((context)->uc_mcontext.gregs[REG_RIP])
#define TRAP_sig(context)  ((context)->uc_mcontext.gregs[REG_TRAPNO])
#define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
#define MASK_sig(context)  ((context)->uc_sigmask)
#endif
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    unsigned long pc;
#if defined(__NetBSD__) || defined (__FreeBSD__) || defined(__DragonFly__)
    ucontext_t *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif

    pc = PC_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             TRAP_sig(uc) == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}
#elif defined(_ARCH_PPC)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)  ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)   REG_sig(gpr[reg_num], context)
# define IAR_sig(context)            REG_sig(nip, context)  /* Program counter */
# define MSR_sig(context)            REG_sig(msr, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)            REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)            REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)             REG_sig(link, context) /* Link register */
# define CR_sig(context)             REG_sig(ccr, context)  /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context) (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)          (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)            REG_sig(dar, context)
# define DSISR_sig(context)          REG_sig(dsisr, context)
# define TRAP_sig(context)           REG_sig(trap, context)
#endif /* linux */
#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)      ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)   ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)       REG_sig(r##reg_num, context)
# define IAR_sig(context)                REG_sig(srr0, context) /* Program counter */
# define MSR_sig(context)                REG_sig(srr1, context) /* Machine State Register (Supervisor) */
# define CTR_sig(context)                REG_sig(ctr, context)
# define XER_sig(context)                REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                 REG_sig(lr, context)   /* Link register */
# define CR_sig(context)                 REG_sig(cr, context)   /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)     FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)              ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)                EXCEPREG_sig(dar, context) /* Fault registers for coredump */
# define DSISR_sig(context)              EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)               EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
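    /* On PowerPC, DSISR bit 0x02000000 is set when the faulting access
       was a store; vector 0x400 is an instruction storage interrupt,
       where DSISR does not describe a data access. */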
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(CONFIG_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define ? */
    unsigned long pc = regs[1];
#else
#ifdef __linux__
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
    unsigned long pc = uc->sc_pc;
    void *sigmask = (void *)(long)uc->sc_mask;
#endif
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
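    /* SPARC instructions whose top two bits (the op field) equal 3 are
       loads and stores; bits 24:19 hold the op3 field that selects the
       store variants tested below. */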
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x15: // stba
        case 0x06: // sth
        case 0x16: // stha
        case 0x04: // st
        case 0x14: // sta
        case 0x07: // std
        case 0x17: // stda
        case 0x0e: // stx
        case 0x1e: // stxa
        case 0x24: // stf
        case 0x34: // stfa
        case 0x27: // stdf
        case 0x37: // stdfa
        case 0x26: // stqf
        case 0x36: // stqfa
        case 0x25: // stfsr
        case 0x3c: // casa
        case 0x3e: // casxa
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}
#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33:  */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.sc_iaoq[0];
    /* FIXME: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */