/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include "config.h"
#include "exec.h"
#include "disas.h"

#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#include <sys/ucontext.h>
#endif

#ifdef USE_KVM
#include "qemu-kvm.h"
extern int kvm_allowed;
#endif
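
/* Note: when built with KVM support, kvm_allowed selects at run time
   whether vcpu execution is handed to the kernel (kvm_cpu_exec() in
   cpu_exec() below) instead of running the translated-code loop. The
   flag itself is defined and set elsewhere (e.g. from the command
   line). */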

int tb_invalidated_flag;

//#define DEBUG_EXEC
//#define DEBUG_SIGNAL

void cpu_loop_exit(void)
{
    /* NOTE: the registers must be saved by hand at this point because
       longjmp restores them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}

#if !(defined(TARGET_SPARC) || defined(TARGET_SH4) || defined(TARGET_M68K))
#define reg_T2
#endif

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    struct ucontext *uc = puc;
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
    }
#endif
    longjmp(env->jmp_env, 1);
}
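
/* Translated-block (TB) lookup is two-level: tb_find_fast() first
   probes a small direct-mapped cache indexed by virtual PC
   (env->tb_jmp_cache); on a miss, tb_find_slow() walks the hash table
   keyed by the *physical* PC and translates a new block if none
   matches. Keying the canonical table by physical address keeps TBs
   valid across changes of the virtual mapping. */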

static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    int code_gen_size;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;
    uint8_t *tc_ptr;

    spin_lock(&tb_lock);

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* don't forget to invalidate previous TB info */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    spin_unlock(&tb_lock);
    return tb;
}
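
/* Fast-path lookup: a single tb_jmp_cache probe plus a compare of the
   (pc, cs_base, flags) key; only a mismatch falls back to
   tb_find_slow(). */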

static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint64_t flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
#if defined(TARGET_I386)
    flags = env->hflags;
    flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
    flags |= env->intercept;
    cs_base = env->segs[R_CS].base;
    pc = cs_base + env->eip;
#elif defined(TARGET_ARM)
    flags = env->thumb | (env->vfp.vec_len << 1)
            | (env->vfp.vec_stride << 4);
    if ((env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR)
        flags |= (1 << 6);
    if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30))
        flags |= (1 << 7);
    cs_base = 0;
    pc = env->regs[15];
#elif defined(TARGET_SPARC)
#ifdef TARGET_SPARC64
    // Combined FPU enable bits . PRIV . DMMU enabled . IMMU enabled
    flags = (((env->pstate & PS_PEF) >> 1) | ((env->fprs & FPRS_FEF) << 2))
        | (env->pstate & PS_PRIV) | ((env->lsu & (DMMU_E | IMMU_E)) >> 2);
#else
    // FPU enable . MMU Boot . MMU enabled . MMU no-fault . Supervisor
    flags = (env->psref << 4) | (((env->mmuregs[0] & MMU_BM) >> 14) << 3)
        | ((env->mmuregs[0] & (MMU_E | MMU_NF)) << 1)
        | env->psrs;
#endif
    cs_base = env->npc;
    pc = env->pc;
#elif defined(TARGET_PPC)
    flags = env->hflags;
    cs_base = 0;
    pc = env->nip;
#elif defined(TARGET_MIPS)
    flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK);
    cs_base = 0;
    pc = env->PC[env->current_tc];
#elif defined(TARGET_M68K)
    flags = (env->fpcr & M68K_FPCR_PREC)  /* Bit  6 */
            | (env->sr & SR_S)            /* Bit  13 */
            | ((env->macsr >> 4) & 0xf);  /* Bits 0-3 */
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_SH4)
    flags = env->sr & (SR_MD | SR_RB);
    cs_base = 0;         /* XXXXX */
    pc = env->pc;
#elif defined(TARGET_ALPHA)
    flags = env->ps;
    cs_base = 0;
    pc = env->pc;
#else
#error unsupported CPU
#endif
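    /* The (pc, cs_base, flags) triple computed above is the key under
       which translated blocks are cached and compared: two CPU states
       that differ in any of these fields must not share a TB. */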
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (__builtin_expect(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                         tb->flags != flags, 0)) {
        tb = tb_find_slow(pc, cs_base, flags);
        /* Note: we do it here to avoid a gcc bug on Mac OS X when
           doing it in tb_find_slow */
        if (tb_invalidated_flag) {
            /* as some TB could have been invalidated because
               of memory exceptions while generating the code, we
               must recompute the hash index here */
            T0 = 0;
        }
    }
    return tb;
}
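
/* The main loop below is organised around setjmp()/longjmp():
   cpu_loop_exit() and the exception paths longjmp() back to the
   setjmp() at the top of the outer for(;;), which delivers any
   pending exception before re-entering the inner translation loop. */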

/* main execution loop */

int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
#if defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    uint32_t *saved_regwptr;
#endif
#endif
#if defined(__sparc__) && !defined(HOST_SOLARIS)
    int saved_i7;
    target_ulong tmp_T0;
#endif
    int ret, interrupt_request;
    void (*gen_func)(void);
    TranslationBlock *tb;
    uint8_t *tc_ptr;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;
#if defined(__sparc__) && !defined(HOST_SOLARIS)
    /* we also save i7 because longjmp may not restore it */
    asm volatile ("mov %%i7, %0" : "=r" (saved_i7));
#endif

    env_to_regs();
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    saved_regwptr = REGWPTR;
#endif
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
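    /* On x86 the architectural eflags are kept in the decomposed, lazy
       CC_OP/CC_SRC form while translated code runs; the exit path at
       the bottom of this function recombines them into env->eflags. */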
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    break;
                } else if (env->user_mode_only) {
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
#endif
                    ret = env->exception_index;
                    break;
                } else {
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env->exception_index);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#endif
                }
                env->exception_index = -1;
            }
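            /* With any pending exception now delivered, try the
               accelerated execution paths (kqemu, KVM) before falling
               back to the translated-code loop below. */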
#ifdef USE_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0) {
                int ret;
                env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif

#ifdef USE_KVM
            if (kvm_allowed) {
                kvm_cpu_exec(env);
                longjmp(env->jmp_env, 1);
            }
#endif
            T0 = 0; /* force lookup of first TB */
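            /* Chaining protocol: T0 carries a pointer to the
               previously executed TB with the taken-jump slot index
               (0 or 1) in its low bits; 0 means "no predecessor",
               which disables the TB patching further down. */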
            for(;;) {
#if defined(__sparc__) && !defined(HOST_SOLARIS)
                /* g1 can be modified by some libc? functions */
                tmp_T0 = T0;
#endif
                interrupt_request = env->interrupt_request;
                if (__builtin_expect(interrupt_request, 0)
#if defined(TARGET_I386)
                    && env->hflags & HF_GIF_MASK
#endif
                    ) {
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                        !(env->hflags & HF_SMM_MASK)) {
                        svm_check_intercept(SVM_EXIT_SMI);
                        env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                        do_smm_enter();
#if defined(__sparc__) && !defined(HOST_SOLARIS)
                        tmp_T0 = 0;
#else
                        T0 = 0;
#endif
                    } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->eflags & IF_MASK || env->hflags & HF_HIF_MASK) &&
                        !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                        int intno;
                        svm_check_intercept(SVM_EXIT_INTR);
                        env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                        intno = cpu_get_pic_interrupt(env);
                        if (loglevel & CPU_LOG_TB_IN_ASM) {
                            fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
                        }
                        do_interrupt(intno, 0, 0, 0, 1);
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
                        tmp_T0 = 0;
#else
                        T0 = 0;
#endif
#if !defined(CONFIG_USER_ONLY)
                    } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                        (env->eflags & IF_MASK) && !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                        int intno;
                        /* FIXME: this should respect TPR */
                        env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                        svm_check_intercept(SVM_EXIT_VINTR);
                        intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                        if (loglevel & CPU_LOG_TB_IN_ASM)
                            fprintf(logfile, "Servicing virtual hardware INT=0x%02x\n", intno);
                        do_interrupt(intno, 0, 0, -1, 1);
                        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl),
                                 ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl)) & ~V_IRQ_MASK);
#if defined(__sparc__) && !defined(HOST_SOLARIS)
                        tmp_T0 = 0;
#else
                        T0 = 0;
#endif
#endif
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
#if defined(__sparc__) && !defined(HOST_SOLARIS)
                        tmp_T0 = 0;
#else
                        T0 = 0;
#endif
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
#if defined(__sparc__) && !defined(HOST_SOLARIS)
                        tmp_T0 = 0;
#else
                        T0 = 0;
#endif
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psret != 0)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            do_interrupt(env->interrupt_index);
                            env->interrupt_index = 0;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
#if defined(__sparc__) && !defined(HOST_SOLARIS)
                            tmp_T0 = 0;
#else
                            T0 = 0;
#endif
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                    }
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->uncached_cpsr & CPSR_I)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                    }
#elif defined(TARGET_SH4)
                    /* XXXXX */
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
                        tmp_T0 = 0;
#else
                        T0 = 0;
#endif
                    }
                    if (interrupt_request & CPU_INTERRUPT_EXIT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
                        env->exception_index = EXCP_INTERRUPT;
                        cpu_loop_exit();
                    }
                }
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
                    regs_to_env();
#if defined(TARGET_I386)
                    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                    cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SPARC)
                    REGWPTR = env->regbase + (env->cwp * 16);
                    env->regwptr = REGWPTR;
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_PPC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_MIPS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SH4)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_ALPHA)
                    cpu_dump_state(env, logfile, fprintf, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                tb = tb_find_fast();
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_EXEC)) {
                    fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                            (long)tb->tc_ptr, tb->pc,
                            lookup_symbol(tb->pc));
                }
#endif
#if defined(__sparc__) && !defined(HOST_SOLARIS)
                T0 = tmp_T0;
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (T0 != 0 &&
#if USE_KQEMU
                    (env->kqemu_enabled != 2) &&
#endif
                    tb->page_addr[1] == -1
#if defined(TARGET_I386) && defined(USE_CODE_COPY)
                    && (tb->cflags & CF_CODE_COPY) ==
                    (((TranslationBlock *)(T0 & ~3))->cflags & CF_CODE_COPY)
#endif
                    ) {
                    spin_lock(&tb_lock);
                    tb_add_jump((TranslationBlock *)(long)(T0 & ~3), T0 & 3, tb);
#if defined(USE_CODE_COPY)
                    /* propagates the FP use info */
                    ((TranslationBlock *)(T0 & ~3))->cflags |=
                        (tb->cflags & CF_FP_USED);
#endif
                    spin_unlock(&tb_lock);
                }
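                /* After tb_add_jump() the previous TB branches directly
                   into this one, so chained sequences run without
                   coming back through this lookup loop until an
                   interrupt or an invalidation breaks the chain. */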
                tc_ptr = tb->tc_ptr;
                env->current_tb = tb;
                /* execute the generated code */
                gen_func = (void *)tc_ptr;
#if defined(__sparc__)
                __asm__ __volatile__("call %0\n\t"
                                     "mov %%o7,%%i0"
                                     : /* no outputs */
                                     : "r" (gen_func)
                                     : "i0", "i1", "i2", "i3", "i4", "i5",
                                       "o0", "o1", "o2", "o3", "o4", "o5",
                                       "l0", "l1", "l2", "l3", "l4", "l5",
                                       "l6", "l7");
#elif defined(__arm__)
                asm volatile ("mov pc, %0\n\t"
                              ".global exec_loop\n\t"
                              "exec_loop:\n\t"
                              : /* no outputs */
                              : "r" (gen_func)
                              : "r1", "r2", "r3", "r8", "r9", "r10", "r12", "r14");
#elif defined(TARGET_I386) && defined(USE_CODE_COPY)
                {
                    if (!(tb->cflags & CF_CODE_COPY)) {
                        if ((tb->cflags & CF_FP_USED) && env->native_fp_regs) {
                            save_native_fp_state(env);
                        }
                        gen_func();
                    } else {
                        if ((tb->cflags & CF_FP_USED) && !env->native_fp_regs) {
                            restore_native_fp_state(env);
                        }
                        /* we work with native eflags */
                        CC_SRC = cc_table[CC_OP].compute_all();
                        CC_OP = CC_OP_EFLAGS;
                        asm(".globl exec_loop\n"
                            "\n"
                            "debug1:\n"
                            "    pushl %%ebp\n"
                            "    fs movl %10, %9\n"
                            "    fs movl %11, %%eax\n"
                            "    andl $0x400, %%eax\n"
                            "    fs orl %8, %%eax\n"
                            "    pushl %%eax\n"
                            "    popf\n"
                            "    fs movl %%esp, %12\n"
                            "    fs movl %0, %%eax\n"
                            "    fs movl %1, %%ecx\n"
                            "    fs movl %2, %%edx\n"
                            "    fs movl %3, %%ebx\n"
                            "    fs movl %4, %%esp\n"
                            "    fs movl %5, %%ebp\n"
                            "    fs movl %6, %%esi\n"
                            "    fs movl %7, %%edi\n"
                            "    fs jmp *%9\n"
                            "exec_loop:\n"
                            "    fs movl %%esp, %4\n"
                            "    fs movl %12, %%esp\n"
                            "    fs movl %%eax, %0\n"
                            "    fs movl %%ecx, %1\n"
                            "    fs movl %%edx, %2\n"
                            "    fs movl %%ebx, %3\n"
                            "    fs movl %%ebp, %5\n"
                            "    fs movl %%esi, %6\n"
                            "    fs movl %%edi, %7\n"
                            "    pushf\n"
                            "    popl %%eax\n"
                            "    movl %%eax, %%ecx\n"
                            "    andl $0x400, %%ecx\n"
                            "    shrl $9, %%ecx\n"
                            "    andl $0x8d5, %%eax\n"
                            "    fs movl %%eax, %8\n"
                            "    movl $1, %%eax\n"
                            "    subl %%ecx, %%eax\n"
                            "    fs movl %%eax, %11\n"
                            "    fs movl %9, %%ebx\n" /* get T0 value */
                            "    popl %%ebp\n"
                            : /* no outputs */
                            : "m" (*(uint8_t *)offsetof(CPUState, regs[0])),
                              "m" (*(uint8_t *)offsetof(CPUState, regs[1])),
                              "m" (*(uint8_t *)offsetof(CPUState, regs[2])),
                              "m" (*(uint8_t *)offsetof(CPUState, regs[3])),
                              "m" (*(uint8_t *)offsetof(CPUState, regs[4])),
                              "m" (*(uint8_t *)offsetof(CPUState, regs[5])),
                              "m" (*(uint8_t *)offsetof(CPUState, regs[6])),
                              "m" (*(uint8_t *)offsetof(CPUState, regs[7])),
                              "m" (*(uint8_t *)offsetof(CPUState, cc_src)),
                              "m" (*(uint8_t *)offsetof(CPUState, tmp0)),
                              "a" (gen_func),
                              "m" (*(uint8_t *)offsetof(CPUState, df)),
                              "m" (*(uint8_t *)offsetof(CPUState, saved_esp))
                            : "%ecx", "%edx"
                            );
                    }
                }
#elif defined(__ia64)
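                /* On IA-64 a function pointer is really a pointer to a
                   descriptor holding the entry address and the gp
                   value, so build one by hand for the generated code. */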
                struct fptr {
                    void *ip;
                    void *gp;
                } fp;

                fp.ip = tc_ptr;
                fp.gp = code_gen_buffer + 2 * (1 << 20);
                (*(void (*)(void)) &fp)();
#else
                gen_func();
#endif
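                /* Control returns here once the block (and any TBs
                   chained to it) has run; exceptions instead longjmp()
                   back to the setjmp() at the top of the outer loop. */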
                env->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(TARGET_I386) && !defined(CONFIG_SOFTMMU)
                if (env->hflags & HF_SOFTMMU_MASK) {
                    env->hflags &= ~HF_SOFTMMU_MASK;
                    /* do not allow linking to another block */
                    T0 = 0;
                }
#endif
#if defined(USE_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* for(;;) */
        } else {
            env_to_regs();
        }
    } /* for(;;) */

#if defined(TARGET_I386)
#if defined(USE_CODE_COPY)
    if (env->native_fp_regs) {
        save_native_fp_state(env);
    }
#endif
    /* restore flags in standard format */
    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    REGWPTR = saved_regwptr;
#endif
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
    asm volatile ("mov %0, %%i7" : : "r" (saved_i7));
#endif
#include "hostregs_helper.h"

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}

/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}

#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, uint8_t *ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave((target_ulong)ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, uint8_t *ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor((target_ulong)ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */

#if !defined(CONFIG_SOFTMMU)
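
/* Without a soft MMU, guest memory accesses are host accesses and
   faults arrive as host signals. Each per-target handle_cpu_signal()
   below returns 1 when the fault was consumed (page unprotected or a
   guest exception raised) and 0 when the signal is not ours and must
   be passed on to the host. */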

#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write,
                                   ((env->hflags & HF_CPL_MASK) == 3), 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, 1, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
}

#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, 1, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
}

#elif defined (TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, msr_pr, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, 1, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined (TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, 1, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined (TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, 1, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined (TARGET_ALPHA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, 1, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#else
#error unsupported target CPU
#endif
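
/* Host-side handlers: each cpu_signal_handler() below extracts the
   faulting PC and, where the host makes it available, a write/read
   flag from the signal context, then delegates to handle_cpu_signal()
   above. */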

#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)   (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)  ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context) ((context)->uc_mcontext->es.err)
#else
# define EIP_sig(context)   ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)  ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
#endif

#if defined(USE_CODE_COPY)
static void cpu_send_trap(unsigned long pc, int trap,
                          struct ucontext *uc)
{
    TranslationBlock *tb;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, uc);
    }
    sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
    raise_exception_err(trap, env->error_code);
}
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
#if defined(TARGET_I386) && defined(USE_CODE_COPY)
    if (trapno == 0x00 || trapno == 0x05) {
        /* send division by zero or bound exception */
        cpu_send_trap(pc, trapno, uc);
        return 1;
    } else
#endif
        return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                                 trapno == 0xe ?
                                 (ERROR_sig(uc) >> 1) & 1 : 0,
                                 &uc->uc_sigmask, puc);
}
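
/* Trap 0xe is the x86 page fault; bit 1 of its error code is set for
   write accesses, which is exactly the is_write flag that
   handle_cpu_signal() needs. Other traps are reported as reads. */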

#elif defined(__x86_64__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;

    pc = uc->uc_mcontext.gregs[REG_RIP];
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
                             (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}

#elif defined(__powerpc__)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)     ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)      REG_sig(gpr[reg_num], context)
# define IAR_sig(context)               REG_sig(nip, context)  /* Program counter */
# define MSR_sig(context)               REG_sig(msr, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)               REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)               REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                REG_sig(link, context) /* Link register */
# define CR_sig(context)                REG_sig(ccr, context)  /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)    (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)             (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)               REG_sig(dar, context)
# define DSISR_sig(context)             REG_sig(dsisr, context)
# define TRAP_sig(context)              REG_sig(trap, context)
#endif /* linux */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)      ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)   ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)      REG_sig(r##reg_num, context)
# define IAR_sig(context)               REG_sig(srr0, context)  /* Program counter */
# define MSR_sig(context)               REG_sig(srr1, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)               REG_sig(ctr, context)   /* Count register */
# define XER_sig(context)               REG_sig(xer, context)   /* User's integer exception register */
# define LR_sig(context)                REG_sig(lr, context)    /* Link register */
# define CR_sig(context)                REG_sig(cr, context)    /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)    FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)             ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)               EXCEPREG_sig(dar, context)     /* Fault registers for coredump */
# define DSISR_sig(context)             EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)              EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    unsigned long pc;
    int is_write;
    uint32_t insn;

    /* XXX: is there a standard glibc define ? */
    pc = regs[1];
    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x06: // sth
        case 0x04: // st
        case 0x07: // std
        case 0x24: // stf
        case 0x27: // stdf
        case 0x25: // stfsr
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}

#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[R15];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33: */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */