/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#include "exec.h"
#include "disas.h"
#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#include <sys/ucontext.h>
#endif

#ifdef USE_KVM
#include "qemu-kvm.h"
extern int kvm_allowed;
#endif
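/* set when translated blocks may have been invalidated, e.g. by the
   tb_flush() forced by a full code buffer: cached TB pointers and any
   pending TB chaining must then be discarded */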
int tb_invalidated_flag;

//#define DEBUG_EXEC
//#define DEBUG_SIGNAL

#if defined(TARGET_ARM) || defined(TARGET_SPARC)
/* XXX: unify with i386 target */
void cpu_loop_exit(void)
{
    longjmp(env->jmp_env, 1);
}
#endif

#if !(defined(TARGET_SPARC) || defined(TARGET_SH4))
#define reg_T2
#endif
/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    struct ucontext *uc = puc;
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
    }
#endif
    longjmp(env->jmp_env, 1);
}
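/* Slow-path TB lookup. Blocks are hashed by physical PC, so the same
   guest code reaches the same translation whatever virtual mapping it
   is executed through, and so that writes to a physical page can
   invalidate every translation derived from it. If no matching block
   exists yet, it is translated here. */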
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      unsigned int flags)
{
    TranslationBlock *tb, **ptb1;
    int code_gen_size;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;
    uint8_t *tc_ptr;

    spin_lock(&tb_lock);

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* don't forget to invalidate previous TB info */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    spin_unlock(&tb_lock);
    return tb;
}
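/* Fast-path TB lookup: a direct-mapped cache indexed by the virtual
   PC sits in front of tb_find_slow(). The cs_base/flags comparison
   rejects entries whose recorded CPU state no longer matches. */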
static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    unsigned int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
#if defined(TARGET_I386)
    flags = env->hflags;
    flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
    cs_base = env->segs[R_CS].base;
    pc = cs_base + env->eip;
#elif defined(TARGET_ARM)
    flags = env->thumb | (env->vfp.vec_len << 1)
            | (env->vfp.vec_stride << 4);
    if ((env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR)
        flags |= (1 << 6);
    if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30))
        flags |= (1 << 7);
    cs_base = 0;
    pc = env->regs[15];
#elif defined(TARGET_SPARC)
#ifdef TARGET_SPARC64
    // Combined FPU enable bits . PRIV . DMMU enabled . IMMU enabled
    flags = (((env->pstate & PS_PEF) >> 1) | ((env->fprs & FPRS_FEF) << 2))
        | (env->pstate & PS_PRIV) | ((env->lsu & (DMMU_E | IMMU_E)) >> 2);
#else
    // FPU enable . MMU enabled . MMU no-fault . Supervisor
    flags = (env->psref << 3) | ((env->mmuregs[0] & (MMU_E | MMU_NF)) << 1)
        | env->psrs;
#endif
    cs_base = env->npc;
    pc = env->pc;
#elif defined(TARGET_PPC)
    flags = (msr_pr << MSR_PR) | (msr_fp << MSR_FP) |
        (msr_se << MSR_SE) | (msr_le << MSR_LE);
    cs_base = 0;
    pc = env->nip;
#elif defined(TARGET_MIPS)
    flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK);
    cs_base = 0;
    pc = env->PC;
#elif defined(TARGET_SH4)
    flags = env->sr & (SR_MD | SR_RB);
    cs_base = 0; /* XXXXX */
    pc = env->pc;
#else
#error unsupported CPU
#endif
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (__builtin_expect(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                         tb->flags != flags, 0)) {
        tb = tb_find_slow(pc, cs_base, flags);
        /* Note: we do it here to avoid a gcc bug on Mac OS X when
           doing it in tb_find_slow */
        if (tb_invalidated_flag) {
            /* as some TB could have been invalidated because
               of memory exceptions while generating the code, we
               must recompute the hash index here */
            T0 = 0;
        }
    }
    return tb;
}
/* main execution loop */
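/* Each turn of the outer for(;;) below re-arms a setjmp() target;
   exceptions raised while executing translated code longjmp() back to
   it. T0 doubles as the chaining handle: it holds the address of the
   previously executed TB, or 0 to suppress direct TB-to-TB linking. */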
int cpu_exec(CPUState *env1)
{
    int saved_T0, saved_T1;
#if defined(reg_T2)
    int saved_T2;
#endif
    CPUState *saved_env;
#if defined(TARGET_I386)
#ifdef reg_EAX
    int saved_EAX;
#endif
#ifdef reg_ECX
    int saved_ECX;
#endif
#ifdef reg_EDX
    int saved_EDX;
#endif
#ifdef reg_EBX
    int saved_EBX;
#endif
#ifdef reg_ESP
    int saved_ESP;
#endif
#ifdef reg_EBP
    int saved_EBP;
#endif
#ifdef reg_ESI
    int saved_ESI;
#endif
#ifdef reg_EDI
    int saved_EDI;
#endif
#elif defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    uint32_t *saved_regwptr;
#endif
#endif
#if defined(__sparc__) && !defined(HOST_SOLARIS)
    int saved_i7, tmp_T0;
#endif
    int ret, interrupt_request;
    void (*gen_func)(void);
    TranslationBlock *tb;
    uint8_t *tc_ptr;

#if defined(TARGET_I386)
    /* handle exit of HALTED state */
    if (env1->hflags & HF_HALTED_MASK) {
        /* disable halt condition */
        if ((env1->interrupt_request & CPU_INTERRUPT_HARD) &&
            (env1->eflags & IF_MASK)) {
            env1->hflags &= ~HF_HALTED_MASK;
        } else {
            return EXCP_HALTED;
        }
    }
#elif defined(TARGET_PPC)
    if (env1->halted) {
        if (env1->msr[MSR_EE] &&
            (env1->interrupt_request &
             (CPU_INTERRUPT_HARD | CPU_INTERRUPT_TIMER))) {
            env1->halted = 0;
        } else {
            return EXCP_HALTED;
        }
    }
#elif defined(TARGET_SPARC)
    if (env1->halted) {
        if ((env1->interrupt_request & CPU_INTERRUPT_HARD) &&
            (env1->psret != 0)) {
            env1->halted = 0;
        } else {
            return EXCP_HALTED;
        }
    }
#elif defined(TARGET_ARM)
    if (env1->halted) {
        /* An interrupt wakes the CPU even if the I and F CPSR bits are
           set. */
        if (env1->interrupt_request
            & (CPU_INTERRUPT_FIQ | CPU_INTERRUPT_HARD)) {
            env1->halted = 0;
        } else {
            return EXCP_HALTED;
        }
    }
#elif defined(TARGET_MIPS)
    if (env1->halted) {
        if (env1->interrupt_request &
            (CPU_INTERRUPT_HARD | CPU_INTERRUPT_TIMER)) {
            env1->halted = 0;
        } else {
            return EXCP_HALTED;
        }
    }
#endif
    cpu_single_env = env1;

    /* first we save global registers */
    saved_env = env;
    env = env1;
    saved_T0 = T0;
    saved_T1 = T1;
#if defined(reg_T2)
    saved_T2 = T2;
#endif
#if defined(__sparc__) && !defined(HOST_SOLARIS)
    /* we also save i7 because longjmp may not restore it */
    asm volatile ("mov %%i7, %0" : "=r" (saved_i7));
#endif

#if defined(TARGET_I386)
#ifdef reg_EAX
    saved_EAX = EAX;
#endif
#ifdef reg_ECX
    saved_ECX = ECX;
#endif
#ifdef reg_EDX
    saved_EDX = EDX;
#endif
#ifdef reg_EBX
    saved_EBX = EBX;
#endif
#ifdef reg_ESP
    saved_ESP = ESP;
#endif
#ifdef reg_EBP
    saved_EBP = EBP;
#endif
#ifdef reg_ESI
    saved_ESI = ESI;
#endif
#ifdef reg_EDI
    saved_EDI = EDI;
#endif

    env_to_regs();
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
#elif defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    saved_regwptr = REGWPTR;
#endif
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;
    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    break;
                } else if (env->user_mode_only) {
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
#endif
                    ret = env->exception_index;
                    break;
                } else {
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env->exception_index);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#endif
                }
                env->exception_index = -1;
            }
#ifdef USE_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0) {
                int ret;
                env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif

#ifdef USE_KVM
            if (kvm_allowed) {
                kvm_cpu_exec(env);
                longjmp(env->jmp_env, 1);
            }
#endif
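            /* the inner for(;;) below executes (possibly chained) TBs
               until an interrupt, exception or exit request longjmp()s
               back to the setjmp point above */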
            T0 = 0; /* force lookup of first TB */
            for(;;) {
#if defined(__sparc__) && !defined(HOST_SOLARIS)
                /* g1 can be modified by some libc? functions */
                tmp_T0 = T0;
#endif
                interrupt_request = env->interrupt_request;
                if (__builtin_expect(interrupt_request, 0)) {
#if defined(TARGET_I386)
                    /* if hardware interrupt pending, we execute it */
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->eflags & IF_MASK) &&
                        !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                        int intno;
                        env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        intno = cpu_get_pic_interrupt(env);
                        if (loglevel & CPU_LOG_TB_IN_ASM) {
                            fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
                        }
                        do_interrupt(intno, 0, 0, 0, 1);
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
                        tmp_T0 = 0;
#else
                        T0 = 0;
#endif
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (msr_ee != 0) {
                        if ((interrupt_request & CPU_INTERRUPT_HARD)) {
                            /* Raise it */
                            env->exception_index = EXCP_EXTERNAL;
                            env->error_code = 0;
                            do_interrupt(env);
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
#if defined(__sparc__) && !defined(HOST_SOLARIS)
                            tmp_T0 = 0;
#else
                            T0 = 0;
#endif
                        } else if ((interrupt_request & CPU_INTERRUPT_TIMER)) {
                            /* Raise it */
                            env->exception_index = EXCP_DECR;
                            env->error_code = 0;
                            do_interrupt(env);
                            env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
#if defined(__sparc__) && !defined(HOST_SOLARIS)
                            tmp_T0 = 0;
#else
                            T0 = 0;
#endif
                        }
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        (env->CP0_Status & env->CP0_Cause & 0x0000FF00) &&
                        !(env->hflags & MIPS_HFLAG_EXL) &&
                        !(env->hflags & MIPS_HFLAG_ERL) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        env->interrupt_request &= ~CPU_INTERRUPT_HARD;
#if defined(__sparc__) && !defined(HOST_SOLARIS)
                        tmp_T0 = 0;
#else
                        T0 = 0;
#endif
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psret != 0)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            do_interrupt(env->interrupt_index);
                            env->interrupt_index = 0;
#if defined(__sparc__) && !defined(HOST_SOLARIS)
                            tmp_T0 = 0;
#else
                            T0 = 0;
#endif
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    } else if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env1->halted = 1;
                        return EXCP_HALTED;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                    }
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->uncached_cpsr & CPSR_I)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                    }
#elif defined(TARGET_SH4)
                    /* XXXXX */
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
                        tmp_T0 = 0;
#else
                        T0 = 0;
#endif
                    }
                    if (interrupt_request & CPU_INTERRUPT_EXIT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
                        env->exception_index = EXCP_INTERRUPT;
                        cpu_loop_exit();
                    }
                }
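                /* note the asymmetry above: CPU_INTERRUPT_EXITTB only
                   breaks TB chaining and keeps executing, while
                   CPU_INTERRUPT_EXIT leaves cpu_exec() entirely via
                   cpu_loop_exit() with EXCP_INTERRUPT */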
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_TB_CPU)) {
#if defined(TARGET_I386)
                    /* restore flags in standard format */
#ifdef reg_EAX
                    env->regs[R_EAX] = EAX;
#endif
#ifdef reg_EBX
                    env->regs[R_EBX] = EBX;
#endif
#ifdef reg_ECX
                    env->regs[R_ECX] = ECX;
#endif
#ifdef reg_EDX
                    env->regs[R_EDX] = EDX;
#endif
#ifdef reg_ESI
                    env->regs[R_ESI] = ESI;
#endif
#ifdef reg_EDI
                    env->regs[R_EDI] = EDI;
#endif
#ifdef reg_EBP
                    env->regs[R_EBP] = EBP;
#endif
#ifdef reg_ESP
                    env->regs[R_ESP] = ESP;
#endif
                    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                    cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SPARC)
                    REGWPTR = env->regbase + (env->cwp * 16);
                    env->regwptr = REGWPTR;
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_PPC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_MIPS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SH4)
                    cpu_dump_state(env, logfile, fprintf, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                tb = tb_find_fast();
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_EXEC)) {
                    fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                            (long)tb->tc_ptr, tb->pc,
                            lookup_symbol(tb->pc));
                }
#endif
#if defined(__sparc__) && !defined(HOST_SOLARIS)
                T0 = tmp_T0;
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
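                /* T0 carries both pieces needed for patching: its low
                   2 bits select which jump slot of the previous TB to
                   patch, the remaining bits are the previous TB's
                   address */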
                {
                    if (T0 != 0 &&
#if USE_KQEMU
                        (env->kqemu_enabled != 2) &&
#endif
                        tb->page_addr[1] == -1
#if defined(TARGET_I386) && defined(USE_CODE_COPY)
                        && (tb->cflags & CF_CODE_COPY) ==
                        (((TranslationBlock *)(T0 & ~3))->cflags & CF_CODE_COPY)
#endif
                        ) {
                        spin_lock(&tb_lock);
                        tb_add_jump((TranslationBlock *)(long)(T0 & ~3), T0 & 3, tb);
#if defined(USE_CODE_COPY)
                        /* propagates the FP use info */
                        ((TranslationBlock *)(T0 & ~3))->cflags |=
                            (tb->cflags & CF_FP_USED);
#endif
                        spin_unlock(&tb_lock);
                    }
                }
                tc_ptr = tb->tc_ptr;
                env->current_tb = tb;
                /* execute the generated code */
                gen_func = (void *)tc_ptr;
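                /* entering the TB is host-specific: most hosts use a
                   plain indirect call; sparc and arm use inline asm,
                   apparently to control exactly which host registers
                   the generated code may clobber (see the clobber
                   lists below) */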
#if defined(__sparc__)
                __asm__ __volatile__("call %0\n\t"
                                     "mov %%o7,%%i0"
                                     : /* no outputs */
                                     : "r" (gen_func)
                                     : "i0", "i1", "i2", "i3", "i4", "i5",
                                       "l0", "l1", "l2", "l3", "l4", "l5",
                                       "l6", "l7");
#elif defined(__arm__)
                asm volatile ("mov pc, %0\n\t"
                              ".global exec_loop\n\t"
                              "exec_loop:\n\t"
                              : /* no outputs */
                              : "r" (gen_func)
                              : "r1", "r2", "r3", "r8", "r9", "r10", "r12", "r14");
#elif defined(TARGET_I386) && defined(USE_CODE_COPY)
                {
                    if (!(tb->cflags & CF_CODE_COPY)) {
                        if ((tb->cflags & CF_FP_USED) && env->native_fp_regs) {
                            save_native_fp_state(env);
                        }
                        gen_func();
                    } else {
                        if ((tb->cflags & CF_FP_USED) && !env->native_fp_regs) {
                            restore_native_fp_state(env);
                        }
                        /* we work with native eflags */
                        CC_SRC = cc_table[CC_OP].compute_all();
                        CC_OP = CC_OP_EFLAGS;
                        asm(".globl exec_loop\n"
                            "\n"
                            "debug1:\n"
                            "    pushl %%ebp\n"
                            "    fs movl %10, %9\n"
                            "    fs movl %11, %%eax\n"
                            "    andl $0x400, %%eax\n"
                            "    fs orl %8, %%eax\n"
                            "    pushl %%eax\n"
                            "    popf\n"
                            "    fs movl %%esp, %12\n"
                            "    fs movl %0, %%eax\n"
                            "    fs movl %1, %%ecx\n"
                            "    fs movl %2, %%edx\n"
                            "    fs movl %3, %%ebx\n"
                            "    fs movl %4, %%esp\n"
                            "    fs movl %5, %%ebp\n"
                            "    fs movl %6, %%esi\n"
                            "    fs movl %7, %%edi\n"
                            "    fs jmp *%9\n"
                            "exec_loop:\n"
                            "    fs movl %%esp, %4\n"
                            "    fs movl %12, %%esp\n"
                            "    fs movl %%eax, %0\n"
                            "    fs movl %%ecx, %1\n"
                            "    fs movl %%edx, %2\n"
                            "    fs movl %%ebx, %3\n"
                            "    fs movl %%ebp, %5\n"
                            "    fs movl %%esi, %6\n"
                            "    fs movl %%edi, %7\n"
                            "    pushf\n"
                            "    popl %%eax\n"
                            "    movl %%eax, %%ecx\n"
                            "    andl $0x400, %%ecx\n"
                            "    shrl $9, %%ecx\n"
                            "    andl $0x8d5, %%eax\n"
                            "    fs movl %%eax, %8\n"
                            "    movl $1, %%eax\n"
                            "    subl %%ecx, %%eax\n"
                            "    fs movl %%eax, %11\n"
                            "    fs movl %9, %%ebx\n" /* get T0 value */
                            "    popl %%ebp\n"
                            : /* no outputs */
                            : "m" (*(uint8_t *)offsetof(CPUState, regs[0])),
                              "m" (*(uint8_t *)offsetof(CPUState, regs[1])),
                              "m" (*(uint8_t *)offsetof(CPUState, regs[2])),
                              "m" (*(uint8_t *)offsetof(CPUState, regs[3])),
                              "m" (*(uint8_t *)offsetof(CPUState, regs[4])),
                              "m" (*(uint8_t *)offsetof(CPUState, regs[5])),
                              "m" (*(uint8_t *)offsetof(CPUState, regs[6])),
                              "m" (*(uint8_t *)offsetof(CPUState, regs[7])),
                              "m" (*(uint8_t *)offsetof(CPUState, cc_src)),
                              "m" (*(uint8_t *)offsetof(CPUState, tmp0)),
                              "a" (gen_func),
                              "m" (*(uint8_t *)offsetof(CPUState, df)),
                              "m" (*(uint8_t *)offsetof(CPUState, saved_esp))
                            : "%ecx", "%edx"
                            );
                    }
                }
#elif defined(__ia64)
                struct fptr {
                    void *ip;
                    void *gp;
                } fp;

                fp.ip = tc_ptr;
                fp.gp = code_gen_buffer + 2 * (1 << 20);
                (*(void (*)(void)) &fp)();
#else
                gen_func();
#endif
                env->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(TARGET_I386) && !defined(CONFIG_SOFTMMU)
                if (env->hflags & HF_SOFTMMU_MASK) {
                    env->hflags &= ~HF_SOFTMMU_MASK;
                    /* do not allow linking to another block */
                    T0 = 0;
                }
#endif
#if defined(USE_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            }
        } else {
            env_to_regs();
        }
    } /* for(;;) */
#if defined(TARGET_I386)
#if defined(USE_CODE_COPY)
    if (env->native_fp_regs) {
        save_native_fp_state(env);
    }
#endif
    /* restore flags in standard format */
    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);

    /* restore global registers */
#ifdef reg_EAX
    EAX = saved_EAX;
#endif
#ifdef reg_ECX
    ECX = saved_ECX;
#endif
#ifdef reg_EDX
    EDX = saved_EDX;
#endif
#ifdef reg_EBX
    EBX = saved_EBX;
#endif
#ifdef reg_ESP
    ESP = saved_ESP;
#endif
#ifdef reg_EBP
    EBP = saved_EBP;
#endif
#ifdef reg_ESI
    ESI = saved_ESI;
#endif
#ifdef reg_EDI
    EDI = saved_EDI;
#endif
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    REGWPTR = saved_regwptr;
#endif
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
#if defined(__sparc__) && !defined(HOST_SOLARIS)
    asm volatile ("mov %0, %%i7" : : "r" (saved_i7));
#endif
    T0 = saved_T0;
    T1 = saved_T1;
#if defined(reg_T2)
    T2 = saved_T2;
#endif
    env = saved_env;
    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}
/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}
#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, uint8_t *ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave((target_ulong)ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, uint8_t *ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor((target_ulong)ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */
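/* The handle_cpu_signal() variants below share one protocol: return 1
   when the fault was consumed (the page was unprotected, the soft MMU
   handled the fault, or a guest exception is raised), return 0 to hand
   the signal back to the host as a genuine crash. */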
#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write,
                                   ((env->hflags & HF_CPL_MASK) == 3), 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, 1, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
}
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, 1, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
}
#elif defined (TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, msr_pr, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined (TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, 1, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined (TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, 1, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#else
#error unsupported target CPU
#endif
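/* Host-side dispatchers: each cpu_signal_handler() variant below digs
   the faulting PC, the faulting address and, where the host exposes
   one, a write flag out of the host signal frame, then defers to
   handle_cpu_signal(). */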
#if defined(__i386__)

#if defined(USE_CODE_COPY)
static void cpu_send_trap(unsigned long pc, int trap,
                          struct ucontext *uc)
{
    TranslationBlock *tb;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, uc);
    }
    sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
    raise_exception_err(trap, env->error_code);
}
#endif

int cpu_signal_handler(int host_signum, struct siginfo *info,
                       void *puc)
{
    struct ucontext *uc = puc;
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = uc->uc_mcontext.gregs[REG_EIP];
    trapno = uc->uc_mcontext.gregs[REG_TRAPNO];
#if defined(TARGET_I386) && defined(USE_CODE_COPY)
    if (trapno == 0x00 || trapno == 0x05) {
        /* send division by zero or bound exception */
        cpu_send_trap(pc, trapno, uc);
        return 1;
    } else
#endif
        return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                                 trapno == 0xe ?
                                 (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
                                 &uc->uc_sigmask, puc);
}
#elif defined(__x86_64__)

int cpu_signal_handler(int host_signum, struct siginfo *info,
                       void *puc)
{
    struct ucontext *uc = puc;
    unsigned long pc;

    pc = uc->uc_mcontext.gregs[REG_RIP];
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
                             (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}
#elif defined(__powerpc__)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)    ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)     REG_sig(gpr[reg_num], context)
# define IAR_sig(context)              REG_sig(nip, context)   /* Program counter */
# define MSR_sig(context)              REG_sig(msr, context)   /* Machine State Register (Supervisor) */
# define CTR_sig(context)              REG_sig(ctr, context)   /* Count register */
# define XER_sig(context)              REG_sig(xer, context)   /* User's integer exception register */
# define LR_sig(context)               REG_sig(link, context)  /* Link register */
# define CR_sig(context)               REG_sig(ccr, context)   /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)   (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)            (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)              REG_sig(dar, context)
# define DSISR_sig(context)            REG_sig(dsisr, context)
# define TRAP_sig(context)             REG_sig(trap, context)
#endif /* linux */
#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)        ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context)   ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context)   ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)     ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)         REG_sig(r##reg_num, context)
# define IAR_sig(context)                  REG_sig(srr0, context) /* Program counter */
# define MSR_sig(context)                  REG_sig(srr1, context) /* Machine State Register (Supervisor) */
# define CTR_sig(context)                  REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)                  REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                   REG_sig(lr, context)   /* Link register */
# define CR_sig(context)                   REG_sig(cr, context)   /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)       FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)                ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)                  EXCEPREG_sig(dar, context) /* Fault registers for coredump */
# define DSISR_sig(context)                EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)                 EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */
int cpu_signal_handler(int host_signum, struct siginfo *info,
                       void *puc)
{
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, struct siginfo *info,
                       void *puc)
{
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal((unsigned long)pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, struct siginfo *info,
                       void *puc)
{
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    unsigned long pc;
    int is_write;
    uint32_t insn;

    /* XXX: is there a standard glibc define ? */
    pc = regs[1];
    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x06: // sth
        case 0x04: // st
        case 0x07: // std
        case 0x24: // stf
        case 0x27: // stdf
        case 0x25: // stfsr
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}
#elif defined(__arm__)

int cpu_signal_handler(int host_signum, struct siginfo *info,
                       void *puc)
{
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[R15];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, struct siginfo *info,
                       void *puc)
{
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, struct siginfo *info, void *puc)
{
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33: */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__s390__)

int cpu_signal_handler(int host_signum, struct siginfo *info,
                       void *puc)
{
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */