Merge commit '60e0df25e415b00cf35c4d214eaba9dc19aaa9e6' into upstream-merge
[qemu/qemu-dev-zwu.git] / cpu-exec.c
blob583d7d6174bcc43d37c7660fda9e38ba00a08d3b
/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
19 #include "config.h"
20 #include "exec.h"
21 #include "disas.h"
22 #if !defined(TARGET_IA64)
23 #include "tcg.h"
24 #endif
25 #include "kvm.h"
26 #include "qemu-barrier.h"
28 #if !defined(CONFIG_SOFTMMU)
29 #undef EAX
30 #undef ECX
31 #undef EDX
32 #undef EBX
33 #undef ESP
34 #undef EBP
35 #undef ESI
36 #undef EDI
37 #undef EIP
38 #include <signal.h>
39 #ifdef __linux__
40 #include <sys/ucontext.h>
41 #endif
42 #endif
44 #include "qemu-kvm.h"
46 #if defined(__sparc__) && !defined(CONFIG_SOLARIS)
47 // Work around ugly bugs in glibc that mangle global register contents
48 #undef env
49 #define env cpu_single_env
50 #endif
/* Cleared by tb_find_slow() before each lookup; checked in cpu_exec()
   to force a fresh TB lookup when a translation was invalidated while
   code was being generated. */
int tb_invalidated_flag;

//#define CONFIG_DEBUG_EXEC
//#define DEBUG_SIGNAL
57 int qemu_cpu_has_work(CPUState *env)
59 return cpu_has_work(env);
62 void cpu_loop_exit(void)
64 env->current_tb = NULL;
65 longjmp(env->jmp_env, 1);
68 /* exit the current TB from a signal handler. The host registers are
69 restored in a state compatible with the CPU emulator
71 void cpu_resume_from_signal(CPUState *env1, void *puc)
73 #if !defined(CONFIG_SOFTMMU)
74 #ifdef __linux__
75 struct ucontext *uc = puc;
76 #elif defined(__OpenBSD__)
77 struct sigcontext *uc = puc;
78 #endif
79 #endif
81 env = env1;
83 /* XXX: restore cpu registers saved in host registers */
85 #if !defined(CONFIG_SOFTMMU)
86 if (puc) {
87 /* XXX: use siglongjmp ? */
88 #ifdef __linux__
89 #ifdef __ia64
90 sigprocmask(SIG_SETMASK, (sigset_t *)&uc->uc_sigmask, NULL);
91 #else
92 sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
93 #endif
94 #elif defined(__OpenBSD__)
95 sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
96 #endif
98 #endif
99 env->exception_index = -1;
100 longjmp(env->jmp_env, 1);
103 /* Execute the code without caching the generated code. An interpreter
104 could be used if available. */
105 static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
107 unsigned long next_tb;
108 TranslationBlock *tb;
110 /* Should never happen.
111 We only end up here when an existing TB is too long. */
112 if (max_cycles > CF_COUNT_MASK)
113 max_cycles = CF_COUNT_MASK;
115 tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
116 max_cycles);
117 env->current_tb = tb;
118 /* execute the generated code */
119 next_tb = tcg_qemu_tb_exec(tb->tc_ptr);
120 env->current_tb = NULL;
122 if ((next_tb & 3) == 2) {
123 /* Restore PC. This may happen if async event occurs before
124 the TB starts executing. */
125 cpu_pc_from_tb(env, tb);
127 tb_phys_invalidate(tb, -1);
128 tb_free(tb);
131 static TranslationBlock *tb_find_slow(target_ulong pc,
132 target_ulong cs_base,
133 uint64_t flags)
135 TranslationBlock *tb, **ptb1;
136 unsigned int h;
137 tb_page_addr_t phys_pc, phys_page1, phys_page2;
138 target_ulong virt_page2;
140 tb_invalidated_flag = 0;
142 /* find translated block using physical mappings */
143 phys_pc = get_page_addr_code(env, pc);
144 phys_page1 = phys_pc & TARGET_PAGE_MASK;
145 phys_page2 = -1;
146 h = tb_phys_hash_func(phys_pc);
147 ptb1 = &tb_phys_hash[h];
148 for(;;) {
149 tb = *ptb1;
150 if (!tb)
151 goto not_found;
152 if (tb->pc == pc &&
153 tb->page_addr[0] == phys_page1 &&
154 tb->cs_base == cs_base &&
155 tb->flags == flags) {
156 /* check next page if needed */
157 if (tb->page_addr[1] != -1) {
158 virt_page2 = (pc & TARGET_PAGE_MASK) +
159 TARGET_PAGE_SIZE;
160 phys_page2 = get_page_addr_code(env, virt_page2);
161 if (tb->page_addr[1] == phys_page2)
162 goto found;
163 } else {
164 goto found;
167 ptb1 = &tb->phys_hash_next;
169 not_found:
170 /* if no translated code available, then translate it now */
171 tb = tb_gen_code(env, pc, cs_base, flags, 0);
173 found:
174 /* Move the last found TB to the head of the list */
175 if (likely(*ptb1)) {
176 *ptb1 = tb->phys_hash_next;
177 tb->phys_hash_next = tb_phys_hash[h];
178 tb_phys_hash[h] = tb;
180 /* we add the TB in the virtual pc hash table */
181 env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
182 return tb;
185 static inline TranslationBlock *tb_find_fast(void)
187 TranslationBlock *tb;
188 target_ulong cs_base, pc;
189 int flags;
191 /* we record a subset of the CPU state. It will
192 always be the same before a given translated block
193 is executed. */
194 cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
195 tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
196 if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
197 tb->flags != flags)) {
198 tb = tb_find_slow(pc, cs_base, flags);
200 return tb;
203 static CPUDebugExcpHandler *debug_excp_handler;
205 CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
207 CPUDebugExcpHandler *old_handler = debug_excp_handler;
209 debug_excp_handler = handler;
210 return old_handler;
213 static void cpu_handle_debug_exception(CPUState *env)
215 CPUWatchpoint *wp;
217 if (!env->watchpoint_hit) {
218 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
219 wp->flags &= ~BP_WATCHPOINT_HIT;
222 if (debug_excp_handler) {
223 debug_excp_handler(env);
227 /* main execution loop */
229 volatile sig_atomic_t exit_request;
231 int cpu_exec(CPUState *env1)
233 volatile host_reg_t saved_env_reg;
234 int ret, interrupt_request;
235 TranslationBlock *tb;
236 uint8_t *tc_ptr;
237 unsigned long next_tb;
239 if (env1->halted) {
240 if (!cpu_has_work(env1)) {
241 return EXCP_HALTED;
244 env1->halted = 0;
247 cpu_single_env = env1;
249 /* the access to env below is actually saving the global register's
250 value, so that files not including target-xyz/exec.h are free to
251 use it. */
252 QEMU_BUILD_BUG_ON (sizeof (saved_env_reg) != sizeof (env));
253 saved_env_reg = (host_reg_t) env;
254 barrier();
255 env = env1;
257 if (unlikely(exit_request)) {
258 env->exit_request = 1;
261 #if defined(TARGET_I386)
262 /* put eflags in CPU temporary format */
263 CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
264 DF = 1 - (2 * ((env->eflags >> 10) & 1));
265 CC_OP = CC_OP_EFLAGS;
266 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
267 #elif defined(TARGET_SPARC)
268 #elif defined(TARGET_M68K)
269 env->cc_op = CC_OP_FLAGS;
270 env->cc_dest = env->sr & 0xf;
271 env->cc_x = (env->sr >> 4) & 1;
272 #elif defined(TARGET_ALPHA)
273 #elif defined(TARGET_ARM)
274 #elif defined(TARGET_UNICORE32)
275 #elif defined(TARGET_PPC)
276 #elif defined(TARGET_LM32)
277 #elif defined(TARGET_MICROBLAZE)
278 #elif defined(TARGET_MIPS)
279 #elif defined(TARGET_SH4)
280 #elif defined(TARGET_CRIS)
281 #elif defined(TARGET_S390X)
282 #elif defined(TARGET_IA64)
283 /* XXXXX */
284 #else
285 #error unsupported target CPU
286 #endif
287 env->exception_index = -1;
289 /* prepare setjmp context for exception handling */
290 for(;;) {
291 if (setjmp(env->jmp_env) == 0) {
292 #if defined(__sparc__) && !defined(CONFIG_SOLARIS)
293 #undef env
294 env = cpu_single_env;
295 #define env cpu_single_env
296 #endif
297 /* if an exception is pending, we execute it here */
298 if (env->exception_index >= 0) {
299 if (env->exception_index >= EXCP_INTERRUPT) {
300 /* exit request from the cpu execution loop */
301 ret = env->exception_index;
302 if (ret == EXCP_DEBUG) {
303 cpu_handle_debug_exception(env);
305 break;
306 } else {
307 #if defined(CONFIG_USER_ONLY)
308 /* if user mode only, we simulate a fake exception
309 which will be handled outside the cpu execution
310 loop */
311 #if defined(TARGET_I386)
312 do_interrupt_user(env->exception_index,
313 env->exception_is_int,
314 env->error_code,
315 env->exception_next_eip);
316 /* successfully delivered */
317 env->old_exception = -1;
318 #endif
319 ret = env->exception_index;
320 break;
321 #else
322 #if defined(TARGET_I386)
323 /* simulate a real cpu exception. On i386, it can
324 trigger new exceptions, but we do not handle
325 double or triple faults yet. */
326 do_interrupt(env->exception_index,
327 env->exception_is_int,
328 env->error_code,
329 env->exception_next_eip, 0);
330 /* successfully delivered */
331 env->old_exception = -1;
332 #elif defined(TARGET_PPC)
333 do_interrupt(env);
334 #elif defined(TARGET_LM32)
335 do_interrupt(env);
336 #elif defined(TARGET_MICROBLAZE)
337 do_interrupt(env);
338 #elif defined(TARGET_MIPS)
339 do_interrupt(env);
340 #elif defined(TARGET_SPARC)
341 do_interrupt(env);
342 #elif defined(TARGET_ARM)
343 do_interrupt(env);
344 #elif defined(TARGET_UNICORE32)
345 do_interrupt(env);
346 #elif defined(TARGET_SH4)
347 do_interrupt(env);
348 #elif defined(TARGET_ALPHA)
349 do_interrupt(env);
350 #elif defined(TARGET_CRIS)
351 do_interrupt(env);
352 #elif defined(TARGET_M68K)
353 do_interrupt(0);
354 #elif defined(TARGET_IA64)
355 do_interrupt(env);
356 #elif defined(TARGET_S390X)
357 do_interrupt(env);
358 #endif
359 env->exception_index = -1;
360 #endif
364 next_tb = 0; /* force lookup of first TB */
365 for(;;) {
366 interrupt_request = env->interrupt_request;
367 if (unlikely(interrupt_request)) {
368 if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
369 /* Mask out external interrupts for this step. */
370 interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
372 if (interrupt_request & CPU_INTERRUPT_DEBUG) {
373 env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
374 env->exception_index = EXCP_DEBUG;
375 cpu_loop_exit();
377 #if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
378 defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
379 defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || defined(TARGET_UNICORE32)
380 if (interrupt_request & CPU_INTERRUPT_HALT) {
381 env->interrupt_request &= ~CPU_INTERRUPT_HALT;
382 env->halted = 1;
383 env->exception_index = EXCP_HLT;
384 cpu_loop_exit();
386 #endif
387 #if defined(TARGET_I386)
388 if (interrupt_request & CPU_INTERRUPT_INIT) {
389 svm_check_intercept(SVM_EXIT_INIT);
390 do_cpu_init(env);
391 env->exception_index = EXCP_HALTED;
392 cpu_loop_exit();
393 } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
394 do_cpu_sipi(env);
395 } else if (env->hflags2 & HF2_GIF_MASK) {
396 if ((interrupt_request & CPU_INTERRUPT_SMI) &&
397 !(env->hflags & HF_SMM_MASK)) {
398 svm_check_intercept(SVM_EXIT_SMI);
399 env->interrupt_request &= ~CPU_INTERRUPT_SMI;
400 do_smm_enter();
401 next_tb = 0;
402 } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
403 !(env->hflags2 & HF2_NMI_MASK)) {
404 env->interrupt_request &= ~CPU_INTERRUPT_NMI;
405 env->hflags2 |= HF2_NMI_MASK;
406 do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
407 next_tb = 0;
408 } else if (interrupt_request & CPU_INTERRUPT_MCE) {
409 env->interrupt_request &= ~CPU_INTERRUPT_MCE;
410 do_interrupt(EXCP12_MCHK, 0, 0, 0, 0);
411 next_tb = 0;
412 } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
413 (((env->hflags2 & HF2_VINTR_MASK) &&
414 (env->hflags2 & HF2_HIF_MASK)) ||
415 (!(env->hflags2 & HF2_VINTR_MASK) &&
416 (env->eflags & IF_MASK &&
417 !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
418 int intno;
419 svm_check_intercept(SVM_EXIT_INTR);
420 env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
421 intno = cpu_get_pic_interrupt(env);
422 qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
423 #if defined(__sparc__) && !defined(CONFIG_SOLARIS)
424 #undef env
425 env = cpu_single_env;
426 #define env cpu_single_env
427 #endif
428 do_interrupt(intno, 0, 0, 0, 1);
429 /* ensure that no TB jump will be modified as
430 the program flow was changed */
431 next_tb = 0;
432 #if !defined(CONFIG_USER_ONLY)
433 } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
434 (env->eflags & IF_MASK) &&
435 !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
436 int intno;
437 /* FIXME: this should respect TPR */
438 svm_check_intercept(SVM_EXIT_VINTR);
439 intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
440 qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
441 do_interrupt(intno, 0, 0, 0, 1);
442 env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
443 next_tb = 0;
444 #endif
447 #elif defined(TARGET_PPC)
448 #if 0
449 if ((interrupt_request & CPU_INTERRUPT_RESET)) {
450 cpu_reset(env);
452 #endif
453 if (interrupt_request & CPU_INTERRUPT_HARD) {
454 ppc_hw_interrupt(env);
455 if (env->pending_interrupts == 0)
456 env->interrupt_request &= ~CPU_INTERRUPT_HARD;
457 next_tb = 0;
459 #elif defined(TARGET_LM32)
460 if ((interrupt_request & CPU_INTERRUPT_HARD)
461 && (env->ie & IE_IE)) {
462 env->exception_index = EXCP_IRQ;
463 do_interrupt(env);
464 next_tb = 0;
466 #elif defined(TARGET_MICROBLAZE)
467 if ((interrupt_request & CPU_INTERRUPT_HARD)
468 && (env->sregs[SR_MSR] & MSR_IE)
469 && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
470 && !(env->iflags & (D_FLAG | IMM_FLAG))) {
471 env->exception_index = EXCP_IRQ;
472 do_interrupt(env);
473 next_tb = 0;
475 #elif defined(TARGET_MIPS)
476 if ((interrupt_request & CPU_INTERRUPT_HARD) &&
477 cpu_mips_hw_interrupts_pending(env)) {
478 /* Raise it */
479 env->exception_index = EXCP_EXT_INTERRUPT;
480 env->error_code = 0;
481 do_interrupt(env);
482 next_tb = 0;
484 #elif defined(TARGET_SPARC)
485 if (interrupt_request & CPU_INTERRUPT_HARD) {
486 if (cpu_interrupts_enabled(env) &&
487 env->interrupt_index > 0) {
488 int pil = env->interrupt_index & 0xf;
489 int type = env->interrupt_index & 0xf0;
491 if (((type == TT_EXTINT) &&
492 cpu_pil_allowed(env, pil)) ||
493 type != TT_EXTINT) {
494 env->exception_index = env->interrupt_index;
495 do_interrupt(env);
496 next_tb = 0;
500 #elif defined(TARGET_ARM)
501 if (interrupt_request & CPU_INTERRUPT_FIQ
502 && !(env->uncached_cpsr & CPSR_F)) {
503 env->exception_index = EXCP_FIQ;
504 do_interrupt(env);
505 next_tb = 0;
507 /* ARMv7-M interrupt return works by loading a magic value
508 into the PC. On real hardware the load causes the
509 return to occur. The qemu implementation performs the
510 jump normally, then does the exception return when the
511 CPU tries to execute code at the magic address.
512 This will cause the magic PC value to be pushed to
513 the stack if an interrupt occurred at the wrong time.
514 We avoid this by disabling interrupts when
515 pc contains a magic address. */
516 if (interrupt_request & CPU_INTERRUPT_HARD
517 && ((IS_M(env) && env->regs[15] < 0xfffffff0)
518 || !(env->uncached_cpsr & CPSR_I))) {
519 env->exception_index = EXCP_IRQ;
520 do_interrupt(env);
521 next_tb = 0;
523 #elif defined(TARGET_UNICORE32)
524 if (interrupt_request & CPU_INTERRUPT_HARD
525 && !(env->uncached_asr & ASR_I)) {
526 do_interrupt(env);
527 next_tb = 0;
529 #elif defined(TARGET_SH4)
530 if (interrupt_request & CPU_INTERRUPT_HARD) {
531 do_interrupt(env);
532 next_tb = 0;
534 #elif defined(TARGET_ALPHA)
535 if (interrupt_request & CPU_INTERRUPT_HARD) {
536 do_interrupt(env);
537 next_tb = 0;
539 #elif defined(TARGET_CRIS)
540 if (interrupt_request & CPU_INTERRUPT_HARD
541 && (env->pregs[PR_CCS] & I_FLAG)
542 && !env->locked_irq) {
543 env->exception_index = EXCP_IRQ;
544 do_interrupt(env);
545 next_tb = 0;
547 if (interrupt_request & CPU_INTERRUPT_NMI
548 && (env->pregs[PR_CCS] & M_FLAG)) {
549 env->exception_index = EXCP_NMI;
550 do_interrupt(env);
551 next_tb = 0;
553 #elif defined(TARGET_M68K)
554 if (interrupt_request & CPU_INTERRUPT_HARD
555 && ((env->sr & SR_I) >> SR_I_SHIFT)
556 < env->pending_level) {
557 /* Real hardware gets the interrupt vector via an
558 IACK cycle at this point. Current emulated
559 hardware doesn't rely on this, so we
560 provide/save the vector when the interrupt is
561 first signalled. */
562 env->exception_index = env->pending_vector;
563 do_interrupt(1);
564 next_tb = 0;
566 #elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
567 if ((interrupt_request & CPU_INTERRUPT_HARD) &&
568 (env->psw.mask & PSW_MASK_EXT)) {
569 do_interrupt(env);
570 next_tb = 0;
572 #endif
573 /* Don't use the cached interrupt_request value,
574 do_interrupt may have updated the EXITTB flag. */
575 if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
576 env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
577 /* ensure that no TB jump will be modified as
578 the program flow was changed */
579 next_tb = 0;
582 if (unlikely(env->exit_request)) {
583 env->exit_request = 0;
584 env->exception_index = EXCP_INTERRUPT;
585 cpu_loop_exit();
587 #if defined(DEBUG_DISAS) || defined(CONFIG_DEBUG_EXEC)
588 if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
589 /* restore flags in standard format */
590 #if defined(TARGET_I386)
591 env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
592 log_cpu_state(env, X86_DUMP_CCOP);
593 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
594 #elif defined(TARGET_M68K)
595 cpu_m68k_flush_flags(env, env->cc_op);
596 env->cc_op = CC_OP_FLAGS;
597 env->sr = (env->sr & 0xffe0)
598 | env->cc_dest | (env->cc_x << 4);
599 log_cpu_state(env, 0);
600 #else
601 log_cpu_state(env, 0);
602 #endif
604 #endif /* DEBUG_DISAS || CONFIG_DEBUG_EXEC */
605 spin_lock(&tb_lock);
606 tb = tb_find_fast();
607 /* Note: we do it here to avoid a gcc bug on Mac OS X when
608 doing it in tb_find_slow */
609 if (tb_invalidated_flag) {
610 /* as some TB could have been invalidated because
611 of memory exceptions while generating the code, we
612 must recompute the hash index here */
613 next_tb = 0;
614 tb_invalidated_flag = 0;
616 #ifdef CONFIG_DEBUG_EXEC
617 qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
618 (long)tb->tc_ptr, tb->pc,
619 lookup_symbol(tb->pc));
620 #endif
621 /* see if we can patch the calling TB. When the TB
622 spans two pages, we cannot safely do a direct
623 jump. */
624 if (next_tb != 0 && tb->page_addr[1] == -1) {
625 tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
627 spin_unlock(&tb_lock);
629 /* cpu_interrupt might be called while translating the
630 TB, but before it is linked into a potentially
631 infinite loop and becomes env->current_tb. Avoid
632 starting execution if there is a pending interrupt. */
633 env->current_tb = tb;
634 barrier();
635 if (likely(!env->exit_request)) {
636 tc_ptr = tb->tc_ptr;
637 /* execute the generated code */
638 #if defined(__sparc__) && !defined(CONFIG_SOLARIS)
639 #undef env
640 env = cpu_single_env;
641 #define env cpu_single_env
642 #endif
643 next_tb = tcg_qemu_tb_exec(tc_ptr);
644 if ((next_tb & 3) == 2) {
645 /* Instruction counter expired. */
646 int insns_left;
647 tb = (TranslationBlock *)(long)(next_tb & ~3);
648 /* Restore PC. */
649 cpu_pc_from_tb(env, tb);
650 insns_left = env->icount_decr.u32;
651 if (env->icount_extra && insns_left >= 0) {
652 /* Refill decrementer and continue execution. */
653 env->icount_extra += insns_left;
654 if (env->icount_extra > 0xffff) {
655 insns_left = 0xffff;
656 } else {
657 insns_left = env->icount_extra;
659 env->icount_extra -= insns_left;
660 env->icount_decr.u16.low = insns_left;
661 } else {
662 if (insns_left > 0) {
663 /* Execute remaining instructions. */
664 cpu_exec_nocache(insns_left, tb);
666 env->exception_index = EXCP_INTERRUPT;
667 next_tb = 0;
668 cpu_loop_exit();
672 env->current_tb = NULL;
673 /* reset soft MMU for next block (it can currently
674 only be set by a memory fault) */
675 } /* for(;;) */
677 } /* for(;;) */
680 #if defined(TARGET_I386)
681 /* restore flags in standard format */
682 env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
683 #elif defined(TARGET_ARM)
684 /* XXX: Save/restore host fpu exception state?. */
685 #elif defined(TARGET_UNICORE32)
686 #elif defined(TARGET_SPARC)
687 #elif defined(TARGET_PPC)
688 #elif defined(TARGET_LM32)
689 #elif defined(TARGET_M68K)
690 cpu_m68k_flush_flags(env, env->cc_op);
691 env->cc_op = CC_OP_FLAGS;
692 env->sr = (env->sr & 0xffe0)
693 | env->cc_dest | (env->cc_x << 4);
694 #elif defined(TARGET_MICROBLAZE)
695 #elif defined(TARGET_MIPS)
696 #elif defined(TARGET_SH4)
697 #elif defined(TARGET_IA64)
698 #elif defined(TARGET_ALPHA)
699 #elif defined(TARGET_CRIS)
700 #elif defined(TARGET_S390X)
701 /* XXXXX */
702 #else
703 #error unsupported target CPU
704 #endif
706 /* restore global registers */
707 barrier();
708 env = (void *) saved_env_reg;
710 /* fail safe : never use cpu_single_env outside cpu_exec() */
711 cpu_single_env = NULL;
712 return ret;
715 /* must only be called from the generated code as an exception can be
716 generated */
717 void tb_invalidate_page_range(target_ulong start, target_ulong end)
719 /* XXX: cannot enable it yet because it yields to MMU exception
720 where NIP != read address on PowerPC */
721 #if 0
722 target_ulong phys_addr;
723 phys_addr = get_phys_addr_code(env, start);
724 tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
725 #endif
728 #if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)
730 void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
732 CPUX86State *saved_env;
734 saved_env = env;
735 env = s;
736 if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
737 selector &= 0xffff;
738 cpu_x86_load_seg_cache(env, seg_reg, selector,
739 (selector << 4), 0xffff, 0);
740 } else {
741 helper_load_seg(seg_reg, selector);
743 env = saved_env;
746 void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
748 CPUX86State *saved_env;
750 saved_env = env;
751 env = s;
753 helper_fsave(ptr, data32);
755 env = saved_env;
758 void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
760 CPUX86State *saved_env;
762 saved_env = env;
763 env = s;
765 helper_frstor(ptr, data32);
767 env = saved_env;
770 #endif /* TARGET_I386 */
772 #if !defined(CONFIG_SOFTMMU)
774 #if defined(TARGET_I386)
775 #define EXCEPTION_ACTION raise_exception_err(env->exception_index, env->error_code)
776 #else
777 #define EXCEPTION_ACTION cpu_loop_exit()
778 #endif
780 /* 'pc' is the host PC at which the exception was raised. 'address' is
781 the effective address of the memory exception. 'is_write' is 1 if a
782 write caused the exception and otherwise 0'. 'old_set' is the
783 signal set which should be restored */
784 static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
785 int is_write, sigset_t *old_set,
786 void *puc)
788 TranslationBlock *tb;
789 int ret;
791 if (cpu_single_env)
792 env = cpu_single_env; /* XXX: find a correct solution for multithread */
793 #if defined(DEBUG_SIGNAL)
794 qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
795 pc, address, is_write, *(unsigned long *)old_set);
796 #endif
797 /* XXX: locking issue */
798 if (is_write && page_unprotect(h2g(address), pc, puc)) {
799 return 1;
802 /* see if it is an MMU fault */
803 ret = cpu_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
804 if (ret < 0)
805 return 0; /* not an MMU fault */
806 if (ret == 0)
807 return 1; /* the MMU fault was handled without causing real CPU fault */
808 /* now we have a real cpu fault */
809 tb = tb_find_pc(pc);
810 if (tb) {
811 /* the PC is inside the translated code. It means that we have
812 a virtual CPU fault */
813 cpu_restore_state(tb, env, pc);
816 /* we restore the process signal mask as the sigreturn should
817 do it (XXX: use sigsetjmp) */
818 sigprocmask(SIG_SETMASK, old_set, NULL);
819 EXCEPTION_ACTION;
821 /* never comes here */
822 return 1;
825 #if defined(__i386__)
827 #if defined(__APPLE__)
828 # include <sys/ucontext.h>
830 # define EIP_sig(context) (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
831 # define TRAP_sig(context) ((context)->uc_mcontext->es.trapno)
832 # define ERROR_sig(context) ((context)->uc_mcontext->es.err)
833 # define MASK_sig(context) ((context)->uc_sigmask)
834 #elif defined (__NetBSD__)
835 # include <ucontext.h>
837 # define EIP_sig(context) ((context)->uc_mcontext.__gregs[_REG_EIP])
838 # define TRAP_sig(context) ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
839 # define ERROR_sig(context) ((context)->uc_mcontext.__gregs[_REG_ERR])
840 # define MASK_sig(context) ((context)->uc_sigmask)
841 #elif defined (__FreeBSD__) || defined(__DragonFly__)
842 # include <ucontext.h>
844 # define EIP_sig(context) (*((unsigned long*)&(context)->uc_mcontext.mc_eip))
845 # define TRAP_sig(context) ((context)->uc_mcontext.mc_trapno)
846 # define ERROR_sig(context) ((context)->uc_mcontext.mc_err)
847 # define MASK_sig(context) ((context)->uc_sigmask)
848 #elif defined(__OpenBSD__)
849 # define EIP_sig(context) ((context)->sc_eip)
850 # define TRAP_sig(context) ((context)->sc_trapno)
851 # define ERROR_sig(context) ((context)->sc_err)
852 # define MASK_sig(context) ((context)->sc_mask)
853 #else
854 # define EIP_sig(context) ((context)->uc_mcontext.gregs[REG_EIP])
855 # define TRAP_sig(context) ((context)->uc_mcontext.gregs[REG_TRAPNO])
856 # define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
857 # define MASK_sig(context) ((context)->uc_sigmask)
858 #endif
860 int cpu_signal_handler(int host_signum, void *pinfo,
861 void *puc)
863 siginfo_t *info = pinfo;
864 #if defined(__NetBSD__) || defined (__FreeBSD__) || defined(__DragonFly__)
865 ucontext_t *uc = puc;
866 #elif defined(__OpenBSD__)
867 struct sigcontext *uc = puc;
868 #else
869 struct ucontext *uc = puc;
870 #endif
871 unsigned long pc;
872 int trapno;
874 #ifndef REG_EIP
875 /* for glibc 2.1 */
876 #define REG_EIP EIP
877 #define REG_ERR ERR
878 #define REG_TRAPNO TRAPNO
879 #endif
880 pc = EIP_sig(uc);
881 trapno = TRAP_sig(uc);
882 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
883 trapno == 0xe ?
884 (ERROR_sig(uc) >> 1) & 1 : 0,
885 &MASK_sig(uc), puc);
888 #elif defined(__x86_64__)
890 #ifdef __NetBSD__
891 #define PC_sig(context) _UC_MACHINE_PC(context)
892 #define TRAP_sig(context) ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
893 #define ERROR_sig(context) ((context)->uc_mcontext.__gregs[_REG_ERR])
894 #define MASK_sig(context) ((context)->uc_sigmask)
895 #elif defined(__OpenBSD__)
896 #define PC_sig(context) ((context)->sc_rip)
897 #define TRAP_sig(context) ((context)->sc_trapno)
898 #define ERROR_sig(context) ((context)->sc_err)
899 #define MASK_sig(context) ((context)->sc_mask)
900 #elif defined (__FreeBSD__) || defined(__DragonFly__)
901 #include <ucontext.h>
903 #define PC_sig(context) (*((unsigned long*)&(context)->uc_mcontext.mc_rip))
904 #define TRAP_sig(context) ((context)->uc_mcontext.mc_trapno)
905 #define ERROR_sig(context) ((context)->uc_mcontext.mc_err)
906 #define MASK_sig(context) ((context)->uc_sigmask)
907 #else
908 #define PC_sig(context) ((context)->uc_mcontext.gregs[REG_RIP])
909 #define TRAP_sig(context) ((context)->uc_mcontext.gregs[REG_TRAPNO])
910 #define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
911 #define MASK_sig(context) ((context)->uc_sigmask)
912 #endif
914 int cpu_signal_handler(int host_signum, void *pinfo,
915 void *puc)
917 siginfo_t *info = pinfo;
918 unsigned long pc;
919 #if defined(__NetBSD__) || defined (__FreeBSD__) || defined(__DragonFly__)
920 ucontext_t *uc = puc;
921 #elif defined(__OpenBSD__)
922 struct sigcontext *uc = puc;
923 #else
924 struct ucontext *uc = puc;
925 #endif
927 pc = PC_sig(uc);
928 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
929 TRAP_sig(uc) == 0xe ?
930 (ERROR_sig(uc) >> 1) & 1 : 0,
931 &MASK_sig(uc), puc);
934 #elif defined(_ARCH_PPC)
936 /***********************************************************************
937 * signal context platform-specific definitions
938 * From Wine
940 #ifdef linux
941 /* All Registers access - only for local access */
942 # define REG_sig(reg_name, context) ((context)->uc_mcontext.regs->reg_name)
943 /* Gpr Registers access */
944 # define GPR_sig(reg_num, context) REG_sig(gpr[reg_num], context)
945 # define IAR_sig(context) REG_sig(nip, context) /* Program counter */
946 # define MSR_sig(context) REG_sig(msr, context) /* Machine State Register (Supervisor) */
947 # define CTR_sig(context) REG_sig(ctr, context) /* Count register */
948 # define XER_sig(context) REG_sig(xer, context) /* User's integer exception register */
949 # define LR_sig(context) REG_sig(link, context) /* Link register */
950 # define CR_sig(context) REG_sig(ccr, context) /* Condition register */
951 /* Float Registers access */
952 # define FLOAT_sig(reg_num, context) (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
953 # define FPSCR_sig(context) (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
954 /* Exception Registers access */
955 # define DAR_sig(context) REG_sig(dar, context)
956 # define DSISR_sig(context) REG_sig(dsisr, context)
957 # define TRAP_sig(context) REG_sig(trap, context)
958 #endif /* linux */
960 #if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
961 #include <ucontext.h>
962 # define IAR_sig(context) ((context)->uc_mcontext.mc_srr0)
963 # define MSR_sig(context) ((context)->uc_mcontext.mc_srr1)
964 # define CTR_sig(context) ((context)->uc_mcontext.mc_ctr)
965 # define XER_sig(context) ((context)->uc_mcontext.mc_xer)
966 # define LR_sig(context) ((context)->uc_mcontext.mc_lr)
967 # define CR_sig(context) ((context)->uc_mcontext.mc_cr)
968 /* Exception Registers access */
969 # define DAR_sig(context) ((context)->uc_mcontext.mc_dar)
970 # define DSISR_sig(context) ((context)->uc_mcontext.mc_dsisr)
971 # define TRAP_sig(context) ((context)->uc_mcontext.mc_exc)
972 #endif /* __FreeBSD__|| __FreeBSD_kernel__ */
974 #ifdef __APPLE__
975 # include <sys/ucontext.h>
976 typedef struct ucontext SIGCONTEXT;
977 /* All Registers access - only for local access */
978 # define REG_sig(reg_name, context) ((context)->uc_mcontext->ss.reg_name)
979 # define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
980 # define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
981 # define VECREG_sig(reg_name, context) ((context)->uc_mcontext->vs.reg_name)
982 /* Gpr Registers access */
983 # define GPR_sig(reg_num, context) REG_sig(r##reg_num, context)
984 # define IAR_sig(context) REG_sig(srr0, context) /* Program counter */
985 # define MSR_sig(context) REG_sig(srr1, context) /* Machine State Register (Supervisor) */
986 # define CTR_sig(context) REG_sig(ctr, context)
987 # define XER_sig(context) REG_sig(xer, context) /* Link register */
988 # define LR_sig(context) REG_sig(lr, context) /* User's integer exception register */
989 # define CR_sig(context) REG_sig(cr, context) /* Condition register */
990 /* Float Registers access */
991 # define FLOAT_sig(reg_num, context) FLOATREG_sig(fpregs[reg_num], context)
992 # define FPSCR_sig(context) ((double)FLOATREG_sig(fpscr, context))
993 /* Exception Registers access */
994 # define DAR_sig(context) EXCEPREG_sig(dar, context) /* Fault registers for coredump */
995 # define DSISR_sig(context) EXCEPREG_sig(dsisr, context)
996 # define TRAP_sig(context) EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
997 #endif /* __APPLE__ */
999 int cpu_signal_handler(int host_signum, void *pinfo,
1000 void *puc)
1002 siginfo_t *info = pinfo;
1003 #if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
1004 ucontext_t *uc = puc;
1005 #else
1006 struct ucontext *uc = puc;
1007 #endif
1008 unsigned long pc;
1009 int is_write;
1011 pc = IAR_sig(uc);
1012 is_write = 0;
1013 #if 0
1014 /* ppc 4xx case */
1015 if (DSISR_sig(uc) & 0x00800000)
1016 is_write = 1;
1017 #else
1018 if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
1019 is_write = 1;
1020 #endif
1021 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1022 is_write, &uc->uc_sigmask, puc);
1025 #elif defined(__alpha__)
1027 int cpu_signal_handler(int host_signum, void *pinfo,
1028 void *puc)
1030 siginfo_t *info = pinfo;
1031 struct ucontext *uc = puc;
1032 uint32_t *pc = uc->uc_mcontext.sc_pc;
1033 uint32_t insn = *pc;
1034 int is_write = 0;
1036 /* XXX: need kernel patch to get write flag faster */
1037 switch (insn >> 26) {
1038 case 0x0d: // stw
1039 case 0x0e: // stb
1040 case 0x0f: // stq_u
1041 case 0x24: // stf
1042 case 0x25: // stg
1043 case 0x26: // sts
1044 case 0x27: // stt
1045 case 0x2c: // stl
1046 case 0x2d: // stq
1047 case 0x2e: // stl_c
1048 case 0x2f: // stq_c
1049 is_write = 1;
1052 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1053 is_write, &uc->uc_sigmask, puc);
1055 #elif defined(__sparc__)
1057 int cpu_signal_handler(int host_signum, void *pinfo,
1058 void *puc)
1060 siginfo_t *info = pinfo;
1061 int is_write;
1062 uint32_t insn;
1063 #if !defined(__arch64__) || defined(CONFIG_SOLARIS)
1064 uint32_t *regs = (uint32_t *)(info + 1);
1065 void *sigmask = (regs + 20);
1066 /* XXX: is there a standard glibc define ? */
1067 unsigned long pc = regs[1];
1068 #else
1069 #ifdef __linux__
1070 struct sigcontext *sc = puc;
1071 unsigned long pc = sc->sigc_regs.tpc;
1072 void *sigmask = (void *)sc->sigc_mask;
1073 #elif defined(__OpenBSD__)
1074 struct sigcontext *uc = puc;
1075 unsigned long pc = uc->sc_pc;
1076 void *sigmask = (void *)(long)uc->sc_mask;
1077 #endif
1078 #endif
1080 /* XXX: need kernel patch to get write flag faster */
1081 is_write = 0;
1082 insn = *(uint32_t *)pc;
1083 if ((insn >> 30) == 3) {
1084 switch((insn >> 19) & 0x3f) {
1085 case 0x05: // stb
1086 case 0x15: // stba
1087 case 0x06: // sth
1088 case 0x16: // stha
1089 case 0x04: // st
1090 case 0x14: // sta
1091 case 0x07: // std
1092 case 0x17: // stda
1093 case 0x0e: // stx
1094 case 0x1e: // stxa
1095 case 0x24: // stf
1096 case 0x34: // stfa
1097 case 0x27: // stdf
1098 case 0x37: // stdfa
1099 case 0x26: // stqf
1100 case 0x36: // stqfa
1101 case 0x25: // stfsr
1102 case 0x3c: // casa
1103 case 0x3e: // casxa
1104 is_write = 1;
1105 break;
1108 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1109 is_write, sigmask, NULL);
1112 #elif defined(__arm__)
1114 int cpu_signal_handler(int host_signum, void *pinfo,
1115 void *puc)
1117 siginfo_t *info = pinfo;
1118 struct ucontext *uc = puc;
1119 unsigned long pc;
1120 int is_write;
1122 #if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
1123 pc = uc->uc_mcontext.gregs[R15];
1124 #else
1125 pc = uc->uc_mcontext.arm_pc;
1126 #endif
1127 /* XXX: compute is_write */
1128 is_write = 0;
1129 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1130 is_write,
1131 &uc->uc_sigmask, puc);
1134 #elif defined(__mc68000)
1136 int cpu_signal_handler(int host_signum, void *pinfo,
1137 void *puc)
1139 siginfo_t *info = pinfo;
1140 struct ucontext *uc = puc;
1141 unsigned long pc;
1142 int is_write;
1144 pc = uc->uc_mcontext.gregs[16];
1145 /* XXX: compute is_write */
1146 is_write = 0;
1147 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1148 is_write,
1149 &uc->uc_sigmask, puc);
1152 #elif defined(__ia64)
1154 #ifndef __ISR_VALID
1155 /* This ought to be in <bits/siginfo.h>... */
1156 # define __ISR_VALID 1
1157 #endif
1159 int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
1161 siginfo_t *info = pinfo;
1162 struct ucontext *uc = puc;
1163 unsigned long ip;
1164 int is_write = 0;
1166 ip = uc->uc_mcontext.sc_ip;
1167 switch (host_signum) {
1168 case SIGILL:
1169 case SIGFPE:
1170 case SIGSEGV:
1171 case SIGBUS:
1172 case SIGTRAP:
1173 if (info->si_code && (info->si_segvflags & __ISR_VALID))
1174 /* ISR.W (write-access) is bit 33: */
1175 is_write = (info->si_isr >> 33) & 1;
1176 break;
1178 default:
1179 break;
1181 return handle_cpu_signal(ip, (unsigned long)info->si_addr,
1182 is_write,
1183 (sigset_t *)&uc->uc_sigmask, puc);
1186 #elif defined(__s390__)
1188 int cpu_signal_handler(int host_signum, void *pinfo,
1189 void *puc)
1191 siginfo_t *info = pinfo;
1192 struct ucontext *uc = puc;
1193 unsigned long pc;
1194 uint16_t *pinsn;
1195 int is_write = 0;
1197 pc = uc->uc_mcontext.psw.addr;
1199 /* ??? On linux, the non-rt signal handler has 4 (!) arguments instead
1200 of the normal 2 arguments. The 3rd argument contains the "int_code"
1201 from the hardware which does in fact contain the is_write value.
1202 The rt signal handler, as far as I can tell, does not give this value
1203 at all. Not that we could get to it from here even if it were. */
1204 /* ??? This is not even close to complete, since it ignores all
1205 of the read-modify-write instructions. */
1206 pinsn = (uint16_t *)pc;
1207 switch (pinsn[0] >> 8) {
1208 case 0x50: /* ST */
1209 case 0x42: /* STC */
1210 case 0x40: /* STH */
1211 is_write = 1;
1212 break;
1213 case 0xc4: /* RIL format insns */
1214 switch (pinsn[0] & 0xf) {
1215 case 0xf: /* STRL */
1216 case 0xb: /* STGRL */
1217 case 0x7: /* STHRL */
1218 is_write = 1;
1220 break;
1221 case 0xe3: /* RXY format insns */
1222 switch (pinsn[2] & 0xff) {
1223 case 0x50: /* STY */
1224 case 0x24: /* STG */
1225 case 0x72: /* STCY */
1226 case 0x70: /* STHY */
1227 case 0x8e: /* STPQ */
1228 case 0x3f: /* STRVH */
1229 case 0x3e: /* STRV */
1230 case 0x2f: /* STRVG */
1231 is_write = 1;
1233 break;
1235 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1236 is_write, &uc->uc_sigmask, puc);
1239 #elif defined(__mips__)
1241 int cpu_signal_handler(int host_signum, void *pinfo,
1242 void *puc)
1244 siginfo_t *info = pinfo;
1245 struct ucontext *uc = puc;
1246 greg_t pc = uc->uc_mcontext.pc;
1247 int is_write;
1249 /* XXX: compute is_write */
1250 is_write = 0;
1251 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1252 is_write, &uc->uc_sigmask, puc);
1255 #elif defined(__hppa__)
1257 int cpu_signal_handler(int host_signum, void *pinfo,
1258 void *puc)
1260 struct siginfo *info = pinfo;
1261 struct ucontext *uc = puc;
1262 unsigned long pc = uc->uc_mcontext.sc_iaoq[0];
1263 uint32_t insn = *(uint32_t *)pc;
1264 int is_write = 0;
1266 /* XXX: need kernel patch to get write flag faster. */
1267 switch (insn >> 26) {
1268 case 0x1a: /* STW */
1269 case 0x19: /* STH */
1270 case 0x18: /* STB */
1271 case 0x1b: /* STWM */
1272 is_write = 1;
1273 break;
1275 case 0x09: /* CSTWX, FSTWX, FSTWS */
1276 case 0x0b: /* CSTDX, FSTDX, FSTDS */
1277 /* Distinguish from coprocessor load ... */
1278 is_write = (insn >> 9) & 1;
1279 break;
1281 case 0x03:
1282 switch ((insn >> 6) & 15) {
1283 case 0xa: /* STWS */
1284 case 0x9: /* STHS */
1285 case 0x8: /* STBS */
1286 case 0xe: /* STWAS */
1287 case 0xc: /* STBYS */
1288 is_write = 1;
1290 break;
1293 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1294 is_write, &uc->uc_sigmask, puc);
1297 #else
1299 #error host CPU specific signal handler needed
1301 #endif
1303 #endif /* !defined(CONFIG_SOFTMMU) */