/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include "config.h"
#define CPU_NO_GLOBAL_REGS
#include "exec.h"
#include "disas.h"
#include "tcg.h"

#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#ifdef __linux__
#include <sys/ucontext.h>
#endif
#endif

#if defined(__sparc__) && !defined(HOST_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif

int tb_invalidated_flag;

//#define DEBUG_EXEC
//#define DEBUG_SIGNAL
void cpu_loop_exit(void)
{
    /* NOTE: the registers at this point must be saved by hand because
       longjmp restores them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}
/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
#ifdef __linux__
    struct ucontext *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#endif
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
#ifdef __linux__
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
#elif defined(__OpenBSD__)
        sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
#endif
    }
#endif
    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}
/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);

    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if async event occurs before
           the TB starts executing.  */
        cpu_pc_from_tb(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
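
/* A note on the next_tb encoding used above (and in the main loop below):
   tcg_qemu_tb_exec() returns the address of the TB that just ran, with its
   two low bits used as a tag.  Tags 0 and 1 name the jump slot through
   which the block exited (tb_add_jump() uses them for direct block
   chaining); tag 2 means execution was cut short, e.g. because the
   instruction counter expired before the block body ran. */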
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}
static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}
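
/* The lookup above is two-level: tb_jmp_cache is a direct-mapped cache
   indexed by a hash of the virtual PC, so the common case costs one load
   and a few compares.  On a miss, tb_find_slow() re-resolves the block
   through the physical hash table keyed by the physical PC, which is what
   keeps lookups correct when virtual mappings change or code pages are
   invalidated. */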
static CPUDebugExcpHandler *debug_excp_handler;

CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    CPUDebugExcpHandler *old_handler = debug_excp_handler;

    debug_excp_handler = handler;
    return old_handler;
}
static void cpu_handle_debug_exception(CPUState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit)
        for (wp = env->watchpoints; wp != NULL; wp = wp->next)
            wp->flags &= ~BP_WATCHPOINT_HIT;

    if (debug_excp_handler)
        debug_excp_handler(env);
}
/* main execution loop */

int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;

    env_to_regs();
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
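    /* Worked example of the DF encoding above: eflags bit 10 is the x86
       direction flag, so (env->eflags >> 10) & 1 is 0 or 1, and DF becomes
       +1 (string ops increment) or -1 (string ops decrement).  While
       translated code runs, the arithmetic flags live in CC_SRC and eflags
       keeps only the remaining bits; the standard format is restored on
       exit from cpu_exec(). */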
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;
    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG)
                        cpu_handle_debug_exception(env);
                    break;
                } else if (env->user_mode_only) {
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
                } else {
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#endif
                }
                env->exception_index = -1;
            }
#ifdef USE_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0) {
                int ret;
                env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif

            if (kvm_enabled()) {
                kvm_cpu_exec(env);
                longjmp(env->jmp_env, 1);
            }
            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                               CPU_INTERRUPT_FIQ |
                                               CPU_INTERRUPT_SMI |
                                               CPU_INTERRUPT_NMI);
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            if (loglevel & CPU_LOG_TB_IN_ASM) {
                                fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
                            }
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            if (loglevel & CPU_LOG_TB_IN_ASM)
                                fprintf(logfile, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psret != 0)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            env->exception_index = env->interrupt_index;
                            do_interrupt(env);
                            env->interrupt_index = 0;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
                            next_tb = 0;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_EXIT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
                        env->exception_index = EXCP_INTERRUPT;
                        cpu_loop_exit();
                    }
                }
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
                    regs_to_env();
#if defined(TARGET_I386)
                    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                    cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SPARC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_PPC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_MIPS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SH4)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_ALPHA)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_CRIS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_EXEC)) {
                    fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                            (long)tb->tc_ptr, tb->pc,
                            lookup_symbol(tb->pc));
                }
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                {
                    if (next_tb != 0 &&
#ifdef USE_KQEMU
                        (env->kqemu_enabled != 2) &&
#endif
                        tb->page_addr[1] == -1) {
                        tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                    }
                }
                spin_unlock(&tb_lock);
                env->current_tb = tb;
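
                /* Design note: tb_add_jump() above patches the jump slot
                   named by the low bits of next_tb so that the previous
                   block branches straight into this one on later
                   executions, bypassing this lookup loop entirely.  TBs
                   whose code crosses a page boundary are left unchained,
                   because either page may be invalidated independently of
                   the other. */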
                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                if (unlikely (env->interrupt_request & CPU_INTERRUPT_EXIT))
                    env->current_tb = NULL;

                while (env->current_tb) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
                    env->current_tb = NULL;
                    if ((next_tb & 3) == 2) {
                        int insns_left;
                        /* Instruction counter expired.  */
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC.  */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
                }
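
                /* The icount scheme in the block above: generated code
                   counts down the 16-bit env->icount_decr.u16.low and
                   exits with (next_tb & 3) == 2 when it reaches zero,
                   while env->icount_extra holds the instructions still
                   budgeted beyond what fits in 16 bits.  On expiry the
                   decrementer is refilled from icount_extra in chunks of
                   at most 0xffff; once both are exhausted, any remainder
                   runs via cpu_exec_nocache() and the loop exits with
                   EXCP_INTERRUPT. */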
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(USE_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* for(;;) */
        }
    } /* for(;;) */
#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
#include "hostregs_helper.h"

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}
/* must only be called from the generated code as
   an exception can be generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}
#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */
#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0. 'old_set' is the
   signal set which should be restored */
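/* Return-value protocol shared by all the handle_cpu_signal() variants
   below: 1 means the fault belonged to the emulated CPU and was handled
   (the guest page was unprotected, or a guest exception was raised and
   control longjmp'd back into cpu_exec()); 0 means it was not an MMU
   fault of the guest and the caller should treat it as a real host
   fault. */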
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env, env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined (TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_ALPHA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_CRIS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#else
#error unsupported target CPU
#endif
#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)  (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)    ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext->es.err)
#else
# define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}
#elif defined(__x86_64__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;

    pc = uc->uc_mcontext.gregs[REG_RIP];
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
                             (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}
#elif defined(__powerpc__)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)     ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)      REG_sig(gpr[reg_num], context)
# define IAR_sig(context)               REG_sig(nip, context)  /* Program counter */
# define MSR_sig(context)               REG_sig(msr, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)               REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)               REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                REG_sig(link, context) /* Link register */
# define CR_sig(context)                REG_sig(ccr, context)  /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)    (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)             (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)               REG_sig(dar, context)
# define DSISR_sig(context)             REG_sig(dsisr, context)
# define TRAP_sig(context)              REG_sig(trap, context)
#endif /* linux */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)             ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context)        ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context)        ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)          ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)              REG_sig(r##reg_num, context)
# define IAR_sig(context)                       REG_sig(srr0, context) /* Program counter */
# define MSR_sig(context)                       REG_sig(srr1, context) /* Machine State Register (Supervisor) */
# define CTR_sig(context)                       REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)                       REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                        REG_sig(lr, context)   /* Link register */
# define CR_sig(context)                        REG_sig(cr, context)   /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)            FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)                     ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)                       EXCEPREG_sig(dar, context)   /* Fault registers for coredump */
# define DSISR_sig(context)                     EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)                      EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(HOST_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define ? */
    unsigned long pc = regs[1];
#else
#ifdef __linux__
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
    unsigned long pc = uc->sc_pc;
    void *sigmask = (void *)(long)uc->sc_mask;
#endif
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x06: // sth
        case 0x04: // st
        case 0x07: // std
        case 0x24: // stf
        case 0x27: // stdf
        case 0x25: // stfsr
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}
#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
      case SIGILL:
      case SIGFPE:
      case SIGSEGV:
      case SIGBUS:
      case SIGTRAP:
          if (info->si_code && (info->si_segvflags & __ISR_VALID))
              /* ISR.W (write-access) is bit 33:  */
              is_write = (info->si_isr >> 33) & 1;
          break;

      default:
          break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.sc_iaoq[0];
    /* FIXME: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */
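
/* Illustrative sketch (not part of the original source, hence kept under
   #if 0): one plausible way a host process could route SIGSEGV into the
   cpu_signal_handler() defined above.  The function names below are
   hypothetical; in QEMU the actual registration lives in the host signal
   setup code, not in this file. */
#if 0
#include <signal.h>
#include <string.h>
#include <stdlib.h>

static void host_segv_handler(int host_signum, siginfo_t *info, void *puc)
{
    /* cpu_signal_handler() returns 1 when the fault belonged to the
       emulated CPU and was handled; 0 means a genuine host fault. */
    if (!cpu_signal_handler(host_signum, info, puc))
        abort();
}

static void install_host_segv_handler(void)
{
    struct sigaction act;

    memset(&act, 0, sizeof(act));
    act.sa_sigaction = host_segv_handler;
    act.sa_flags = SA_SIGINFO;
    sigaction(SIGSEGV, &act, NULL);
}
#endif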