/*
 * User emulator execution
 *
 * Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "qemu/bitops.h"
#include "exec/cpu_ldst.h"
#include "exec/translate-all.h"
#include "exec/helper-proto.h"
#include "qemu/atomic128.h"
#include "trace/trace-root.h"
#include "trace/mem.h"

#include <sys/ucontext.h>

__thread uintptr_t helper_retaddr;

//#define DEBUG_SIGNAL
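
/*
 * Convention used by the cpu_*_data_ra and cpu_*_code helpers below:
 * before touching guest memory directly, a helper records its host
 * return address with set_helper_retaddr() and clears it again with
 * clear_helper_retaddr() once the access is done, so that
 * handle_cpu_signal() can unwind correctly if the access faults.
 */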

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
static void QEMU_NORETURN cpu_exit_tb_from_sighandler(CPUState *cpu,
                                                      sigset_t *old_set)
{
    /* XXX: use siglongjmp ? */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit_noexc(cpu);
}

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(uintptr_t pc, siginfo_t *info,
                                    int is_write, sigset_t *old_set)
{
    CPUState *cpu = current_cpu;
    CPUClass *cc;
    unsigned long address = (unsigned long)info->si_addr;
    MMUAccessType access_type = is_write ? MMU_DATA_STORE : MMU_DATA_LOAD;

    switch (helper_retaddr) {
    default:
        /*
         * Fault during host memory operation within a helper function.
         * The helper's host return address, saved here, gives us a
         * pointer into the generated code that will unwind to the
         * correct guest state.
         */
        pc = helper_retaddr;
        break;

    case 0:
        /*
         * Fault during host memory operation within generated code.
         * (Or, an unrelated bug within qemu, but we can't tell from here).
         *
         * We take the host pc from the signal frame. However, we cannot
         * use that value directly. Within cpu_restore_state_from_tb, we
         * assume PC comes from GETPC(), as used by the helper functions,
         * so we adjust the address by -GETPC_ADJ to form an address that
         * is within the call insn, so that the address does not accidentally
         * match the beginning of the next guest insn. However, when the
         * pc comes from the signal frame it points to the actual faulting
         * host memory insn and not the return from a call insn.
         *
         * Therefore, adjust to compensate for what will be done later
         * by cpu_restore_state_from_tb.
         */
        pc += GETPC_ADJ;
        break;

    case 1:
        /*
         * Fault during host read for translation, or loosely, "execution".
         *
         * The guest pc is already pointing to the start of the TB for which
         * code is being generated. If the guest translator manages the
         * page crossings correctly, this is exactly the correct address
         * (and if the translator doesn't handle page boundaries correctly
         * there's little we can do about that here). Therefore, do not
         * trigger the unwinder.
         *
         * Like tb_gen_code, release the memory lock before cpu_loop_exit.
         */
        pc = 0;
        access_type = MMU_INST_FETCH;
        mmap_unlock();
        break;
    }

    /* For synchronous signals we expect to be coming from the vCPU
     * thread (so current_cpu should be valid) and either from running
     * code or during translation which can fault as we cross pages.
     *
     * If neither is true then something has gone wrong and we should
     * abort rather than try and restart the vCPU execution.
     */
    if (!cpu || !cpu->running) {
        printf("qemu:%s received signal outside vCPU context @ pc=0x%"
               PRIxPTR "\n", __func__, pc);
        abort();
    }

#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif

    /* XXX: locking issue */
    /* Note that it is important that we don't call page_unprotect() unless
     * this is really a "write to nonwriteable page" fault, because
     * page_unprotect() assumes that if it is called for an access to
     * a page that's writeable this means we had two threads racing and
     * another thread got there first and already made the page writeable;
     * so we will retry the access. If we were to call page_unprotect()
     * for some other kind of fault that should really be passed to the
     * guest, we'd end up in an infinite loop of retrying the faulting
     * access.
     */
    if (is_write && info->si_signo == SIGSEGV && info->si_code == SEGV_ACCERR &&
        h2g_valid(address)) {
        switch (page_unprotect(h2g(address), pc)) {
        case 0:
            /* Fault not caused by a page marked unwritable to protect
             * cached translations, must be the guest binary's problem.
             */
            break;
        case 1:
            /* Fault caused by protection of cached translation; TBs
             * invalidated, so resume execution. Retain helper_retaddr
             * for a possible second fault.
             */
            return 1;
        case 2:
            /* Fault caused by protection of cached translation, and the
             * currently executing TB was modified and must be exited
             * immediately. Clear helper_retaddr for next execution.
             */
            clear_helper_retaddr();
            cpu_exit_tb_from_sighandler(cpu, old_set);
            /* NORETURN */

        default:
            g_assert_not_reached();
        }
    }

    /* Convert forcefully to guest address space, invalid addresses
       are still valid segv ones */
    address = h2g_nocheck(address);

    /*
     * There is no way the target can handle this other than raising
     * an exception. Undo signal and retaddr state prior to longjmp.
     */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    clear_helper_retaddr();

    cc = CPU_GET_CLASS(cpu);
    cc->tlb_fill(cpu, address, 0, access_type, MMU_USER_IDX, false, pc);
    g_assert_not_reached();
}

static int probe_access_internal(CPUArchState *env, target_ulong addr,
                                 int fault_size, MMUAccessType access_type,
                                 bool nonfault, uintptr_t ra)
{
    int flags;

    switch (access_type) {
    case MMU_DATA_STORE:
        flags = PAGE_WRITE;
        break;
    case MMU_DATA_LOAD:
        flags = PAGE_READ;
        break;
    case MMU_INST_FETCH:
        flags = PAGE_EXEC;
        break;
    default:
        g_assert_not_reached();
    }

    if (!guest_addr_valid(addr) || page_check_range(addr, 1, flags) < 0) {
        if (nonfault) {
            return TLB_INVALID_MASK;
        } else {
            CPUState *cpu = env_cpu(env);
            CPUClass *cc = CPU_GET_CLASS(cpu);
            cc->tlb_fill(cpu, addr, fault_size, access_type,
                         MMU_USER_IDX, false, ra);
            g_assert_not_reached();
        }
    }
    return 0;
}

int probe_access_flags(CPUArchState *env, target_ulong addr,
                       MMUAccessType access_type, int mmu_idx,
                       bool nonfault, void **phost, uintptr_t ra)
{
    int flags;

    flags = probe_access_internal(env, addr, 0, access_type, nonfault, ra);
    *phost = flags ? NULL : g2h(addr);
    return flags;
}

void *probe_access(CPUArchState *env, target_ulong addr, int size,
                   MMUAccessType access_type, int mmu_idx, uintptr_t ra)
{
    int flags;

    g_assert(-(addr | TARGET_PAGE_MASK) >= size);
    flags = probe_access_internal(env, addr, size, access_type, false, ra);
    g_assert(flags == 0);

    return size ? g2h(addr) : NULL;
}
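
/*
 * Illustrative sketch (not part of the original code): one way a target
 * helper could use probe_access_flags() to test writability without
 * raising an exception.  The helper name and its use are hypothetical.
 */
#if 0
static bool example_page_is_writable(CPUArchState *env, target_ulong addr,
                                     uintptr_t ra)
{
    void *host;
    int flags = probe_access_flags(env, addr, MMU_DATA_STORE, MMU_USER_IDX,
                                   true, &host, ra);

    /* In user-only mode the only failure flag returned is TLB_INVALID_MASK. */
    return (flags & TLB_INVALID_MASK) == 0;
}
#endif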

#if defined(__i386__)

#if defined(__NetBSD__)
#include <ucontext.h>

#define EIP_sig(context)     ((context)->uc_mcontext.__gregs[_REG_EIP])
#define TRAP_sig(context)    ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
#define ERROR_sig(context)   ((context)->uc_mcontext.__gregs[_REG_ERR])
#define MASK_sig(context)    ((context)->uc_sigmask)
#elif defined(__FreeBSD__) || defined(__DragonFly__)
#include <ucontext.h>

#define EIP_sig(context)  (*((unsigned long *)&(context)->uc_mcontext.mc_eip))
#define TRAP_sig(context)    ((context)->uc_mcontext.mc_trapno)
#define ERROR_sig(context)   ((context)->uc_mcontext.mc_err)
#define MASK_sig(context)    ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
#define EIP_sig(context)     ((context)->sc_eip)
#define TRAP_sig(context)    ((context)->sc_trapno)
#define ERROR_sig(context)   ((context)->sc_err)
#define MASK_sig(context)    ((context)->sc_mask)
#else
#define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
#define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
#define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
#define MASK_sig(context)    ((context)->uc_sigmask)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
#if defined(__NetBSD__) || defined(__FreeBSD__) || defined(__DragonFly__)
    ucontext_t *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    ucontext_t *uc = puc;
#endif
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, info,
                             trapno == 0xe ? (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc));
}

#elif defined(__x86_64__)

#if defined(__NetBSD__)
#define PC_sig(context)       _UC_MACHINE_PC(context)
#define TRAP_sig(context)     ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
#define ERROR_sig(context)    ((context)->uc_mcontext.__gregs[_REG_ERR])
#define MASK_sig(context)     ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
#define PC_sig(context)       ((context)->sc_rip)
#define TRAP_sig(context)     ((context)->sc_trapno)
#define ERROR_sig(context)    ((context)->sc_err)
#define MASK_sig(context)     ((context)->sc_mask)
#elif defined(__FreeBSD__) || defined(__DragonFly__)
#include <ucontext.h>

#define PC_sig(context)  (*((unsigned long *)&(context)->uc_mcontext.mc_rip))
#define TRAP_sig(context)     ((context)->uc_mcontext.mc_trapno)
#define ERROR_sig(context)    ((context)->uc_mcontext.mc_err)
#define MASK_sig(context)     ((context)->uc_sigmask)
#else
#define PC_sig(context)       ((context)->uc_mcontext.gregs[REG_RIP])
#define TRAP_sig(context)     ((context)->uc_mcontext.gregs[REG_TRAPNO])
#define ERROR_sig(context)    ((context)->uc_mcontext.gregs[REG_ERR])
#define MASK_sig(context)     ((context)->uc_sigmask)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    unsigned long pc;
#if defined(__NetBSD__) || defined(__FreeBSD__) || defined(__DragonFly__)
    ucontext_t *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    ucontext_t *uc = puc;
#endif

    pc = PC_sig(uc);
    return handle_cpu_signal(pc, info,
                             TRAP_sig(uc) == 0xe ? (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc));
}

#elif defined(_ARCH_PPC)

/***********************************************************************
 * signal context platform-specific definitions
 */
#ifdef __linux__
/* All Registers access - only for local access */
#define REG_sig(reg_name, context) \
    ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
#define GPR_sig(reg_num, context)      REG_sig(gpr[reg_num], context)
/* Program counter */
#define IAR_sig(context)               REG_sig(nip, context)
/* Machine State Register (Supervisor) */
#define MSR_sig(context)               REG_sig(msr, context)
/* Count register */
#define CTR_sig(context)               REG_sig(ctr, context)
/* User's integer exception register */
#define XER_sig(context)               REG_sig(xer, context)
/* Link register */
#define LR_sig(context)                REG_sig(link, context)
/* Condition register */
#define CR_sig(context)                REG_sig(ccr, context)

/* Float Registers access */
#define FLOAT_sig(reg_num, context) \
    (((double *)((char *)((context)->uc_mcontext.regs + 48 * 4)))[reg_num])
#define FPSCR_sig(context) \
    (*(int *)((char *)((context)->uc_mcontext.regs + (48 + 32 * 2) * 4)))
/* Exception Registers access */
#define DAR_sig(context)               REG_sig(dar, context)
#define DSISR_sig(context)             REG_sig(dsisr, context)
#define TRAP_sig(context)              REG_sig(trap, context)
#endif /* __linux__ */

#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <ucontext.h>
#define IAR_sig(context)               ((context)->uc_mcontext.mc_srr0)
#define MSR_sig(context)               ((context)->uc_mcontext.mc_srr1)
#define CTR_sig(context)               ((context)->uc_mcontext.mc_ctr)
#define XER_sig(context)               ((context)->uc_mcontext.mc_xer)
#define LR_sig(context)                ((context)->uc_mcontext.mc_lr)
#define CR_sig(context)                ((context)->uc_mcontext.mc_cr)
/* Exception Registers access */
#define DAR_sig(context)               ((context)->uc_mcontext.mc_dar)
#define DSISR_sig(context)             ((context)->uc_mcontext.mc_dsisr)
#define TRAP_sig(context)              ((context)->uc_mcontext.mc_exc)
#endif /* __FreeBSD__ || __FreeBSD_kernel__ */

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
    ucontext_t *uc = puc;
#else
    ucontext_t *uc = puc;
#endif
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000) {
        is_write = 1;
    }
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000)) {
        is_write = 1;
    }
#endif
    return handle_cpu_signal(pc, info, is_write, &uc->uc_sigmask);
}

#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    ucontext_t *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0f: /* stq_u */
    case 0x2e: /* stl_c */
    case 0x2f: /* stq_c */
        is_write = 1;
    }

    return handle_cpu_signal(pc, info, is_write, &uc->uc_sigmask);
}

#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(CONFIG_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define ? */
    unsigned long pc = regs[1];
#else
#ifdef __linux__
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
    unsigned long pc = uc->sc_pc;
    void *sigmask = (void *)(long)uc->sc_mask;
#elif defined(__NetBSD__)
    ucontext_t *uc = puc;
    unsigned long pc = _UC_MACHINE_PC(uc);
    void *sigmask = (void *)&uc->uc_sigmask;
#endif
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch ((insn >> 19) & 0x3f) {
        case 0x15: /* stba */
        case 0x16: /* stha */
        case 0x17: /* stda */
        case 0x1e: /* stxa */
        case 0x34: /* stfa */
        case 0x27: /* stdf */
        case 0x37: /* stdfa */
        case 0x26: /* stqf */
        case 0x36: /* stqfa */
        case 0x25: /* stfsr */
        case 0x3c: /* casa */
        case 0x3e: /* casxa */
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, info, is_write, sigmask);
}

#elif defined(__arm__)

#if defined(__NetBSD__)
#include <ucontext.h>
#include <sys/siginfo.h>
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
#if defined(__NetBSD__)
    ucontext_t *uc = puc;
    siginfo_t *si = pinfo;
#else
    ucontext_t *uc = puc;
#endif
    unsigned long pc;
    uint32_t fsr;
    int is_write;

#if defined(__NetBSD__)
    pc = uc->uc_mcontext.__gregs[_REG_R15];
#elif defined(__GLIBC__) && (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif

#ifdef __NetBSD__
    fsr = si->si_trap;
#else
    fsr = uc->uc_mcontext.error_code;
#endif
    /*
     * In the FSR, bit 11 is WnR, assuming a v6 or
     * later processor. On v5 we will always report
     * this as a read, which will fail later.
     */
    is_write = extract32(fsr, 11, 1);
    return handle_cpu_signal(pc, info, is_write, &uc->uc_sigmask);
}

#elif defined(__aarch64__)

#if defined(__NetBSD__)

#include <ucontext.h>
#include <sys/siginfo.h>

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    ucontext_t *uc = puc;
    siginfo_t *si = pinfo;
    unsigned long pc;
    int is_write;
    uint32_t esr;

    pc = uc->uc_mcontext.__gregs[_REG_PC];
    esr = si->si_trap;

    /*
     * siginfo_t::si_trap is the ESR value, for data aborts ESR.EC
     * is 0b10010x: then bit 6 is the WnR bit
     */
    is_write = extract32(esr, 27, 5) == 0x12 && extract32(esr, 6, 1) == 1;
    return handle_cpu_signal(pc, si, is_write, &uc->uc_sigmask);
}

#else

#ifndef ESR_MAGIC
/* Pre-3.16 kernel headers don't have these, so provide fallback definitions */
#define ESR_MAGIC 0x45535201
struct esr_context {
    struct _aarch64_ctx head;
    uint64_t esr;
};
#endif

static inline struct _aarch64_ctx *first_ctx(ucontext_t *uc)
{
    return (struct _aarch64_ctx *)&uc->uc_mcontext.__reserved;
}

static inline struct _aarch64_ctx *next_ctx(struct _aarch64_ctx *hdr)
{
    return (struct _aarch64_ctx *)((char *)hdr + hdr->size);
}

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    ucontext_t *uc = puc;
    uintptr_t pc = uc->uc_mcontext.pc;
    bool is_write;
    struct _aarch64_ctx *hdr;
    struct esr_context const *esrctx = NULL;

    /* Find the esr_context, which has the WnR bit in it */
    for (hdr = first_ctx(uc); hdr->magic; hdr = next_ctx(hdr)) {
        if (hdr->magic == ESR_MAGIC) {
            esrctx = (struct esr_context const *)hdr;
            break;
        }
    }

    if (esrctx) {
        /* For data aborts ESR.EC is 0b10010x: then bit 6 is the WnR bit */
        uint64_t esr = esrctx->esr;
        is_write = extract32(esr, 27, 5) == 0x12 && extract32(esr, 6, 1) == 1;
    } else {
        /*
         * Fall back to parsing instructions; will only be needed
         * for really ancient (pre-3.16) kernels.
         */
        uint32_t insn = *(uint32_t *)pc;

        is_write = ((insn & 0xbfff0000) == 0x0c000000   /* C3.3.1 */
                    || (insn & 0xbfe00000) == 0x0c800000   /* C3.3.2 */
                    || (insn & 0xbfdf0000) == 0x0d000000   /* C3.3.3 */
                    || (insn & 0xbfc00000) == 0x0d800000   /* C3.3.4 */
                    || (insn & 0x3f400000) == 0x08000000   /* C3.3.6 */
                    || (insn & 0x3bc00000) == 0x39000000   /* C3.3.13 */
                    || (insn & 0x3fc00000) == 0x3d800000   /* ... 128bit */
                    /* Ignore bits 10, 11 & 21, controlling indexing. */
                    || (insn & 0x3bc00000) == 0x38000000   /* C3.3.8-12 */
                    || (insn & 0x3fe00000) == 0x3c800000   /* ... 128bit */
                    /* Ignore bits 23 & 24, controlling indexing. */
                    || (insn & 0x3a400000) == 0x28000000); /* C3.3.7,14-16 */
    }
    return handle_cpu_signal(pc, info, is_write, &uc->uc_sigmask);
}

#endif

#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    ucontext_t *uc = puc;
    unsigned long pc;
    uint16_t *pinsn;
    int is_write = 0;

    pc = uc->uc_mcontext.psw.addr;

    /* ??? On linux, the non-rt signal handler has 4 (!) arguments instead
       of the normal 2 arguments.  The 3rd argument contains the "int_code"
       from the hardware which does in fact contain the is_write value.
       The rt signal handler, as far as I can tell, does not give this value
       at all.  Not that we could get to it from here even if it were.  */
    /* ??? This is not even close to complete, since it ignores all
       of the read-modify-write instructions.  */
    pinsn = (uint16_t *)pc;
    switch (pinsn[0] >> 8) {
    case 0xc4: /* RIL format insns */
        switch (pinsn[0] & 0xf) {
        case 0xb: /* STGRL */
        case 0x7: /* STHRL */
            is_write = 1;
        }
        break;
    case 0xe3: /* RXY format insns */
        switch (pinsn[2] & 0xff) {
        case 0x72: /* STCY */
        case 0x70: /* STHY */
        case 0x8e: /* STPQ */
        case 0x3f: /* STRVH */
        case 0x3e: /* STRV */
        case 0x2f: /* STRVG */
            is_write = 1;
        }
        break;
    }
    return handle_cpu_signal(pc, info, is_write, &uc->uc_sigmask);
}

#elif defined(__mips__)

#if defined(__mips16) || defined(__mips_micromips)
#error "Unsupported encoding"
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    ucontext_t *uc = puc;
    uintptr_t pc = uc->uc_mcontext.pc;
    uint32_t insn = *(uint32_t *)pc;
    int is_write = 0;

    /* Detect all store instructions at program counter. */
    switch ((insn >> 26) & 077) {
#if !defined(__mips_isa_rev) || __mips_isa_rev < 6
    case 070: /* SC */
    case 074: /* SCD */
#endif
        is_write = 1;
        break;
    case 023: /* COP1X */
        /* Required in all versions of MIPS64 since
           MIPS64r1 and subsequent versions of MIPS32r2. */
        switch (insn & 077) {
        case 010: /* SWXC1 */
        case 011: /* SDXC1 */
        case 015: /* SUXC1 */
            is_write = 1;
        }
        break;
    }

    return handle_cpu_signal(pc, info, is_write, &uc->uc_sigmask);
}

#elif defined(__riscv)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    ucontext_t *uc = puc;
    greg_t pc = uc->uc_mcontext.__gregs[REG_PC];
    uint32_t insn = *(uint32_t *)pc;
    int is_write = 0;

    /* Detect store by reading the instruction at the program
       counter. Note: we currently only generate 32-bit
       instructions so we thus only detect 32-bit stores */
    switch (((insn >> 0) & 0b11)) {
    case 3:
        switch (((insn >> 2) & 0b11111)) {
        case 8:
            switch (((insn >> 12) & 0b111)) {
            case 0: /* sb */
            case 1: /* sh */
            case 2: /* sw */
            case 3: /* sd */
            case 4: /* sq */
                is_write = 1;
                break;
            default:
                break;
            }
            break;
        case 9:
            switch (((insn >> 12) & 0b111)) {
            case 2: /* fsw */
            case 3: /* fsd */
            case 4: /* fsq */
                is_write = 1;
                break;
            default:
                break;
            }
            break;
        default:
            break;
        }
        break;
    default:
        /* Check for compressed instructions */
        switch (((insn >> 13) & 0b111)) {
        case 7:
            switch (insn & 0b11) {
            case 0: /* c.sd */
            case 2: /* c.sdsp */
                is_write = 1;
                break;
            default:
                break;
            }
            break;
        case 6:
            switch (insn & 0b11) {
            case 0: /* c.sw */
            case 3: /* c.swsp */
                is_write = 1;
                break;
            default:
                break;
            }
            break;
        default:
            break;
        }
        break;
    }

    return handle_cpu_signal(pc, info, is_write, &uc->uc_sigmask);
}

#else

#error host CPU specific signal handler needed

#endif

/* The softmmu versions of these helpers are in cputlb.c.  */
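
/*
 * All of the cpu_ld* and cpu_st* helpers below follow the same shape:
 * fetch the trace metadata with trace_mem_get_info(), emit the "before"
 * trace event, perform the host access through g2h(), then report the
 * access to any loaded TCG plugins via qemu_plugin_vcpu_mem_cb().
 */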

uint32_t cpu_ldub_data(CPUArchState *env, abi_ptr ptr)
{
    uint32_t ret;
    uint16_t meminfo = trace_mem_get_info(MO_UB, MMU_USER_IDX, false);

    trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
    ret = ldub_p(g2h(ptr));
    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
    return ret;
}

int cpu_ldsb_data(CPUArchState *env, abi_ptr ptr)
{
    int ret;
    uint16_t meminfo = trace_mem_get_info(MO_SB, MMU_USER_IDX, false);

    trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
    ret = ldsb_p(g2h(ptr));
    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
    return ret;
}

uint32_t cpu_lduw_be_data(CPUArchState *env, abi_ptr ptr)
{
    uint32_t ret;
    uint16_t meminfo = trace_mem_get_info(MO_BEUW, MMU_USER_IDX, false);

    trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
    ret = lduw_be_p(g2h(ptr));
    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
    return ret;
}

int cpu_ldsw_be_data(CPUArchState *env, abi_ptr ptr)
{
    int ret;
    uint16_t meminfo = trace_mem_get_info(MO_BESW, MMU_USER_IDX, false);

    trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
    ret = ldsw_be_p(g2h(ptr));
    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
    return ret;
}

uint32_t cpu_ldl_be_data(CPUArchState *env, abi_ptr ptr)
{
    uint32_t ret;
    uint16_t meminfo = trace_mem_get_info(MO_BEUL, MMU_USER_IDX, false);

    trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
    ret = ldl_be_p(g2h(ptr));
    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
    return ret;
}

uint64_t cpu_ldq_be_data(CPUArchState *env, abi_ptr ptr)
{
    uint64_t ret;
    uint16_t meminfo = trace_mem_get_info(MO_BEQ, MMU_USER_IDX, false);

    trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
    ret = ldq_be_p(g2h(ptr));
    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
    return ret;
}

uint32_t cpu_lduw_le_data(CPUArchState *env, abi_ptr ptr)
{
    uint32_t ret;
    uint16_t meminfo = trace_mem_get_info(MO_LEUW, MMU_USER_IDX, false);

    trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
    ret = lduw_le_p(g2h(ptr));
    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
    return ret;
}

int cpu_ldsw_le_data(CPUArchState *env, abi_ptr ptr)
{
    int ret;
    uint16_t meminfo = trace_mem_get_info(MO_LESW, MMU_USER_IDX, false);

    trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
    ret = ldsw_le_p(g2h(ptr));
    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
    return ret;
}

uint32_t cpu_ldl_le_data(CPUArchState *env, abi_ptr ptr)
{
    uint32_t ret;
    uint16_t meminfo = trace_mem_get_info(MO_LEUL, MMU_USER_IDX, false);

    trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
    ret = ldl_le_p(g2h(ptr));
    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
    return ret;
}

uint64_t cpu_ldq_le_data(CPUArchState *env, abi_ptr ptr)
{
    uint64_t ret;
    uint16_t meminfo = trace_mem_get_info(MO_LEQ, MMU_USER_IDX, false);

    trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
    ret = ldq_le_p(g2h(ptr));
    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
    return ret;
}

uint32_t cpu_ldub_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
{
    uint32_t ret;

    set_helper_retaddr(retaddr);
    ret = cpu_ldub_data(env, ptr);
    clear_helper_retaddr();
    return ret;
}
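
/*
 * Illustrative sketch (not part of the original code): a target helper
 * that loads a guest byte would pass GETPC() as the retaddr argument so
 * that a fault inside the load unwinds to the correct guest state.  The
 * helper name is hypothetical.
 */
#if 0
static uint32_t example_load_byte(CPUArchState *env, target_ulong addr)
{
    /* GETPC() captures the host return address used for unwinding. */
    return cpu_ldub_data_ra(env, addr, GETPC());
}
#endif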

int cpu_ldsb_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
{
    int ret;

    set_helper_retaddr(retaddr);
    ret = cpu_ldsb_data(env, ptr);
    clear_helper_retaddr();
    return ret;
}

uint32_t cpu_lduw_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
{
    uint32_t ret;

    set_helper_retaddr(retaddr);
    ret = cpu_lduw_be_data(env, ptr);
    clear_helper_retaddr();
    return ret;
}

int cpu_ldsw_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
{
    int ret;

    set_helper_retaddr(retaddr);
    ret = cpu_ldsw_be_data(env, ptr);
    clear_helper_retaddr();
    return ret;
}

uint32_t cpu_ldl_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
{
    uint32_t ret;

    set_helper_retaddr(retaddr);
    ret = cpu_ldl_be_data(env, ptr);
    clear_helper_retaddr();
    return ret;
}

uint64_t cpu_ldq_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
{
    uint64_t ret;

    set_helper_retaddr(retaddr);
    ret = cpu_ldq_be_data(env, ptr);
    clear_helper_retaddr();
    return ret;
}

uint32_t cpu_lduw_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
{
    uint32_t ret;

    set_helper_retaddr(retaddr);
    ret = cpu_lduw_le_data(env, ptr);
    clear_helper_retaddr();
    return ret;
}

int cpu_ldsw_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
{
    int ret;

    set_helper_retaddr(retaddr);
    ret = cpu_ldsw_le_data(env, ptr);
    clear_helper_retaddr();
    return ret;
}

uint32_t cpu_ldl_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
{
    uint32_t ret;

    set_helper_retaddr(retaddr);
    ret = cpu_ldl_le_data(env, ptr);
    clear_helper_retaddr();
    return ret;
}

uint64_t cpu_ldq_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
{
    uint64_t ret;

    set_helper_retaddr(retaddr);
    ret = cpu_ldq_le_data(env, ptr);
    clear_helper_retaddr();
    return ret;
}

void cpu_stb_data(CPUArchState *env, abi_ptr ptr, uint32_t val)
{
    uint16_t meminfo = trace_mem_get_info(MO_UB, MMU_USER_IDX, true);

    trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
    stb_p(g2h(ptr), val);
    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
}

void cpu_stw_be_data(CPUArchState *env, abi_ptr ptr, uint32_t val)
{
    uint16_t meminfo = trace_mem_get_info(MO_BEUW, MMU_USER_IDX, true);

    trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
    stw_be_p(g2h(ptr), val);
    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
}

void cpu_stl_be_data(CPUArchState *env, abi_ptr ptr, uint32_t val)
{
    uint16_t meminfo = trace_mem_get_info(MO_BEUL, MMU_USER_IDX, true);

    trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
    stl_be_p(g2h(ptr), val);
    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
}

void cpu_stq_be_data(CPUArchState *env, abi_ptr ptr, uint64_t val)
{
    uint16_t meminfo = trace_mem_get_info(MO_BEQ, MMU_USER_IDX, true);

    trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
    stq_be_p(g2h(ptr), val);
    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
}

void cpu_stw_le_data(CPUArchState *env, abi_ptr ptr, uint32_t val)
{
    uint16_t meminfo = trace_mem_get_info(MO_LEUW, MMU_USER_IDX, true);

    trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
    stw_le_p(g2h(ptr), val);
    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
}

void cpu_stl_le_data(CPUArchState *env, abi_ptr ptr, uint32_t val)
{
    uint16_t meminfo = trace_mem_get_info(MO_LEUL, MMU_USER_IDX, true);

    trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
    stl_le_p(g2h(ptr), val);
    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
}

void cpu_stq_le_data(CPUArchState *env, abi_ptr ptr, uint64_t val)
{
    uint16_t meminfo = trace_mem_get_info(MO_LEQ, MMU_USER_IDX, true);

    trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
    stq_le_p(g2h(ptr), val);
    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
}

void cpu_stb_data_ra(CPUArchState *env, abi_ptr ptr,
                     uint32_t val, uintptr_t retaddr)
{
    set_helper_retaddr(retaddr);
    cpu_stb_data(env, ptr, val);
    clear_helper_retaddr();
}

void cpu_stw_be_data_ra(CPUArchState *env, abi_ptr ptr,
                        uint32_t val, uintptr_t retaddr)
{
    set_helper_retaddr(retaddr);
    cpu_stw_be_data(env, ptr, val);
    clear_helper_retaddr();
}

void cpu_stl_be_data_ra(CPUArchState *env, abi_ptr ptr,
                        uint32_t val, uintptr_t retaddr)
{
    set_helper_retaddr(retaddr);
    cpu_stl_be_data(env, ptr, val);
    clear_helper_retaddr();
}

void cpu_stq_be_data_ra(CPUArchState *env, abi_ptr ptr,
                        uint64_t val, uintptr_t retaddr)
{
    set_helper_retaddr(retaddr);
    cpu_stq_be_data(env, ptr, val);
    clear_helper_retaddr();
}

void cpu_stw_le_data_ra(CPUArchState *env, abi_ptr ptr,
                        uint32_t val, uintptr_t retaddr)
{
    set_helper_retaddr(retaddr);
    cpu_stw_le_data(env, ptr, val);
    clear_helper_retaddr();
}

void cpu_stl_le_data_ra(CPUArchState *env, abi_ptr ptr,
                        uint32_t val, uintptr_t retaddr)
{
    set_helper_retaddr(retaddr);
    cpu_stl_le_data(env, ptr, val);
    clear_helper_retaddr();
}

void cpu_stq_le_data_ra(CPUArchState *env, abi_ptr ptr,
                        uint64_t val, uintptr_t retaddr)
{
    set_helper_retaddr(retaddr);
    cpu_stq_le_data(env, ptr, val);
    clear_helper_retaddr();
}
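
/*
 * The cpu_*_code loaders below pass the sentinel value 1 to
 * set_helper_retaddr(): handle_cpu_signal() treats a fault with
 * helper_retaddr == 1 as a fault during translation (MMU_INST_FETCH)
 * and does not trigger the unwinder.
 */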

uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr ptr)
{
    uint32_t ret;

    set_helper_retaddr(1);
    ret = ldub_p(g2h(ptr));
    clear_helper_retaddr();
    return ret;
}

uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr ptr)
{
    uint32_t ret;

    set_helper_retaddr(1);
    ret = lduw_p(g2h(ptr));
    clear_helper_retaddr();
    return ret;
}

uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr ptr)
{
    uint32_t ret;

    set_helper_retaddr(1);
    ret = ldl_p(g2h(ptr));
    clear_helper_retaddr();
    return ret;
}

uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr ptr)
{
    uint64_t ret;

    set_helper_retaddr(1);
    ret = ldq_p(g2h(ptr));
    clear_helper_retaddr();
    return ret;
}

/* Do not allow unaligned operations to proceed.  Return the host address. */
static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
                               int size, uintptr_t retaddr)
{
    /* Enforce qemu required alignment. */
    if (unlikely(addr & (size - 1))) {
        cpu_loop_exit_atomic(env_cpu(env), retaddr);
    }
    void *ret = g2h(addr);
    set_helper_retaddr(retaddr);
    return ret;
}

/* Macro to call the above, with local variables from the use context. */
#define ATOMIC_MMU_DECLS do {} while (0)
#define ATOMIC_MMU_LOOKUP  atomic_mmu_lookup(env, addr, DATA_SIZE, GETPC())
#define ATOMIC_MMU_CLEANUP do { clear_helper_retaddr(); } while (0)
#define ATOMIC_MMU_IDX MMU_USER_IDX

#define ATOMIC_NAME(X)   HELPER(glue(glue(atomic_ ## X, SUFFIX), END))
#define EXTRA_ARGS
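
/*
 * Illustrative note (beyond the original comments): each inclusion of
 * "atomic_template.h" below expands ATOMIC_MMU_LOOKUP and
 * ATOMIC_MMU_CLEANUP around the host atomic operation, so every atomic
 * helper in user mode is bracketed by set_helper_retaddr() and
 * clear_helper_retaddr() via atomic_mmu_lookup() above.
 */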
1228 #include "atomic_common.c.inc"
1231 #include "atomic_template.h"
1234 #include "atomic_template.h"
1237 #include "atomic_template.h"
1239 #ifdef CONFIG_ATOMIC64
1241 #include "atomic_template.h"
1244 /* The following is only callable from other helpers, and matches up
1245 with the softmmu version. */
1247 #if HAVE_ATOMIC128 || HAVE_CMPXCHG128
1251 #undef ATOMIC_MMU_LOOKUP
1253 #define EXTRA_ARGS , TCGMemOpIdx oi, uintptr_t retaddr
1254 #define ATOMIC_NAME(X) \
1255 HELPER(glue(glue(glue(atomic_ ## X, SUFFIX), END), _mmu))
1256 #define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, DATA_SIZE, retaddr)
1258 #define DATA_SIZE 16
1259 #include "atomic_template.h"