accel/tcg/user-exec.c
/*
 *  User emulator execution
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "hw/core/tcg-cpu-ops.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg/tcg.h"
#include "qemu/bitops.h"
#include "exec/cpu_ldst.h"
#include "exec/translate-all.h"
#include "exec/helper-proto.h"
#include "qemu/atomic128.h"
#include "trace/trace-root.h"
#include "internal.h"

__thread uintptr_t helper_retaddr;

//#define DEBUG_SIGNAL

/*
 * Adjust the pc to pass to cpu_restore_state; return the memop type.
 */
MMUAccessType adjust_signal_pc(uintptr_t *pc, bool is_write)
{
    switch (helper_retaddr) {
    default:
        /*
         * Fault during host memory operation within a helper function.
         * The helper's host return address, saved here, gives us a
         * pointer into the generated code that will unwind to the
         * correct guest pc.
         */
        *pc = helper_retaddr;
        break;

    case 0:
        /*
         * Fault during host memory operation within generated code.
         * (Or, an unrelated bug within qemu, but we can't tell from here.)
         *
         * We take the host pc from the signal frame.  However, we cannot
         * use that value directly.  Within cpu_restore_state_from_tb, we
         * assume PC comes from GETPC(), as used by the helper functions,
         * so we adjust the address by -GETPC_ADJ to form an address that
         * is within the call insn, so that the address does not accidentally
         * match the beginning of the next guest insn.  However, when the
         * pc comes from the signal frame it points to the actual faulting
         * host memory insn and not the return from a call insn.
         *
         * Therefore, adjust to compensate for what will be done later
         * by cpu_restore_state_from_tb.
         */
        *pc += GETPC_ADJ;
        break;

    case 1:
        /*
         * Fault during host read for translation, or loosely, "execution".
         *
         * The guest pc is already pointing to the start of the TB for which
         * code is being generated.  If the guest translator manages the
         * page crossings correctly, this is exactly the correct address
         * (and if the translator doesn't handle page boundaries correctly
         * there's little we can do about that here).  Therefore, do not
         * trigger the unwinder.
         *
         * Like tb_gen_code, release the memory lock before cpu_loop_exit.
         */
        mmap_unlock();
        *pc = 0;
        return MMU_INST_FETCH;
    }

    return is_write ? MMU_DATA_STORE : MMU_DATA_LOAD;
}
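
/*
 * Illustrative sketch (not part of upstream QEMU): the host SIGSEGV
 * handler in linux-user/signal.c feeds the raw signal pc through this
 * function before unwinding.  host_signal_pc() and host_signal_write()
 * are the per-host accessors from linux-user/host/<arch>/host-signal.h;
 * "info" and "uc" come from the signal frame.
 */
#if 0
uintptr_t pc = host_signal_pc(uc);
bool is_write = host_signal_write(info, uc);
MMUAccessType access_type = adjust_signal_pc(&pc, is_write);
#endif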

/**
 * handle_sigsegv_accerr_write:
 * @cpu: the cpu context
 * @old_set: the sigset_t from the signal ucontext_t
 * @host_pc: the host pc, adjusted for the signal
 * @guest_addr: the guest address of the fault
 *
 * Return true if the write fault has been handled, and should be re-tried.
 *
 * Note that it is important that we don't call page_unprotect() unless
 * this is really a "write to nonwriteable page" fault, because
 * page_unprotect() assumes that if it is called for an access to
 * a page that's writeable this means we had two threads racing and
 * another thread got there first and already made the page writeable;
 * so we will retry the access.  If we were to call page_unprotect()
 * for some other kind of fault that should really be passed to the
 * guest, we'd end up in an infinite loop of retrying the faulting access.
 */
bool handle_sigsegv_accerr_write(CPUState *cpu, sigset_t *old_set,
                                 uintptr_t host_pc, abi_ptr guest_addr)
{
    switch (page_unprotect(guest_addr, host_pc)) {
    case 0:
        /*
         * Fault not caused by a page marked unwritable to protect
         * cached translations, must be the guest binary's problem.
         */
        return false;
    case 1:
        /*
         * Fault caused by protection of cached translation; TBs
         * invalidated, so resume execution.
         */
        return true;
    case 2:
        /*
         * Fault caused by protection of cached translation, and the
         * currently executing TB was modified and must be exited immediately.
         */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        cpu_loop_exit_noexc(cpu);
        /* NORETURN */
    default:
        g_assert_not_reached();
    }
}
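
/*
 * Illustrative sketch, loosely modeled on the handler in
 * linux-user/signal.c (signatures may differ by version): per the
 * warning above, only a SEGV_ACCERR write is routed through
 * handle_sigsegv_accerr_write(); everything else is delivered to the
 * guest.  "pc", "access_type" etc. continue the sketch following
 * adjust_signal_pc() above.
 */
#if 0
if (access_type == MMU_DATA_STORE && si_code == SEGV_ACCERR &&
    handle_sigsegv_accerr_write(cpu, old_set, pc, guest_addr)) {
    return;  /* TB protection fault handled; retry the access. */
}
cpu_loop_exit_sigsegv(cpu, guest_addr, access_type, false, pc);
#endif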

static int probe_access_internal(CPUArchState *env, target_ulong addr,
                                 int fault_size, MMUAccessType access_type,
                                 bool nonfault, uintptr_t ra)
{
    int flags;

    switch (access_type) {
    case MMU_DATA_STORE:
        flags = PAGE_WRITE;
        break;
    case MMU_DATA_LOAD:
        flags = PAGE_READ;
        break;
    case MMU_INST_FETCH:
        flags = PAGE_EXEC;
        break;
    default:
        g_assert_not_reached();
    }

    if (!guest_addr_valid_untagged(addr) ||
        page_check_range(addr, 1, flags) < 0) {
        if (nonfault) {
            return TLB_INVALID_MASK;
        } else {
            CPUState *cpu = env_cpu(env);
            CPUClass *cc = CPU_GET_CLASS(cpu);

            cc->tcg_ops->tlb_fill(cpu, addr, fault_size, access_type,
                                  MMU_USER_IDX, false, ra);
            g_assert_not_reached();
        }
    }
    return 0;
}

int probe_access_flags(CPUArchState *env, target_ulong addr,
                       MMUAccessType access_type, int mmu_idx,
                       bool nonfault, void **phost, uintptr_t ra)
{
    int flags;

    flags = probe_access_internal(env, addr, 0, access_type, nonfault, ra);
    *phost = flags ? NULL : g2h(env_cpu(env), addr);
    return flags;
}

void *probe_access(CPUArchState *env, target_ulong addr, int size,
                   MMUAccessType access_type, int mmu_idx, uintptr_t ra)
{
    int flags;

    g_assert(-(addr | TARGET_PAGE_MASK) >= size);
    flags = probe_access_internal(env, addr, size, access_type, false, ra);
    g_assert(flags == 0);

    return size ? g2h(env_cpu(env), addr) : NULL;
}
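
/*
 * Illustrative sketch (hypothetical helper name, not upstream code):
 * the nonfault form lets a target helper test whether a guest page is
 * readable without raising the exception itself.
 */
#if 0
static bool example_page_readable(CPUArchState *env, target_ulong addr,
                                  uintptr_t ra)
{
    void *host;
    int flags = probe_access_flags(env, addr, MMU_DATA_LOAD, MMU_USER_IDX,
                                   true, &host, ra);

    /* 0 means accessible (host valid); TLB_INVALID_MASK means not. */
    return flags == 0;
}
#endif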

/* The softmmu versions of these helpers are in cputlb.c.  */

/*
 * Verify that we have passed the correct MemOp to the correct function.
 *
 * We could present one function to target code, and dispatch based on
 * the MemOp, but so far we have worked hard to avoid an indirect function
 * call along the memory path.
 */
static void validate_memop(MemOpIdx oi, MemOp expected)
{
#ifdef CONFIG_DEBUG_TCG
    MemOp have = get_memop(oi) & (MO_SIZE | MO_BSWAP);
    assert(have == expected);
#endif
}
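
/*
 * Illustrative sketch (hypothetical helper name): a MemOpIdx packs a
 * MemOp together with an mmu index via make_memop_idx().  Only the
 * size and byte-swap bits are checked above, so the extra MO_ALIGN bit
 * below does not upset validate_memop(oi, MO_BEUL).
 */
#if 0
static uint32_t example_ld32_be(CPUArchState *env, abi_ptr addr, uintptr_t ra)
{
    MemOpIdx oi = make_memop_idx(MO_BEUL | MO_ALIGN, MMU_USER_IDX);
    return cpu_ldl_be_mmu(env, addr, oi, ra);
}
#endif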

static void *cpu_mmu_lookup(CPUArchState *env, target_ulong addr,
                            MemOpIdx oi, uintptr_t ra, MMUAccessType type)
{
    void *ret;

    /* TODO: Enforce guest required alignment.  */

    ret = g2h(env_cpu(env), addr);
    set_helper_retaddr(ra);
    return ret;
}
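
/*
 * Each accessor below instantiates the same bracket: cpu_mmu_lookup()
 * stores the caller's return address in helper_retaddr before host
 * memory is touched, and clear_helper_retaddr() resets it afterwards;
 * a fault in between therefore hits the "default:" arm of
 * adjust_signal_pc() above.  Schematically:
 */
#if 0
haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD); /* helper_retaddr = ra */
ret = ldub_p(haddr);                                      /* host access may fault */
clear_helper_retaddr();                                   /* helper_retaddr = 0 */
#endif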

uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr addr,
                    MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint8_t ret;

    validate_memop(oi, MO_UB);
    trace_guest_ld_before_exec(env_cpu(env), addr, oi);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
    ret = ldub_p(haddr);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

uint16_t cpu_ldw_be_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint16_t ret;

    validate_memop(oi, MO_BEUW);
    trace_guest_ld_before_exec(env_cpu(env), addr, oi);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
    ret = lduw_be_p(haddr);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

uint32_t cpu_ldl_be_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint32_t ret;

    validate_memop(oi, MO_BEUL);
    trace_guest_ld_before_exec(env_cpu(env), addr, oi);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
    ret = ldl_be_p(haddr);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

uint64_t cpu_ldq_be_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint64_t ret;

    validate_memop(oi, MO_BEQ);
    trace_guest_ld_before_exec(env_cpu(env), addr, oi);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
    ret = ldq_be_p(haddr);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

uint16_t cpu_ldw_le_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint16_t ret;

    validate_memop(oi, MO_LEUW);
    trace_guest_ld_before_exec(env_cpu(env), addr, oi);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
    ret = lduw_le_p(haddr);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

uint32_t cpu_ldl_le_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint32_t ret;

    validate_memop(oi, MO_LEUL);
    trace_guest_ld_before_exec(env_cpu(env), addr, oi);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
    ret = ldl_le_p(haddr);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

uint64_t cpu_ldq_le_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint64_t ret;

    validate_memop(oi, MO_LEQ);
    trace_guest_ld_before_exec(env_cpu(env), addr, oi);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
    ret = ldq_le_p(haddr);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}
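
/*
 * Illustrative sketch (hypothetical helper name): a target helper
 * performing a little-endian 64-bit guest load builds a MemOpIdx from
 * the matching MemOp and passes GETPC() for unwinding.
 */
#if 0
uint64_t helper_example_ld64(CPUArchState *env, target_ulong addr)
{
    MemOpIdx oi = make_memop_idx(MO_LEQ, MMU_USER_IDX);
    return cpu_ldq_le_mmu(env, addr, oi, GETPC());
}
#endif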

void cpu_stb_mmu(CPUArchState *env, abi_ptr addr, uint8_t val,
                 MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_UB);
    trace_guest_st_before_exec(env_cpu(env), addr, oi);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    stb_p(haddr, val);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_stw_be_mmu(CPUArchState *env, abi_ptr addr, uint16_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_BEUW);
    trace_guest_st_before_exec(env_cpu(env), addr, oi);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    stw_be_p(haddr, val);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_stl_be_mmu(CPUArchState *env, abi_ptr addr, uint32_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_BEUL);
    trace_guest_st_before_exec(env_cpu(env), addr, oi);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    stl_be_p(haddr, val);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_stq_be_mmu(CPUArchState *env, abi_ptr addr, uint64_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_BEQ);
    trace_guest_st_before_exec(env_cpu(env), addr, oi);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    stq_be_p(haddr, val);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_stw_le_mmu(CPUArchState *env, abi_ptr addr, uint16_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_LEUW);
    trace_guest_st_before_exec(env_cpu(env), addr, oi);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    stw_le_p(haddr, val);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_stl_le_mmu(CPUArchState *env, abi_ptr addr, uint32_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_LEUL);
    trace_guest_st_before_exec(env_cpu(env), addr, oi);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    stl_le_p(haddr, val);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_stq_le_mmu(CPUArchState *env, abi_ptr addr, uint64_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_LEQ);
    trace_guest_st_before_exec(env_cpu(env), addr, oi);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    stq_le_p(haddr, val);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}
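
/*
 * Illustrative sketch (hypothetical helper name): loads and stores pair
 * naturally for a (non-atomic) guest read-modify-write; each call
 * brackets its own host access with helper_retaddr, so a fault on
 * either side unwinds correctly.
 */
#if 0
void helper_example_or32(CPUArchState *env, target_ulong addr, uint32_t bits)
{
    MemOpIdx oi = make_memop_idx(MO_LEUL, MMU_USER_IDX);
    uint32_t old = cpu_ldl_le_mmu(env, addr, oi, GETPC());

    cpu_stl_le_mmu(env, addr, old | bits, oi, GETPC());
}
#endif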

uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr ptr)
{
    uint32_t ret;

    set_helper_retaddr(1);
    ret = ldub_p(g2h_untagged(ptr));
    clear_helper_retaddr();
    return ret;
}

uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr ptr)
{
    uint32_t ret;

    set_helper_retaddr(1);
    ret = lduw_p(g2h_untagged(ptr));
    clear_helper_retaddr();
    return ret;
}

uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr ptr)
{
    uint32_t ret;

    set_helper_retaddr(1);
    ret = ldl_p(g2h_untagged(ptr));
    clear_helper_retaddr();
    return ret;
}

uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr ptr)
{
    uint64_t ret;

    set_helper_retaddr(1);
    ret = ldq_p(g2h_untagged(ptr));
    clear_helper_retaddr();
    return ret;
}
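
/*
 * These are the translation-time fetch routines: set_helper_retaddr(1)
 * marks the access as "execution", so a fault lands in the "case 1:"
 * arm of adjust_signal_pc() above.  A target translator uses them
 * roughly as below (sketch; "dc" is a hypothetical DisasContext).
 */
#if 0
uint32_t insn = cpu_ldl_code(env, dc->base.pc_next);
#endif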

#include "ldst_common.c.inc"

/*
 * Do not allow unaligned operations to proceed.  Return the host address.
 *
 * @prot may be PAGE_READ, PAGE_WRITE, or PAGE_READ|PAGE_WRITE.
 */
static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
                               MemOpIdx oi, int size, int prot,
                               uintptr_t retaddr)
{
    /* Enforce qemu required alignment.  */
    if (unlikely(addr & (size - 1))) {
        cpu_loop_exit_atomic(env_cpu(env), retaddr);
    }
    void *ret = g2h(env_cpu(env), addr);
    set_helper_retaddr(retaddr);
    return ret;
}
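
/*
 * "addr & (size - 1)" is the usual power-of-two alignment test; for
 * size 8, any of the low three bits set means the address is not
 * 8-aligned.  An unaligned atomic raises EXCP_ATOMIC via
 * cpu_loop_exit_atomic(), and the access is restarted under the
 * stop-the-world fallback (cpu_exec_step_atomic).  Worked instances:
 */
#if 0
QEMU_BUILD_BUG_ON((0x1000 & (8 - 1)) != 0);  /* 8-aligned: proceeds */
QEMU_BUILD_BUG_ON((0x1004 & (8 - 1)) == 0);  /* unaligned: EXCP_ATOMIC */
#endif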

#include "atomic_common.c.inc"

/*
 * First set of functions passes in OI and RETADDR.
 * This makes them callable from other helpers.
 */

#define ATOMIC_NAME(X) \
    glue(glue(glue(cpu_atomic_ ## X, SUFFIX), END), _mmu)
#define ATOMIC_MMU_CLEANUP do { clear_helper_retaddr(); } while (0)
#define ATOMIC_MMU_IDX MMU_USER_IDX

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif

#if HAVE_ATOMIC128 || HAVE_CMPXCHG128
#define DATA_SIZE 16
#include "atomic_template.h"
#endif
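
/*
 * Illustrative sketch: each atomic_template.h inclusion stamps out one
 * operand size, named via ATOMIC_NAME.  For DATA_SIZE 4 little-endian,
 * ATOMIC_NAME(cmpxchg) expands to cpu_atomic_cmpxchgl_le_mmu, used
 * roughly as below (env, addr, cmpv and newv come from the calling
 * helper).
 */
#if 0
MemOpIdx oi = make_memop_idx(MO_LEUL, MMU_USER_IDX);
uint32_t old = cpu_atomic_cmpxchgl_le_mmu(env, addr, cmpv, newv, oi, GETPC());
#endif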