accel/tcg/user-exec.c
/*
 *  User emulator execution
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "hw/core/tcg-cpu-ops.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg/tcg.h"
#include "qemu/bitops.h"
#include "exec/cpu_ldst.h"
#include "exec/translate-all.h"
#include "exec/helper-proto.h"
#include "qemu/atomic128.h"
#include "trace/trace-root.h"
#include "tcg/tcg-ldst.h"
#include "internal.h"

__thread uintptr_t helper_retaddr;

//#define DEBUG_SIGNAL
/*
 * Adjust the pc to pass to cpu_restore_state; return the memop type.
 */
MMUAccessType adjust_signal_pc(uintptr_t *pc, bool is_write)
{
    switch (helper_retaddr) {
    default:
        /*
         * Fault during host memory operation within a helper function.
         * The helper's host return address, saved here, gives us a
         * pointer into the generated code that will unwind to the
         * correct guest pc.
         */
        *pc = helper_retaddr;
        break;

    case 0:
        /*
         * Fault during host memory operation within generated code.
         * (Or, an unrelated bug within qemu, but we can't tell from here).
         *
         * We take the host pc from the signal frame.  However, we cannot
         * use that value directly.  Within cpu_restore_state_from_tb, we
         * assume PC comes from GETPC(), as used by the helper functions,
         * so we adjust the address by -GETPC_ADJ to form an address that
         * is within the call insn, so that the address does not accidentally
         * match the beginning of the next guest insn.  However, when the
         * pc comes from the signal frame it points to the actual faulting
         * host memory insn and not the return from a call insn.
         *
         * Therefore, adjust to compensate for what will be done later
         * by cpu_restore_state_from_tb.
         */
        *pc += GETPC_ADJ;
        break;

    case 1:
        /*
         * Fault during host read for translation, or loosely, "execution".
         *
         * The guest pc is already pointing to the start of the TB for which
         * code is being generated.  If the guest translator manages the
         * page crossings correctly, this is exactly the correct address
         * (and if the translator doesn't handle page boundaries correctly
         * there's little we can do about that here).  Therefore, do not
         * trigger the unwinder.
         *
         * Like tb_gen_code, release the memory lock before cpu_loop_exit.
         */
        mmap_unlock();
        *pc = 0;
        return MMU_INST_FETCH;
    }

    return is_write ? MMU_DATA_STORE : MMU_DATA_LOAD;
}
/**
 * handle_sigsegv_accerr_write:
 * @cpu: the cpu context
 * @old_set: the sigset_t from the signal ucontext_t
 * @host_pc: the host pc, adjusted for the signal
 * @guest_addr: the guest address of the fault
 *
 * Return true if the write fault has been handled, and should be re-tried.
 *
 * Note that it is important that we don't call page_unprotect() unless
 * this is really a "write to nonwritable page" fault, because
 * page_unprotect() assumes that if it is called for an access to
 * a page that's writable this means we had two threads racing and
 * another thread got there first and already made the page writable;
 * so we will retry the access.  If we were to call page_unprotect()
 * for some other kind of fault that should really be passed to the
 * guest, we'd end up in an infinite loop of retrying the faulting access.
 */
bool handle_sigsegv_accerr_write(CPUState *cpu, sigset_t *old_set,
                                 uintptr_t host_pc, abi_ptr guest_addr)
{
    switch (page_unprotect(guest_addr, host_pc)) {
    case 0:
        /*
         * Fault not caused by a page marked unwritable to protect
         * cached translations, must be the guest binary's problem.
         */
        return false;
    case 1:
        /*
         * Fault caused by protection of cached translation; TBs
         * invalidated, so resume execution.
         */
        return true;
    case 2:
        /*
         * Fault caused by protection of cached translation, and the
         * currently executing TB was modified and must be exited immediately.
         */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        cpu_loop_exit_noexc(cpu);
        /* NORETURN */
    default:
        g_assert_not_reached();
    }
}
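/*
 * A minimal sketch of how the two functions above are used, modelled on
 * the host SIGSEGV handler in linux-user/signal.c; host_signal_pc(),
 * host_signal_write() and the recovery of guest_addr are host-specific
 * and only assumed here:
 *
 *     pc = host_signal_pc(uc);
 *     is_write = host_signal_write(info, uc);
 *     access_type = adjust_signal_pc(&pc, is_write);
 *
 *     if (info->si_code == SEGV_ACCERR && is_write &&
 *         handle_sigsegv_accerr_write(cpu, &uc->uc_sigmask,
 *                                     pc, guest_addr)) {
 *         return;    (page unprotected, the access will be retried)
 *     }
 *     cpu_loop_exit_sigsegv(cpu, guest_addr, access_type, maperr, pc);
 */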
static int probe_access_internal(CPUArchState *env, target_ulong addr,
                                 int fault_size, MMUAccessType access_type,
                                 bool nonfault, uintptr_t ra)
{
    int acc_flag;
    bool maperr;

    switch (access_type) {
    case MMU_DATA_STORE:
        acc_flag = PAGE_WRITE_ORG;
        break;
    case MMU_DATA_LOAD:
        acc_flag = PAGE_READ;
        break;
    case MMU_INST_FETCH:
        acc_flag = PAGE_EXEC;
        break;
    default:
        g_assert_not_reached();
    }

    if (guest_addr_valid_untagged(addr)) {
        int page_flags = page_get_flags(addr);
        if (page_flags & acc_flag) {
            return 0; /* success */
        }
        maperr = !(page_flags & PAGE_VALID);
    } else {
        maperr = true;
    }

    if (nonfault) {
        return TLB_INVALID_MASK;
    }

    cpu_loop_exit_sigsegv(env_cpu(env), addr, access_type, maperr, ra);
}
int probe_access_flags(CPUArchState *env, target_ulong addr,
                       MMUAccessType access_type, int mmu_idx,
                       bool nonfault, void **phost, uintptr_t ra)
{
    int flags;

    flags = probe_access_internal(env, addr, 0, access_type, nonfault, ra);
    *phost = flags ? NULL : g2h(env_cpu(env), addr);
    return flags;
}
void *probe_access(CPUArchState *env, target_ulong addr, int size,
                   MMUAccessType access_type, int mmu_idx, uintptr_t ra)
{
    int flags;

    g_assert(-(addr | TARGET_PAGE_MASK) >= size);
    flags = probe_access_internal(env, addr, size, access_type, false, ra);
    g_assert(flags == 0);

    return size ? g2h(env_cpu(env), addr) : NULL;
}
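/*
 * A minimal usage sketch (hypothetical caller, not from this file):
 * probe for write access without faulting, then use the returned host
 * pointer directly when the probe succeeds:
 *
 *     void *host;
 *     int flags = probe_access_flags(env, addr, MMU_DATA_STORE,
 *                                    cpu_mmu_index(env, false),
 *                                    true, &host, GETPC());
 *     if (!(flags & TLB_INVALID_MASK)) {
 *         stl_le_p(host, val);
 *     }
 */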
/* The softmmu versions of these helpers are in cputlb.c. */

/*
 * Verify that we have passed the correct MemOp to the correct function.
 *
 * We could present one function to target code, and dispatch based on
 * the MemOp, but so far we have worked hard to avoid an indirect function
 * call along the memory path.
 */
static void validate_memop(MemOpIdx oi, MemOp expected)
{
#ifdef CONFIG_DEBUG_TCG
    MemOp have = get_memop(oi) & (MO_SIZE | MO_BSWAP);
    assert(have == expected);
#endif
}
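/*
 * Illustrative only: callers construct the MemOpIdx that these helpers
 * validate with make_memop_idx(); the mmu index is ignored by the
 * user-only helpers below, so MMU_USER_IDX here is just a placeholder:
 *
 *     MemOpIdx oi = make_memop_idx(MO_BEUL | MO_ALIGN, MMU_USER_IDX);
 *     uint32_t val = cpu_ldl_be_mmu(env, addr, oi, GETPC());
 */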
void helper_unaligned_ld(CPUArchState *env, target_ulong addr)
{
    cpu_loop_exit_sigbus(env_cpu(env), addr, MMU_DATA_LOAD, GETPC());
}

void helper_unaligned_st(CPUArchState *env, target_ulong addr)
{
    cpu_loop_exit_sigbus(env_cpu(env), addr, MMU_DATA_STORE, GETPC());
}
static void *cpu_mmu_lookup(CPUArchState *env, target_ulong addr,
                            MemOpIdx oi, uintptr_t ra, MMUAccessType type)
{
    MemOp mop = get_memop(oi);
    int a_bits = get_alignment_bits(mop);
    void *ret;

    /* Enforce guest required alignment. */
    if (unlikely(addr & ((1 << a_bits) - 1))) {
        cpu_loop_exit_sigbus(env_cpu(env), addr, type, ra);
    }

    ret = g2h(env_cpu(env), addr);
    set_helper_retaddr(ra);
    return ret;
}
uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr addr,
                    MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint8_t ret;

    validate_memop(oi, MO_UB);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
    ret = ldub_p(haddr);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

uint16_t cpu_ldw_be_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint16_t ret;

    validate_memop(oi, MO_BEUW);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
    ret = lduw_be_p(haddr);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

uint32_t cpu_ldl_be_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint32_t ret;

    validate_memop(oi, MO_BEUL);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
    ret = ldl_be_p(haddr);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

uint64_t cpu_ldq_be_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint64_t ret;

    validate_memop(oi, MO_BEUQ);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
    ret = ldq_be_p(haddr);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

uint16_t cpu_ldw_le_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint16_t ret;

    validate_memop(oi, MO_LEUW);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
    ret = lduw_le_p(haddr);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

uint32_t cpu_ldl_le_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint32_t ret;

    validate_memop(oi, MO_LEUL);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
    ret = ldl_le_p(haddr);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

uint64_t cpu_ldq_le_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint64_t ret;

    validate_memop(oi, MO_LEUQ);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
    ret = ldq_le_p(haddr);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}
void cpu_stb_mmu(CPUArchState *env, abi_ptr addr, uint8_t val,
                 MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_UB);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    stb_p(haddr, val);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_stw_be_mmu(CPUArchState *env, abi_ptr addr, uint16_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_BEUW);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    stw_be_p(haddr, val);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_stl_be_mmu(CPUArchState *env, abi_ptr addr, uint32_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_BEUL);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    stl_be_p(haddr, val);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_stq_be_mmu(CPUArchState *env, abi_ptr addr, uint64_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_BEUQ);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    stq_be_p(haddr, val);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_stw_le_mmu(CPUArchState *env, abi_ptr addr, uint16_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_LEUW);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    stw_le_p(haddr, val);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_stl_le_mmu(CPUArchState *env, abi_ptr addr, uint32_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_LEUL);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    stl_le_p(haddr, val);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_stq_le_mmu(CPUArchState *env, abi_ptr addr, uint64_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_LEUQ);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    stq_le_p(haddr, val);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}
/*
 * The sentinel value 1 marks a translation-time read ("execution");
 * see the MMU_INST_FETCH case in adjust_signal_pc().
 */
uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr ptr)
{
    uint32_t ret;

    set_helper_retaddr(1);
    ret = ldub_p(g2h_untagged(ptr));
    clear_helper_retaddr();
    return ret;
}

uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr ptr)
{
    uint32_t ret;

    set_helper_retaddr(1);
    ret = lduw_p(g2h_untagged(ptr));
    clear_helper_retaddr();
    return ret;
}

uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr ptr)
{
    uint32_t ret;

    set_helper_retaddr(1);
    ret = ldl_p(g2h_untagged(ptr));
    clear_helper_retaddr();
    return ret;
}

uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr ptr)
{
    uint64_t ret;

    set_helper_retaddr(1);
    ret = ldq_p(g2h_untagged(ptr));
    clear_helper_retaddr();
    return ret;
}
#include "ldst_common.c.inc"

/*
 * Do not allow unaligned operations to proceed.  Return the host address.
 *
 * @prot may be PAGE_READ, PAGE_WRITE, or PAGE_READ|PAGE_WRITE.
 */
static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
                               MemOpIdx oi, int size, int prot,
                               uintptr_t retaddr)
{
    MemOp mop = get_memop(oi);
    int a_bits = get_alignment_bits(mop);
    void *ret;

    /* Enforce guest required alignment. */
    if (unlikely(addr & ((1 << a_bits) - 1))) {
        MMUAccessType t = prot == PAGE_READ ? MMU_DATA_LOAD : MMU_DATA_STORE;
        cpu_loop_exit_sigbus(env_cpu(env), addr, t, retaddr);
    }

    /* Enforce qemu required alignment. */
    if (unlikely(addr & (size - 1))) {
        cpu_loop_exit_atomic(env_cpu(env), retaddr);
    }

    ret = g2h(env_cpu(env), addr);
    set_helper_retaddr(retaddr);
    return ret;
}
#include "atomic_common.c.inc"

/*
 * First set of functions passes in OI and RETADDR.
 * This makes them callable from other helpers.
 */
#define ATOMIC_NAME(X) \
    glue(glue(glue(cpu_atomic_ ## X, SUFFIX), END), _mmu)
#define ATOMIC_MMU_CLEANUP do { clear_helper_retaddr(); } while (0)

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif

#if HAVE_ATOMIC128 || HAVE_CMPXCHG128
#define DATA_SIZE 16
#include "atomic_template.h"
#endif
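/*
 * For reference: with DATA_SIZE 4 the template inclusion above generates,
 * among others, cpu_atomic_cmpxchgl_le_mmu() and cpu_atomic_cmpxchgl_be_mmu(),
 * each of which calls atomic_mmu_lookup() for the alignment checks and
 * ATOMIC_MMU_CLEANUP to clear helper_retaddr.  A typical caller looks
 * roughly like:
 *
 *     uint32_t old = cpu_atomic_cmpxchgl_le_mmu(env, addr, cmpv, newv,
 *                                               oi, GETPC());
 */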