/*
 * Internal execution defines for qemu (target specific)
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * SPDX-License-Identifier: LGPL-2.1-or-later
 */

#ifndef ACCEL_TCG_INTERNAL_TARGET_H
#define ACCEL_TCG_INTERNAL_TARGET_H

#include "exec/exec-all.h"
#include "exec/translate-all.h"

/*
 * Access to the various translation structures needs to be serialised
 * via locks for consistency.  In user-mode emulation, access to the
 * memory-related structures is protected with mmap_lock.
 * In !user-mode we use per-page locks.
 */
#ifdef CONFIG_USER_ONLY
#define assert_memory_lock() tcg_debug_assert(have_mmap_lock())
#else
#define assert_memory_lock()
#endif
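
/*
 * Illustrative sketch, not part of this header: a hypothetical writer to
 * the translation structures would assert the locking convention described
 * above before touching them.  In user mode this checks mmap_lock; in
 * system mode the macro expands to nothing, since per-page locks are used
 * instead.
 *
 *     assert_memory_lock();
 *     ... modify translation structures ...
 */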

#if defined(CONFIG_SOFTMMU) && defined(CONFIG_DEBUG_TCG)
void assert_no_pages_locked(void);
#else
static inline void assert_no_pages_locked(void) { }
#endif

#ifdef CONFIG_USER_ONLY
static inline void page_table_config_init(void) { }
#else
void page_table_config_init(void);
#endif

#ifdef CONFIG_USER_ONLY
/*
 * For user-only, page_protect sets the page read-only.
 * Since most execution is already on read-only pages, and we'd need to
 * account for other TBs on the same page, defer undoing any page protection
 * until we receive the write fault.
 */
static inline void tb_lock_page0(tb_page_addr_t p0)
{
    page_protect(p0);
}

static inline void tb_lock_page1(tb_page_addr_t p0, tb_page_addr_t p1)
{
    page_protect(p1);
}

static inline void tb_unlock_page1(tb_page_addr_t p0, tb_page_addr_t p1) { }
static inline void tb_unlock_pages(TranslationBlock *tb) { }
#else
void tb_lock_page0(tb_page_addr_t);
void tb_lock_page1(tb_page_addr_t, tb_page_addr_t);
void tb_unlock_page1(tb_page_addr_t, tb_page_addr_t);
void tb_unlock_pages(TranslationBlock *);
#endif
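
/*
 * Illustrative pairing, a sketch rather than code from this header:
 * a caller generating a TB that may span two guest pages would lock the
 * pages before emitting code and release them once the TB is linked.
 * phys_pc and phys_page2 are hypothetical names for the two page addresses.
 *
 *     tb_lock_page0(phys_pc);
 *     tb_lock_page1(phys_pc, phys_page2);   // only if the TB crosses a page
 *     ... translate, then tb_link_page(tb) ...
 *     tb_unlock_pages(tb);                  // drop any locks still held
 */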

#ifdef CONFIG_SOFTMMU
void tb_invalidate_phys_range_fast(ram_addr_t ram_addr,
                                   unsigned size,
                                   uintptr_t retaddr);
G_NORETURN void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr);
#endif /* CONFIG_SOFTMMU */

TranslationBlock *tb_gen_code(CPUState *cpu, vaddr pc,
                              uint64_t cs_base, uint32_t flags,
                              int cflags);
void tb_htable_init(void);
void tb_reset_jump(TranslationBlock *tb, int n);
TranslationBlock *tb_link_page(TranslationBlock *tb);
bool tb_invalidate_phys_page_unwind(tb_page_addr_t addr, uintptr_t pc);
void cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
                               uintptr_t host_pc);

bool tcg_exec_realizefn(CPUState *cpu, Error **errp);
void tcg_exec_unrealizefn(CPUState *cpu);

/* Return the current PC from CPU, which may be cached in TB. */
static inline vaddr log_pc(CPUState *cpu, const TranslationBlock *tb)
{
    if (tb_cflags(tb) & CF_PCREL) {
        return cpu->cc->get_pc(cpu);
    } else {
        return tb->pc;
    }
}

extern bool one_insn_per_tb;

/**
 * tcg_req_mo:
 * @type: TCGBar
 *
 * Filter @type to the barrier that is required for the guest
 * memory ordering vs the host memory ordering.  A non-zero
 * result indicates that some barrier is required.
 *
 * If TCG_GUEST_DEFAULT_MO is not defined, assume that the
 * guest requires strict ordering.
 *
 * This is a macro so that it's constant even without optimization.
 */
#ifdef TCG_GUEST_DEFAULT_MO
# define tcg_req_mo(type) \
    ((type) & TCG_GUEST_DEFAULT_MO & ~TCG_TARGET_DEFAULT_MO)
#else
# define tcg_req_mo(type) ((type) & ~TCG_TARGET_DEFAULT_MO)
#endif
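
/*
 * Worked example, illustrative only: for an x86 guest, whose cpu.h is
 * believed to define TCG_GUEST_DEFAULT_MO as (TCG_MO_ALL & ~TCG_MO_ST_LD),
 * running on a weakly ordered host backend with TCG_TARGET_DEFAULT_MO == 0,
 * tcg_req_mo(TCG_MO_LD_LD) is non-zero, so a barrier must be emitted.
 * On an x86 host, whose backend default already covers that ordering,
 * the same expression evaluates to 0 and no barrier is needed.
 */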

/**
 * cpu_req_mo:
 * @type: TCGBar
 *
 * If tcg_req_mo indicates a barrier for @type is required
 * for the guest memory model, issue a host memory barrier.
 */
#define cpu_req_mo(type)          \
    do {                          \
        if (tcg_req_mo(type)) {   \
            smp_mb();             \
        }                         \
    } while (0)
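
/*
 * Illustrative use, a sketch under the assumption of a memory-access fast
 * path similar to the softmmu load helpers: the barrier is requested before
 * the guest access is performed.
 *
 *     cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);   // ordering required
 *                                                // before a guest load
 *     ... perform the guest load ...
 */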

#endif /* ACCEL_TCG_INTERNAL_TARGET_H */