/*
 * internal execution defines for qemu
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef EXEC_ALL_H
#define EXEC_ALL_H

#include "qemu-common.h"
#include "exec/tb-context.h"

/* Allow inspection of translation results; the slowdown should be
   negligible, so we leave it enabled. */
#define DEBUG_DISAS

/* Page tracking code uses ram addresses in system mode, and virtual
   addresses in userspace mode.  Define tb_page_addr_t to be an appropriate
   type. */
#if defined(CONFIG_USER_ONLY)
typedef abi_ulong tb_page_addr_t;
#else
typedef ram_addr_t tb_page_addr_t;
#endif

/* is_jmp field values */
#define DISAS_NEXT    0 /* next instruction can be analyzed */
#define DISAS_JUMP    1 /* only pc was modified dynamically */
#define DISAS_UPDATE  2 /* cpu state was modified dynamically */
#define DISAS_TB_JUMP 3 /* only pc was modified statically */
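
/*
 * Illustrative sketch only ("ctx" and its "is_jmp" field stand in for a
 * target's DisasContext, which this header does not define): a target
 * translator records why translation stopped, e.g.
 *
 *     ctx->is_jmp = DISAS_UPDATE;    (CPU state changed; return to cpu_exec)
 *
 * so that direct block chaining is only attempted when the next pc is
 * statically known (DISAS_TB_JUMP).
 */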

#include "qemu/log.h"

void gen_intermediate_code(CPUArchState *env, struct TranslationBlock *tb);
void restore_state_to_opc(CPUArchState *env, struct TranslationBlock *tb,
                          target_ulong *data);

void cpu_gen_init(void);
bool cpu_restore_state(CPUState *cpu, uintptr_t searched_pc);

void QEMU_NORETURN cpu_loop_exit_noexc(CPUState *cpu);
void QEMU_NORETURN cpu_io_recompile(CPUState *cpu, uintptr_t retaddr);
TranslationBlock *tb_gen_code(CPUState *cpu,
                              target_ulong pc, target_ulong cs_base,
                              uint32_t flags,
                              int cflags);
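
/*
 * A hedged usage sketch (mirroring cpu_exec_nocache() in cpu-exec.c): an
 * instruction budget is OR-ed into the low CF_COUNT_MASK bits of @cflags,
 * alongside the CF_* flags defined with struct TranslationBlock below:
 *
 *     tb = tb_gen_code(cpu, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
 *                      max_cycles | CF_NOCACHE
 *                          | (ignore_icount ? CF_IGNORE_ICOUNT : 0));
 */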

void QEMU_NORETURN cpu_loop_exit(CPUState *cpu);
void QEMU_NORETURN cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc);
void QEMU_NORETURN cpu_loop_exit_atomic(CPUState *cpu, uintptr_t pc);
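
/*
 * A hedged sketch of the usual idiom in target helper code (EXCP_FOO is a
 * hypothetical exception number): record the exception, then longjmp back
 * to the main loop; the _restore variant first rewinds guest state to the
 * faulting instruction using the host return address:
 *
 *     cs->exception_index = EXCP_FOO;
 *     cpu_loop_exit_restore(cs, GETPC());     (GETPC is defined below)
 */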

#if !defined(CONFIG_USER_ONLY)
void cpu_reloading_memory_map(void);
/**
 * cpu_address_space_init:
 * @cpu: CPU to add this address space to
 * @as: address space to add
 * @asidx: integer index of this address space
 *
 * Add the specified address space to the CPU's cpu_ases list.
 * The address space added with @asidx 0 is the one used for the
 * convenience pointer cpu->as.
 * The target-specific code which registers ASes is responsible
 * for defining what semantics address space 0, 1, 2, etc have.
 *
 * Before the first call to this function, the caller must set
 * cpu->num_ases to the total number of address spaces it needs
 * to support.
 *
 * Note that with KVM only one address space is supported.
 */
void cpu_address_space_init(CPUState *cpu, AddressSpace *as, int asidx);
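
/*
 * A minimal sketch of the registration sequence described above, as a
 * target's realize code might perform it (the AddressSpace names are
 * placeholders):
 *
 *     cs->num_ases = 2;
 *     cpu_address_space_init(cs, as_secure, 0);      (index 0 -> cpu->as)
 *     cpu_address_space_init(cs, as_nonsecure, 1);
 */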
/* cputlb.c */
/**
 * tlb_flush_page:
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of the specified CPU, for all
 * MMU indexes.
 */
void tlb_flush_page(CPUState *cpu, target_ulong addr);
/**
 * tlb_flush:
 * @cpu: CPU whose TLB should be flushed
 *
 * Flush the entire TLB for the specified CPU. Most CPU architectures
 * allow the implementation to drop entries from the TLB at any time
 * so this is generally safe. If more selective flushing is required
 * use one of the other functions for efficiency.
 */
void tlb_flush(CPUState *cpu);
/**
 * tlb_flush_page_by_mmuidx:
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of page to be flushed
 * @...: list of MMU indexes to flush, terminated by a negative value
 *
 * Flush one page from the TLB of the specified CPU, for the specified
 * MMU indexes.
 */
void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, ...);
/**
 * tlb_flush_by_mmuidx:
 * @cpu: CPU whose TLB should be flushed
 * @...: list of MMU indexes to flush, terminated by a negative value
 *
 * Flush all entries from the TLB of the specified CPU, for the specified
 * MMU indexes.
 */
void tlb_flush_by_mmuidx(CPUState *cpu, ...);
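
/*
 * A hedged example of the varargs convention shared by the two functions
 * above (MMU_USER_IDX and MMU_KERNEL_IDX stand in for a target's real MMU
 * index names): the index list must be terminated by a negative sentinel:
 *
 *     tlb_flush_page_by_mmuidx(cs, addr, MMU_USER_IDX, MMU_KERNEL_IDX, -1);
 *     tlb_flush_by_mmuidx(cs, MMU_USER_IDX, -1);
 */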
/**
 * tlb_set_page_with_attrs:
 * @cpu: CPU to add this TLB entry for
 * @vaddr: virtual address of page to add entry for
 * @paddr: physical address of the page
 * @attrs: memory transaction attributes
 * @prot: access permissions (PAGE_READ/PAGE_WRITE/PAGE_EXEC bits)
 * @mmu_idx: MMU index to insert TLB entry for
 * @size: size of the page in bytes
 *
 * Add an entry to this CPU's TLB (a mapping from virtual address
 * @vaddr to physical address @paddr) with the specified memory
 * transaction attributes. This is generally called by the target CPU
 * specific code after it has been called through the tlb_fill()
 * entry point and performed a successful page table walk to find
 * the physical address and attributes for the virtual address
 * which provoked the TLB miss.
 *
 * At most one entry for a given virtual address is permitted. Only a
 * single TARGET_PAGE_SIZE region is mapped; the supplied @size is only
 * used by tlb_flush_page.
 */
void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
                             hwaddr paddr, MemTxAttrs attrs,
                             int prot, int mmu_idx, target_ulong size);
/* tlb_set_page:
 *
 * This function is equivalent to calling tlb_set_page_with_attrs()
 * with an @attrs argument of MEMTXATTRS_UNSPECIFIED. It's provided
 * as a convenience for CPUs which don't use memory transaction attributes.
 */
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size);
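
/*
 * A hedged sketch of the tlb_fill() flow described above, assuming a
 * hypothetical page table walker my_mmu_walk() that yields a physical
 * address and PAGE_* protection bits (each target implements this in
 * its own way):
 *
 *     void tlb_fill(CPUState *cs, target_ulong addr, MMUAccessType type,
 *                   int mmu_idx, uintptr_t retaddr)
 *     {
 *         hwaddr phys;
 *         int prot;
 *
 *         if (my_mmu_walk(cs, addr, type, mmu_idx, &phys, &prot) != 0) {
 *             (the walker has set cs->exception_index; deliver the fault)
 *             cpu_loop_exit_restore(cs, retaddr);
 *         }
 *         tlb_set_page(cs, addr & TARGET_PAGE_MASK, phys & TARGET_PAGE_MASK,
 *                      prot, mmu_idx, TARGET_PAGE_SIZE);
 *     }
 */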
void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr);
void probe_write(CPUArchState *env, target_ulong addr, int mmu_idx,
                 uintptr_t retaddr);
#else
static inline void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
}
static inline void tlb_flush(CPUState *cpu)
{
}
static inline void tlb_flush_page_by_mmuidx(CPUState *cpu,
                                            target_ulong addr, ...)
{
}
static inline void tlb_flush_by_mmuidx(CPUState *cpu, ...)
{
}
#endif

#define CODE_GEN_ALIGN           16 /* must be >= the size of an icache line */

/* Estimated block size for TB allocation.  */
/* ??? The following is based on a 2015 survey of x86_64 host output.
   Better would seem to be some sort of dynamically sized TB array,
   adapting to the block sizes actually being produced.  */
#if defined(CONFIG_SOFTMMU)
#define CODE_GEN_AVG_BLOCK_SIZE 400
#else
#define CODE_GEN_AVG_BLOCK_SIZE 150
#endif

#if defined(__arm__) || defined(_ARCH_PPC) \
    || defined(__x86_64__) || defined(__i386__) \
    || defined(__sparc__) || defined(__aarch64__) \
    || defined(__s390x__) || defined(__mips__) \
    || defined(CONFIG_TCG_INTERPRETER)
/* NOTE: Direct jump patching must be atomic to be thread-safe. */
#define USE_DIRECT_JUMP
#endif

struct TranslationBlock {
    target_ulong pc;   /* simulated PC corresponding to this block (EIP + CS base) */
    target_ulong cs_base; /* CS base for this block */
    uint32_t flags; /* flags defining in which context the code was generated */
    uint16_t size;      /* size of target code for this block (1 <=
                           size <= TARGET_PAGE_SIZE) */
    uint16_t icount;
    uint32_t cflags;    /* compile flags */
#define CF_COUNT_MASK  0x7fff
#define CF_LAST_IO     0x8000 /* Last insn may be an IO access.  */
#define CF_NOCACHE     0x10000 /* To be freed after execution */
#define CF_USE_ICOUNT  0x20000
#define CF_IGNORE_ICOUNT 0x40000 /* Do not generate icount code */

    uint16_t invalid;

    void *tc_ptr;    /* pointer to the translated code */
    uint8_t *tc_search;  /* pointer to search data */
    /* original tb when cflags has CF_NOCACHE */
    struct TranslationBlock *orig_tb;
    /* first and second physical page containing code. The lower bit
       of the pointer tells the index in page_next[] */
    struct TranslationBlock *page_next[2];
    tb_page_addr_t page_addr[2];

    /* The following data are used to directly call another TB from
     * the code of this one. This can be done either by emitting direct or
     * indirect native jump instructions. These jumps are reset so that the TB
     * just continues its execution. The TB can be linked to another one by
     * setting one of the jump targets (or patching the jump instruction). Only
     * two such jumps are supported.
     */
    uint16_t jmp_reset_offset[2]; /* offset of original jump target */
#define TB_JMP_RESET_OFFSET_INVALID 0xffff /* indicates no jump generated */
#ifdef USE_DIRECT_JUMP
    uint16_t jmp_insn_offset[2]; /* offset of native jump instruction */
#else
    uintptr_t jmp_target_addr[2]; /* target address for indirect jump */
#endif
    /* Each TB has an associated circular list of TBs jumping to this one.
     * jmp_list_first points to the first TB jumping to this one.
     * jmp_list_next is used to point to the next TB in a list.
     * Since each TB can have two jumps, it can participate in two lists.
     * jmp_list_first and jmp_list_next are 4-byte aligned pointers to a
     * TranslationBlock structure, but the two least significant bits of
     * them are used to encode which data field of the pointed TB should
     * be used to traverse the list further from that TB:
     * 0 => jmp_list_next[0], 1 => jmp_list_next[1], 2 => jmp_list_first.
     * In other words, 0/1 tells which jump is used in the pointed TB,
     * and 2 means that this is a pointer back to the target TB of this list.
     */
    uintptr_t jmp_list_next[2];
    uintptr_t jmp_list_first;
};
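
/*
 * A hedged sketch of decoding the tagged pointers described above (the
 * variable names are illustrative; the real traversal lives in the TB
 * unlinking code):
 *
 *     uintptr_t ptr_and_tag = tb->jmp_list_first;
 *     TranslationBlock *next = (TranslationBlock *)(ptr_and_tag & ~3);
 *     int tag = ptr_and_tag & 3;
 *
 * tag 0 or 1: continue the walk via next->jmp_list_next[tag];
 * tag 2: "next" is the target TB of the list and the walk has wrapped.
 */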

void tb_free(TranslationBlock *tb);
void tb_flush(CPUState *cpu);
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);

#if defined(USE_DIRECT_JUMP)

#if defined(CONFIG_TCG_INTERPRETER)
static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
    /* patch the branch destination */
    atomic_set((int32_t *)jmp_addr, addr - (jmp_addr + 4));
    /* no need to flush icache explicitly */
}
#elif defined(_ARCH_PPC)
void ppc_tb_set_jmp_target(uintptr_t jmp_addr, uintptr_t addr);
#define tb_set_jmp_target1 ppc_tb_set_jmp_target
#elif defined(__i386__) || defined(__x86_64__)
static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
    /* patch the branch destination */
    atomic_set((int32_t *)jmp_addr, addr - (jmp_addr + 4));
    /* no need to flush icache explicitly */
}
#elif defined(__s390x__)
static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
    /* patch the branch destination */
    intptr_t disp = addr - (jmp_addr - 2);
    atomic_set((int32_t *)jmp_addr, disp / 2);
    /* no need to flush icache explicitly */
}
#elif defined(__aarch64__)
void aarch64_tb_set_jmp_target(uintptr_t jmp_addr, uintptr_t addr);
#define tb_set_jmp_target1 aarch64_tb_set_jmp_target
#elif defined(__arm__)
void arm_tb_set_jmp_target(uintptr_t jmp_addr, uintptr_t addr);
#define tb_set_jmp_target1 arm_tb_set_jmp_target
#elif defined(__sparc__) || defined(__mips__)
void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr);
#else
#error tb_set_jmp_target1 is missing
#endif

static inline void tb_set_jmp_target(TranslationBlock *tb,
                                     int n, uintptr_t addr)
{
    uint16_t offset = tb->jmp_insn_offset[n];
    tb_set_jmp_target1((uintptr_t)(tb->tc_ptr + offset), addr);
}

#else

/* set the jump target */
static inline void tb_set_jmp_target(TranslationBlock *tb,
                                     int n, uintptr_t addr)
{
    tb->jmp_target_addr[n] = addr;
}

#endif

/* Called with tb_lock held.  */
static inline void tb_add_jump(TranslationBlock *tb, int n,
                               TranslationBlock *tb_next)
{
    assert(n < ARRAY_SIZE(tb->jmp_list_next));
    if (tb->jmp_list_next[n]) {
        /* Another thread has already done this while we were
         * outside of the lock; nothing to do in this case */
        return;
    }
    qemu_log_mask_and_addr(CPU_LOG_EXEC, tb->pc,
                           "Linking TBs %p [" TARGET_FMT_lx
                           "] index %d -> %p [" TARGET_FMT_lx "]\n",
                           tb->tc_ptr, tb->pc, n,
                           tb_next->tc_ptr, tb_next->pc);

    /* patch the native jump address */
    tb_set_jmp_target(tb, n, (uintptr_t)tb_next->tc_ptr);

    /* add in TB jmp circular list */
    tb->jmp_list_next[n] = tb_next->jmp_list_first;
    tb_next->jmp_list_first = (uintptr_t)tb | n;
}

/* GETPC is the true target of the return instruction that we'll execute.  */
#if defined(CONFIG_TCG_INTERPRETER)
extern uintptr_t tci_tb_ptr;
# define GETPC() tci_tb_ptr
#else
# define GETPC() \
    ((uintptr_t)__builtin_extract_return_addr(__builtin_return_address(0)))
#endif

/* The true return address will often point to a host insn that is part of
   the next translated guest insn.  Adjust the address backward to point to
   the middle of the call insn.  Subtracting one would do the job except for
   several compressed mode architectures (arm, mips) which set the low bit
   to indicate the compressed mode; subtracting two works around that.  It
   is also the case that there are no host ISAs that contain a call insn
   smaller than 4 bytes, so we don't worry about special-casing this.  */
#define GETPC_ADJ   2
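
/*
 * A hedged usage sketch: a helper that may fault captures its host return
 * address on entry, so the fault can later be attributed to the guest
 * instruction that called it (names other than GETPC and
 * cpu_loop_exit_restore are hypothetical):
 *
 *     void helper_check_access(CPUArchState *env, target_ulong addr)
 *     {
 *         uintptr_t ra = GETPC();
 *
 *         if (access_is_bad(env, addr)) {
 *             cpu_loop_exit_restore(ENV_GET_CPU(env), ra);
 *         }
 *     }
 */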

#if !defined(CONFIG_USER_ONLY)

struct MemoryRegion *iotlb_to_region(CPUState *cpu,
                                     hwaddr index, MemTxAttrs attrs);

void tlb_fill(CPUState *cpu, target_ulong addr, MMUAccessType access_type,
              int mmu_idx, uintptr_t retaddr);

#endif

#if defined(CONFIG_USER_ONLY)
void mmap_lock(void);
void mmap_unlock(void);
bool have_mmap_lock(void);

static inline tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
{
    return addr;
}
#else
static inline void mmap_lock(void) {}
static inline void mmap_unlock(void) {}

/* cputlb.c */
tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr);

void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length);
void tlb_set_dirty(CPUState *cpu, target_ulong vaddr);

/* exec.c */
void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr);

MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen);
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address);
bool memory_region_is_unassigned(MemoryRegion *mr);

#endif

/* vl.c */
extern int singlestep;

#endif