include/exec/exec-all: document common exit conditions
[qemu/kevin.git] / include/exec/exec-all.h
/*
 * internal execution defines for qemu
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef EXEC_ALL_H
#define EXEC_ALL_H

#include "qemu-common.h"
#include "exec/tb-context.h"

/* allow to see translation results - the slowdown should be negligible, so we leave it */
#define DEBUG_DISAS

/* Page tracking code uses ram addresses in system mode, and virtual
   addresses in userspace mode.  Define tb_page_addr_t to be an appropriate
   type. */
#if defined(CONFIG_USER_ONLY)
typedef abi_ulong tb_page_addr_t;
#else
typedef ram_addr_t tb_page_addr_t;
#endif

/* DisasContext is_jmp field values
 *
 * is_jmp starts as DISAS_NEXT. The translator will keep processing
 * instructions until an exit condition is reached. If we reach the
 * exit condition and is_jmp is still DISAS_NEXT (because of some
 * other condition) we simply "jump" to the next address.
 * The remaining exit cases are:
 *
 *   DISAS_JUMP    - Only the PC was modified dynamically (e.g. computed)
 *   DISAS_TB_JUMP - Only the PC was modified statically (e.g. branch)
 *
 * In these cases as long as the PC is updated we can chain to the
 * next TB either by exiting the loop or looking up the next TB via
 * the lookup helper.
 *
 *   DISAS_UPDATE  - CPU state was modified dynamically
 *
 * This covers any other CPU state which necessitates exiting the
 * TCG code to the main run-loop. Typically this includes anything
 * that might change the interrupt state.
 *
 * Individual translators may define additional exit cases to deal
 * with per-target special conditions.
 */
#define DISAS_NEXT    0 /* next instruction can be analyzed */
#define DISAS_JUMP    1 /* only pc was modified dynamically */
#define DISAS_TB_JUMP 2 /* only pc was modified statically */
#define DISAS_UPDATE  3 /* cpu state was modified dynamically */
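
/* Illustrative sketch (not part of this header): roughly how a target's
 * translator loop drives is_jmp. The DisasContext layout and the
 * translate_one()/end_of_page() helpers are hypothetical, per-target names
 * used only for this example.
 *
 *     dc->is_jmp = DISAS_NEXT;
 *     do {
 *         translate_one(dc);                  // may set dc->is_jmp
 *     } while (dc->is_jmp == DISAS_NEXT && !end_of_page(dc));
 *
 *     switch (dc->is_jmp) {
 *     case DISAS_NEXT:     // no exit condition hit: jump to the next insn
 *     case DISAS_TB_JUMP:  // PC changed statically: chain directly to next TB
 *     case DISAS_JUMP:     // PC changed dynamically: look up the next TB
 *     case DISAS_UPDATE:   // other CPU state changed: return to the run-loop
 *         break;
 *     }
 */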

#include "qemu/log.h"

void gen_intermediate_code(CPUArchState *env, struct TranslationBlock *tb);
void restore_state_to_opc(CPUArchState *env, struct TranslationBlock *tb,
                          target_ulong *data);

void cpu_gen_init(void);
bool cpu_restore_state(CPUState *cpu, uintptr_t searched_pc);

void QEMU_NORETURN cpu_loop_exit_noexc(CPUState *cpu);
void QEMU_NORETURN cpu_io_recompile(CPUState *cpu, uintptr_t retaddr);
TranslationBlock *tb_gen_code(CPUState *cpu,
                              target_ulong pc, target_ulong cs_base,
                              uint32_t flags,
                              int cflags);

void QEMU_NORETURN cpu_loop_exit(CPUState *cpu);
void QEMU_NORETURN cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc);
void QEMU_NORETURN cpu_loop_exit_atomic(CPUState *cpu, uintptr_t pc);

#if !defined(CONFIG_USER_ONLY)
void cpu_reloading_memory_map(void);
/**
 * cpu_address_space_init:
 * @cpu: CPU to add this address space to
 * @as: address space to add
 * @asidx: integer index of this address space
 *
 * Add the specified address space to the CPU's cpu_ases list.
 * The address space added with @asidx 0 is the one used for the
 * convenience pointer cpu->as.
 * The target-specific code which registers ASes is responsible
 * for defining what semantics address space 0, 1, 2, etc have.
 *
 * Before the first call to this function, the caller must set
 * cpu->num_ases to the total number of address spaces it needs
 * to support.
 *
 * Note that with KVM only one address space is supported.
 */
void cpu_address_space_init(CPUState *cpu, AddressSpace *as, int asidx);
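
/* Illustrative sketch (not part of this header): a hypothetical target with a
 * secure and a non-secure memory view might register its address spaces at
 * realize time as below. The AddressSpace variables are invented for the
 * example; asidx 0 becomes the convenience pointer cpu->as.
 *
 *     cpu->num_ases = 2;                          // set before the first call
 *     cpu_address_space_init(cpu, secure_as, 0);
 *     cpu_address_space_init(cpu, nonsecure_as, 1);
 */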
#endif

#if !defined(CONFIG_USER_ONLY) && defined(CONFIG_TCG)
/* cputlb.c */
/**
 * tlb_flush_page:
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of the specified CPU, for all
 * MMU indexes.
 */
void tlb_flush_page(CPUState *cpu, target_ulong addr);
/**
 * tlb_flush_page_all_cpus:
 * @src: src CPU of the flush
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of all CPUs, for all
 * MMU indexes.
 */
void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr);
/**
 * tlb_flush_page_all_cpus_synced:
 * @src: src CPU of the flush
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of all CPUs, for all MMU indexes, like
 * tlb_flush_page_all_cpus except that the source vCPU's work is
 * scheduled as safe work, meaning all flushes will be complete once
 * the source vCPU's safe work is complete. This will depend on when
 * the guest's translation of the current TB ends.
 */
void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr);
/**
 * tlb_flush:
 * @cpu: CPU whose TLB should be flushed
 *
 * Flush the entire TLB for the specified CPU. Most CPU architectures
 * allow the implementation to drop entries from the TLB at any time
 * so this is generally safe. If more selective flushing is required
 * use one of the other functions for efficiency.
 */
void tlb_flush(CPUState *cpu);
/**
 * tlb_flush_all_cpus:
 * @src_cpu: src CPU of the flush
 *
 * Flush the entire TLB for all CPUs.
 */
void tlb_flush_all_cpus(CPUState *src_cpu);
/**
 * tlb_flush_all_cpus_synced:
 * @src_cpu: src CPU of the flush
 *
 * Like tlb_flush_all_cpus except that the source vCPU's work is
 * scheduled as safe work, meaning all flushes will be complete once
 * the source vCPU's safe work is complete. This will depend on when
 * the guest's translation of the current TB ends.
 */
void tlb_flush_all_cpus_synced(CPUState *src_cpu);
/**
 * tlb_flush_page_by_mmuidx:
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of the specified CPU, for the specified
 * MMU indexes.
 */
void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr,
                              uint16_t idxmap);
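
/* Illustrative sketch (not part of this header): @idxmap has one bit per MMU
 * index. A target flushing a page from, say, its kernel and user TLBs only
 * could do the following; MMU_KERNEL_IDX and MMU_USER_IDX are hypothetical,
 * target-specific index names.
 *
 *     tlb_flush_page_by_mmuidx(cs, addr,
 *                              (1 << MMU_KERNEL_IDX) | (1 << MMU_USER_IDX));
 */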
/**
 * tlb_flush_page_by_mmuidx_all_cpus:
 * @cpu: Originating CPU of the flush
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of all CPUs, for the specified
 * MMU indexes.
 */
void tlb_flush_page_by_mmuidx_all_cpus(CPUState *cpu, target_ulong addr,
                                       uint16_t idxmap);
/**
 * tlb_flush_page_by_mmuidx_all_cpus_synced:
 * @cpu: Originating CPU of the flush
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of all CPUs, for the specified MMU
 * indexes, like tlb_flush_page_by_mmuidx_all_cpus except that the
 * source vCPU's work is scheduled as safe work, meaning all flushes
 * will be complete once the source vCPU's safe work is complete. This
 * will depend on when the guest's translation of the current TB ends.
 */
void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu, target_ulong addr,
                                              uint16_t idxmap);
/**
 * tlb_flush_by_mmuidx:
 * @cpu: CPU whose TLB should be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from the TLB of the specified CPU, for the specified
 * MMU indexes.
 */
void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap);
/**
 * tlb_flush_by_mmuidx_all_cpus:
 * @cpu: Originating CPU of the flush
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from all TLBs of all CPUs, for the specified
 * MMU indexes.
 */
void tlb_flush_by_mmuidx_all_cpus(CPUState *cpu, uint16_t idxmap);
/**
 * tlb_flush_by_mmuidx_all_cpus_synced:
 * @cpu: Originating CPU of the flush
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from all TLBs of all CPUs, for the specified
 * MMU indexes, like tlb_flush_by_mmuidx_all_cpus except that the
 * source vCPU's work is scheduled as safe work, meaning all flushes
 * will be complete once the source vCPU's safe work is complete. This
 * will depend on when the guest's translation of the current TB ends.
 */
void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu, uint16_t idxmap);
/**
 * tlb_set_page_with_attrs:
 * @cpu: CPU to add this TLB entry for
 * @vaddr: virtual address of page to add entry for
 * @paddr: physical address of the page
 * @attrs: memory transaction attributes
 * @prot: access permissions (PAGE_READ/PAGE_WRITE/PAGE_EXEC bits)
 * @mmu_idx: MMU index to insert TLB entry for
 * @size: size of the page in bytes
 *
 * Add an entry to this CPU's TLB (a mapping from virtual address
 * @vaddr to physical address @paddr) with the specified memory
 * transaction attributes. This is generally called by the target CPU
 * specific code after it has been called through the tlb_fill()
 * entry point and performed a successful page table walk to find
 * the physical address and attributes for the virtual address
 * which provoked the TLB miss.
 *
 * At most one entry for a given virtual address is permitted. Only a
 * single TARGET_PAGE_SIZE region is mapped; the supplied @size is only
 * used by tlb_flush_page.
 */
void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
                             hwaddr paddr, MemTxAttrs attrs,
                             int prot, int mmu_idx, target_ulong size);
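
/* Illustrative sketch (not part of this header): the general shape of a
 * target's tlb_fill() that walks the guest page table and installs the
 * result. my_page_walk() and its outputs are hypothetical; a real target
 * raises its own guest exception on a failed walk.
 *
 *     void tlb_fill(CPUState *cs, target_ulong addr, MMUAccessType access_type,
 *                   int mmu_idx, uintptr_t retaddr)
 *     {
 *         hwaddr paddr;
 *         int prot;
 *         MemTxAttrs attrs = {};
 *
 *         if (!my_page_walk(cs, addr, access_type, mmu_idx,
 *                           &paddr, &prot, &attrs)) {
 *             // set up the guest fault, then unwind; does not return
 *             cpu_loop_exit_restore(cs, retaddr);
 *         }
 *         tlb_set_page_with_attrs(cs, addr & TARGET_PAGE_MASK, paddr, attrs,
 *                                 prot, mmu_idx, TARGET_PAGE_SIZE);
 *     }
 */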
/* tlb_set_page:
 *
 * This function is equivalent to calling tlb_set_page_with_attrs()
 * with an @attrs argument of MEMTXATTRS_UNSPECIFIED. It's provided
 * as a convenience for CPUs which don't use memory transaction attributes.
 */
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size);
void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr);
void probe_write(CPUArchState *env, target_ulong addr, int mmu_idx,
                 uintptr_t retaddr);
#else
static inline void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
}
static inline void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr)
{
}
static inline void tlb_flush_page_all_cpus_synced(CPUState *src,
                                                  target_ulong addr)
{
}
static inline void tlb_flush(CPUState *cpu)
{
}
static inline void tlb_flush_all_cpus(CPUState *src_cpu)
{
}
static inline void tlb_flush_all_cpus_synced(CPUState *src_cpu)
{
}
static inline void tlb_flush_page_by_mmuidx(CPUState *cpu,
                                            target_ulong addr, uint16_t idxmap)
{
}
static inline void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
{
}
static inline void tlb_flush_page_by_mmuidx_all_cpus(CPUState *cpu,
                                                     target_ulong addr,
                                                     uint16_t idxmap)
{
}
static inline void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                                            target_ulong addr,
                                                            uint16_t idxmap)
{
}
static inline void tlb_flush_by_mmuidx_all_cpus(CPUState *cpu, uint16_t idxmap)
{
}
static inline void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                                       uint16_t idxmap)
{
}
static inline void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr)
{
}
#endif

#define CODE_GEN_ALIGN           16 /* must be >= the size of an icache line */

/* Estimated block size for TB allocation.  */
/* ??? The following is based on a 2015 survey of x86_64 host output.
   Better would seem to be some sort of dynamically sized TB array,
   adapting to the block sizes actually being produced.  */
#if defined(CONFIG_SOFTMMU)
#define CODE_GEN_AVG_BLOCK_SIZE 400
#else
#define CODE_GEN_AVG_BLOCK_SIZE 150
#endif

#if defined(_ARCH_PPC) \
    || defined(__x86_64__) || defined(__i386__) \
    || defined(__sparc__) || defined(__aarch64__) \
    || defined(__s390x__) || defined(__mips__) \
    || defined(CONFIG_TCG_INTERPRETER)
/* NOTE: Direct jump patching must be atomic to be thread-safe. */
#define USE_DIRECT_JUMP
#endif

struct TranslationBlock {
    target_ulong pc;   /* simulated PC corresponding to this block (EIP + CS base) */
    target_ulong cs_base; /* CS base for this block */
    uint32_t flags; /* flags defining in which context the code was generated */
    uint16_t size;      /* size of target code for this block (1 <=
                           size <= TARGET_PAGE_SIZE) */
    uint16_t icount;
    uint32_t cflags;    /* compile flags */
#define CF_COUNT_MASK  0x7fff
#define CF_LAST_IO     0x8000 /* Last insn may be an IO access.  */
#define CF_NOCACHE     0x10000 /* To be freed after execution */
#define CF_USE_ICOUNT  0x20000
#define CF_IGNORE_ICOUNT 0x40000 /* Do not generate icount code */

    uint16_t invalid;

    void *tc_ptr;    /* pointer to the translated code */
    uint8_t *tc_search;  /* pointer to search data */
    /* original tb when cflags has CF_NOCACHE */
    struct TranslationBlock *orig_tb;
    /* first and second physical page containing code. The lower bit
       of the pointer tells the index in page_next[] */
    struct TranslationBlock *page_next[2];
    tb_page_addr_t page_addr[2];

    /* The following data are used to directly call another TB from
     * the code of this one. This can be done either by emitting direct or
     * indirect native jump instructions. These jumps are reset so that the TB
     * just continues its execution. The TB can be linked to another one by
     * setting one of the jump targets (or patching the jump instruction). Only
     * two of such jumps are supported.
     */
    uint16_t jmp_reset_offset[2]; /* offset of original jump target */
#define TB_JMP_RESET_OFFSET_INVALID 0xffff /* indicates no jump generated */
#ifdef USE_DIRECT_JUMP
    uint16_t jmp_insn_offset[2]; /* offset of native jump instruction */
#else
    uintptr_t jmp_target_addr[2]; /* target address for indirect jump */
#endif
    /* Each TB has an associated circular list of TBs jumping to this one.
     * jmp_list_first points to the first TB jumping to this one.
     * jmp_list_next is used to point to the next TB in a list.
     * Since each TB can have two jumps, it can participate in two lists.
     * jmp_list_first and jmp_list_next are 4-byte aligned pointers to a
     * TranslationBlock structure, but the two least significant bits of
     * them are used to encode which data field of the pointed TB should
     * be used to traverse the list further from that TB:
     * 0 => jmp_list_next[0], 1 => jmp_list_next[1], 2 => jmp_list_first.
     * In other words, 0/1 tells which jump is used in the pointed TB,
     * and 2 means that this is a pointer back to the target TB of this list.
     */
    uintptr_t jmp_list_next[2];
    uintptr_t jmp_list_first;
};
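
/* Illustrative sketch (not part of this header): walking the circular jump
 * list described above means masking off the two tag bits at each step,
 * which is roughly what the TB unlinking code does.
 *
 *     uintptr_t ptr_and_tag = tb->jmp_list_first;
 *     TranslationBlock *next = (TranslationBlock *)(ptr_and_tag & ~(uintptr_t)3);
 *     unsigned tag = ptr_and_tag & 3;
 *     // tag 0 or 1: continue via next->jmp_list_next[tag]
 *     // tag 2: we are back at the target TB of the list, stop
 */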

void tb_free(TranslationBlock *tb);
void tb_flush(CPUState *cpu);
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);
TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
                                   target_ulong cs_base, uint32_t flags);

#if defined(USE_DIRECT_JUMP)

#if defined(CONFIG_TCG_INTERPRETER)
static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
    /* patch the branch destination */
    atomic_set((int32_t *)jmp_addr, addr - (jmp_addr + 4));
    /* no need to flush icache explicitly */
}
#elif defined(_ARCH_PPC)
void ppc_tb_set_jmp_target(uintptr_t jmp_addr, uintptr_t addr);
#define tb_set_jmp_target1 ppc_tb_set_jmp_target
#elif defined(__i386__) || defined(__x86_64__)
static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
    /* patch the branch destination */
    atomic_set((int32_t *)jmp_addr, addr - (jmp_addr + 4));
    /* no need to flush icache explicitly */
}
#elif defined(__s390x__)
static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
    /* patch the branch destination */
    intptr_t disp = addr - (jmp_addr - 2);
    atomic_set((int32_t *)jmp_addr, disp / 2);
    /* no need to flush icache explicitly */
}
#elif defined(__aarch64__)
void aarch64_tb_set_jmp_target(uintptr_t jmp_addr, uintptr_t addr);
#define tb_set_jmp_target1 aarch64_tb_set_jmp_target
#elif defined(__sparc__) || defined(__mips__)
void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr);
#else
#error tb_set_jmp_target1 is missing
#endif

static inline void tb_set_jmp_target(TranslationBlock *tb,
                                     int n, uintptr_t addr)
{
    uint16_t offset = tb->jmp_insn_offset[n];
    tb_set_jmp_target1((uintptr_t)(tb->tc_ptr + offset), addr);
}

#else

/* set the jump target */
static inline void tb_set_jmp_target(TranslationBlock *tb,
                                     int n, uintptr_t addr)
{
    tb->jmp_target_addr[n] = addr;
}

#endif

/* Called with tb_lock held.  */
static inline void tb_add_jump(TranslationBlock *tb, int n,
                               TranslationBlock *tb_next)
{
    assert(n < ARRAY_SIZE(tb->jmp_list_next));
    if (tb->jmp_list_next[n]) {
        /* Another thread has already done this while we were
         * outside of the lock; nothing to do in this case */
        return;
    }
    qemu_log_mask_and_addr(CPU_LOG_EXEC, tb->pc,
                           "Linking TBs %p [" TARGET_FMT_lx
                           "] index %d -> %p [" TARGET_FMT_lx "]\n",
                           tb->tc_ptr, tb->pc, n,
                           tb_next->tc_ptr, tb_next->pc);

    /* patch the native jump address */
    tb_set_jmp_target(tb, n, (uintptr_t)tb_next->tc_ptr);

    /* add in TB jmp circular list */
    tb->jmp_list_next[n] = tb_next->jmp_list_first;
    tb_next->jmp_list_first = (uintptr_t)tb | n;
}

/* GETPC is the true target of the return instruction that we'll execute.  */
#if defined(CONFIG_TCG_INTERPRETER)
extern uintptr_t tci_tb_ptr;
# define GETPC() tci_tb_ptr
#else
# define GETPC() \
    ((uintptr_t)__builtin_extract_return_addr(__builtin_return_address(0)))
#endif

/* The true return address will often point to a host insn that is part of
   the next translated guest insn.  Adjust the address backward to point to
   the middle of the call insn.  Subtracting one would do the job except for
   several compressed mode architectures (arm, mips) which set the low bit
   to indicate the compressed mode; subtracting two works around that.  It
   is also the case that there are no host ISAs that contain a call insn
   smaller than 4 bytes, so we don't worry about special-casing this.  */
#define GETPC_ADJ   2
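
/* Illustrative sketch (not part of this header): a TCG helper that may fault
 * captures GETPC() once on entry so guest state can be restored to the
 * faulting instruction. helper_my_check() and my_access_would_fault() are
 * hypothetical names; ENV_GET_CPU() is assumed available from the CPU headers.
 *
 *     void helper_my_check(CPUArchState *env, target_ulong addr)
 *     {
 *         uintptr_t ra = GETPC();     // must be taken in the outermost helper
 *         if (my_access_would_fault(env, addr)) {
 *             // raise the guest exception and unwind; does not return
 *             cpu_loop_exit_restore(ENV_GET_CPU(env), ra);
 *         }
 *     }
 */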

void tb_lock(void);
void tb_unlock(void);
void tb_lock_reset(void);

#if !defined(CONFIG_USER_ONLY)

struct MemoryRegion *iotlb_to_region(CPUState *cpu,
                                     hwaddr index, MemTxAttrs attrs);

void tlb_fill(CPUState *cpu, target_ulong addr, MMUAccessType access_type,
              int mmu_idx, uintptr_t retaddr);

#endif

#if defined(CONFIG_USER_ONLY)
void mmap_lock(void);
void mmap_unlock(void);
bool have_mmap_lock(void);

static inline tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
{
    return addr;
}
#else
static inline void mmap_lock(void) {}
static inline void mmap_unlock(void) {}

/* cputlb.c */
tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr);

void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length);
void tlb_set_dirty(CPUState *cpu, target_ulong vaddr);

/* exec.c */
void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr);

MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen);
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address);
bool memory_region_is_unassigned(MemoryRegion *mr);

#endif

/* vl.c */
extern int singlestep;

#endif