/*
 * internal execution defines for qemu
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef EXEC_ALL_H
#define EXEC_ALL_H

#include "qemu-common.h"
#include "exec/tb-context.h"

/* allow to see translation results - the slowdown should be negligible, so we leave it */
#define DEBUG_DISAS

/* Page tracking code uses ram addresses in system mode, and virtual
   addresses in userspace mode.  Define tb_page_addr_t to be an appropriate
   type. */
#if defined(CONFIG_USER_ONLY)
typedef abi_ulong tb_page_addr_t;
#else
typedef ram_addr_t tb_page_addr_t;
#endif
/* DisasContext is_jmp field values
 *
 * is_jmp starts as DISAS_NEXT. The translator will keep processing
 * instructions until an exit condition is reached. If we reach the
 * exit condition and is_jmp is still DISAS_NEXT (because of some
 * other condition) we simply "jump" to the next address.
 * The remaining exit cases are:
 *
 *   DISAS_JUMP    - Only the PC was modified dynamically (e.g. computed)
 *   DISAS_TB_JUMP - Only the PC was modified statically (e.g. branch)
 *
 * In these cases as long as the PC is updated we can chain to the
 * next TB either by exiting the loop or looking up the next TB via
 * the lookup helper.
 *
 *   DISAS_UPDATE  - CPU state was modified dynamically
 *
 * This covers any other CPU state which necessitates us exiting the
 * TCG code to the main run-loop. Typically this includes anything
 * that might change the interrupt state.
 *
 * Individual translators may define additional exit cases to deal
 * with per-target special conditions.
 */
#define DISAS_NEXT    0 /* next instruction can be analyzed */
#define DISAS_JUMP    1 /* only pc was modified dynamically */
#define DISAS_TB_JUMP 2 /* only pc was modified statically */
#define DISAS_UPDATE  3 /* cpu state was modified dynamically */
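
/*
 * Illustrative sketch (not part of the original header): how a target's
 * translate loop typically reacts to these values.  The DisasContext layout
 * and the gen_goto_tb()/translate_one_insn() helpers below are hypothetical
 * placeholders, not a specific target's API.
 *
 *     while (dc->is_jmp == DISAS_NEXT && !end_of_page(dc)) {
 *         translate_one_insn(dc);            // may set dc->is_jmp
 *     }
 *     switch (dc->is_jmp) {
 *     case DISAS_NEXT:    gen_goto_tb(dc, 0, dc->pc); break; // chain to next insn
 *     case DISAS_TB_JUMP: break;             // branch already emitted a goto_tb
 *     case DISAS_JUMP:    gen_lookup_tb(dc);  break;  // PC known only at run time
 *     case DISAS_UPDATE:  tcg_gen_exit_tb(0); break;  // return to the main loop
 *     }
 */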
#include "qemu/log.h"

void gen_intermediate_code(CPUState *cpu, struct TranslationBlock *tb);
void restore_state_to_opc(CPUArchState *env, struct TranslationBlock *tb,
                          target_ulong *data);

void cpu_gen_init(void);
bool cpu_restore_state(CPUState *cpu, uintptr_t searched_pc);

void QEMU_NORETURN cpu_loop_exit_noexc(CPUState *cpu);
void QEMU_NORETURN cpu_io_recompile(CPUState *cpu, uintptr_t retaddr);
TranslationBlock *tb_gen_code(CPUState *cpu,
                              target_ulong pc, target_ulong cs_base,
                              uint32_t flags,
                              int cflags);

void QEMU_NORETURN cpu_loop_exit(CPUState *cpu);
void QEMU_NORETURN cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc);
void QEMU_NORETURN cpu_loop_exit_atomic(CPUState *cpu, uintptr_t pc);
#if !defined(CONFIG_USER_ONLY)
void cpu_reloading_memory_map(void);
/**
 * cpu_address_space_init:
 * @cpu: CPU to add this address space to
 * @as: address space to add
 * @asidx: integer index of this address space
 *
 * Add the specified address space to the CPU's cpu_ases list.
 * The address space added with @asidx 0 is the one used for the
 * convenience pointer cpu->as.
 * The target-specific code which registers ASes is responsible
 * for defining what semantics address space 0, 1, 2, etc have.
 *
 * Before the first call to this function, the caller must set
 * cpu->num_ases to the total number of address spaces it needs
 * to support.
 *
 * Note that with KVM only one address space is supported.
 */
void cpu_address_space_init(CPUState *cpu, AddressSpace *as, int asidx);
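
/*
 * Illustrative sketch (not part of the original header): registering two
 * address spaces from hypothetical target realize code, under the rules in
 * the comment above.  as_main and as_secure are placeholder AddressSpace
 * pointers; the meaning of index 1 is entirely up to the registering target.
 *
 *     cpu->num_ases = 2;                        // must be set before the first call
 *     cpu_address_space_init(cpu, as_main, 0);  // index 0 backs cpu->as
 *     cpu_address_space_init(cpu, as_secure, 1);
 */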
#endif

#if !defined(CONFIG_USER_ONLY) && defined(CONFIG_TCG)
/* cputlb.c */
/**
 * tlb_flush_page:
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of the specified CPU, for all
 * MMU indexes.
 */
void tlb_flush_page(CPUState *cpu, target_ulong addr);
/**
 * tlb_flush_page_all_cpus:
 * @cpu: src CPU of the flush
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of the specified CPU, for all
 * MMU indexes.
 */
void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr);
/**
 * tlb_flush_page_all_cpus_synced:
 * @cpu: src CPU of the flush
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of the specified CPU, for all MMU
 * indexes like tlb_flush_page_all_cpus except the source vCPU's work
 * is scheduled as safe work meaning all flushes will be complete once
 * the source vCPU's safe work is complete. This will depend on when
 * the guest's translation ends the TB.
 */
void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr);
/**
 * tlb_flush:
 * @cpu: CPU whose TLB should be flushed
 *
 * Flush the entire TLB for the specified CPU. Most CPU architectures
 * allow the implementation to drop entries from the TLB at any time
 * so this is generally safe. If more selective flushing is required
 * use one of the other functions for efficiency.
 */
void tlb_flush(CPUState *cpu);
/**
 * tlb_flush_all_cpus:
 * @cpu: src CPU of the flush
 */
void tlb_flush_all_cpus(CPUState *src_cpu);
/**
 * tlb_flush_all_cpus_synced:
 * @cpu: src CPU of the flush
 *
 * Like tlb_flush_all_cpus except the source vCPU's work is
 * scheduled as safe work meaning all flushes will be complete once
 * the source vCPU's safe work is complete. This will depend on when
 * the guest's translation ends the TB.
 */
void tlb_flush_all_cpus_synced(CPUState *src_cpu);
/**
 * tlb_flush_page_by_mmuidx:
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of the specified CPU, for the specified
 * MMU indexes.
 */
void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr,
                              uint16_t idxmap);
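
/*
 * Illustrative sketch (not part of the original header): @idxmap is a
 * bitmap, so a caller flushing, say, MMU indexes 0 and 2 for one guest page
 * would do something like the following (the index values are hypothetical,
 * they are defined per target):
 *
 *     uint16_t idxmap = (1 << 0) | (1 << 2);
 *     tlb_flush_page_by_mmuidx(cpu, addr, idxmap);
 */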
/**
 * tlb_flush_page_by_mmuidx_all_cpus:
 * @cpu: Originating CPU of the flush
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of all CPUs, for the specified
 * MMU indexes.
 */
void tlb_flush_page_by_mmuidx_all_cpus(CPUState *cpu, target_ulong addr,
                                       uint16_t idxmap);
/**
 * tlb_flush_page_by_mmuidx_all_cpus_synced:
 * @cpu: Originating CPU of the flush
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of all CPUs, for the specified MMU
 * indexes like tlb_flush_page_by_mmuidx_all_cpus except the source
 * vCPU's work is scheduled as safe work meaning all flushes will be
 * complete once the source vCPU's safe work is complete. This will
 * depend on when the guest's translation ends the TB.
 */
void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu, target_ulong addr,
                                              uint16_t idxmap);
/**
 * tlb_flush_by_mmuidx:
 * @cpu: CPU whose TLB should be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from the TLB of the specified CPU, for the specified
 * MMU indexes.
 */
void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap);
/**
 * tlb_flush_by_mmuidx_all_cpus:
 * @cpu: Originating CPU of the flush
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from all TLBs of all CPUs, for the specified
 * MMU indexes.
 */
void tlb_flush_by_mmuidx_all_cpus(CPUState *cpu, uint16_t idxmap);
/**
 * tlb_flush_by_mmuidx_all_cpus_synced:
 * @cpu: Originating CPU of the flush
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from all TLBs of all CPUs, for the specified
 * MMU indexes like tlb_flush_by_mmuidx_all_cpus except the source
 * vCPU's work is scheduled as safe work meaning all flushes will be
 * complete once the source vCPU's safe work is complete. This will
 * depend on when the guest's translation ends the TB.
 */
void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu, uint16_t idxmap);
/**
 * tlb_set_page_with_attrs:
 * @cpu: CPU to add this TLB entry for
 * @vaddr: virtual address of page to add entry for
 * @paddr: physical address of the page
 * @attrs: memory transaction attributes
 * @prot: access permissions (PAGE_READ/PAGE_WRITE/PAGE_EXEC bits)
 * @mmu_idx: MMU index to insert TLB entry for
 * @size: size of the page in bytes
 *
 * Add an entry to this CPU's TLB (a mapping from virtual address
 * @vaddr to physical address @paddr) with the specified memory
 * transaction attributes. This is generally called by the target CPU
 * specific code after it has been called through the tlb_fill()
 * entry point and performed a successful page table walk to find
 * the physical address and attributes for the virtual address
 * which provoked the TLB miss.
 *
 * At most one entry for a given virtual address is permitted. Only a
 * single TARGET_PAGE_SIZE region is mapped; the supplied @size is only
 * used by tlb_flush_page.
 */
void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
                             hwaddr paddr, MemTxAttrs attrs,
                             int prot, int mmu_idx, target_ulong size);
/* tlb_set_page:
 *
 * This function is equivalent to calling tlb_set_page_with_attrs()
 * with an @attrs argument of MEMTXATTRS_UNSPECIFIED. It's provided
 * as a convenience for CPUs which don't use memory transaction attributes.
 */
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size);
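
/*
 * Illustrative sketch (not part of the original header): the tail of a
 * hypothetical target's tlb_fill() after a successful page table walk.
 * walk_result, page_table_walk() and their fields are placeholders, not a
 * real QEMU structure or API.
 *
 *     void tlb_fill(CPUState *cs, target_ulong addr, MMUAccessType access_type,
 *                   int mmu_idx, uintptr_t retaddr)
 *     {
 *         struct walk_result r;
 *
 *         if (!page_table_walk(cs, addr, access_type, mmu_idx, &r)) {
 *             cpu_restore_state(cs, retaddr);   // attribute the fault precisely
 *             cpu_loop_exit(cs);                // deliver the guest exception
 *         }
 *         tlb_set_page(cs, addr & TARGET_PAGE_MASK,
 *                      r.phys_addr & TARGET_PAGE_MASK,
 *                      r.prot, mmu_idx, TARGET_PAGE_SIZE);
 *     }
 */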
void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr);
void probe_write(CPUArchState *env, target_ulong addr, int mmu_idx,
                 uintptr_t retaddr);
#else
static inline void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
}
static inline void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr)
{
}
static inline void tlb_flush_page_all_cpus_synced(CPUState *src,
                                                  target_ulong addr)
{
}
static inline void tlb_flush(CPUState *cpu)
{
}
static inline void tlb_flush_all_cpus(CPUState *src_cpu)
{
}
static inline void tlb_flush_all_cpus_synced(CPUState *src_cpu)
{
}
static inline void tlb_flush_page_by_mmuidx(CPUState *cpu,
                                            target_ulong addr, uint16_t idxmap)
{
}
static inline void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
{
}
static inline void tlb_flush_page_by_mmuidx_all_cpus(CPUState *cpu,
                                                     target_ulong addr,
                                                     uint16_t idxmap)
{
}
static inline void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                                            target_ulong addr,
                                                            uint16_t idxmap)
{
}
static inline void tlb_flush_by_mmuidx_all_cpus(CPUState *cpu, uint16_t idxmap)
{
}
static inline void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                                       uint16_t idxmap)
{
}
static inline void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr)
{
}
#endif
#define CODE_GEN_ALIGN           16 /* must be >= the size of an icache line */

/* Estimated block size for TB allocation.  */
/* ??? The following is based on a 2015 survey of x86_64 host output.
   Better would seem to be some sort of dynamically sized TB array,
   adapting to the block sizes actually being produced.  */
#if defined(CONFIG_SOFTMMU)
#define CODE_GEN_AVG_BLOCK_SIZE 400
#else
#define CODE_GEN_AVG_BLOCK_SIZE 150
#endif

#if defined(_ARCH_PPC) \
    || defined(__x86_64__) || defined(__i386__) \
    || defined(__sparc__) || defined(__aarch64__) \
    || defined(__s390x__) || defined(__mips__) \
    || defined(CONFIG_TCG_INTERPRETER)
/* NOTE: Direct jump patching must be atomic to be thread-safe. */
#define USE_DIRECT_JUMP
#endif
struct TranslationBlock {
    target_ulong pc;   /* simulated PC corresponding to this block (EIP + CS base) */
    target_ulong cs_base; /* CS base for this block */
    uint32_t flags; /* flags defining in which context the code was generated */
    uint16_t size;      /* size of target code for this block (1 <=
                           size <= TARGET_PAGE_SIZE) */
    uint16_t icount;
    uint32_t cflags;    /* compile flags */
#define CF_COUNT_MASK  0x7fff
#define CF_LAST_IO     0x8000 /* Last insn may be an IO access.  */
#define CF_NOCACHE     0x10000 /* To be freed after execution */
#define CF_USE_ICOUNT  0x20000
#define CF_IGNORE_ICOUNT 0x40000 /* Do not generate icount code */

    /* Per-vCPU dynamic tracing state used to generate this TB */
    uint32_t trace_vcpu_dstate;

    uint16_t invalid;

    void *tc_ptr;    /* pointer to the translated code */
    uint8_t *tc_search;  /* pointer to search data */
    /* original tb when cflags has CF_NOCACHE */
    struct TranslationBlock *orig_tb;
    /* first and second physical page containing code. The lower bit
       of the pointer tells the index in page_next[] */
    struct TranslationBlock *page_next[2];
    tb_page_addr_t page_addr[2];

    /* The following data are used to directly call another TB from
     * the code of this one. This can be done either by emitting direct or
     * indirect native jump instructions. These jumps are reset so that the TB
     * just continues its execution. The TB can be linked to another one by
     * setting one of the jump targets (or patching the jump instruction). Only
     * two such jumps are supported.
     */
    uint16_t jmp_reset_offset[2]; /* offset of original jump target */
#define TB_JMP_RESET_OFFSET_INVALID 0xffff /* indicates no jump generated */
#ifdef USE_DIRECT_JUMP
    uint16_t jmp_insn_offset[2]; /* offset of native jump instruction */
#else
    uintptr_t jmp_target_addr[2]; /* target address for indirect jump */
#endif
    /* Each TB has an associated circular list of TBs jumping to this one.
     * jmp_list_first points to the first TB jumping to this one.
     * jmp_list_next is used to point to the next TB in a list.
     * Since each TB can have two jumps, it can participate in two lists.
     * jmp_list_first and jmp_list_next are 4-byte aligned pointers to a
     * TranslationBlock structure, but the two least significant bits of
     * them are used to encode which data field of the pointed TB should
     * be used to traverse the list further from that TB:
     * 0 => jmp_list_next[0], 1 => jmp_list_next[1], 2 => jmp_list_first.
     * In other words, 0/1 tells which jump is used in the pointed TB,
     * and 2 means that this is a pointer back to the target TB of this list.
     */
    uintptr_t jmp_list_next[2];
    uintptr_t jmp_list_first;
};
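
/*
 * Illustrative sketch (not part of the original header): decoding one of the
 * tagged jmp_list_* pointers described above.  "entry" is a hypothetical
 * local holding the raw uintptr_t value.
 *
 *     uintptr_t entry = tb->jmp_list_first;
 *     int tag = entry & 3;                               // 0, 1 or 2
 *     TranslationBlock *ptb = (TranslationBlock *)(entry & ~(uintptr_t)3);
 *     if (tag == 2) {
 *         // points back at the list's target TB: end of the circular list
 *     } else {
 *         // keep walking via ptb->jmp_list_next[tag]
 *     }
 */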
void tb_free(TranslationBlock *tb);
void tb_flush(CPUState *cpu);
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);
TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
                                   target_ulong cs_base, uint32_t flags);
#if defined(USE_DIRECT_JUMP)

#if defined(CONFIG_TCG_INTERPRETER)
static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
    /* patch the branch destination */
    atomic_set((int32_t *)jmp_addr, addr - (jmp_addr + 4));
    /* no need to flush icache explicitly */
}
#elif defined(_ARCH_PPC)
void ppc_tb_set_jmp_target(uintptr_t jmp_addr, uintptr_t addr);
#define tb_set_jmp_target1 ppc_tb_set_jmp_target
#elif defined(__i386__) || defined(__x86_64__)
static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
    /* patch the branch destination */
    atomic_set((int32_t *)jmp_addr, addr - (jmp_addr + 4));
    /* no need to flush icache explicitly */
}
#elif defined(__s390x__)
static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
    /* patch the branch destination */
    intptr_t disp = addr - (jmp_addr - 2);
    atomic_set((int32_t *)jmp_addr, disp / 2);
    /* no need to flush icache explicitly */
}
#elif defined(__aarch64__)
void aarch64_tb_set_jmp_target(uintptr_t jmp_addr, uintptr_t addr);
#define tb_set_jmp_target1 aarch64_tb_set_jmp_target
#elif defined(__sparc__) || defined(__mips__)
void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr);
#else
#error tb_set_jmp_target1 is missing
#endif

static inline void tb_set_jmp_target(TranslationBlock *tb,
                                     int n, uintptr_t addr)
{
    uint16_t offset = tb->jmp_insn_offset[n];
    tb_set_jmp_target1((uintptr_t)(tb->tc_ptr + offset), addr);
}
#else

/* set the jump target */
static inline void tb_set_jmp_target(TranslationBlock *tb,
                                     int n, uintptr_t addr)
{
    tb->jmp_target_addr[n] = addr;
}

#endif
/* Called with tb_lock held.  */
static inline void tb_add_jump(TranslationBlock *tb, int n,
                               TranslationBlock *tb_next)
{
    assert(n < ARRAY_SIZE(tb->jmp_list_next));
    if (tb->jmp_list_next[n]) {
        /* Another thread has already done this while we were
         * outside of the lock; nothing to do in this case */
        return;
    }
    qemu_log_mask_and_addr(CPU_LOG_EXEC, tb->pc,
                           "Linking TBs %p [" TARGET_FMT_lx
                           "] index %d -> %p [" TARGET_FMT_lx "]\n",
                           tb->tc_ptr, tb->pc, n,
                           tb_next->tc_ptr, tb_next->pc);

    /* patch the native jump address */
    tb_set_jmp_target(tb, n, (uintptr_t)tb_next->tc_ptr);

    /* add in TB jmp circular list */
    tb->jmp_list_next[n] = tb_next->jmp_list_first;
    tb_next->jmp_list_first = (uintptr_t)tb | n;
}
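
/*
 * Illustrative sketch (not part of the original header): how a caller in the
 * execution loop might chain the TB that just ran to the one about to run.
 * "last_tb" and "exit_index" are hypothetical locals; the guard against
 * TB_JMP_RESET_OFFSET_INVALID skips blocks whose goto_tb slot was never
 * generated.
 *
 *     tb_lock();
 *     if (last_tb &&
 *         last_tb->jmp_reset_offset[exit_index] != TB_JMP_RESET_OFFSET_INVALID) {
 *         tb_add_jump(last_tb, exit_index, tb);
 *     }
 *     tb_unlock();
 */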
/* GETPC is the true target of the return instruction that we'll execute.  */
#if defined(CONFIG_TCG_INTERPRETER)
extern uintptr_t tci_tb_ptr;
# define GETPC() tci_tb_ptr
#else
# define GETPC() \
    ((uintptr_t)__builtin_extract_return_addr(__builtin_return_address(0)))
#endif

/* The true return address will often point to a host insn that is part of
   the next translated guest insn.  Adjust the address backward to point to
   the middle of the call insn.  Subtracting one would do the job except for
   several compressed mode architectures (arm, mips) which set the low bit
   to indicate the compressed mode; subtracting two works around that.  It
   is also the case that there are no host isas that contain a call insn
   smaller than 4 bytes, so we don't worry about special-casing this.  */
#define GETPC_ADJ   2
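
/*
 * Illustrative sketch (not part of the original header): a hypothetical
 * out-of-line helper capturing its host return address with GETPC() so a
 * later fault can be attributed to the guest insn that made the call.
 * Whether GETPC_ADJ is applied here or by the consumer of the address
 * depends on the call site; this only shows the capture pattern, and
 * do_slow_store() is a placeholder.
 *
 *     void helper_example_store(CPUArchState *env, target_ulong addr,
 *                               uint32_t val)
 *     {
 *         uintptr_t retaddr = GETPC();  // must be taken in the outermost helper
 *         do_slow_store(env, addr, val, retaddr);
 *     }
 */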
void tb_lock(void);
void tb_unlock(void);
void tb_lock_reset(void);

#if !defined(CONFIG_USER_ONLY)

struct MemoryRegion *iotlb_to_region(CPUState *cpu,
                                     hwaddr index, MemTxAttrs attrs);

void tlb_fill(CPUState *cpu, target_ulong addr, MMUAccessType access_type,
              int mmu_idx, uintptr_t retaddr);

#endif

#if defined(CONFIG_USER_ONLY)
void mmap_lock(void);
void mmap_unlock(void);
bool have_mmap_lock(void);

static inline tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
{
    return addr;
}
#else
static inline void mmap_lock(void) {}
static inline void mmap_unlock(void) {}

/* cputlb.c */
tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr);

void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length);
void tlb_set_dirty(CPUState *cpu, target_ulong vaddr);

/* exec.c */
void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr);

MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen);
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address);
bool memory_region_is_unassigned(MemoryRegion *mr);

#endif

/* vl.c */
extern int singlestep;

#endif