accel/tcg/internal.h

/*
 * Internal execution defines for qemu
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * SPDX-License-Identifier: LGPL-2.1-or-later
 */

#ifndef ACCEL_TCG_INTERNAL_H
#define ACCEL_TCG_INTERNAL_H

#include "exec/exec-all.h"

/*
 * Access to the various translation structures needs to be serialised
 * via locks for consistency. In user-mode emulation, access to the
 * memory-related structures is protected with mmap_lock.
 * In !user-mode we use per-page locks.
 */
#ifdef CONFIG_SOFTMMU
#define assert_memory_lock()
#else
#define assert_memory_lock() tcg_debug_assert(have_mmap_lock())
#endif
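
/*
 * Illustrative sketch (editor's note, not part of the upstream header):
 * in a user-mode build, a caller is expected to hold mmap_lock around
 * any walk or update of these structures, e.g.:
 *
 *     mmap_lock();
 *     assert_memory_lock();
 *     ... look up or modify PageDesc entries ...
 *     mmap_unlock();
 *
 * In softmmu builds assert_memory_lock() expands to nothing and the
 * per-page locks declared below are used instead.
 */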

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    uintptr_t first_tb;
#ifdef CONFIG_USER_ONLY
    unsigned long flags;
    void *target_data;
#endif
#ifdef CONFIG_SOFTMMU
    QemuSpin lock;
#endif
} PageDesc;

/* Size of the L2 (and L3, etc) page tables.  */
#define V_L2_BITS 10
#define V_L2_SIZE (1 << V_L2_BITS)

/*
 * L1 Mapping properties
 */
extern int v_l1_size;
extern int v_l1_shift;
extern int v_l2_levels;

/*
 * The bottom level has pointers to PageDesc, and is indexed by
 * anything from 4 to (V_L2_BITS + 3) bits, depending on target page size.
 */
#define V_L1_MIN_BITS 4
#define V_L1_MAX_BITS (V_L2_BITS + 3)
#define V_L1_MAX_SIZE (1 << V_L1_MAX_BITS)

extern void *l1_map[V_L1_MAX_SIZE];
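
/*
 * Worked example (editor's sketch; the actual values are computed at
 * startup in translate-all.c): suppose 52 bits of page-number space
 * must be mapped, e.g. a 64-bit address space with 4 KiB pages.
 * 52 % V_L2_BITS == 2, which is below V_L1_MIN_BITS, so the L1 table
 * absorbs 2 + V_L2_BITS == 12 bits (v_l1_size == 4096, v_l1_shift == 40),
 * and the remaining 40 bits are consumed by three intermediate 10-bit
 * pointer levels (v_l2_levels == 3) plus the bottom 10-bit level whose
 * entries point to the PageDesc structures.
 */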

PageDesc *page_find_alloc(tb_page_addr_t index, bool alloc);

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, false);
}
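
/*
 * Illustrative usage (editor's sketch, not upstream code): look up the
 * descriptor for a page without allocating one; 'addr' here stands for
 * any tb_page_addr_t:
 *
 *     PageDesc *pd = page_find(addr >> TARGET_PAGE_BITS);
 *     if (pd == NULL) {
 *         return;    (no TB has ever touched this page)
 *     }
 *
 * Pass alloc == true to page_find_alloc() to create the intermediate
 * levels and the PageDesc on demand.
 */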

/* list iterators for lists of tagged pointers in TranslationBlock */
#define TB_FOR_EACH_TAGGED(head, tb, n, field)                          \
    for (n = (head) & 1, tb = (TranslationBlock *)((head) & ~1);        \
         tb; tb = (TranslationBlock *)tb->field[n], n = (uintptr_t)tb & 1, \
             tb = (TranslationBlock *)((uintptr_t)tb & ~1))

#define PAGE_FOR_EACH_TB(pagedesc, tb, n)                       \
    TB_FOR_EACH_TAGGED((pagedesc)->first_tb, tb, n, page_next)

#define TB_FOR_EACH_JMP(head_tb, tb, n)                                 \
    TB_FOR_EACH_TAGGED((head_tb)->jmp_list_head, tb, n, jmp_list_next)
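
/*
 * Illustrative usage (editor's sketch, not upstream code): the low bit
 * of each link is a tag selecting which of the TB's two link slots to
 * follow next, since a TranslationBlock may intersect two pages. A walk
 * of all TBs on a locked page 'pd' looks like:
 *
 *     TranslationBlock *tb;
 *     int n;
 *
 *     assert_page_locked(pd);
 *     PAGE_FOR_EACH_TB(pd, tb, n) {
 *         ... n says whether pd is tb's first or second page ...
 *     }
 */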

/* In user-mode page locks aren't used; mmap_lock is enough */
#ifdef CONFIG_USER_ONLY
#define assert_page_locked(pd) tcg_debug_assert(have_mmap_lock())
static inline void page_lock(PageDesc *pd) { }
static inline void page_unlock(PageDesc *pd) { }
#else
#ifdef CONFIG_DEBUG_TCG
void do_assert_page_locked(const PageDesc *pd, const char *file, int line);
#define assert_page_locked(pd) do_assert_page_locked(pd, __FILE__, __LINE__)
#else
#define assert_page_locked(pd)
#endif
void page_lock(PageDesc *pd);
void page_unlock(PageDesc *pd);
#endif

#if !defined(CONFIG_USER_ONLY) && defined(CONFIG_DEBUG_TCG)
void assert_no_pages_locked(void);
#else
static inline void assert_no_pages_locked(void) { }
#endif

TranslationBlock *tb_gen_code(CPUState *cpu, target_ulong pc,
                              target_ulong cs_base, uint32_t flags,
                              int cflags);
G_NORETURN void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr);
void page_init(void);
void tb_htable_init(void);
void tb_reset_jump(TranslationBlock *tb, int n);
TranslationBlock *tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
                               tb_page_addr_t phys_page2);
bool tb_invalidate_phys_page_unwind(tb_page_addr_t addr, uintptr_t pc);
void cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
                               uintptr_t host_pc);

/* Return the current PC from CPU, which may be cached in TB. */
static inline target_ulong log_pc(CPUState *cpu, const TranslationBlock *tb)
{
#if TARGET_TB_PCREL
    return cpu->cc->get_pc(cpu);
#else
    return tb_pc(tb);
#endif
}

#endif /* ACCEL_TCG_INTERNAL_H */