/*
 * internal execution defines for qemu
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#ifndef _EXEC_ALL_H_
#define _EXEC_ALL_H_

#include "qemu-common.h"
/* Allow inspection of translation results - the slowdown should be
   negligible, so we leave it enabled. */
#define DEBUG_DISAS
/* Page tracking code uses ram addresses in system mode, and virtual
   addresses in userspace mode.  Define tb_page_addr_t to be an appropriate
   type. */
#if defined(CONFIG_USER_ONLY)
typedef abi_ulong tb_page_addr_t;
#else
typedef ram_addr_t tb_page_addr_t;
#endif
/* is_jmp field values */
#define DISAS_NEXT    0 /* next instruction can be analyzed */
#define DISAS_JUMP    1 /* only pc was modified dynamically */
#define DISAS_UPDATE  2 /* cpu state was modified dynamically */
#define DISAS_TB_JUMP 3 /* only pc was modified statically */
struct TranslationBlock;
typedef struct TranslationBlock TranslationBlock;

/* XXX: make safe guess about sizes */
#define MAX_OP_PER_INSTR 208

#if HOST_LONG_BITS == 32
#define MAX_OPC_PARAM_PER_ARG 2
#else
#define MAX_OPC_PARAM_PER_ARG 1
#endif
#define MAX_OPC_PARAM_IARGS 5
#define MAX_OPC_PARAM_OARGS 1
#define MAX_OPC_PARAM_ARGS (MAX_OPC_PARAM_IARGS + MAX_OPC_PARAM_OARGS)

/* A Call op needs up to 4 + 2N parameters on 32-bit archs,
 * and up to 4 + N parameters on 64-bit archs
 * (N = number of input arguments + output arguments).  */
#define MAX_OPC_PARAM (4 + (MAX_OPC_PARAM_PER_ARG * MAX_OPC_PARAM_ARGS))
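/* Worked example: MAX_OPC_PARAM_ARGS = 5 + 1 = 6, so MAX_OPC_PARAM comes
   to 4 + 2 * 6 = 16 on a 32-bit host and 4 + 1 * 6 = 10 on a 64-bit
   host. */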
#define OPC_BUF_SIZE 640
#define OPC_MAX_SIZE (OPC_BUF_SIZE - MAX_OP_PER_INSTR)

/* Maximum size a TCG op can expand to.  This is complicated because a
   single op may require several host instructions and register reloads.
   For now take a wild guess at 192 bytes, which should allow at least
   a couple of fixup instructions per argument.  */
#define TCG_MAX_OP_SIZE 192

#define OPPARAM_BUF_SIZE (OPC_BUF_SIZE * MAX_OPC_PARAM)
#include "qemu/log.h"

void gen_intermediate_code(CPUArchState *env, struct TranslationBlock *tb);
void gen_intermediate_code_pc(CPUArchState *env, struct TranslationBlock *tb);
void restore_state_to_opc(CPUArchState *env, struct TranslationBlock *tb,
                          int pc_pos);

void cpu_gen_init(void);
int cpu_gen_code(CPUArchState *env, struct TranslationBlock *tb,
                 int *gen_code_size_ptr);
bool cpu_restore_state(CPUArchState *env, uintptr_t searched_pc);

void QEMU_NORETURN cpu_resume_from_signal(CPUArchState *env1, void *puc);
void QEMU_NORETURN cpu_io_recompile(CPUArchState *env, uintptr_t retaddr);
TranslationBlock *tb_gen_code(CPUArchState *env,
                              target_ulong pc, target_ulong cs_base, int flags,
                              int cflags);
void cpu_exec_init(CPUArchState *env);
void QEMU_NORETURN cpu_loop_exit(CPUArchState *env1);
int page_unprotect(target_ulong address, uintptr_t pc, void *puc);
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access);
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end,
                              int is_cpu_write_access);
#if !defined(CONFIG_USER_ONLY)
/* cputlb.c */
void tlb_flush_page(CPUArchState *env, target_ulong addr);
void tlb_flush(CPUArchState *env, int flush_global);
void tlb_set_page(CPUArchState *env, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size);
void tb_invalidate_phys_addr(hwaddr addr);
#else
static inline void tlb_flush_page(CPUArchState *env, target_ulong addr)
{
}

static inline void tlb_flush(CPUArchState *env, int flush_global)
{
}
#endif
#define CODE_GEN_ALIGN           16 /* must be >= the size of an icache line */

#define CODE_GEN_PHYS_HASH_BITS     15
#define CODE_GEN_PHYS_HASH_SIZE     (1 << CODE_GEN_PHYS_HASH_BITS)
/* estimated block size for TB allocation */
/* XXX: use a per-code average code fragment size and modulate it
   according to the host CPU */
#if defined(CONFIG_SOFTMMU)
#define CODE_GEN_AVG_BLOCK_SIZE 128
#else
#define CODE_GEN_AVG_BLOCK_SIZE 64
#endif

#if defined(__arm__) || defined(_ARCH_PPC) \
    || defined(__x86_64__) || defined(__i386__) \
    || defined(__sparc__) || defined(__aarch64__) \
    || defined(CONFIG_TCG_INTERPRETER)
#define USE_DIRECT_JUMP
#endif
struct TranslationBlock {
    target_ulong pc;   /* simulated PC corresponding to this block (EIP + CS base) */
    target_ulong cs_base; /* CS base for this block */
    uint64_t flags; /* flags defining in which context the code was generated */
    uint16_t size;      /* size of target code for this block (1 <=
                           size <= TARGET_PAGE_SIZE) */
    uint16_t cflags;    /* compile flags */
#define CF_COUNT_MASK  0x7fff
#define CF_LAST_IO     0x8000 /* Last insn may be an IO access.  */

    uint8_t *tc_ptr;    /* pointer to the translated code */
    /* next matching tb for physical address. */
    struct TranslationBlock *phys_hash_next;
    /* first and second physical page containing code. The lower bit
       of the pointer tells the index in page_next[] */
    struct TranslationBlock *page_next[2];
    tb_page_addr_t page_addr[2];

    /* the following data are used to directly call another TB from
       the code of this one. */
    uint16_t tb_next_offset[2]; /* offset of original jump target */
#ifdef USE_DIRECT_JUMP
    uint16_t tb_jmp_offset[2]; /* offset of jump instruction */
#else
    uintptr_t tb_next[2]; /* address of jump generated code */
#endif
    /* list of TBs jumping to this one. This is a circular list using
       the two least significant bits of the pointers to tell what is
       the next pointer: 0 = jmp_next[0], 1 = jmp_next[1], 2 =
       jmp_first */
    struct TranslationBlock *jmp_next[2];
    struct TranslationBlock *jmp_first;
    uint32_t icount;
};
#include "exec/spinlock.h"

typedef struct TBContext TBContext;

struct TBContext {

    TranslationBlock *tbs;
    TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
    int nb_tbs;
    /* any access to the tbs or the page table must use this lock */
    spinlock_t tb_lock;

    /* statistics */
    int tb_flush_count;
    int tb_phys_invalidate_count;

    int tb_invalidated_flag;
};
static inline unsigned int tb_jmp_cache_hash_page(target_ulong pc)
{
    target_ulong tmp;
    tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
    return (tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK;
}

static inline unsigned int tb_jmp_cache_hash_func(target_ulong pc)
{
    target_ulong tmp;
    tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
    return (((tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK)
            | (tmp & TB_JMP_ADDR_MASK));
}

static inline unsigned int tb_phys_hash_func(tb_page_addr_t pc)
{
    return (pc >> 2) & (CODE_GEN_PHYS_HASH_SIZE - 1);
}
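/* Illustrative sketch (not part of the original header, assuming the
   tb_jmp_cache array that CPU_COMMON in cpu-defs.h adds to CPUArchState):
   cpu-exec.c probes the per-CPU jump cache with tb_jmp_cache_hash_func()
   before falling back to the physical hash table above. */
#if 0
static inline TranslationBlock *tb_lookup_jmp_cache_example(CPUArchState *env,
                                                            target_ulong pc)
{
    return env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
}
#endif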
void tb_free(TranslationBlock *tb);
void tb_flush(CPUArchState *env);
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);
#if defined(USE_DIRECT_JUMP)

#if defined(CONFIG_TCG_INTERPRETER)
static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
    /* patch the branch destination */
    *(uint32_t *)jmp_addr = addr - (jmp_addr + 4);
    /* no need to flush icache explicitly */
}
#elif defined(_ARCH_PPC)
void ppc_tb_set_jmp_target(unsigned long jmp_addr, unsigned long addr);
#define tb_set_jmp_target1 ppc_tb_set_jmp_target
#elif defined(__i386__) || defined(__x86_64__)
static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
    /* patch the branch destination */
    *(uint32_t *)jmp_addr = addr - (jmp_addr + 4);
    /* no need to flush icache explicitly */
}
#elif defined(__aarch64__)
void aarch64_tb_set_jmp_target(uintptr_t jmp_addr, uintptr_t addr);
#define tb_set_jmp_target1 aarch64_tb_set_jmp_target
#elif defined(__arm__)
static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
#if !QEMU_GNUC_PREREQ(4, 1)
    register unsigned long _beg __asm ("a1");
    register unsigned long _end __asm ("a2");
    register unsigned long _flg __asm ("a3");
#endif

    /* we could use a ldr pc, [pc, #-4] kind of branch and avoid the flush */
    *(uint32_t *)jmp_addr =
        (*(uint32_t *)jmp_addr & ~0xffffff)
        | (((addr - (jmp_addr + 8)) >> 2) & 0xffffff);

#if QEMU_GNUC_PREREQ(4, 1)
    __builtin___clear_cache((char *) jmp_addr, (char *) jmp_addr + 4);
#else
    /* flush icache */
    _beg = jmp_addr;
    _end = jmp_addr + 4;
    _flg = 0;
    __asm __volatile__ ("swi 0x9f0002" : : "r" (_beg), "r" (_end), "r" (_flg));
#endif
}
#elif defined(__sparc__)
void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr);
#else
#error tb_set_jmp_target1 is missing
#endif
static inline void tb_set_jmp_target(TranslationBlock *tb,
                                     int n, uintptr_t addr)
{
    uint16_t offset = tb->tb_jmp_offset[n];
    tb_set_jmp_target1((uintptr_t)(tb->tc_ptr + offset), addr);
}

#else

/* set the jump target */
static inline void tb_set_jmp_target(TranslationBlock *tb,
                                     int n, uintptr_t addr)
{
    tb->tb_next[n] = addr;
}

#endif
static inline void tb_add_jump(TranslationBlock *tb, int n,
                               TranslationBlock *tb_next)
{
    /* NOTE: this test is only needed for thread safety */
    if (!tb->jmp_next[n]) {
        /* patch the native jump address */
        tb_set_jmp_target(tb, n, (uintptr_t)tb_next->tc_ptr);

        /* add in TB jmp circular list */
        tb->jmp_next[n] = tb_next->jmp_first;
        tb_next->jmp_first = (TranslationBlock *)((uintptr_t)(tb) | (n));
    }
}
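/* Illustrative sketch (not part of the original header): walking the
   circular list built by tb_add_jump() above.  The low two bits of each
   pointer select the continuation: 0 or 1 index jmp_next[], and 2 marks
   the jmp_first head, which terminates the walk. */
#if 0
static inline void tb_walk_jmp_list_example(TranslationBlock *tb)
{
    TranslationBlock *tb1 = tb->jmp_first;
    while (((uintptr_t)tb1 & 3) != 2) {
        unsigned n = (uintptr_t)tb1 & 3;
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        /* ... tb1 is a TB that jumps into 'tb' through slot n ... */
        tb1 = tb1->jmp_next[n];
    }
}
#endif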
/* GETRA is the true target of the return instruction that we'll execute,
   defined here for simplicity of defining the follow-up macros.  */
#if defined(CONFIG_TCG_INTERPRETER)
extern uintptr_t tci_tb_ptr;
# define GETRA() tci_tb_ptr
#else
# define GETRA() \
    ((uintptr_t)__builtin_extract_return_addr(__builtin_return_address(0)))
#endif

/* The true return address will often point to a host insn that is part of
   the next translated guest insn.  Adjust the address backward to point to
   the middle of the call insn.  Subtracting one would do the job except for
   several compressed mode architectures (arm, mips) which set the low bit
   to indicate the compressed mode; subtracting two works around that.  It
   is also the case that there are no host ISAs that contain a call insn
   smaller than 4 bytes, so we don't worry about special-casing this.  */
#if defined(CONFIG_TCG_INTERPRETER)
# define GETPC_ADJ   0
#else
# define GETPC_ADJ   2
#endif

#define GETPC()  (GETRA() - GETPC_ADJ)
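/* Illustrative sketch (not part of the original header): a helper called
   directly from generated code must capture GETPC() at its own level so
   that cpu_restore_state() (declared above) can map the host return
   address back to a guest insn, e.g. before raising an exception. */
#if 0
static inline void helper_example_raise(CPUArchState *env)
{
    uintptr_t retaddr = GETPC();        /* take it here, not in a callee */

    cpu_restore_state(env, retaddr);    /* resync guest PC from host RA */
    cpu_loop_exit(env);                 /* unwind to the execution loop */
}
#endif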
/* The LDST optimization splits code generation into fast and slow path.
   In some implementations, we pass the "logical" return address manually;
   in others, we must infer the logical return from the true return.  */
#if defined(CONFIG_QEMU_LDST_OPTIMIZATION) && defined(CONFIG_SOFTMMU)
# if defined (_ARCH_PPC) && !defined (_ARCH_PPC64)
#  define GETRA_LDST(RA)   (*(int32_t *)((RA) - 4))
# elif defined(__arm__)
/* We define two insns between the return address and the branch back to
   straight-line.  Find and decode that branch insn.  */
#  define GETRA_LDST(RA)   tcg_getra_ldst(RA)
static inline uintptr_t tcg_getra_ldst(uintptr_t ra)
{
    int32_t b;
    ra += 8;                    /* skip the two insns */
    b = *(int32_t *)ra;         /* load the branch insn */
    b = (b << 8) >> (8 - 2);    /* extract the displacement */
    ra += 8;                    /* branches are relative to pc+8 */
    ra += b;                    /* apply the displacement */
    return ra;
}
# elif defined(__aarch64__)
#  define GETRA_LDST(RA)  tcg_getra_ldst(RA)
static inline uintptr_t tcg_getra_ldst(uintptr_t ra)
{
    int32_t b;
    ra += 4;                    /* skip one instruction */
    b = *(int32_t *)ra;         /* load the branch insn */
    b = (b << 6) >> (6 - 2);    /* extract the displacement */
    ra += b;                    /* apply the displacement */
    return ra;
}
# endif
#endif /* CONFIG_QEMU_LDST_OPTIMIZATION */
/* ??? Delete these once they are no longer used.  */
bool is_tcg_gen_code(uintptr_t pc_ptr);
#ifdef GETRA_LDST
# define GETRA_EXT()  tcg_getra_ext(GETRA())
static inline uintptr_t tcg_getra_ext(uintptr_t ra)
{
    return is_tcg_gen_code(ra) ? GETRA_LDST(ra) : ra;
}
#else
# define GETRA_EXT()  GETRA()
#endif
#if !defined(CONFIG_USER_ONLY)

struct MemoryRegion *iotlb_to_region(hwaddr index);
bool io_mem_read(struct MemoryRegion *mr, hwaddr addr,
                 uint64_t *pvalue, unsigned size);
bool io_mem_write(struct MemoryRegion *mr, hwaddr addr,
                  uint64_t value, unsigned size);

void tlb_fill(CPUArchState *env1, target_ulong addr, int is_write, int mmu_idx,
              uintptr_t retaddr);

uint8_t helper_ldb_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
uint16_t helper_ldw_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
uint32_t helper_ldl_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
uint64_t helper_ldq_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
#define ACCESS_TYPE (NB_MMU_MODES + 1)
#define MEMSUFFIX _code

#define DATA_SIZE 1
#include "exec/softmmu_header.h"

#define DATA_SIZE 2
#include "exec/softmmu_header.h"

#define DATA_SIZE 4
#include "exec/softmmu_header.h"

#define DATA_SIZE 8
#include "exec/softmmu_header.h"
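/* Each inclusion of softmmu_header.h above instantiates the code-access
   loaders for one DATA_SIZE (cpu_ldub_code(), cpu_lduw_code(),
   cpu_ldl_code() and cpu_ldq_code()), which target translators use to
   fetch guest instructions. */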
#undef ACCESS_TYPE
#undef MEMSUFFIX

#endif
#if defined(CONFIG_USER_ONLY)
static inline tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
{
    return addr;
}
#else
/* cputlb.c */
tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr);
#endif
typedef void (CPUDebugExcpHandler)(CPUArchState *env);

void cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler);

/* vl.c */
extern int singlestep;

/* cpu-exec.c */
extern volatile sig_atomic_t exit_request;
/* Deterministic execution requires that IO only be performed on the last
   instruction of a TB so that interrupts take effect immediately.  */
static inline int can_do_io(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);

    if (!use_icount) {
        return 1;
    }
    /* If not executing code then assume we are ok.  */
    if (cpu->current_tb == NULL) {
        return 1;
    }
    return env->can_do_io != 0;
}

#endif