/*
 * internal execution defines for qemu
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef _EXEC_ALL_H_
#define _EXEC_ALL_H_

#include "qemu-common.h"

/* allow to see translation results - the slowdown should be negligible, so we leave it */
#define DEBUG_DISAS
/* Page tracking code uses ram addresses in system mode, and virtual
   addresses in userspace mode.  Define tb_page_addr_t to be an appropriate
   type. */
#if defined(CONFIG_USER_ONLY)
typedef abi_ulong tb_page_addr_t;
#else
typedef ram_addr_t tb_page_addr_t;
#endif

/* is_jmp field values */
#define DISAS_NEXT    0 /* next instruction can be analyzed */
#define DISAS_JUMP    1 /* only pc was modified dynamically */
#define DISAS_UPDATE  2 /* cpu state was modified dynamically */
#define DISAS_TB_JUMP 3 /* only pc was modified statically */

struct TranslationBlock;
typedef struct TranslationBlock TranslationBlock;
/* XXX: make safe guess about sizes */
#define MAX_OP_PER_INSTR 208

#if HOST_LONG_BITS == 32
#define MAX_OPC_PARAM_PER_ARG 2
#else
#define MAX_OPC_PARAM_PER_ARG 1
#endif
#define MAX_OPC_PARAM_IARGS 4
#define MAX_OPC_PARAM_OARGS 1
#define MAX_OPC_PARAM_ARGS (MAX_OPC_PARAM_IARGS + MAX_OPC_PARAM_OARGS)

/* A Call op needs up to 4 + 2N parameters on 32-bit archs,
 * and up to 4 + N parameters on 64-bit archs
 * (N = number of input arguments + output arguments).  */
#define MAX_OPC_PARAM (4 + (MAX_OPC_PARAM_PER_ARG * MAX_OPC_PARAM_ARGS))
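/* Worked example: with MAX_OPC_PARAM_IARGS = 4 and MAX_OPC_PARAM_OARGS = 1,
 * N = 5, so MAX_OPC_PARAM evaluates to 4 + 2 * 5 = 14 on a 32-bit host and
 * 4 + 1 * 5 = 9 on a 64-bit host.  Each 64-bit call argument needs two
 * parameter slots when host longs are 32 bits wide, hence
 * MAX_OPC_PARAM_PER_ARG above. */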
#define OPC_BUF_SIZE 640
#define OPC_MAX_SIZE (OPC_BUF_SIZE - MAX_OP_PER_INSTR)

/* Maximum size a TCG op can expand to.  This is complicated because a
   single op may require several host instructions and register reloads.
   For now take a wild guess at 192 bytes, which should allow at least
   a couple of fixup instructions per argument.  */
#define TCG_MAX_OP_SIZE 192

#define OPPARAM_BUF_SIZE (OPC_BUF_SIZE * MAX_OPC_PARAM)
extern target_ulong gen_opc_pc[OPC_BUF_SIZE];
extern uint8_t gen_opc_instr_start[OPC_BUF_SIZE];
extern uint16_t gen_opc_icount[OPC_BUF_SIZE];

#include "qemu-log.h"
void gen_intermediate_code(CPUState *env, struct TranslationBlock *tb);
void gen_intermediate_code_pc(CPUState *env, struct TranslationBlock *tb);
void restore_state_to_opc(CPUState *env, struct TranslationBlock *tb,
                          int pc_pos);

void cpu_gen_init(void);
int cpu_gen_code(CPUState *env, struct TranslationBlock *tb,
                 int *gen_code_size_ptr);
int cpu_restore_state(struct TranslationBlock *tb,
                      CPUState *env, unsigned long searched_pc);
void cpu_resume_from_signal(CPUState *env1, void *puc);
void cpu_io_recompile(CPUState *env, void *retaddr);
TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base, int flags,
                              int cflags);
void cpu_exec_init(CPUState *env);
void QEMU_NORETURN cpu_loop_exit(CPUState *env1);
int page_unprotect(target_ulong address, unsigned long pc, void *puc);
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access);
void tlb_flush_page(CPUState *env, target_ulong addr);
void tlb_flush(CPUState *env, int flush_global);
#if !defined(CONFIG_USER_ONLY)
void tlb_set_page(CPUState *env, target_ulong vaddr,
                  target_phys_addr_t paddr, int prot,
                  int mmu_idx, target_ulong size);
#endif
#define CODE_GEN_ALIGN           16 /* must be >= the size of an icache line */

#define CODE_GEN_PHYS_HASH_BITS     15
#define CODE_GEN_PHYS_HASH_SIZE     (1 << CODE_GEN_PHYS_HASH_BITS)

#define MIN_CODE_GEN_BUFFER_SIZE     (1024 * 1024)

/* estimated block size for TB allocation */
/* XXX: use a per code average code fragment size and modulate it
   according to the host CPU */
#if defined(CONFIG_SOFTMMU)
#define CODE_GEN_AVG_BLOCK_SIZE 128
#else
#define CODE_GEN_AVG_BLOCK_SIZE 64
#endif

#if defined(_ARCH_PPC) || defined(__x86_64__) || defined(__arm__) || defined(__i386__)
#define USE_DIRECT_JUMP
#endif
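/* On these hosts the jump instruction emitted at the end of a TB can be
 * patched in place (see tb_set_jmp_target1() below); on other hosts the
 * generated code falls back to an indirect jump through the tb_next[]
 * field of the TranslationBlock. */
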
struct TranslationBlock {
    target_ulong pc;   /* simulated PC corresponding to this block (EIP + CS base) */
    target_ulong cs_base; /* CS base for this block */
    uint64_t flags; /* flags defining in which context the code was generated */
    uint16_t size;      /* size of target code for this block (1 <=
                           size <= TARGET_PAGE_SIZE) */
    uint16_t cflags;    /* compile flags */
#define CF_COUNT_MASK  0x7fff
#define CF_LAST_IO     0x8000 /* Last insn may be an IO access.  */

    uint8_t *tc_ptr;    /* pointer to the translated code */
    /* next matching tb for physical address. */
    struct TranslationBlock *phys_hash_next;
    /* first and second physical page containing code. The lower bit
       of the pointer tells the index in page_next[] */
    struct TranslationBlock *page_next[2];
    tb_page_addr_t page_addr[2];

    /* the following data are used to directly call another TB from
       the code of this one. */
    uint16_t tb_next_offset[2]; /* offset of original jump target */
#ifdef USE_DIRECT_JUMP
    uint16_t tb_jmp_offset[2]; /* offset of jump instruction */
#else
    unsigned long tb_next[2]; /* address of jump generated code */
#endif
    /* list of TBs jumping to this one. This is a circular list using
       the two least significant bits of the pointers to tell what is
       the next pointer: 0 = jmp_next[0], 1 = jmp_next[1], 2 =
       jmp_first */
    struct TranslationBlock *jmp_next[2];
    struct TranslationBlock *jmp_first;
    uint32_t icount;
};
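
/* For illustration, the circular list rooted at jmp_first can be walked like
 * this (roughly what the TB unlink helpers in exec.c do).  Each pointer
 * carries a 2-bit tag in its low bits: 0 or 1 name which jmp_next[] slot of
 * the pointed-to TB continues the chain, and 2 marks tb's own jmp_first
 * pointer, i.e. the end of the list:
 *
 *     TranslationBlock *ptb = tb->jmp_first;
 *     while (((long)ptb & 3) != 2) {
 *         int n = (long)ptb & 3;
 *         TranslationBlock *src = (TranslationBlock *)((long)ptb & ~3);
 *         ptb = src->jmp_next[n];
 *     }
 */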

static inline unsigned int tb_jmp_cache_hash_page(target_ulong pc)
{
    target_ulong tmp;
    tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
    return (tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK;
}

static inline unsigned int tb_jmp_cache_hash_func(target_ulong pc)
{
    target_ulong tmp;
    tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
    return (((tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK)
            | (tmp & TB_JMP_ADDR_MASK));
}
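
/* The TB_JMP_CACHE_BITS, TB_JMP_PAGE_BITS, TB_JMP_PAGE_MASK and
 * TB_JMP_ADDR_MASK constants used above are defined in cpu-defs.h.  The
 * page-derived bits are kept in the upper part of the hash index, so all
 * entries for one guest page occupy a contiguous slice of the
 * env->tb_jmp_cache array and page invalidation only has to clear that
 * slice rather than the whole table. */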

static inline unsigned int tb_phys_hash_func(tb_page_addr_t pc)
{
    return (pc >> 2) & (CODE_GEN_PHYS_HASH_SIZE - 1);
}

void tb_free(TranslationBlock *tb);
void tb_flush(CPUState *env);
void tb_link_page(TranslationBlock *tb,
                  tb_page_addr_t phys_pc, tb_page_addr_t phys_page2);
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);

extern TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];

#if defined(USE_DIRECT_JUMP)

#if defined(_ARCH_PPC)
void ppc_tb_set_jmp_target(unsigned long jmp_addr, unsigned long addr);
#define tb_set_jmp_target1 ppc_tb_set_jmp_target
#elif defined(__i386__) || defined(__x86_64__)
static inline void tb_set_jmp_target1(unsigned long jmp_addr, unsigned long addr)
{
    /* patch the branch destination */
    *(uint32_t *)jmp_addr = addr - (jmp_addr + 4);
    /* no need to flush icache explicitly */
}
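/* Note on the i386/x86_64 patch above: jmp_addr points at the 32-bit
 * displacement operand of a host "jmp rel32"; the displacement is relative
 * to the end of that operand, hence the "jmp_addr + 4".  x86 keeps its
 * instruction cache coherent with stores, so no explicit flush is needed. */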
#elif defined(__arm__)
static inline void tb_set_jmp_target1(unsigned long jmp_addr, unsigned long addr)
{
#if !QEMU_GNUC_PREREQ(4, 1)
    register unsigned long _beg __asm ("a1");
    register unsigned long _end __asm ("a2");
    register unsigned long _flg __asm ("a3");
#endif

    /* we could use a ldr pc, [pc, #-4] kind of branch and avoid the flush */
    *(uint32_t *)jmp_addr =
        (*(uint32_t *)jmp_addr & ~0xffffff)
        | (((addr - (jmp_addr + 8)) >> 2) & 0xffffff);
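    /* The low 24 bits of the ARM branch encoding hold a signed word offset
       relative to PC + 8 (two instructions ahead), which is why 8 is
       subtracted and the result shifted right by 2 before being masked in. */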

#if QEMU_GNUC_PREREQ(4, 1)
    __builtin___clear_cache((char *) jmp_addr, (char *) jmp_addr + 4);
#else
    /* flush icache */
    _beg = jmp_addr;
    _end = jmp_addr + 4;
    _flg = 0;
    __asm __volatile__ ("swi 0x9f0002" : : "r" (_beg), "r" (_end), "r" (_flg));
#endif
}
#endif

static inline void tb_set_jmp_target(TranslationBlock *tb,
                                     int n, unsigned long addr)
{
    unsigned long offset;

    offset = tb->tb_jmp_offset[n];
    tb_set_jmp_target1((unsigned long)(tb->tc_ptr + offset), addr);
}

#else

/* set the jump target */
static inline void tb_set_jmp_target(TranslationBlock *tb,
                                     int n, unsigned long addr)
{
    tb->tb_next[n] = addr;
}

#endif

static inline void tb_add_jump(TranslationBlock *tb, int n,
                               TranslationBlock *tb_next)
{
    /* NOTE: this test is only needed for thread safety */
    if (!tb->jmp_next[n]) {
        /* patch the native jump address */
        tb_set_jmp_target(tb, n, (unsigned long)tb_next->tc_ptr);

        /* add in TB jmp circular list */
        tb->jmp_next[n] = tb_next->jmp_first;
        tb_next->jmp_first = (TranslationBlock *)((long)(tb) | (n));
    }
}
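
/* Usage note: cpu_exec() uses tb_add_jump() to chain blocks together.  After
 * a TB returns to the main loop, the next TB is looked up and, when direct
 * chaining is allowed, jump slot n (0 or 1) of the previous block is patched
 * to branch straight to tb_next->tc_ptr, bypassing the main loop on later
 * executions. */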

TranslationBlock *tb_find_pc(unsigned long pc_ptr);

#include "qemu-lock.h"

extern spinlock_t tb_lock;

extern int tb_invalidated_flag;

/* The return address may point to the start of the next instruction.
   Subtracting one gets us the call instruction itself.  */
#if defined(__s390__) && !defined(__s390x__)
# define GETPC() ((void*)(((unsigned long)__builtin_return_address(0) & 0x7fffffffUL) - 1))
#elif defined(__arm__)
/* Thumb return addresses have the low bit set, so we need to subtract two.
   This is still safe in ARM mode because instructions are 4 bytes.  */
# define GETPC() ((void *)((unsigned long)__builtin_return_address(0) - 2))
#else
# define GETPC() ((void *)((unsigned long)__builtin_return_address(0) - 1))
#endif
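
/* GETPC() is typically invoked from softmmu helper functions: the host
 * return address it yields is passed down as "retaddr" (for instance to
 * tlb_fill() below), so that on a fault cpu_restore_state() can map the host
 * PC back to the guest instruction that performed the access and
 * resynchronize the CPU state before raising the exception. */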

#if !defined(CONFIG_USER_ONLY)

extern CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
extern CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
extern void *io_mem_opaque[IO_MEM_NB_ENTRIES];

void tlb_fill(CPUState *env1, target_ulong addr, int is_write, int mmu_idx,
              void *retaddr);

#include "softmmu_defs.h"

#define ACCESS_TYPE (NB_MMU_MODES + 1)
#define MEMSUFFIX _code
#define env cpu_single_env
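
/* Each inclusion of softmmu_header.h below is instantiated with a different
 * DATA_SIZE and, together with MEMSUFFIX set to _code, generates inline
 * code-fetch accessors such as ldub_code, lduw_code, ldl_code and ldq_code,
 * which are used, for example, by get_page_addr_code() further down. */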

#define DATA_SIZE 1
#include "softmmu_header.h"

#define DATA_SIZE 2
#include "softmmu_header.h"

#define DATA_SIZE 4
#include "softmmu_header.h"

#define DATA_SIZE 8
#include "softmmu_header.h"

#undef ACCESS_TYPE
#undef MEMSUFFIX
#undef env

#endif

#if defined(CONFIG_USER_ONLY)
static inline tb_page_addr_t get_page_addr_code(CPUState *env1, target_ulong addr)
{
    return addr;
}
#else
/* NOTE: this function can trigger an exception */
/* NOTE2: the returned address is not exactly the physical address: it
   is the offset relative to phys_ram_base */
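/* The code-TLB entry for addr is probed first; on a miss, the dummy
 * ldub_code() load forces a TLB fill (which may raise a guest exception).
 * Once the entry is valid, the host address is formed by adding the TLB
 * addend and then converted back into a ram offset. */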
static inline tb_page_addr_t get_page_addr_code(CPUState *env1, target_ulong addr)
{
    int mmu_idx, page_index, pd;
    void *p;

    page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    mmu_idx = cpu_mmu_index(env1);
    if (unlikely(env1->tlb_table[mmu_idx][page_index].addr_code !=
                 (addr & TARGET_PAGE_MASK))) {
        ldub_code(addr);
    }
    pd = env1->tlb_table[mmu_idx][page_index].addr_code & ~TARGET_PAGE_MASK;
    if (pd > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_SPARC)
        cpu_unassigned_access(env1, addr, 0, 1, 0, 4);
#else
        cpu_abort(env1, "Trying to execute code outside RAM or ROM at 0x" TARGET_FMT_lx "\n", addr);
#endif
    }
    p = (void *)(unsigned long)addr
        + env1->tlb_table[mmu_idx][page_index].addend;
    return qemu_ram_addr_from_host_nofail(p);
}
#endif

typedef void (CPUDebugExcpHandler)(CPUState *env);

CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler);

/* vl.c */
extern int singlestep;

/* cpu-exec.c */
extern volatile sig_atomic_t exit_request;

/* Deterministic execution requires that IO only be performed on the last
   instruction of a TB so that interrupts take effect immediately.  */
static inline int can_do_io(CPUState *env)
{
    if (!use_icount) {
        return 1;
    }
    /* If not executing code then assume we are ok.  */
    if (!env->current_tb) {
        return 1;
    }
    return env->can_do_io != 0;
}

#endif