/*
 * internal execution defines for qemu
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu-common.h"
/* allow to see translation results - the slowdown should be negligible, so we leave it */
#define DEBUG_DISAS
/* Page tracking code uses ram addresses in system mode, and virtual
   addresses in userspace mode.  Define tb_page_addr_t to be an appropriate
   type.  */
#if defined(CONFIG_USER_ONLY)
typedef abi_ulong tb_page_addr_t;
#else
typedef ram_addr_t tb_page_addr_t;
#endif
/* is_jmp field values */
#define DISAS_NEXT    0 /* next instruction can be analyzed */
#define DISAS_JUMP    1 /* only pc was modified dynamically */
#define DISAS_UPDATE  2 /* cpu state was modified dynamically */
#define DISAS_TB_JUMP 3 /* only pc was modified statically */
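
/*
 * Illustrative sketch (not part of this header): a target front end's
 * translation loop typically keeps decoding while is_jmp stays DISAS_NEXT
 * and closes the TB once an instruction changes it.  The names "dc" and
 * disas_one_insn() below are placeholders, not real QEMU identifiers:
 *
 *     dc->is_jmp = DISAS_NEXT;
 *     while (dc->is_jmp == DISAS_NEXT && num_insns < max_insns) {
 *         disas_one_insn(env, dc);    // may set DISAS_JUMP/UPDATE/TB_JUMP
 *         num_insns++;
 *     }
 *     // DISAS_TB_JUMP: the target is known statically, so a direct TB link
 *     // (see tb_add_jump below) is possible.
 */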
struct TranslationBlock;
typedef struct TranslationBlock TranslationBlock;
/* XXX: make safe guess about sizes */
#define MAX_OP_PER_INSTR 208

#if HOST_LONG_BITS == 32
#define MAX_OPC_PARAM_PER_ARG 2
#else
#define MAX_OPC_PARAM_PER_ARG 1
#endif
#define MAX_OPC_PARAM_IARGS 4
#define MAX_OPC_PARAM_OARGS 1
#define MAX_OPC_PARAM_ARGS (MAX_OPC_PARAM_IARGS + MAX_OPC_PARAM_OARGS)
/* A Call op needs up to 4 + 2N parameters on 32-bit archs,
 * and up to 4 + N parameters on 64-bit archs
 * (N = number of input arguments + output arguments).  */
#define MAX_OPC_PARAM (4 + (MAX_OPC_PARAM_PER_ARG * MAX_OPC_PARAM_ARGS))
#define OPC_BUF_SIZE 640
#define OPC_MAX_SIZE (OPC_BUF_SIZE - MAX_OP_PER_INSTR)
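
/*
 * Worked example (illustrative): with MAX_OPC_PARAM_IARGS = 4 and
 * MAX_OPC_PARAM_OARGS = 1, MAX_OPC_PARAM_ARGS is 5, so a call op needs at
 * most 4 + 2 * 5 = 14 opcode parameters on a 32-bit host
 * (MAX_OPC_PARAM_PER_ARG = 2) and 4 + 1 * 5 = 9 on a 64-bit host.
 * OPC_MAX_SIZE leaves MAX_OP_PER_INSTR slots of headroom in the opcode
 * buffer, so one more guest instruction can always be translated after the
 * threshold is reached without overflowing OPC_BUF_SIZE.
 */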
/* Maximum size a TCG op can expand to.  This is complicated because a
   single op may require several host instructions and register reloads.
   For now take a wild guess at 192 bytes, which should allow at least
   a couple of fixup instructions per argument.  */
#define TCG_MAX_OP_SIZE 192

#define OPPARAM_BUF_SIZE (OPC_BUF_SIZE * MAX_OPC_PARAM)
extern target_ulong gen_opc_pc[OPC_BUF_SIZE];
extern uint8_t gen_opc_instr_start[OPC_BUF_SIZE];
extern uint16_t gen_opc_icount[OPC_BUF_SIZE];
void gen_intermediate_code(CPUArchState *env, struct TranslationBlock *tb);
void gen_intermediate_code_pc(CPUArchState *env, struct TranslationBlock *tb);
void restore_state_to_opc(CPUArchState *env, struct TranslationBlock *tb,
                          int pc_pos);
void cpu_gen_init(void);
int cpu_gen_code(CPUArchState *env, struct TranslationBlock *tb,
                 int *gen_code_size_ptr);
int cpu_restore_state(struct TranslationBlock *tb,
                      CPUArchState *env, uintptr_t searched_pc);
void QEMU_NORETURN cpu_resume_from_signal(CPUArchState *env1, void *puc);
void QEMU_NORETURN cpu_io_recompile(CPUArchState *env, uintptr_t retaddr);
TranslationBlock *tb_gen_code(CPUArchState *env,
                              target_ulong pc, target_ulong cs_base, int flags,
                              int cflags);
void cpu_exec_init(CPUArchState *env);
void QEMU_NORETURN cpu_loop_exit(CPUArchState *env1);
int page_unprotect(target_ulong address, uintptr_t pc, void *puc);
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access);
#if !defined(CONFIG_USER_ONLY)
void tlb_flush_page(CPUArchState *env, target_ulong addr);
void tlb_flush(CPUArchState *env, int flush_global);
void tlb_set_page(CPUArchState *env, target_ulong vaddr,
                  target_phys_addr_t paddr, int prot,
                  int mmu_idx, target_ulong size);
void tb_invalidate_phys_addr(target_phys_addr_t addr);
#else
static inline void tlb_flush_page(CPUArchState *env, target_ulong addr)
{
}

static inline void tlb_flush(CPUArchState *env, int flush_global)
{
}
#endif
#define CODE_GEN_ALIGN           16 /* must be >= the size of an icache line */

#define CODE_GEN_PHYS_HASH_BITS     15
#define CODE_GEN_PHYS_HASH_SIZE     (1 << CODE_GEN_PHYS_HASH_BITS)

#define MIN_CODE_GEN_BUFFER_SIZE     (1024 * 1024)

/* estimated block size for TB allocation */
/* XXX: use a per code average code fragment size and modulate it
   according to the host CPU */
#if defined(CONFIG_SOFTMMU)
#define CODE_GEN_AVG_BLOCK_SIZE 128
#else
#define CODE_GEN_AVG_BLOCK_SIZE 64
#endif
#if defined(_ARCH_PPC) || defined(__x86_64__) || defined(__arm__) || defined(__i386__)
#define USE_DIRECT_JUMP
#elif defined(CONFIG_TCG_INTERPRETER)
#define USE_DIRECT_JUMP
#endif
struct TranslationBlock {
    target_ulong pc;      /* simulated PC corresponding to this block (EIP + CS base) */
    target_ulong cs_base; /* CS base for this block */
    uint64_t flags;       /* flags defining in which context the code was generated */
    uint16_t size;        /* size of target code for this block (1 <=
                             size <= TARGET_PAGE_SIZE) */
    uint16_t cflags;      /* compile flags */
#define CF_COUNT_MASK  0x7fff
#define CF_LAST_IO     0x8000 /* Last insn may be an IO access.  */

    uint8_t *tc_ptr;      /* pointer to the translated code */
    /* next matching tb for physical address. */
    struct TranslationBlock *phys_hash_next;
    /* first and second physical page containing code. The lower bit
       of the pointer tells the index in page_next[] */
    struct TranslationBlock *page_next[2];
    tb_page_addr_t page_addr[2];

    /* the following data are used to directly call another TB from
       the code of this one. */
    uint16_t tb_next_offset[2]; /* offset of original jump target */
#ifdef USE_DIRECT_JUMP
    uint16_t tb_jmp_offset[2];  /* offset of jump instruction */
#else
    uintptr_t tb_next[2];       /* address of jump generated code */
#endif
    /* list of TBs jumping to this one. This is a circular list using
       the two least significant bits of the pointers to tell what is
       the next pointer: 0 = jmp_next[0], 1 = jmp_next[1], 2 =
       jmp_first */
    struct TranslationBlock *jmp_next[2];
    struct TranslationBlock *jmp_first;
};
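
/*
 * Illustrative sketch (an assumption about usage, not code from this header):
 * page_next[] and the jump list keep small tags in the low bits of the
 * pointers, so a consumer must mask them off before dereferencing:
 *
 *     uintptr_t v = (uintptr_t)tb->jmp_first;
 *     TranslationBlock *next = (TranslationBlock *)(v & ~3);  // strip tag
 *     int which = v & 3;   // 0/1: reached via jmp_next[which], 2: jmp_first
 */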
static inline unsigned int tb_jmp_cache_hash_page(target_ulong pc)
{
    target_ulong tmp;
    tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
    return (tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK;
}
static inline unsigned int tb_jmp_cache_hash_func(target_ulong pc)
{
    target_ulong tmp;
    tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
    return (((tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK)
            | (tmp & TB_JMP_ADDR_MASK));
}
static inline unsigned int tb_phys_hash_func(tb_page_addr_t pc)
{
    return (pc >> 2) & (CODE_GEN_PHYS_HASH_SIZE - 1);
}
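
/*
 * Illustrative sketch (assumed caller, not part of this header): the virtual
 * hash indexes the per-CPU TB lookup cache before falling back to the
 * physical hash chain.  The tb_jmp_cache array exists in CPUArchState in
 * this QEMU version; the surrounding lookup code is only a sketch:
 *
 *     TranslationBlock *tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
 *     if (!tb || tb->pc != pc || tb->cs_base != cs_base || tb->flags != flags) {
 *         tb = ... walk tb_phys_hash[tb_phys_hash_func(phys_pc)] ...;
 *     }
 */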
void tb_free(TranslationBlock *tb);
void tb_flush(CPUArchState *env);
void tb_link_page(TranslationBlock *tb,
                  tb_page_addr_t phys_pc, tb_page_addr_t phys_page2);
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);
extern TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
#if defined(USE_DIRECT_JUMP)

#if defined(CONFIG_TCG_INTERPRETER)
static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
    /* patch the branch destination */
    *(uint32_t *)jmp_addr = addr - (jmp_addr + 4);
    /* no need to flush icache explicitly */
}
#elif defined(_ARCH_PPC)
void ppc_tb_set_jmp_target(unsigned long jmp_addr, unsigned long addr);
#define tb_set_jmp_target1 ppc_tb_set_jmp_target
#elif defined(__i386__) || defined(__x86_64__)
static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
    /* patch the branch destination */
    *(uint32_t *)jmp_addr = addr - (jmp_addr + 4);
    /* no need to flush icache explicitly */
}
#elif defined(__arm__)
static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
#if !QEMU_GNUC_PREREQ(4, 1)
    register unsigned long _beg __asm ("a1");
    register unsigned long _end __asm ("a2");
    register unsigned long _flg __asm ("a3");
#endif

    /* we could use a ldr pc, [pc, #-4] kind of branch and avoid the flush */
    *(uint32_t *)jmp_addr =
        (*(uint32_t *)jmp_addr & ~0xffffff)
        | (((addr - (jmp_addr + 8)) >> 2) & 0xffffff);

#if QEMU_GNUC_PREREQ(4, 1)
    __builtin___clear_cache((char *) jmp_addr, (char *) jmp_addr + 4);
#else
    /* flush icache */
    _beg = jmp_addr;
    _end = jmp_addr + 4;
    _flg = 0;
    __asm __volatile__ ("swi 0x9f0002" : : "r" (_beg), "r" (_end), "r" (_flg));
#endif
}
#else
#error tb_set_jmp_target1 is missing
#endif
static inline void tb_set_jmp_target(TranslationBlock *tb,
                                     int n, uintptr_t addr)
{
    uint16_t offset = tb->tb_jmp_offset[n];
    tb_set_jmp_target1((uintptr_t)(tb->tc_ptr + offset), addr);
}
#else

/* set the jump target */
static inline void tb_set_jmp_target(TranslationBlock *tb,
                                     int n, uintptr_t addr)
{
    tb->tb_next[n] = addr;
}

#endif
static inline void tb_add_jump(TranslationBlock *tb, int n,
                               TranslationBlock *tb_next)
{
    /* NOTE: this test is only needed for thread safety */
    if (!tb->jmp_next[n]) {
        /* patch the native jump address */
        tb_set_jmp_target(tb, n, (uintptr_t)tb_next->tc_ptr);

        /* add in TB jmp circular list */
        tb->jmp_next[n] = tb_next->jmp_first;
        tb_next->jmp_first = (TranslationBlock *)((uintptr_t)(tb) | (n));
    }
}
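
/*
 * Illustrative sketch (assumed consumer, not a function from this header):
 * unlinking walks the circular list built by tb_add_jump, using the tag in
 * the low bits to pick the next link; tag 2 marks the list head (jmp_first):
 *
 *     TranslationBlock *ptb = tb->jmp_first;
 *     while (((uintptr_t)ptb & 3) != 2) {
 *         int n = (uintptr_t)ptb & 3;
 *         TranslationBlock *prev = (TranslationBlock *)((uintptr_t)ptb & ~3);
 *         ptb = prev->jmp_next[n];   // follow the matching next pointer
 *     }
 */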
TranslationBlock *tb_find_pc(uintptr_t pc_ptr);
#include "qemu-lock.h"
extern spinlock_t tb_lock;

extern int tb_invalidated_flag;
/* The return address may point to the start of the next instruction.
   Subtracting one gets us the call instruction itself.  */
#if defined(CONFIG_TCG_INTERPRETER)
/* Alpha and SH4 user mode emulations and Softmmu call GETPC().
   For all others, GETPC remains undefined (which makes TCI a little faster).  */
# if defined(CONFIG_SOFTMMU) || defined(TARGET_ALPHA) || defined(TARGET_SH4)
extern uintptr_t tci_tb_ptr;
#  define GETPC() tci_tb_ptr
# endif
#elif defined(__s390__) && !defined(__s390x__)
# define GETPC() \
    (((uintptr_t)__builtin_return_address(0) & 0x7fffffffUL) - 1)
#elif defined(__arm__)
/* Thumb return addresses have the low bit set, so we need to subtract two.
   This is still safe in ARM mode because instructions are 4 bytes.  */
# define GETPC() ((uintptr_t)__builtin_return_address(0) - 2)
#else
# define GETPC() ((uintptr_t)__builtin_return_address(0) - 1)
#endif
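
/*
 * Illustrative sketch (assumed helper code, not part of this header): a
 * softmmu helper that faults typically captures GETPC() on entry and uses it
 * to locate the TB and roll the CPU state back to the guest instruction:
 *
 *     uintptr_t ra = GETPC();
 *     TranslationBlock *tb = tb_find_pc(ra);
 *     if (tb) {
 *         cpu_restore_state(tb, env, ra);
 *     }
 */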
#if !defined(CONFIG_USER_ONLY)

struct MemoryRegion *iotlb_to_region(target_phys_addr_t index);
uint64_t io_mem_read(struct MemoryRegion *mr, target_phys_addr_t addr,
                     unsigned size);
void io_mem_write(struct MemoryRegion *mr, target_phys_addr_t addr,
                  uint64_t value, unsigned size);

void tlb_fill(CPUArchState *env1, target_ulong addr, int is_write, int mmu_idx,
              uintptr_t retaddr);
#include "softmmu_defs.h"

#define ACCESS_TYPE (NB_MMU_MODES + 1)
#define MEMSUFFIX _code
#ifndef CONFIG_TCG_PASS_AREG0
#define env cpu_single_env
#endif

#define DATA_SIZE 1
#include "softmmu_header.h"

#define DATA_SIZE 2
#include "softmmu_header.h"

#define DATA_SIZE 4
#include "softmmu_header.h"

#define DATA_SIZE 8
#include "softmmu_header.h"

#undef ACCESS_TYPE
#undef MEMSUFFIX
#undef env

#endif
#if defined(CONFIG_USER_ONLY)
static inline tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
{
    return addr;
}
#else
tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr);
#endif
typedef void (CPUDebugExcpHandler)(CPUArchState *env);

CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler);
extern int singlestep;

extern volatile sig_atomic_t exit_request;
/* Deterministic execution requires that IO only be performed on the last
   instruction of a TB so that interrupts take effect immediately.  */
static inline int can_do_io(CPUArchState *env)
{
    if (!use_icount) {
        return 1;
    }
    /* If not executing code then assume we are ok.  */
    if (!env->current_tb) {
        return 1;
    }
    return env->can_do_io != 0;
}