/*
 * internal execution defines for qemu
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
/* allow to see translation results - the slowdown should be negligible, so we leave it */
#define DEBUG_DISAS
/* is_jmp field values */
#define DISAS_NEXT    0 /* next instruction can be analyzed */
#define DISAS_JUMP    1 /* only pc was modified dynamically */
#define DISAS_UPDATE  2 /* cpu state was modified dynamically */
#define DISAS_TB_JUMP 3 /* only pc was modified statically */
typedef struct TranslationBlock TranslationBlock;
/* XXX: make safe guess about sizes */
#define MAX_OP_PER_INSTR 64
/* A Call op needs up to 6 + 2N parameters (N = number of arguments). */
#define MAX_OPC_PARAM 10
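/* Note: with MAX_OPC_PARAM == 10 the 6 + 2N formula above covers calls
   with at most N = 2 arguments (6 + 2*2 = 10); helpers taking more
   arguments would require raising this limit. */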
#define OPC_BUF_SIZE 512
#define OPC_MAX_SIZE (OPC_BUF_SIZE - MAX_OP_PER_INSTR)
/* Maximum size a TCG op can expand to.  This is complicated because a
   single op may require several host instructions and register reloads.
   For now take a wild guess at 128 bytes, which should allow at least
   a couple of fixup instructions per argument. */
#define TCG_MAX_OP_SIZE 128
#define OPPARAM_BUF_SIZE (OPC_BUF_SIZE * MAX_OPC_PARAM)
extern target_ulong gen_opc_pc[OPC_BUF_SIZE];
extern target_ulong gen_opc_npc[OPC_BUF_SIZE];
extern uint8_t gen_opc_cc_op[OPC_BUF_SIZE];
extern uint8_t gen_opc_instr_start[OPC_BUF_SIZE];
extern uint16_t gen_opc_icount[OPC_BUF_SIZE];
extern target_ulong gen_opc_jump_pc[2];
extern uint32_t gen_opc_hflags[OPC_BUF_SIZE];
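/* These side tables record, for each generated op, the guest state it
   belongs to (pc, condition-code op, instruction-start flag, icount...).
   cpu_restore_state() walks them to recover the guest PC from a host PC
   after a fault inside translated code. */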
typedef void (GenOpFunc)(void);
typedef void (GenOpFunc1)(long);
typedef void (GenOpFunc2)(long, long);
typedef void (GenOpFunc3)(long, long, long);
extern FILE *logfile;
extern int loglevel;
int gen_intermediate_code(CPUState *env, struct TranslationBlock *tb);
int gen_intermediate_code_pc(CPUState *env, struct TranslationBlock *tb);
void gen_pc_load(CPUState *env, struct TranslationBlock *tb,
                 unsigned long searched_pc, int pc_pos, void *puc);

unsigned long code_gen_max_block_size(void);
void cpu_gen_init(void);
int cpu_gen_code(CPUState *env, struct TranslationBlock *tb,
                 int *gen_code_size_ptr);
int cpu_restore_state(struct TranslationBlock *tb,
                      CPUState *env, unsigned long searched_pc,
                      void *puc);
int cpu_restore_state_copy(struct TranslationBlock *tb,
                           CPUState *env, unsigned long searched_pc,
                           void *puc);
void cpu_resume_from_signal(CPUState *env1, void *puc);
void cpu_io_recompile(CPUState *env, void *retaddr);
TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base, int flags,
                              int cflags);
void cpu_exec_init(CPUState *env);
int page_unprotect(target_ulong address, unsigned long pc, void *puc);
void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
                                   int is_cpu_write_access);
void tb_invalidate_page_range(target_ulong start, target_ulong end);
void tlb_flush_page(CPUState *env, target_ulong addr);
void tlb_flush(CPUState *env, int flush_global);
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu);
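/* Convenience wrapper around tlb_set_page_exec(): any page mapped
   readable is also entered as executable, matching targets that have no
   separate execute permission. */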
static inline int tlb_set_page(CPUState *env1, target_ulong vaddr,
                               target_phys_addr_t paddr, int prot,
                               int mmu_idx, int is_softmmu)
{
    if (prot & PAGE_READ)
        prot |= PAGE_EXEC;
    return tlb_set_page_exec(env1, vaddr, paddr, prot, mmu_idx, is_softmmu);
}
#define CODE_GEN_ALIGN 16 /* must be >= the size of an icache line */
#define CODE_GEN_PHYS_HASH_BITS 15
#define CODE_GEN_PHYS_HASH_SIZE (1 << CODE_GEN_PHYS_HASH_BITS)

#define MIN_CODE_GEN_BUFFER_SIZE (1024 * 1024)
/* estimated block size for TB allocation */
/* XXX: use a per code average code fragment size and modulate it
   according to the host CPU */
#if defined(CONFIG_SOFTMMU)
#define CODE_GEN_AVG_BLOCK_SIZE 128
#else
#define CODE_GEN_AVG_BLOCK_SIZE 64
#endif
#if defined(__powerpc__) || defined(__x86_64__) || defined(__arm__)
#define USE_DIRECT_JUMP
#endif
#if defined(__i386__) && !defined(_WIN32)
#define USE_DIRECT_JUMP
#endif
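/* On the hosts above, TB chaining patches the branch instruction inside
   the generated code itself (see tb_set_jmp_target1() below); on all
   other hosts the generated code jumps indirectly through tb_next[]. */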
struct TranslationBlock {
    target_ulong pc;      /* simulated PC corresponding to this block (EIP + CS base) */
    target_ulong cs_base; /* CS base for this block */
    uint64_t flags;       /* flags defining in which context the code was generated */
    uint16_t size;        /* size of target code for this block (1 <=
                             size <= TARGET_PAGE_SIZE) */
    uint16_t cflags;      /* compile flags */
#define CF_COUNT_MASK  0x7fff
#define CF_LAST_IO     0x8000 /* Last insn may be an IO access.  */

    uint8_t *tc_ptr;      /* pointer to the translated code */
    /* next matching tb for physical address. */
    struct TranslationBlock *phys_hash_next;
    /* first and second physical page containing code. The lower bit
       of the pointer tells the index in page_next[] */
    struct TranslationBlock *page_next[2];
    target_ulong page_addr[2];

    /* the following data are used to directly call another TB from
       the code of this one. */
    uint16_t tb_next_offset[2]; /* offset of original jump target */
#ifdef USE_DIRECT_JUMP
    uint16_t tb_jmp_offset[4]; /* offset of jump instruction */
#else
    unsigned long tb_next[2]; /* address of jump generated code */
#endif
    /* list of TBs jumping to this one. This is a circular list using
       the two least significant bits of the pointers to tell what is
       the next pointer: 0 = jmp_next[0], 1 = jmp_next[1], 2 =
       jmp_first */
    struct TranslationBlock *jmp_next[2];
    struct TranslationBlock *jmp_first;
    uint32_t icount;
};
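/* A sketch of how the jmp_first/jmp_next encoding is walked (the same
   pattern is used for unchaining in exec.c): the low two bits of each
   link select which field of the pointed-to TB to follow next, and the
   value 2 marks the return to the list head:

       TranslationBlock *tb1 = tb->jmp_first;
       for (;;) {
           int n1 = (long)tb1 & 3;
           tb1 = (TranslationBlock *)((long)tb1 & ~3);
           if (n1 == 2)
               break;
           tb1 = tb1->jmp_next[n1];
       }
*/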
static inline unsigned int tb_jmp_cache_hash_page(target_ulong pc)
{
    target_ulong tmp;
    tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
    return (tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK;
}

static inline unsigned int tb_jmp_cache_hash_func(target_ulong pc)
{
    target_ulong tmp;
    tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
    return (((tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK)
            | (tmp & TB_JMP_ADDR_MASK));
}

static inline unsigned int tb_phys_hash_func(unsigned long pc)
{
    return pc & (CODE_GEN_PHYS_HASH_SIZE - 1);
}
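/* tb_jmp_cache_hash_func() mixes page-derived bits into the upper part
   of the index while keeping the low in-page bits, so blocks at the
   same offset in neighbouring pages land in different tb_jmp_cache
   entries; tb_jmp_cache_hash_page() computes just the page-derived
   part, which lets a page flush visit every entry that page can map
   to. */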
TranslationBlock *tb_alloc(target_ulong pc);
void tb_free(TranslationBlock *tb);
void tb_flush(CPUState *env);
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2);
void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr);

extern TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
extern uint8_t *code_gen_ptr;
extern int code_gen_max_blocks;
#if defined(USE_DIRECT_JUMP)

#if defined(__powerpc__)
static inline void flush_icache_range(unsigned long start, unsigned long stop);
static inline void tb_set_jmp_target1(unsigned long jmp_addr, unsigned long addr)
{
    /* This must be in concord with INDEX_op_goto_tb inside tcg_out_op */
    uint32_t *ptr;
    long disp = addr - jmp_addr;
    unsigned long patch_size;

    ptr = (uint32_t *)jmp_addr;

    if ((disp << 6) >> 6 != disp) {
        /* displacement does not fit the 26-bit branch field: go via ctr */
        ptr[0] = 0x3c000000 | (addr >> 16);    /* lis 0,addr@h */
        ptr[1] = 0x60000000 | (addr & 0xffff); /* ori 0,0,addr@l */
        ptr[2] = 0x7c0903a6;                   /* mtctr 0 */
        ptr[3] = 0x4e800420;                   /* bctr */
        patch_size = 16;
    } else {
        /* patch the branch destination */
        if (disp != 16) {
            *ptr = 0x48000000 | (disp & 0x03fffffc); /* b disp */
            patch_size = 4;
        } else {
            /* branch to the instruction right after the slot: nop it out */
            ptr[0] = 0x60000000; /* nop */
            ptr[1] = 0x60000000;
            ptr[2] = 0x60000000;
            ptr[3] = 0x60000000;
            patch_size = 16;
        }
    }
    /* flush icache */
    flush_icache_range(jmp_addr, jmp_addr + patch_size);
}
#elif defined(__i386__) || defined(__x86_64__)
static inline void tb_set_jmp_target1(unsigned long jmp_addr, unsigned long addr)
{
    /* patch the branch destination */
    *(uint32_t *)jmp_addr = addr - (jmp_addr + 4);
    /* no need to flush icache explicitly */
}
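/* jmp_addr points at the rel32 operand of the generated jmp, so the
   stored value is the displacement from the end of that operand, as x86
   relative jumps expect; x86 keeps the instruction cache coherent with
   self-modifying code, hence no explicit flush. */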
#elif defined(__arm__)
static inline void tb_set_jmp_target1(unsigned long jmp_addr, unsigned long addr)
{
    register unsigned long _beg __asm ("a1");
    register unsigned long _end __asm ("a2");
    register unsigned long _flg __asm ("a3");

    /* we could use a ldr pc, [pc, #-4] kind of branch and avoid the flush */
    *(uint32_t *)jmp_addr |= ((addr - (jmp_addr + 8)) >> 2) & 0xffffff;

    /* flush icache */
    _beg = jmp_addr;
    _end = jmp_addr + 4;
    _flg = 0;
    __asm __volatile__ ("swi 0x9f0002" : : "r" (_beg), "r" (_end), "r" (_flg));
}
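/* swi 0x9f0002 is the Linux __ARM_NR_cacheflush syscall (begin, end,
   flags passed in a1-a3); the +8 in the displacement accounts for the
   ARM pipeline, where pc reads as the branch address plus 8. */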
#endif

static inline void tb_set_jmp_target(TranslationBlock *tb,
                                     int n, unsigned long addr)
{
    unsigned long offset;

    offset = tb->tb_jmp_offset[n];
    tb_set_jmp_target1((unsigned long)(tb->tc_ptr + offset), addr);
    offset = tb->tb_jmp_offset[n + 2];
    if (offset != 0xffff)
        tb_set_jmp_target1((unsigned long)(tb->tc_ptr + offset), addr);
}
#else

/* set the jump target */
static inline void tb_set_jmp_target(TranslationBlock *tb,
                                     int n, unsigned long addr)
{
    tb->tb_next[n] = addr;
}

#endif
static inline void tb_add_jump(TranslationBlock *tb, int n,
                               TranslationBlock *tb_next)
{
    /* NOTE: this test is only needed for thread safety */
    if (!tb->jmp_next[n]) {
        /* patch the native jump address */
        tb_set_jmp_target(tb, n, (unsigned long)tb_next->tc_ptr);

        /* add in TB jmp circular list */
        tb->jmp_next[n] = tb_next->jmp_first;
        tb_next->jmp_first = (TranslationBlock *)((long)(tb) | (n));
    }
}
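/* The main loop in cpu_exec() uses this to chain the TB that just
   finished to the one about to run, along the lines of (sketch; the low
   two bits of next_tb encode which outgoing jump was taken):

       tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
*/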
TranslationBlock *tb_find_pc(unsigned long pc_ptr);
#ifndef offsetof
#define offsetof(type, field) ((size_t) &((type *)0)->field)
#endif
#if defined(_WIN32)
#define ASM_DATA_SECTION ".section \".data\"\n"
#define ASM_PREVIOUS_SECTION ".section .text\n"
#elif defined(__APPLE__)
#define ASM_DATA_SECTION ".data\n"
#define ASM_PREVIOUS_SECTION ".text\n"
#else
#define ASM_DATA_SECTION ".section \".data\"\n"
#define ASM_PREVIOUS_SECTION ".previous\n"
#endif

#define ASM_OP_LABEL_NAME(n, opname) \
    ASM_NAME(__op_label) #n "." ASM_NAME(opname)
extern CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
extern CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
extern void *io_mem_opaque[IO_MEM_NB_ENTRIES];

#include "qemu-lock.h"

extern spinlock_t tb_lock;

extern int tb_invalidated_flag;
#if !defined(CONFIG_USER_ONLY)

void tlb_fill(target_ulong addr, int is_write, int mmu_idx,
              void *retaddr);

#define ACCESS_TYPE (NB_MMU_MODES + 1)
#define MEMSUFFIX _code
#define env cpu_single_env
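/* Each inclusion of softmmu_header.h below expands to the code-fetch
   accessor for one access size, named after DATA_SIZE and MEMSUFFIX:
   ldub_code(), lduw_code(), ldl_code() and ldq_code().  ACCESS_TYPE
   selects the dedicated code-access MMU mode, and env is temporarily
   aliased to cpu_single_env so the generated helpers need no explicit
   CPUState argument. */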
#define DATA_SIZE 1
#include "softmmu_header.h"

#define DATA_SIZE 2
#include "softmmu_header.h"

#define DATA_SIZE 4
#include "softmmu_header.h"

#define DATA_SIZE 8
#include "softmmu_header.h"

#undef ACCESS_TYPE
#undef MEMSUFFIX
#undef env

#endif
#if defined(CONFIG_USER_ONLY)
static inline target_ulong get_phys_addr_code(CPUState *env1, target_ulong addr)
{
    return addr;
}
#else
/* NOTE: this function can trigger an exception */
/* NOTE2: the returned address is not exactly the physical address: it
   is the offset relative to phys_ram_base */
static inline target_ulong get_phys_addr_code(CPUState *env1, target_ulong addr)
{
    int mmu_idx, page_index, pd;

    page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    mmu_idx = cpu_mmu_index(env1);
    if (__builtin_expect(env1->tlb_table[mmu_idx][page_index].addr_code !=
                         (addr & TARGET_PAGE_MASK), 0)) {
        ldub_code(addr);
    }
    pd = env1->tlb_table[mmu_idx][page_index].addr_code & ~TARGET_PAGE_MASK;
    if (pd > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
#if defined(TARGET_SPARC) || defined(TARGET_MIPS)
        do_unassigned_access(addr, 0, 1, 0);
#else
        cpu_abort(env1, "Trying to execute code outside RAM or ROM at 0x" TARGET_FMT_lx "\n", addr);
#endif
    }
    return addr + env1->tlb_table[mmu_idx][page_index].addend - (unsigned long)phys_ram_base;
}
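/* On a TLB miss the ldub_code() above takes the slow path into
   tlb_fill(), which either installs a valid code TLB entry or raises
   the guest exception, so addr_code is valid by the time the pd check
   runs. */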
/* Deterministic execution requires that IO only be performed on the last
   instruction of a TB so that interrupts take effect immediately.  */
static inline int can_do_io(CPUState *env)
{
    if (!use_icount)
        return 1;

    /* If not executing code then assume we are ok.  */
    if (!env->current_tb)
        return 1;

    return env->can_do_io != 0;
}
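/* When an instruction in the middle of a TB does attempt IO with icount
   enabled, the softmmu slow path calls cpu_io_recompile() (declared
   above), which retranslates the block so that the offending
   instruction terminates it; see the CF_LAST_IO compile flag. */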
#endif
#ifdef USE_KQEMU
#define KQEMU_MODIFY_PAGE_MASK (0xff & ~(VGA_DIRTY_FLAG | CODE_DIRTY_FLAG))

#define MSR_QPI_COMMBASE 0xfabe0010

int kqemu_init(CPUState *env);
int kqemu_cpu_exec(CPUState *env);
void kqemu_flush_page(CPUState *env, target_ulong addr);
void kqemu_flush(CPUState *env, int global);
void kqemu_set_notdirty(CPUState *env, ram_addr_t ram_addr);
void kqemu_modify_page(CPUState *env, ram_addr_t ram_addr);
void kqemu_set_phys_mem(uint64_t start_addr, ram_addr_t size,
                        ram_addr_t phys_offset);
void kqemu_cpu_interrupt(CPUState *env);
void kqemu_record_dump(void);

extern uint32_t kqemu_comm_base;
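/* kqemu can take over execution only in a restricted x86 state:
   protected mode, interrupts enabled, no pending interrupt shadow and
   no vm86 mode.  User code (CPL 3 without IO privilege) always
   qualifies, while kqemu_enabled == 2 additionally allows kernel
   code. */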
static inline int kqemu_is_ok(CPUState *env)
{
    return(env->kqemu_enabled &&
           (env->cr[0] & CR0_PE_MASK) &&
           !(env->hflags & HF_INHIBIT_IRQ_MASK) &&
           (env->eflags & IF_MASK) &&
           !(env->eflags & VM_MASK) &&
           (env->kqemu_enabled == 2 ||
            ((env->hflags & HF_CPL_MASK) == 3 &&
             (env->eflags & IOPL_MASK) != IOPL_MASK)));
}

#endif