PPC TCG Fixes
[qemu-kvm/fedora.git] / exec-all.h
/*
 * internal execution defines for qemu
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
/* allow seeing translation results - the slowdown should be negligible, so we leave it */
#define DEBUG_DISAS
/* is_jmp field values */
#define DISAS_NEXT    0 /* next instruction can be analyzed */
#define DISAS_JUMP    1 /* only pc was modified dynamically */
#define DISAS_UPDATE  2 /* cpu state was modified dynamically */
#define DISAS_TB_JUMP 3 /* only pc was modified statically */

struct TranslationBlock;

/* XXX: make safe guess about sizes */
#define MAX_OP_PER_INSTR 64
/* A Call op needs up to 6 + 2N parameters (N = number of arguments).  */
#define MAX_OPC_PARAM 10
#define OPC_BUF_SIZE 512
#define OPC_MAX_SIZE (OPC_BUF_SIZE - MAX_OP_PER_INSTR)
/* Maximum size a TCG op can expand to.  This is complicated because a
   single op may require several host instructions and register reloads.
   For now take a wild guess at 128 bytes, which should allow at least
   a couple of fixup instructions per argument.  */
#define TCG_MAX_OP_SIZE 128
#define OPPARAM_BUF_SIZE (OPC_BUF_SIZE * MAX_OPC_PARAM)

extern target_ulong gen_opc_pc[OPC_BUF_SIZE];
extern target_ulong gen_opc_npc[OPC_BUF_SIZE];
extern uint8_t gen_opc_cc_op[OPC_BUF_SIZE];
extern uint8_t gen_opc_instr_start[OPC_BUF_SIZE];
extern target_ulong gen_opc_jump_pc[2];
extern uint32_t gen_opc_hflags[OPC_BUF_SIZE];

typedef void (GenOpFunc)(void);
typedef void (GenOpFunc1)(long);
typedef void (GenOpFunc2)(long, long);
typedef void (GenOpFunc3)(long, long, long);

extern FILE *logfile;
extern int loglevel;

int gen_intermediate_code(CPUState *env, struct TranslationBlock *tb);
int gen_intermediate_code_pc(CPUState *env, struct TranslationBlock *tb);
void gen_pc_load(CPUState *env, struct TranslationBlock *tb,
                 unsigned long searched_pc, int pc_pos, void *puc);

unsigned long code_gen_max_block_size(void);
void cpu_gen_init(void);
int cpu_gen_code(CPUState *env, struct TranslationBlock *tb,
                 int *gen_code_size_ptr);
int cpu_restore_state(struct TranslationBlock *tb,
                      CPUState *env, unsigned long searched_pc,
                      void *puc);
int cpu_restore_state_copy(struct TranslationBlock *tb,
                           CPUState *env, unsigned long searched_pc,
                           void *puc);
void cpu_resume_from_signal(CPUState *env1, void *puc);
void cpu_exec_init(CPUState *env);
int page_unprotect(target_ulong address, unsigned long pc, void *puc);
void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
                                   int is_cpu_write_access);
void tb_invalidate_page_range(target_ulong start, target_ulong end);
void tlb_flush_page(CPUState *env, target_ulong addr);
void tlb_flush(CPUState *env, int flush_global);
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu);
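
/* Convenience wrapper around tlb_set_page_exec(): pages that are mapped
   readable are also entered into the TLB as executable. */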
static inline int tlb_set_page(CPUState *env1, target_ulong vaddr,
                               target_phys_addr_t paddr, int prot,
                               int mmu_idx, int is_softmmu)
{
    if (prot & PAGE_READ)
        prot |= PAGE_EXEC;
    return tlb_set_page_exec(env1, vaddr, paddr, prot, mmu_idx, is_softmmu);
}
#define CODE_GEN_ALIGN 16 /* must be >= the size of an icache line */
#define CODE_GEN_PHYS_HASH_BITS 15
#define CODE_GEN_PHYS_HASH_SIZE (1 << CODE_GEN_PHYS_HASH_BITS)

#define MIN_CODE_GEN_BUFFER_SIZE (1024 * 1024)

/* estimated block size for TB allocation */
/* XXX: use a per code average code fragment size and modulate it
   according to the host CPU */
#if defined(CONFIG_SOFTMMU)
#define CODE_GEN_AVG_BLOCK_SIZE 128
#else
#define CODE_GEN_AVG_BLOCK_SIZE 64
#endif

#if defined(__powerpc__) || defined(__x86_64__) || defined(__arm__)
#define USE_DIRECT_JUMP
#endif
#if defined(__i386__) && !defined(_WIN32)
#define USE_DIRECT_JUMP
#endif
typedef struct TranslationBlock {
    target_ulong pc;      /* simulated PC corresponding to this block (EIP + CS base) */
    target_ulong cs_base; /* CS base for this block */
    uint64_t flags;       /* flags defining in which context the code was generated */
    uint16_t size;        /* size of target code for this block (1 <=
                             size <= TARGET_PAGE_SIZE) */
    uint16_t cflags;      /* compile flags */
#define CF_TB_FP_USED  0x0002 /* fp ops are used in the TB */
#define CF_FP_USED     0x0004 /* fp ops are used in the TB or in a chained TB */
#define CF_SINGLE_INSN 0x0008 /* compile only a single instruction */

    uint8_t *tc_ptr;      /* pointer to the translated code */
    /* next matching tb for physical address. */
    struct TranslationBlock *phys_hash_next;
    /* first and second physical page containing code. The lower bit
       of the pointer tells the index in page_next[] */
    struct TranslationBlock *page_next[2];
    target_ulong page_addr[2];

    /* the following data are used to directly call another TB from
       the code of this one. */
    uint16_t tb_next_offset[2]; /* offset of original jump target */
#ifdef USE_DIRECT_JUMP
    uint16_t tb_jmp_offset[4]; /* offset of jump instruction */
#else
    unsigned long tb_next[2]; /* address of jump generated code */
#endif
    /* list of TBs jumping to this one. This is a circular list using
       the two least significant bits of the pointers to tell what is
       the next pointer: 0 = jmp_next[0], 1 = jmp_next[1], 2 =
       jmp_first */
    struct TranslationBlock *jmp_next[2];
    struct TranslationBlock *jmp_first;
} TranslationBlock;
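
/* The jump cache is indexed by guest virtual PC: the upper hash bits are
   derived from the page number and the lower bits from the offset within
   the page, so all entries of one page share a common prefix and can be
   located (and flushed) together via tb_jmp_cache_hash_page(). */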
static inline unsigned int tb_jmp_cache_hash_page(target_ulong pc)
{
    target_ulong tmp;
    tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
    return (tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK;
}

static inline unsigned int tb_jmp_cache_hash_func(target_ulong pc)
{
    target_ulong tmp;
    tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
    return (((tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK)
            | (tmp & TB_JMP_ADDR_MASK));
}

static inline unsigned int tb_phys_hash_func(unsigned long pc)
{
    return pc & (CODE_GEN_PHYS_HASH_SIZE - 1);
}

TranslationBlock *tb_alloc(target_ulong pc);
void tb_flush(CPUState *env);
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2);

extern TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
extern uint8_t *code_gen_ptr;
extern int code_gen_max_blocks;
#if defined(USE_DIRECT_JUMP)

#if defined(__powerpc__)
static inline void flush_icache_range(unsigned long start, unsigned long stop);
static inline void tb_set_jmp_target1(unsigned long jmp_addr, unsigned long addr)
{
    /* This must be in concord with INDEX_op_goto_tb inside tcg_out_op */
    uint32_t *ptr;
    long disp = addr - jmp_addr;
    unsigned long patch_size;

    ptr = (uint32_t *)jmp_addr;
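
    /* A direct "b" only has a 26-bit signed displacement; if the target is
       out of range, load the full address into CTR and branch through it. */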
    if ((disp << 6) >> 6 != disp) {
        ptr[0] = 0x3c000000 | (addr >> 16);    /* lis 0,addr@h */
        ptr[1] = 0x60000000 | (addr & 0xffff); /* ori 0,0,addr@l */
        ptr[2] = 0x7c0903a6;                   /* mtctr 0 */
        ptr[3] = 0x4e800420;                   /* bctr */
        patch_size = 16;
    } else {
        /* patch the branch destination */
        if (disp != 16) {
            *ptr = 0x48000000 | (disp & 0x03fffffc); /* b disp */
            patch_size = 4;
        } else {
            ptr[0] = 0x60000000; /* nop */
            ptr[1] = 0x60000000;
            ptr[2] = 0x60000000;
            ptr[3] = 0x60000000;
            patch_size = 16;
        }
    }
    /* flush icache */
    flush_icache_range(jmp_addr, jmp_addr + patch_size);
}
#elif defined(__i386__) || defined(__x86_64__)
static inline void tb_set_jmp_target1(unsigned long jmp_addr, unsigned long addr)
{
    /* patch the branch destination */
    *(uint32_t *)jmp_addr = addr - (jmp_addr + 4);
    /* no need to flush icache explicitly */
}
#elif defined(__arm__)
static inline void tb_set_jmp_target1(unsigned long jmp_addr, unsigned long addr)
{
    register unsigned long _beg __asm ("a1");
    register unsigned long _end __asm ("a2");
    register unsigned long _flg __asm ("a3");

    /* we could use a ldr pc, [pc, #-4] kind of branch and avoid the flush */
    *(uint32_t *)jmp_addr |= ((addr - (jmp_addr + 8)) >> 2) & 0xffffff;

    /* flush icache */
    _beg = jmp_addr;
    _end = jmp_addr + 4;
    _flg = 0;
    __asm __volatile__ ("swi 0x9f0002" : : "r" (_beg), "r" (_end), "r" (_flg));
}
#endif

static inline void tb_set_jmp_target(TranslationBlock *tb,
                                     int n, unsigned long addr)
{
    unsigned long offset;

    offset = tb->tb_jmp_offset[n];
    tb_set_jmp_target1((unsigned long)(tb->tc_ptr + offset), addr);
    offset = tb->tb_jmp_offset[n + 2];
    if (offset != 0xffff)
        tb_set_jmp_target1((unsigned long)(tb->tc_ptr + offset), addr);
}

#else

/* set the jump target */
static inline void tb_set_jmp_target(TranslationBlock *tb,
                                     int n, unsigned long addr)
{
    tb->tb_next[n] = addr;
}

#endif
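
/* Chain jump slot n of tb to tb_next: patch the generated code and record
   the link in tb_next's circular jmp_first list so the chain can be undone
   when tb_next is invalidated. */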
static inline void tb_add_jump(TranslationBlock *tb, int n,
                               TranslationBlock *tb_next)
{
    /* NOTE: this test is only needed for thread safety */
    if (!tb->jmp_next[n]) {
        /* patch the native jump address */
        tb_set_jmp_target(tb, n, (unsigned long)tb_next->tc_ptr);

        /* add in TB jmp circular list */
        tb->jmp_next[n] = tb_next->jmp_first;
        tb_next->jmp_first = (TranslationBlock *)((long)(tb) | (n));
    }
}

TranslationBlock *tb_find_pc(unsigned long pc_ptr);

#ifndef offsetof
#define offsetof(type, field) ((size_t) &((type *)0)->field)
#endif

#if defined(_WIN32)
#define ASM_DATA_SECTION ".section \".data\"\n"
#define ASM_PREVIOUS_SECTION ".section .text\n"
#elif defined(__APPLE__)
#define ASM_DATA_SECTION ".data\n"
#define ASM_PREVIOUS_SECTION ".text\n"
#else
#define ASM_DATA_SECTION ".section \".data\"\n"
#define ASM_PREVIOUS_SECTION ".previous\n"
#endif

#define ASM_OP_LABEL_NAME(n, opname) \
    ASM_NAME(__op_label) #n "." ASM_NAME(opname)

extern CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
extern CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
extern void *io_mem_opaque[IO_MEM_NB_ENTRIES];

#if defined(__hppa__)

typedef int spinlock_t[4];

#define SPIN_LOCK_UNLOCKED { 1, 1, 1, 1 }

static inline void resetlock (spinlock_t *p)
{
    (*p)[0] = (*p)[1] = (*p)[2] = (*p)[3] = 1;
}

#else

typedef int spinlock_t;

#define SPIN_LOCK_UNLOCKED 0

static inline void resetlock (spinlock_t *p)
{
    *p = SPIN_LOCK_UNLOCKED;
}

#endif
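
/* Every testandset() variant below returns 0 when the lock was acquired
   and non-zero when it was already held, so spin_lock() can simply loop
   on the return value. */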
#if defined(__powerpc__)
static inline int testandset (int *p)
{
    int ret;
    __asm__ __volatile__ (
        "0:    lwarx %0,0,%1\n"
        "      xor. %0,%3,%0\n"
        "      bne 1f\n"
        "      stwcx. %2,0,%1\n"
        "      bne- 0b\n"
        "1:    "
        : "=&r" (ret)
        : "r" (p), "r" (1), "r" (0)
        : "cr0", "memory");
    return ret;
}
#elif defined(__i386__)
static inline int testandset (int *p)
{
    long int readval = 0;

    __asm__ __volatile__ ("lock; cmpxchgl %2, %0"
                          : "+m" (*p), "+a" (readval)
                          : "r" (1)
                          : "cc");
    return readval;
}
#elif defined(__x86_64__)
static inline int testandset (int *p)
{
    long int readval = 0;

    __asm__ __volatile__ ("lock; cmpxchgl %2, %0"
                          : "+m" (*p), "+a" (readval)
                          : "r" (1)
                          : "cc");
    return readval;
}
#elif defined(__s390__)
static inline int testandset (int *p)
{
    int ret;

    __asm__ __volatile__ ("0: cs    %0,%1,0(%2)\n"
                          "   jl    0b"
                          : "=&d" (ret)
                          : "r" (1), "a" (p), "0" (*p)
                          : "cc", "memory" );
    return ret;
}
#elif defined(__alpha__)
static inline int testandset (int *p)
{
    int ret;
    unsigned long one;

    __asm__ __volatile__ ("0: mov 1,%2\n"
                          "   ldl_l %0,%1\n"
                          "   stl_c %2,%1\n"
                          "   beq %2,1f\n"
                          ".subsection 2\n"
                          "1: br 0b\n"
                          ".previous"
                          : "=r" (ret), "=m" (*p), "=r" (one)
                          : "m" (*p));
    return ret;
}
#elif defined(__sparc__)
static inline int testandset (int *p)
{
    int ret;

    __asm__ __volatile__("ldstub [%1], %0"
                         : "=r" (ret)
                         : "r" (p)
                         : "memory");

    return (ret ? 1 : 0);
}
#elif defined(__arm__)
static inline int testandset (int *spinlock)
{
    register unsigned int ret;
    __asm__ __volatile__("swp %0, %1, [%2]"
                         : "=r"(ret)
                         : "0"(1), "r"(spinlock));

    return ret;
}
#elif defined(__mc68000)
static inline int testandset (int *p)
{
    char ret;
    __asm__ __volatile__("tas %1; sne %0"
                         : "=r" (ret)
                         : "m" (*p)
                         : "cc","memory");
    return ret;
}
#elif defined(__hppa__)

/* Because malloc only guarantees 8-byte alignment for malloc'd data,
   and GCC only guarantees 8-byte alignment for stack locals, we can't
   be assured of 16-byte alignment for atomic lock data even if we
   specify "__attribute ((aligned(16)))" in the type declaration.  So,
   we use a struct containing an array of four ints for the atomic lock
   type and dynamically select the 16-byte aligned int from the array
   for the semaphore.  */
#define __PA_LDCW_ALIGNMENT 16
static inline void *ldcw_align (void *p) {
    unsigned long a = (unsigned long)p;
    a = (a + __PA_LDCW_ALIGNMENT - 1) & ~(__PA_LDCW_ALIGNMENT - 1);
    return (void *)a;
}

static inline int testandset (spinlock_t *p)
{
    unsigned int ret;
    p = ldcw_align(p);
    __asm__ __volatile__("ldcw 0(%1),%0"
                         : "=r" (ret)
                         : "r" (p)
                         : "memory" );
    return !ret;
}

#elif defined(__ia64)

#include <ia64intrin.h>

static inline int testandset (int *p)
{
    return __sync_lock_test_and_set (p, 1);
}
#elif defined(__mips__)
static inline int testandset (int *p)
{
    int ret;

    __asm__ __volatile__ (
        "   .set push       \n"
        "   .set noat       \n"
        "   .set mips2      \n"
        "1: li   $1, 1      \n"
        "   ll   %0, %1     \n"
        "   sc   $1, %1     \n"
        "   beqz $1, 1b     \n"
        "   .set pop        "
        : "=r" (ret), "+R" (*p)
        :
        : "memory");

    return ret;
}
#else
#error unimplemented CPU support
#endif

#if defined(CONFIG_USER_ONLY)
static inline void spin_lock(spinlock_t *lock)
{
    while (testandset(lock));
}

static inline void spin_unlock(spinlock_t *lock)
{
    resetlock(lock);
}

static inline int spin_trylock(spinlock_t *lock)
{
    return !testandset(lock);
}
#else
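/* In system emulation the translator and TB bookkeeping run in a single
   thread, so the locks can compile to nothing. */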
static inline void spin_lock(spinlock_t *lock)
{
}

static inline void spin_unlock(spinlock_t *lock)
{
}

static inline int spin_trylock(spinlock_t *lock)
{
    return 1;
}
#endif

extern spinlock_t tb_lock;

extern int tb_invalidated_flag;

#if !defined(CONFIG_USER_ONLY)

void tlb_fill(target_ulong addr, int is_write, int mmu_idx,
              void *retaddr);
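
/* Each inclusion of softmmu_header.h instantiates the inline guest memory
   accessors for one DATA_SIZE; with MEMSUFFIX set to _code this produces
   ldub_code(), lduw_code(), ldl_code() and ldq_code(). */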
#define ACCESS_TYPE (NB_MMU_MODES + 1)
#define MEMSUFFIX _code
#define env cpu_single_env

#define DATA_SIZE 1
#include "softmmu_header.h"

#define DATA_SIZE 2
#include "softmmu_header.h"

#define DATA_SIZE 4
#include "softmmu_header.h"

#define DATA_SIZE 8
#include "softmmu_header.h"

#undef ACCESS_TYPE
#undef MEMSUFFIX
#undef env

#endif

#if defined(CONFIG_USER_ONLY)
static inline target_ulong get_phys_addr_code(CPUState *env1, target_ulong addr)
{
    return addr;
}
#else
/* NOTE: this function can trigger an exception */
/* NOTE2: the returned address is not exactly the physical address: it
   is the offset relative to phys_ram_base */
static inline target_ulong get_phys_addr_code(CPUState *env1, target_ulong addr)
{
    int mmu_idx, page_index, pd;

    page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    mmu_idx = cpu_mmu_index(env1);
    if (__builtin_expect(env1->tlb_table[mmu_idx][page_index].addr_code !=
                         (addr & TARGET_PAGE_MASK), 0)) {
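        /* TLB miss: the dummy load goes through the slow path and fills
           the TLB entry (or raises the appropriate fault). */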
        ldub_code(addr);
    }
    pd = env1->tlb_table[mmu_idx][page_index].addr_code & ~TARGET_PAGE_MASK;
    if (pd > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
#if defined(TARGET_SPARC) || defined(TARGET_MIPS)
        do_unassigned_access(addr, 0, 1, 0);
#else
        cpu_abort(env1, "Trying to execute code outside RAM or ROM at 0x" TARGET_FMT_lx "\n", addr);
#endif
    }
    return addr + env1->tlb_table[mmu_idx][page_index].addend - (unsigned long)phys_ram_base;
}
#endif

#ifdef USE_KQEMU
#define KQEMU_MODIFY_PAGE_MASK (0xff & ~(VGA_DIRTY_FLAG | CODE_DIRTY_FLAG))

#define MSR_QPI_COMMBASE 0xfabe0010

int kqemu_init(CPUState *env);
int kqemu_cpu_exec(CPUState *env);
void kqemu_flush_page(CPUState *env, target_ulong addr);
void kqemu_flush(CPUState *env, int global);
void kqemu_set_notdirty(CPUState *env, ram_addr_t ram_addr);
void kqemu_modify_page(CPUState *env, ram_addr_t ram_addr);
void kqemu_set_phys_mem(uint64_t start_addr, ram_addr_t size,
                        ram_addr_t phys_offset);
void kqemu_cpu_interrupt(CPUState *env);
void kqemu_record_dump(void);

extern uint32_t kqemu_comm_base;
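
/* kqemu can only execute the guest while it is in protected mode with
   interrupts enabled and outside vm86 mode; unless full virtualization is
   on (kqemu_enabled == 2), it is further restricted to unprivileged
   (CPL 3) code running with IOPL < 3. */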
static inline int kqemu_is_ok(CPUState *env)
{
    return(env->kqemu_enabled &&
           (env->cr[0] & CR0_PE_MASK) &&
           !(env->hflags & HF_INHIBIT_IRQ_MASK) &&
           (env->eflags & IF_MASK) &&
           !(env->eflags & VM_MASK) &&
           (env->kqemu_enabled == 2 ||
            ((env->hflags & HF_CPL_MASK) == 3 &&
             (env->eflags & IOPL_MASK) != IOPL_MASK)));
}

#endif