/*
 * internal execution defines for qemu
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
/* allow seeing translation results - the slowdown should be negligible, so we leave it */
#define DEBUG_DISAS
#ifndef glue
#define xglue(x, y) x ## y
#define glue(x, y) xglue(x, y)
#define stringify(s) tostring(s)
#define tostring(s) #s
#endif
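/* Illustrative note (not part of the original header): glue() pastes two
   tokens together after macro expansion and stringify() turns a token into a
   string literal, e.g. glue(ldub, _code) expands to the identifier ldub_code
   and stringify(foo) to "foo".  The softmmu accessors and the __op_label
   names further below are built this way. */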
#if GCC_MAJOR < 3
#define __builtin_expect(x, n) (x)
#endif
#ifdef __i386__
#define REGPARM(n) __attribute((regparm(n)))
#else
#define REGPARM(n)
#endif
/* is_jmp field values */
#define DISAS_NEXT    0 /* next instruction can be analyzed */
#define DISAS_JUMP    1 /* only pc was modified dynamically */
#define DISAS_UPDATE  2 /* cpu state was modified dynamically */
#define DISAS_TB_JUMP 3 /* only pc was modified statically */
struct TranslationBlock;

/* XXX: make safe guess about sizes */
#define MAX_OP_PER_INSTR 32
#define OPC_BUF_SIZE 512
#define OPC_MAX_SIZE (OPC_BUF_SIZE - MAX_OP_PER_INSTR)

#define OPPARAM_BUF_SIZE (OPC_BUF_SIZE * 3)

extern uint16_t gen_opc_buf[OPC_BUF_SIZE];
extern uint32_t gen_opparam_buf[OPPARAM_BUF_SIZE];
extern uint32_t gen_opc_pc[OPC_BUF_SIZE];
extern uint8_t gen_opc_cc_op[OPC_BUF_SIZE];
extern uint8_t gen_opc_instr_start[OPC_BUF_SIZE];
typedef void (GenOpFunc)(void);
typedef void (GenOpFunc1)(long);
typedef void (GenOpFunc2)(long, long);
typedef void (GenOpFunc3)(long, long, long);
#if defined(TARGET_I386)

void optimize_flags_init(void);

#endif
extern FILE *logfile;
extern int loglevel;

int gen_intermediate_code(CPUState *env, struct TranslationBlock *tb);
int gen_intermediate_code_pc(CPUState *env, struct TranslationBlock *tb);
void dump_ops(const uint16_t *opc_buf, const uint32_t *opparam_buf);
int cpu_gen_code(CPUState *env, struct TranslationBlock *tb,
                 int max_code_size, int *gen_code_size_ptr);
int cpu_restore_state(struct TranslationBlock *tb,
                      CPUState *env, unsigned long searched_pc,
                      void *puc);
int cpu_gen_code_copy(CPUState *env, struct TranslationBlock *tb,
                      int max_code_size, int *gen_code_size_ptr);
int cpu_restore_state_copy(struct TranslationBlock *tb,
                           CPUState *env, unsigned long searched_pc,
                           void *puc);
void cpu_resume_from_signal(CPUState *env1, void *puc);
void cpu_exec_init(void);
int page_unprotect(unsigned long address, unsigned long pc, void *puc);
void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
                                   int is_cpu_write_access);
void tb_invalidate_page_range(target_ulong start, target_ulong end);
void tlb_flush_page(CPUState *env, target_ulong addr);
void tlb_flush(CPUState *env, int flush_global);
int tlb_set_page(CPUState *env, target_ulong vaddr,
                 target_phys_addr_t paddr, int prot,
                 int is_user, int is_softmmu);
#define CODE_GEN_MAX_SIZE        65536
#define CODE_GEN_ALIGN           16 /* must be >= the size of an icache line */

#define CODE_GEN_HASH_BITS     15
#define CODE_GEN_HASH_SIZE     (1 << CODE_GEN_HASH_BITS)

#define CODE_GEN_PHYS_HASH_BITS     15
#define CODE_GEN_PHYS_HASH_SIZE     (1 << CODE_GEN_PHYS_HASH_BITS)
/* maximum total translated code allocated */

/* NOTE: the translated code area cannot be too big because on some
   archs the range of "fast" function calls is limited. Here is a
   summary of the ranges:

   i386  : signed 32 bits
   arm   : signed 26 bits
   ppc   : signed 24 bits
   sparc : signed 32 bits
   alpha : signed 23 bits
*/

#if defined(__alpha__)
#define CODE_GEN_BUFFER_SIZE     (2 * 1024 * 1024)
#elif defined(__powerpc__)
#define CODE_GEN_BUFFER_SIZE     (6 * 1024 * 1024)
#else
#define CODE_GEN_BUFFER_SIZE     (8 * 1024 * 1024)
#endif

//#define CODE_GEN_BUFFER_SIZE     (128 * 1024)
/* estimated block size for TB allocation */
/* XXX: use a per code average code fragment size and modulate it
   according to the host CPU */
#if defined(CONFIG_SOFTMMU)
#define CODE_GEN_AVG_BLOCK_SIZE 128
#else
#define CODE_GEN_AVG_BLOCK_SIZE 64
#endif

#define CODE_GEN_MAX_BLOCKS    (CODE_GEN_BUFFER_SIZE / CODE_GEN_AVG_BLOCK_SIZE)

#if defined(__powerpc__)
#define USE_DIRECT_JUMP
#endif
#if defined(__i386__) && !defined(_WIN32)
#define USE_DIRECT_JUMP
#endif
typedef struct TranslationBlock {
    target_ulong pc;      /* simulated PC corresponding to this block (EIP + CS base) */
    target_ulong cs_base; /* CS base for this block */
    unsigned int flags;   /* flags defining in which context the code was generated */
    uint16_t size;        /* size of target code for this block (1 <=
                             size <= TARGET_PAGE_SIZE) */
    uint16_t cflags;      /* compile flags */
#define CF_CODE_COPY   0x0001 /* block was generated in code copy mode */
#define CF_TB_FP_USED  0x0002 /* fp ops are used in the TB */
#define CF_FP_USED     0x0004 /* fp ops are used in the TB or in a chained TB */
#define CF_SINGLE_INSN 0x0008 /* compile only a single instruction */

    uint8_t *tc_ptr;      /* pointer to the translated code */
    struct TranslationBlock *hash_next; /* next matching tb for virtual address */
    /* next matching tb for physical address. */
    struct TranslationBlock *phys_hash_next;
    /* first and second physical page containing code. The lower bit
       of the pointer tells the index in page_next[] */
    struct TranslationBlock *page_next[2];
    target_ulong page_addr[2];

    /* the following data are used to directly call another TB from
       the code of this one. */
    uint16_t tb_next_offset[2]; /* offset of original jump target */
#ifdef USE_DIRECT_JUMP
    uint16_t tb_jmp_offset[4]; /* offset of jump instruction */
#else
    uint32_t tb_next[2]; /* address of jump generated code */
#endif
    /* list of TBs jumping to this one. This is a circular list using
       the two least significant bits of the pointers to tell what is
       the next pointer: 0 = jmp_next[0], 1 = jmp_next[1], 2 =
       jmp_first */
    struct TranslationBlock *jmp_next[2];
    struct TranslationBlock *jmp_first;
} TranslationBlock;
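
/* Illustrative sketch (not part of the original header): page_next[] and the
   jmp_first/jmp_next list keep a small tag in the low bits of each pointer.
   A hypothetical helper splitting a tagged jump-list link back into the TB
   pointer and the slot index (the decoding the list walkers in exec.c are
   expected to use, assuming TB pointers are at least 4-byte aligned) could
   look like this: */
static inline TranslationBlock *tb_jmp_untag_example(TranslationBlock *link,
                                                     int *pindex)
{
    *pindex = (long)link & 3;  /* 0/1: link came from jmp_next[n]; 2: jmp_first */
    return (TranslationBlock *)((long)link & ~3);
}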
static inline unsigned int tb_hash_func(unsigned long pc)
{
    return pc & (CODE_GEN_HASH_SIZE - 1);
}

static inline unsigned int tb_phys_hash_func(unsigned long pc)
{
    return pc & (CODE_GEN_PHYS_HASH_SIZE - 1);
}
TranslationBlock *tb_alloc(unsigned long pc);
void tb_flush(CPUState *env);
void tb_link(TranslationBlock *tb);
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2);

extern TranslationBlock *tb_hash[CODE_GEN_HASH_SIZE];
extern TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];

extern uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE];
extern uint8_t *code_gen_ptr;
/* find a translation block in the translation cache. If not found,
   return NULL and the pointer to the last element of the list in pptb */
static inline TranslationBlock *tb_find(TranslationBlock ***pptb,
                                        target_ulong pc,
                                        target_ulong cs_base,
                                        unsigned int flags)
{
    TranslationBlock **ptb, *tb;
    unsigned int h;

    h = tb_hash_func(pc);
    ptb = &tb_hash[h];
    for(;;) {
        tb = *ptb;
        if (!tb)
            break;
        if (tb->pc == pc && tb->cs_base == cs_base && tb->flags == flags)
            return tb;
        ptb = &tb->hash_next;
    }
    *pptb = ptb;
    return NULL;
}
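
/* Illustrative sketch (not part of the original header): the typical
   lookup-then-insert pattern around tb_find().  On a miss, *pptb is the
   hash-chain slot where a freshly allocated TB can be hooked.  The helper
   name is hypothetical, tb_alloc() is assumed to record pc, and a real
   caller would also translate the block and register it with
   tb_link()/tb_link_phys(). */
static inline TranslationBlock *tb_lookup_example(target_ulong pc,
                                                  target_ulong cs_base,
                                                  unsigned int flags)
{
    TranslationBlock **pptb, *tb;

    tb = tb_find(&pptb, pc, cs_base, flags);
    if (tb)
        return tb;              /* already translated and hashed */
    tb = tb_alloc(pc);          /* may return NULL when the cache is full */
    if (!tb)
        return NULL;
    tb->cs_base = cs_base;
    tb->flags = flags;
    /* ... generate host code into tb->tc_ptr here ... */
    tb->hash_next = NULL;
    *pptb = tb;                 /* append to the virtual-address hash chain */
    return tb;
}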
#if defined(USE_DIRECT_JUMP)

#if defined(__powerpc__)
static inline void tb_set_jmp_target1(unsigned long jmp_addr, unsigned long addr)
{
    uint32_t val, *ptr;

    /* patch the branch destination */
    ptr = (uint32_t *)jmp_addr;
    val = *ptr;
    val = (val & ~0x03fffffc) | ((addr - jmp_addr) & 0x03fffffc);
    *ptr = val;
    /* flush icache */
    asm volatile ("dcbst 0,%0" : : "r"(ptr) : "memory");
    asm volatile ("sync" : : : "memory");
    asm volatile ("icbi 0,%0" : : "r"(ptr) : "memory");
    asm volatile ("sync" : : : "memory");
    asm volatile ("isync" : : : "memory");
}
#elif defined(__i386__)
static inline void tb_set_jmp_target1(unsigned long jmp_addr, unsigned long addr)
{
    /* patch the branch destination */
    *(uint32_t *)jmp_addr = addr - (jmp_addr + 4);
    /* no need to flush icache explicitly */
}
#endif
static inline void tb_set_jmp_target(TranslationBlock *tb,
                                     int n, unsigned long addr)
{
    unsigned long offset;

    offset = tb->tb_jmp_offset[n];
    tb_set_jmp_target1((unsigned long)(tb->tc_ptr + offset), addr);
    offset = tb->tb_jmp_offset[n + 2];
    if (offset != 0xffff)
        tb_set_jmp_target1((unsigned long)(tb->tc_ptr + offset), addr);
}

#else

/* set the jump target */
static inline void tb_set_jmp_target(TranslationBlock *tb,
                                     int n, unsigned long addr)
{
    tb->tb_next[n] = addr;
}

#endif
static inline void tb_add_jump(TranslationBlock *tb, int n,
                               TranslationBlock *tb_next)
{
    /* NOTE: this test is only needed for thread safety */
    if (!tb->jmp_next[n]) {
        /* patch the native jump address */
        tb_set_jmp_target(tb, n, (unsigned long)tb_next->tc_ptr);

        /* add in TB jmp circular list */
        tb->jmp_next[n] = tb_next->jmp_first;
        tb_next->jmp_first = (TranslationBlock *)((long)(tb) | (n));
    }
}
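
/* Illustrative sketch (not part of the original header): walking the
   circular list that tb_add_jump() maintains.  Each link stores the slot
   index in its two low bits; the walk stops at the entry tagged 2, which is
   assumed to be the TB itself as initialized by the linking code in exec.c.
   The function name is hypothetical. */
static inline int tb_count_jumpers_example(TranslationBlock *tb)
{
    TranslationBlock *link, *tb1;
    int n, count = 0;

    link = tb->jmp_first;
    for (;;) {
        n = (long)link & 3;
        tb1 = (TranslationBlock *)((long)link & ~3);
        if (n == 2)             /* back at tb itself: end of the list */
            break;
        count++;                /* tb1 jumps to tb through its slot n */
        link = tb1->jmp_next[n];
    }
    return count;
}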
TranslationBlock *tb_find_pc(unsigned long pc_ptr);

#ifndef offsetof
#define offsetof(type, field) ((size_t) &((type *)0)->field)
#endif
#if defined(__powerpc__)

/* we patch the jump instruction directly */
#define JUMP_TB(opname, tbparam, n, eip)\
do {\
    asm volatile (".section \".data\"\n"\
                  "__op_label" #n "." stringify(opname) ":\n"\
                  ".long 1f\n"\
                  ".previous\n"\
                  "b __op_jmp" #n "\n"\
                  "1:\n");\
    T0 = (long)(tbparam) + (n);\
    EIP = eip;\
    EXIT_TB();\
} while (0)

#define JUMP_TB2(opname, tbparam, n)\
do {\
    asm volatile ("b __op_jmp" #n "\n");\
} while (0)

#elif defined(__i386__) && defined(USE_DIRECT_JUMP)

#ifdef _WIN32
#define ASM_PREVIOUS_SECTION ".section .text\n"
#else
#define ASM_PREVIOUS_SECTION ".previous\n"
#endif

/* we patch the jump instruction directly */
#define JUMP_TB(opname, tbparam, n, eip)\
do {\
    asm volatile (".section .data\n"\
                  "__op_label" #n "." stringify(opname) ":\n"\
                  ".long 1f\n"\
                  ASM_PREVIOUS_SECTION \
                  "jmp __op_jmp" #n "\n"\
                  "1:\n");\
    T0 = (long)(tbparam) + (n);\
    EIP = eip;\
    EXIT_TB();\
} while (0)

#define JUMP_TB2(opname, tbparam, n)\
do {\
    asm volatile ("jmp __op_jmp" #n "\n");\
} while (0)

#else

/* jump to next block operations (more portable code, does not need
   cache flushing, but slower because of indirect jump) */
#define JUMP_TB(opname, tbparam, n, eip)\
do {\
    static void __attribute__((unused)) *__op_label ## n = &&label ## n;\
    static void __attribute__((unused)) *dummy ## n = &&dummy_label ## n;\
    goto *(void *)(((TranslationBlock *)tbparam)->tb_next[n]);\
label ## n:\
    T0 = (long)(tbparam) + (n);\
    EIP = eip;\
dummy_label ## n:\
    EXIT_TB();\
} while (0)

/* second jump to same destination 'n' */
#define JUMP_TB2(opname, tbparam, n)\
do {\
    goto *(void *)(((TranslationBlock *)tbparam)->tb_next[n - 2]);\
} while (0)

#endif
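
/* Illustrative sketch (not part of the original header): the portable
   JUMP_TB variant above relies on GCC computed goto.  '&&label' yields the
   address of a label and 'goto *ptr' jumps to whatever address ptr holds,
   so re-chaining a block only means storing a different label address in
   tb_next[n].  This hypothetical, self-contained function shows the bare
   mechanism: */
static inline int computed_goto_example(int chained)
{
    void *slot = chained ? &&example_target : &&example_fallthrough;

    goto *slot;                 /* indirect jump, like tb_next[n] above */
example_fallthrough:
    return 0;                   /* unchained: fall back to the caller */
example_target:
    return 1;                   /* chained: "enter" the next block */
}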
extern CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
extern CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
extern void *io_mem_opaque[IO_MEM_NB_ENTRIES];
#ifdef __powerpc__
static inline int testandset (int *p)
{
    int ret;
    __asm__ __volatile__ (
        "0:    lwarx %0,0,%1 ;"
        "      xor. %0,%3,%0;"
        "      bne 1f;"
        "      stwcx. %2,0,%1;"
        "      bne- 0b;"
        "1:    "
        : "=&r" (ret)
        : "r" (p), "r" (1), "r" (0)
        : "cr0", "memory");
    return ret;
}
#endif

#ifdef __i386__
static inline int testandset (int *p)
{
    char ret;
    long int readval;

    __asm__ __volatile__ ("lock; cmpxchgl %3, %1; sete %0"
                          : "=q" (ret), "=m" (*p), "=a" (readval)
                          : "r" (1), "m" (*p), "a" (0)
                          : "memory");
    return ret;
}
#endif

#ifdef __x86_64__
static inline int testandset (int *p)
{
    char ret;
    int readval;

    __asm__ __volatile__ ("lock; cmpxchgl %3, %1; sete %0"
                          : "=q" (ret), "=m" (*p), "=a" (readval)
                          : "r" (1), "m" (*p), "a" (0)
                          : "memory");
    return ret;
}
#endif

#ifdef __s390__
static inline int testandset (int *p)
{
    int ret;

    __asm__ __volatile__ ("0: cs    %0,%1,0(%2)\n"
                          "   jl    0b"
                          : "=&d" (ret)
                          : "r" (1), "a" (p), "0" (*p)
                          : "cc", "memory" );
    return ret;
}
#endif

#ifdef __alpha__
static inline int testandset (int *p)
{
    int ret;
    unsigned long one;

    __asm__ __volatile__ ("0:    mov 1,%2\n"
                          "      ldl_l %0,%1\n"
                          "      stl_c %2,%1\n"
                          "      beq %2,1f\n"
                          ".subsection 2\n"
                          "1:    br 0b\n"
                          ".previous"
                          : "=r" (ret), "=m" (*p), "=r" (one)
                          : "m" (*p));
    return ret;
}
#endif

#ifdef __sparc__
static inline int testandset (int *p)
{
    int ret;

    __asm__ __volatile__("ldstub    [%1], %0"
                         : "=r" (ret)
                         : "r" (p)
                         : "memory");

    return (ret ? 1 : 0);
}
#endif

#ifdef __arm__
static inline int testandset (int *spinlock)
{
    register unsigned int ret;
    __asm__ __volatile__("swp %0, %1, [%2]"
                         : "=r"(ret)
                         : "0"(1), "r"(spinlock));

    return ret;
}
#endif
#ifdef __mc68000
static inline int testandset (int *p)
{
    char ret;
    __asm__ __volatile__("tas %1; sne %0"
                         : "=r" (ret)
                         : "m" (*p)
                         : "cc","memory");
    /* tas sets the high bit of *p; sne leaves ret non-zero iff the
       location was already non-zero, i.e. the lock was already taken */
    return ret;
}
#endif
typedef int spinlock_t;

#define SPIN_LOCK_UNLOCKED 0

#if defined(CONFIG_USER_ONLY)
static inline void spin_lock(spinlock_t *lock)
{
    while (testandset(lock));
}

static inline void spin_unlock(spinlock_t *lock)
{
    *lock = 0;
}

static inline int spin_trylock(spinlock_t *lock)
{
    return !testandset(lock);
}
#else
static inline void spin_lock(spinlock_t *lock)
{
}

static inline void spin_unlock(spinlock_t *lock)
{
}

static inline int spin_trylock(spinlock_t *lock)
{
    return 1;
}
#endif
extern spinlock_t tb_lock;

extern int tb_invalidated_flag;
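
/* Illustrative sketch (not part of the original header): tb_lock is meant to
   be taken around translation-cache updates with the spinlock helpers above;
   in the system (softmmu) build the lock operations compile to nothing.  The
   function name is hypothetical. */
static inline void tb_lock_usage_example(void)
{
    spin_lock(&tb_lock);
    /* ... look up, generate or invalidate TranslationBlocks here ... */
    spin_unlock(&tb_lock);
}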
#if (defined(TARGET_I386) || defined(TARGET_PPC)) && \
    !defined(CONFIG_USER_ONLY)

void tlb_fill(unsigned long addr, int is_write, int is_user,
              void *retaddr);

#define ACCESS_TYPE 3
#define MEMSUFFIX _code
#define env cpu_single_env

#define DATA_SIZE 1
#include "softmmu_header.h"

#define DATA_SIZE 2
#include "softmmu_header.h"

#define DATA_SIZE 4
#include "softmmu_header.h"

#undef ACCESS_TYPE
#undef MEMSUFFIX
#undef env

#endif
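
/* Illustrative note (not part of the original header): with ACCESS_TYPE 3
   and MEMSUFFIX _code, each inclusion of softmmu_header.h above is expected
   to generate code-fetch load helpers whose names are built with glue(),
   i.e. something like ldub_code(), lduw_code() and ldl_code() for DATA_SIZE
   1, 2 and 4; get_phys_addr_code() below relies on ldub_code() to force a
   TLB fill for the code page. */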
#if defined(CONFIG_USER_ONLY)
static inline target_ulong get_phys_addr_code(CPUState *env, target_ulong addr)
{
    return addr;
}
#else
/* NOTE: this function can trigger an exception */
/* NOTE2: the returned address is not exactly the physical address: it
   is the offset relative to phys_ram_base */
/* XXX: i386 target specific */
static inline target_ulong get_phys_addr_code(CPUState *env, target_ulong addr)
{
    int is_user, index;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
#if defined(TARGET_I386)
    is_user = ((env->hflags & HF_CPL_MASK) == 3);
#elif defined (TARGET_PPC)
    is_user = msr_pr;
#else
#error "Unimplemented !"
#endif
    if (__builtin_expect(env->tlb_read[is_user][index].address !=
                         (addr & TARGET_PAGE_MASK), 0)) {
#if defined (TARGET_PPC)
        env->access_type = ACCESS_CODE;
        ldub_code((void *)addr);
        env->access_type = ACCESS_INT;
#else
        ldub_code((void *)addr);
#endif
    }
    return addr + env->tlb_read[is_user][index].addend - (unsigned long)phys_ram_base;
}
#endif
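
/* Illustrative sketch (not part of the original header): how
   get_phys_addr_code() is typically paired with tb_link_phys() when a new TB
   is registered.  The second physical page is only resolved when the block
   crosses a page boundary.  The function name and the 'size' parameter are
   hypothetical simplifications, and TARGET_PAGE_MASK is assumed to come from
   the CPU headers. */
static inline void tb_register_example(CPUState *env, TranslationBlock *tb,
                                       target_ulong size)
{
    target_ulong phys_pc, phys_page2, virt_page2;

    phys_pc = get_phys_addr_code(env, tb->pc);
    phys_page2 = (target_ulong)-1;
    virt_page2 = (tb->pc + size - 1) & TARGET_PAGE_MASK;
    if ((tb->pc & TARGET_PAGE_MASK) != virt_page2)
        phys_page2 = get_phys_addr_code(env, virt_page2);
    tb_link_phys(tb, phys_pc, phys_page2);
}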