/*
 *  Host code generation
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>

#include "config.h"

#include "qemu-common.h"
#define NO_CPU_IO_DEFS
#include "cpu.h"
#include "disas/disas.h"
#include "tcg.h"
#include "qemu/timer.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include "qemu.h"
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <sys/time.h>
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#endif

#include "exec/cputlb.h"
#include "translate-all.h"
//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
/* make various TB consistency checks */
//#define DEBUG_TB_CHECK

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;
/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* The bits remaining after N lower levels of page tables.  */
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
uintptr_t qemu_real_host_page_size;
uintptr_t qemu_host_page_size;
uintptr_t qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];

/* code generation context */
TCGContext tcg_ctx;

static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
                         tb_page_addr_t phys_page2);
static TranslationBlock *tb_find_pc(uintptr_t tc_ptr);
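
/* Initialize the global TCG code generation context. */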
void cpu_gen_init(void)
{
    tcg_context_init(&tcg_ctx);
}
/* return non zero if the very first instruction is invalid so that
   the virtual CPU can trigger an exception.

   '*gen_code_size_ptr' contains the size of the generated code (host
   code).
*/
int cpu_gen_code(CPUArchState *env, TranslationBlock *tb, int *gen_code_size_ptr)
{
    TCGContext *s = &tcg_ctx;
    uint8_t *gen_code_buf;
    int gen_code_size;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

#ifdef CONFIG_PROFILER
    s->tb_count1++; /* includes aborted translations because of
                       exceptions */
    ti = profile_getclock();
#endif
    tcg_func_start(s);

    gen_intermediate_code(env, tb);

    /* generate machine code */
    gen_code_buf = tb->tc_ptr;
    tb->tb_next_offset[0] = 0xffff;
    tb->tb_next_offset[1] = 0xffff;
    s->tb_next_offset = tb->tb_next_offset;
#ifdef USE_DIRECT_JUMP
    s->tb_jmp_offset = tb->tb_jmp_offset;
    s->tb_next = NULL;
#else
    s->tb_jmp_offset = NULL;
    s->tb_next = tb->tb_next;
#endif

#ifdef CONFIG_PROFILER
    s->tb_count++;
    s->interm_time += profile_getclock() - ti;
    s->code_time -= profile_getclock();
#endif
    gen_code_size = tcg_gen_code(s, gen_code_buf);
    *gen_code_size_ptr = gen_code_size;
#ifdef CONFIG_PROFILER
    s->code_time += profile_getclock();
    s->code_in_len += tb->size;
    s->code_out_len += gen_code_size;
#endif

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM)) {
        qemu_log("OUT: [size=%d]\n", *gen_code_size_ptr);
        log_disas(tb->tc_ptr, *gen_code_size_ptr);
        qemu_log("\n");
        qemu_log_flush();
    }
#endif
    return 0;
}
/* The cpu state corresponding to 'searched_pc' is restored.
 */
static int cpu_restore_state_from_tb(TranslationBlock *tb, CPUArchState *env,
                                     uintptr_t searched_pc)
{
    TCGContext *s = &tcg_ctx;
    int j;
    uintptr_t tc_ptr;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

#ifdef CONFIG_PROFILER
    ti = profile_getclock();
#endif
    tcg_func_start(s);

    gen_intermediate_code_pc(env, tb);

    if (use_icount) {
        /* Reset the cycle counter to the start of the block.  */
        env->icount_decr.u16.low += tb->icount;
        /* Clear the IO flag.  */
        env->can_do_io = 0;
    }

    /* find opc index corresponding to search_pc */
    tc_ptr = (uintptr_t)tb->tc_ptr;
    if (searched_pc < tc_ptr)
        return -1;

    s->tb_next_offset = tb->tb_next_offset;
#ifdef USE_DIRECT_JUMP
    s->tb_jmp_offset = tb->tb_jmp_offset;
    s->tb_next = NULL;
#else
    s->tb_jmp_offset = NULL;
    s->tb_next = tb->tb_next;
#endif
    j = tcg_gen_code_search_pc(s, (uint8_t *)tc_ptr, searched_pc - tc_ptr);
    if (j < 0)
        return -1;
    /* now find start of instruction before */
    while (s->gen_opc_instr_start[j] == 0) {
        j--;
    }
    env->icount_decr.u16.low -= s->gen_opc_icount[j];

    restore_state_to_opc(env, tb, j);

#ifdef CONFIG_PROFILER
    s->restore_time += profile_getclock() - ti;
    s->restore_count++;
#endif
    return 0;
}
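
/* Restore the CPU state from the TB that contains the host return
   address 'retaddr'; returns true on success, false if no TB covers
   that address. */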
bool cpu_restore_state(CPUArchState *env, uintptr_t retaddr)
{
    TranslationBlock *tb;

    tb = tb_find_pc(retaddr);
    if (tb) {
        cpu_restore_state_from_tb(tb, env, retaddr);
        return true;
    }
    return false;
}
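
/* Make a range of host memory executable, keeping it readable and
   writable. */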
#ifdef _WIN32
static inline void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static inline void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif
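
/* Determine the host and target page sizes and, on BSD user-mode
   builds, mark the pages already mapped by the host process as
   reserved. */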
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0) {
        qemu_host_page_size = qemu_real_host_page_size;
    }
    if (qemu_host_page_size < TARGET_PAGE_SIZE) {
        qemu_host_page_size = TARGET_PAGE_SIZE;
    }
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}
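
/* Walk the multi-level l1_map for page 'index', allocating intermediate
   levels and the final PageDesc array on demand when 'alloc' is set.
   Returns NULL if the entry is missing and 'alloc' is zero. */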
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use g_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = g_malloc0(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}
static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)
#define mmap_lock() do { } while (0)
#define mmap_unlock() do { } while (0)
#endif

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc will be used. */
/* ??? 64-bit hosts ought to have no problem mmaping data outside the
   region in which the guest needs to run.  Revisit this.  */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

/* ??? Should configure for this, not list operating systems here.  */
#if (defined(__linux__) \
    || defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__) \
    || defined(__NetBSD__))
# define USE_MMAP
#endif

/* Minimum size of the code gen buffer.  This number is randomly chosen,
   but not so small that we can't have a fair number of TB's live.  */
#define MIN_CODE_GEN_BUFFER_SIZE     (1024u * 1024)

/* Maximum size of the code gen buffer we'd like to use.  Unless otherwise
   indicated, this is constrained by the range of direct branches on the
   host cpu, as used by the TCG implementation of goto_tb.  */
#if defined(__x86_64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__sparc__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__arm__)
# define MAX_CODE_GEN_BUFFER_SIZE  (16u * 1024 * 1024)
#elif defined(__s390x__)
  /* We have a +- 4GB range on the branches; leave some slop.  */
# define MAX_CODE_GEN_BUFFER_SIZE  (3ul * 1024 * 1024 * 1024)
#else
# define MAX_CODE_GEN_BUFFER_SIZE  ((size_t)-1)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32u * 1024 * 1024)

#define DEFAULT_CODE_GEN_BUFFER_SIZE \
  (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
   ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)
static inline size_t size_code_gen_buffer(size_t tb_size)
{
    /* Size the buffer.  */
    if (tb_size == 0) {
#ifdef USE_STATIC_CODE_GEN_BUFFER
        tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* ??? Needs adjustments.  */
        /* ??? If we relax the requirement that CONFIG_USER_ONLY use the
           static buffer, we could size this on RESERVED_VA, on the text
           segment size of the executable, or continue to use the default.  */
        tb_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
        tb_size = MIN_CODE_GEN_BUFFER_SIZE;
    }
    if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
        tb_size = MAX_CODE_GEN_BUFFER_SIZE;
    }
    tcg_ctx.code_gen_buffer_size = tb_size;
    return tb_size;
}

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
    __attribute__((aligned(CODE_GEN_ALIGN)));

static inline void *alloc_code_gen_buffer(void)
{
    map_exec(static_code_gen_buffer, tcg_ctx.code_gen_buffer_size);
    return static_code_gen_buffer;
}
#elif defined(USE_MMAP)
static inline void *alloc_code_gen_buffer(void)
{
    int flags = MAP_PRIVATE | MAP_ANONYMOUS;
    uintptr_t start = 0;
    void *buf;

    /* Constrain the position of the buffer based on the host cpu.
       Note that these addresses are chosen in concert with the
       addresses assigned in the relevant linker script file.  */
# if defined(__PIE__) || defined(__PIC__)
    /* Don't bother setting a preferred location if we're building
       a position-independent executable.  We're more likely to get
       an address near the main executable if we let the kernel
       choose the address.  */
# elif defined(__x86_64__) && defined(MAP_32BIT)
    /* Force the memory down into low memory with the executable.
       Leave the choice of exact location with the kernel.  */
    flags |= MAP_32BIT;
    /* Cannot expect to map more than 800MB in low memory.  */
    if (tcg_ctx.code_gen_buffer_size > 800u * 1024 * 1024) {
        tcg_ctx.code_gen_buffer_size = 800u * 1024 * 1024;
    }
# elif defined(__sparc__)
    start = 0x40000000ul;
# elif defined(__s390x__)
    start = 0x90000000ul;
# endif

    buf = mmap((void *)start, tcg_ctx.code_gen_buffer_size,
               PROT_WRITE | PROT_READ | PROT_EXEC, flags, -1, 0);
    return buf == MAP_FAILED ? NULL : buf;
}
#else
static inline void *alloc_code_gen_buffer(void)
{
    void *buf = g_malloc(tcg_ctx.code_gen_buffer_size);

    if (buf) {
        map_exec(buf, tcg_ctx.code_gen_buffer_size);
    }
    return buf;
}
#endif /* USE_STATIC_CODE_GEN_BUFFER, USE_MMAP */
static inline void code_gen_alloc(size_t tb_size)
{
    tcg_ctx.code_gen_buffer_size = size_code_gen_buffer(tb_size);
    tcg_ctx.code_gen_buffer = alloc_code_gen_buffer();
    if (tcg_ctx.code_gen_buffer == NULL) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
        exit(1);
    }

    qemu_madvise(tcg_ctx.code_gen_buffer, tcg_ctx.code_gen_buffer_size,
            QEMU_MADV_HUGEPAGE);

    /* Steal room for the prologue at the end of the buffer.  This ensures
       (via the MAX_CODE_GEN_BUFFER_SIZE limits above) that direct branches
       from TB's to the prologue are going to be in range.  It also means
       that we don't need to mark (additional) portions of the data segment
       as executable.  */
    tcg_ctx.code_gen_prologue = tcg_ctx.code_gen_buffer +
            tcg_ctx.code_gen_buffer_size - 1024;
    tcg_ctx.code_gen_buffer_size -= 1024;

    tcg_ctx.code_gen_buffer_max_size = tcg_ctx.code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
    tcg_ctx.code_gen_max_blocks = tcg_ctx.code_gen_buffer_size /
            CODE_GEN_AVG_BLOCK_SIZE;
    tcg_ctx.tb_ctx.tbs =
            g_malloc(tcg_ctx.code_gen_max_blocks * sizeof(TranslationBlock));
}
/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    tcg_ctx.code_gen_ptr = tcg_ctx.code_gen_buffer;
    tcg_register_jit(tcg_ctx.code_gen_buffer, tcg_ctx.code_gen_buffer_size);
    page_init();
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(&tcg_ctx);
#endif
}
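
/* TCG is considered enabled once the code generation buffer has been
   allocated. */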
bool tcg_enabled(void)
{
    return tcg_ctx.code_gen_buffer != NULL;
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (tcg_ctx.tb_ctx.nb_tbs >= tcg_ctx.code_gen_max_blocks ||
        (tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer) >=
         tcg_ctx.code_gen_buffer_max_size) {
        return NULL;
    }
    tb = &tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}
void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single use temporary TB
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (tcg_ctx.tb_ctx.nb_tbs > 0 &&
            tb == &tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs - 1]) {
        tcg_ctx.code_gen_ptr = tb->tc_ptr;
        tcg_ctx.tb_ctx.nb_tbs--;
    }
}
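
/* Drop the self-modifying-code bitmap for a page and reset its write
   counter. */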
static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        g_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */
static void page_flush_tb_1(int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1(level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;

    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
    }
}
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUArchState *env1)
{
    CPUArchState *env;

#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer),
           tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.tb_ctx.nb_tbs > 0 ?
           ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)) /
           tcg_ctx.tb_ctx.nb_tbs : 0);
#endif
    if ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)
        > tcg_ctx.code_gen_buffer_size) {
        cpu_abort(env1, "Internal error: code buffer overflow\n");
    }
    tcg_ctx.tb_ctx.nb_tbs = 0;

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        memset(env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof(void *));
    }

    memset(tcg_ctx.tb_ctx.tb_phys_hash, 0,
           CODE_GEN_PHYS_HASH_SIZE * sizeof(void *));
    page_flush_tb();

    tcg_ctx.code_gen_ptr = tcg_ctx.code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tcg_ctx.tb_ctx.tb_flush_count++;
}
#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;

    address &= TARGET_PAGE_MASK;
    for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for (tb = tcg_ctx.tb_ctx.tb_phys_hash[i]; tb != NULL;
             tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for (tb = tcg_ctx.tb_ctx.tb_phys_hash[i]; tb != NULL;
             tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif
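
/* Unlink 'tb' from the singly linked physical-PC hash chain rooted at
   '*ptb'. */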
static inline void tb_hash_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;

    for (;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = tb1->phys_hash_next;
            break;
        }
        ptb = &tb1->phys_hash_next;
    }
}
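
/* Unlink 'tb' from a page's TB list; the low two bits of each link
   encode which of the TB's (up to two) pages the 'page_next' pointer
   belongs to. */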
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for (;;) {
        tb1 = *ptb;
        n1 = (uintptr_t)tb1 & 3;
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}
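
/* Remove jump slot 'n' of 'tb' from the circular list of TBs that
   jump to the same destination TB. */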
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for (;;) {
            tb1 = *ptb;
            n1 = (uintptr_t)tb1 & 3;
            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
            if (n1 == n && tb1 == tb) {
                break;
            }
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[n]));
}
/* invalidate one TB */
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUArchState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_hash_remove(&tcg_ctx.tb_ctx.tb_phys_hash[h], tb);

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tcg_ctx.tb_ctx.tb_invalidated_flag = 1;

    /* remove the TB from each CPU's jump cache */
    h = tb_jmp_cache_hash_func(tb->pc);
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb) {
            env->tb_jmp_cache[h] = NULL;
        }
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for (;;) {
        n1 = (uintptr_t)tb1 & 3;
        if (n1 == 2) {
            break;
        }
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2); /* fail safe */

    tcg_ctx.tb_ctx.tb_phys_invalidate_count++;
}
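
/* Set 'len' bits in the bitmap 'tab', starting at bit 'start'. */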
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
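
/* Build a bitmap of the bytes in a page that are covered by translated
   code; tb_invalidate_phys_page_fast() uses it to filter out writes
   that cannot touch code. */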
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE) {
                tb_end = TARGET_PAGE_SIZE;
            }
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
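
/* Translate the guest code starting at 'pc' into a new TB and link it
   into the physical page tables; if the translation buffer is full,
   flush the whole TB cache first. */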
TranslationBlock *tb_gen_code(CPUArchState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info.  */
        tcg_ctx.tb_ctx.tb_invalidated_flag = 1;
    }
    tc_ptr = tcg_ctx.code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    tcg_ctx.code_gen_ptr = (void *)(((uintptr_t)tcg_ctx.code_gen_ptr +
            code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}
/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[. NOTE: start and end may refer to *different* physical pages.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 */
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end,
                              int is_cpu_write_access)
{
    while (start < end) {
        tb_invalidate_phys_page_range(start, end, is_cpu_write_access);
        start &= TARGET_PAGE_MASK;
        start += TARGET_PAGE_SIZE;
    }
}
/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[. NOTE: start and end must refer to the *same* physical page.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUArchState *env = cpu_single_env;
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all
       the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state_from_tb(current_tb, env, env->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb) {
                    cpu_interrupt(env, env->interrupt_request);
                }
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}
/* len must be <= 8 and start must be a multiple of len */
void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;

#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                  cpu_single_env->mem_io_vaddr, len,
                  cpu_single_env->eip,
                  cpu_single_env->eip +
                  (intptr_t)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1)) {
            goto do_invalidate;
        }
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
#if !defined(CONFIG_SOFTMMU)
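/* Invalidate every TB on a single guest page (user-mode only); 'pc'
   and 'puc' describe the faulting context for precise self-modifying
   code handling. */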
static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                    uintptr_t pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUArchState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state_from_tb(current_tb, env, pc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif
/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;
#endif

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    page_already_protected = p->first_tb != NULL;
#endif
    p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for (addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find(addr >> TARGET_PAGE_BITS);
            if (!p2) {
                continue;
            }
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
                         tb_page_addr_t phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tcg_ctx.tb_ctx.tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1) {
        tb_alloc_page(tb, 1, phys_page2);
    } else {
        tb->page_addr[1] = -1;
    }

    tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff) {
        tb_reset_jump(tb, 0);
    }
    if (tb->tb_next_offset[1] != 0xffff) {
        tb_reset_jump(tb, 1);
    }

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}
#if defined(CONFIG_QEMU_LDST_OPTIMIZATION) && defined(CONFIG_SOFTMMU)
/* check whether the given addr is in TCG generated code buffer or not */
bool is_tcg_gen_code(uintptr_t tc_ptr)
{
    /* This can be called during code generation, code_gen_buffer_max_size
       is used instead of code_gen_ptr for upper boundary checking */
    return (tc_ptr >= (uintptr_t)tcg_ctx.code_gen_buffer &&
            tc_ptr < (uintptr_t)(tcg_ctx.code_gen_buffer +
                    tcg_ctx.code_gen_buffer_max_size));
}
#endif

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
static TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
{
    int m_min, m_max, m;
    uintptr_t v;
    TranslationBlock *tb;

    if (tcg_ctx.tb_ctx.nb_tbs <= 0) {
        return NULL;
    }
    if (tc_ptr < (uintptr_t)tcg_ctx.code_gen_buffer ||
        tc_ptr >= (uintptr_t)tcg_ctx.code_gen_ptr) {
        return NULL;
    }
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = tcg_ctx.tb_ctx.nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tcg_ctx.tb_ctx.tbs[m];
        v = (uintptr_t)tb->tc_ptr;
        if (v == tc_ptr) {
            return tb;
        } else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tcg_ctx.tb_ctx.tbs[m_max];
}
static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for (;;) {
            n1 = (uintptr_t)tb1 & 3;
            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
            if (n1 == 2) {
                break;
            }
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for (;;) {
            tb1 = *ptb;
            n1 = (uintptr_t)tb1 & 3;
            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
            if (n1 == n && tb1 == tb) {
                break;
            }
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}
#if defined(TARGET_HAS_ICE) && !defined(CONFIG_USER_ONLY)
void tb_invalidate_phys_addr(hwaddr addr)
{
    ram_addr_t ram_addr;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch,
                             addr >> TARGET_PAGE_BITS);
    if (!(memory_region_is_ram(section->mr)
          || (section->mr->rom_device && section->mr->readable))) {
        return;
    }
    ram_addr = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
        + memory_region_section_addr(section, addr);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif /* TARGET_HAS_ICE && !defined(CONFIG_USER_ONLY) */
void cpu_unlink_tb(CPUArchState *env)
{
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    spin_lock(&interrupt_lock);
    tb = env->current_tb;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    if (tb) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
    }
    spin_unlock(&interrupt_lock);
}
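
/* Recover the guest state for the memory access that hit a watchpoint,
   then invalidate the containing TB so it is retranslated. */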
void tb_check_watchpoint(CPUArchState *env)
{
    TranslationBlock *tb;

    tb = tb_find_pc(env->mem_io_pc);
    if (!tb) {
        cpu_abort(env, "check_watchpoint: could not find TB for pc=%p",
                  (void *)env->mem_io_pc);
    }
    cpu_restore_state_from_tb(tb, env, env->mem_io_pc);
    tb_phys_invalidate(tb, -1);
}
#ifndef CONFIG_USER_ONLY
/* mask must never be zero, except for A20 change call */
static void tcg_handle_interrupt(CPUArchState *env, int mask)
{
    CPUState *cpu = ENV_GET_CPU(env);
    int old_mask;

    old_mask = env->interrupt_request;
    env->interrupt_request |= mask;

    /*
     * If called from iothread context, wake the target cpu in
     * case it's halted.
     */
    if (!qemu_cpu_is_self(cpu)) {
        qemu_cpu_kick(cpu);
        return;
    }

    if (use_icount) {
        env->icount_decr.u16.high = 0xffff;
        if (!can_do_io(env)
            && (mask & ~old_mask) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
    } else {
        cpu_unlink_tb(env);
    }
}

CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUArchState *env, uintptr_t retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc(retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  (void *)retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state_from_tb(tb, env, retaddr);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
            && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK) {
        cpu_abort(env, "TB too big during recompile");
    }

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}
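
/* Discard the direct jump cache entries for a flushed guest page and
   for the preceding page, since a TB starting there may overlap it. */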
void tb_flush_jmp_cache(CPUArchState *env, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset(&env->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset(&env->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}
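
/* Print translation buffer statistics (TB counts and sizes, jump and
   flush counters) to the given stream. */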
void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for (i = 0; i < tcg_ctx.tb_ctx.nb_tbs; i++) {
        tb = &tcg_ctx.tb_ctx.tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size) {
            max_target_code_size = tb->size;
        }
        if (tb->page_addr[1] != -1) {
            cross_page++;
        }
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %td/%zd\n",
                tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer,
                tcg_ctx.code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
            tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
            tcg_ctx.tb_ctx.nb_tbs ? target_code_size /
                    tcg_ctx.tb_ctx.nb_tbs : 0,
            max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %td bytes (expansion ratio: %0.1f)\n",
            tcg_ctx.tb_ctx.nb_tbs ? (tcg_ctx.code_gen_ptr -
                                     tcg_ctx.code_gen_buffer) /
                                     tcg_ctx.tb_ctx.nb_tbs : 0,
            target_code_size ? (double) (tcg_ctx.code_gen_ptr -
                                         tcg_ctx.code_gen_buffer) /
                                         target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n", cross_page,
            tcg_ctx.tb_ctx.nb_tbs ? (cross_page * 100) /
                                    tcg_ctx.tb_ctx.nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp_count * 100) /
                        tcg_ctx.tb_ctx.nb_tbs : 0,
                direct_jmp2_count,
                tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp2_count * 100) /
                        tcg_ctx.tb_ctx.nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tcg_ctx.tb_ctx.tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n",
            tcg_ctx.tb_ctx.tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}
#else /* CONFIG_USER_ONLY */

void cpu_interrupt(CPUArchState *env, int mask)
{
    env->interrupt_request |= mask;
    cpu_unlink_tb(env);
}

/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */
struct walk_memory_regions_data {
    walk_memory_regions_fn fn;
    void *priv;
    uintptr_t start;
    int prot;
};

static int walk_memory_regions_end(struct walk_memory_regions_data *data,
                                   abi_ulong end, int new_prot)
{
    if (data->start != -1ul) {
        int rc = data->fn(data->priv, data->start, end, data->prot);

        if (rc != 0) {
            return rc;
        }
    }

    data->start = (new_prot ? end : -1ul);
    data->prot = new_prot;

    return 0;
}
static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 abi_ulong base, int level, void **lp)
{
    abi_ulong pa;
    int i, rc;

    if (*lp == NULL) {
        return walk_memory_regions_end(data, base, 0);
    }

    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            if (prot != data->prot) {
                rc = walk_memory_regions_end(data, pa, prot);
                if (rc != 0) {
                    return rc;
                }
            }
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < L2_SIZE; ++i) {
            pa = base | ((abi_ulong)i <<
                (TARGET_PAGE_BITS + L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
            if (rc != 0) {
                return rc;
            }
        }
    }

    return 0;
}

int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    struct walk_memory_regions_data data;
    uintptr_t i;

    data.fn = fn;
    data.priv = priv;
    data.start = -1ul;
    data.prot = 0;

    for (i = 0; i < V_L1_SIZE; i++) {
        int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
                                       V_L1_SHIFT / L2_BITS - 1, l1_map + i);

        if (rc != 0) {
            return rc;
        }
    }

    return walk_memory_regions_end(&data, 0, 0);
}
static int dump_region(void *priv, abi_ulong start,
    abi_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
        " "TARGET_ABI_FMT_lx" %c%c%c\n",
        start, end, end - start,
        ((prot & PAGE_READ) ? 'r' : '-'),
        ((prot & PAGE_WRITE) ? 'w' : '-'),
        ((prot & PAGE_EXEC) ? 'x' : '-'));

    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    (void) fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    walk_memory_regions(f, dump_region);
}
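
/* Return the PAGE_* protection flags recorded for a guest address, or
   0 if the page was never mapped. */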
int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        return 0;
    }
    return p->flags;
}

/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is positioned automatically depending
   on PAGE_WRITE.  The mmap_lock should already be held.  */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
    assert(start < end);

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        flags |= PAGE_WRITE_ORG;
    }

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate
           the code inside.  */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
}
int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif

    if (len == 0) {
        return 0;
    }
    if (start + len - 1 < start) {
        /* We've wrapped around.  */
        return -1;
    }

    /* must do before we lose bits in the next step */
    end = TARGET_PAGE_ALIGN(start + len);
    start = start & TARGET_PAGE_MASK;

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p) {
            return -1;
        }
        if (!(p->flags & PAGE_VALID)) {
            return -1;
        }

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) {
            return -1;
        }
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG)) {
                return -1;
            }
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL)) {
                    return -1;
                }
            }
            return 0;
        }
    }
    return 0;
}
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, uintptr_t pc, void *puc)
{
    unsigned int prot;
    PageDesc *p;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
    mmap_lock();

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        mmap_unlock();
        return 0;
    }

    /* if the page was really writable, then we change its
       protection back to writable */
    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
        host_start = address & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;

        prot = 0;
        for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
            p = page_find(addr >> TARGET_PAGE_BITS);
            p->flags |= PAGE_WRITE;
            prot |= p->flags;

            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(addr, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(addr);
#endif
        }
        mprotect((void *)g2h(host_start), qemu_host_page_size,
                 prot & PAGE_BITS);

        mmap_unlock();
        return 1;
    }
    mmap_unlock();
    return 0;
}
#endif /* CONFIG_USER_ONLY */