tcg: Rename tb_jmp_remove() to tb_remove_from_jmp_list()
[qemu/ar7.git] / translate-all.c
blob 5e057baaa346a188dd8fbd72deb5d0d7327c79cb
1 /*
2 * Host code generation
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 #ifdef _WIN32
20 #include <windows.h>
21 #else
22 #include <sys/mman.h>
23 #endif
24 #include "qemu/osdep.h"
27 #include "qemu-common.h"
28 #define NO_CPU_IO_DEFS
29 #include "cpu.h"
30 #include "trace.h"
31 #include "disas/disas.h"
32 #include "tcg.h"
33 #if defined(CONFIG_USER_ONLY)
34 #include "qemu.h"
35 #if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
36 #include <sys/param.h>
37 #if __FreeBSD_version >= 700104
38 #define HAVE_KINFO_GETVMMAP
39 #define sigqueue sigqueue_freebsd /* avoid redefinition */
40 #include <sys/proc.h>
41 #include <machine/profile.h>
42 #define _KERNEL
43 #include <sys/user.h>
44 #undef _KERNEL
45 #undef sigqueue
46 #include <libutil.h>
47 #endif
48 #endif
49 #else
50 #include "exec/address-spaces.h"
51 #endif
53 #include "exec/cputlb.h"
54 #include "exec/tb-hash.h"
55 #include "translate-all.h"
56 #include "qemu/bitmap.h"
57 #include "qemu/timer.h"
58 #include "exec/log.h"
60 //#define DEBUG_TB_INVALIDATE
61 //#define DEBUG_FLUSH
62 /* make various TB consistency checks */
63 //#define DEBUG_TB_CHECK
65 #if !defined(CONFIG_USER_ONLY)
66 /* TB consistency checks only implemented for usermode emulation. */
67 #undef DEBUG_TB_CHECK
68 #endif
70 #define SMC_BITMAP_USE_THRESHOLD 10
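/* Note: once a page has taken this many code-write invalidations, a
   per-page bitmap of which bytes hold translated code is built (see
   build_page_bitmap) so that tb_invalidate_phys_page_fast() can skip
   writes that do not actually touch translated code. */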
72 typedef struct PageDesc {
73 /* list of TBs intersecting this ram page */
74 TranslationBlock *first_tb;
75 /* in order to optimize self-modifying code, we count the number
76 of code writes to a given page so we know when to build a bitmap */
77 unsigned int code_write_count;
78 unsigned long *code_bitmap;
79 #if defined(CONFIG_USER_ONLY)
80 unsigned long flags;
81 #endif
82 } PageDesc;
84 /* In system mode we want L1_MAP to be based on ram offsets,
85 while in user mode we want it to be based on virtual addresses. */
86 #if !defined(CONFIG_USER_ONLY)
87 #if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
88 # define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS
89 #else
90 # define L1_MAP_ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS
91 #endif
92 #else
93 # define L1_MAP_ADDR_SPACE_BITS TARGET_VIRT_ADDR_SPACE_BITS
94 #endif
96 /* Size of the L2 (and L3, etc) page tables. */
97 #define V_L2_BITS 10
98 #define V_L2_SIZE (1 << V_L2_BITS)
100 /* The bits remaining after N lower levels of page tables. */
101 #define V_L1_BITS_REM \
102 ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % V_L2_BITS)
104 #if V_L1_BITS_REM < 4
105 #define V_L1_BITS (V_L1_BITS_REM + V_L2_BITS)
106 #else
107 #define V_L1_BITS V_L1_BITS_REM
108 #endif
110 #define V_L1_SIZE ((target_ulong)1 << V_L1_BITS)
112 #define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
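/* Worked example (the numbers vary by configuration): with
   L1_MAP_ADDR_SPACE_BITS = 42 and TARGET_PAGE_BITS = 12 there are 30
   bits of page number to map.  V_L1_BITS_REM = 30 % 10 = 0, so
   V_L1_BITS becomes 10, V_L1_SIZE is 1024 entries and V_L1_SHIFT is
   20: l1_map consumes the top 10 bits, followed by one intermediate
   V_L2 level and a final level of PageDesc entries, 10 bits each. */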
114 uintptr_t qemu_host_page_size;
115 intptr_t qemu_host_page_mask;
117 /* The bottom level has pointers to PageDesc */
118 static void *l1_map[V_L1_SIZE];
120 /* code generation context */
121 TCGContext tcg_ctx;
123 /* translation block context */
124 #ifdef CONFIG_USER_ONLY
125 __thread int have_tb_lock;
126 #endif
128 void tb_lock(void)
130 #ifdef CONFIG_USER_ONLY
131 assert(!have_tb_lock);
132 qemu_mutex_lock(&tcg_ctx.tb_ctx.tb_lock);
133 have_tb_lock++;
134 #endif
137 void tb_unlock(void)
139 #ifdef CONFIG_USER_ONLY
140 assert(have_tb_lock);
141 have_tb_lock--;
142 qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
143 #endif
146 void tb_lock_reset(void)
148 #ifdef CONFIG_USER_ONLY
149 if (have_tb_lock) {
150 qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
151 have_tb_lock = 0;
153 #endif
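/* Note: in softmmu builds the three helpers above compile to no-ops;
   translation currently runs single-threaded there, so the lock is
   only needed for user-mode emulation. */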
156 static TranslationBlock *tb_find_pc(uintptr_t tc_ptr);
158 void cpu_gen_init(void)
160 tcg_context_init(&tcg_ctx);
163 /* Encode VAL as a signed leb128 sequence at P.
164 Return P incremented past the encoded value. */
165 static uint8_t *encode_sleb128(uint8_t *p, target_long val)
167 int more, byte;
169 do {
170 byte = val & 0x7f;
171 val >>= 7;
172 more = !((val == 0 && (byte & 0x40) == 0)
173 || (val == -1 && (byte & 0x40) != 0));
174 if (more) {
175 byte |= 0x80;
177 *p++ = byte;
178 } while (more);
180 return p;
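/* Example: -123456 is emitted as the bytes 0xc0 0xbb 0x78.  Bit 7 of
   each byte marks a continuation; bit 6 of the final byte carries the
   sign that decode_sleb128() below extends back to a full value. */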
183 /* Decode a signed leb128 sequence at *PP; increment *PP past the
184 decoded value. Return the decoded value. */
185 static target_long decode_sleb128(uint8_t **pp)
187 uint8_t *p = *pp;
188 target_long val = 0;
189 int byte, shift = 0;
191 do {
192 byte = *p++;
193 val |= (target_ulong)(byte & 0x7f) << shift;
194 shift += 7;
195 } while (byte & 0x80);
196 if (shift < TARGET_LONG_BITS && (byte & 0x40)) {
197 val |= -(target_ulong)1 << shift;
200 *pp = p;
201 return val;
204 /* Encode the data collected about the instructions while compiling TB.
205 Place the data at BLOCK, and return the number of bytes consumed.
207 The logical table consists of TARGET_INSN_START_WORDS target_ulong's,
208 which come from the target's insn_start data, followed by a uintptr_t
209 which comes from the host pc of the end of the code implementing the insn.
211 Each line of the table is encoded as sleb128 deltas from the previous
212 line. The seed for the first line is { tb->pc, 0..., tb->tc_ptr }.
213 That is, the first column is seeded with the guest pc, the last column
214 with the host pc, and the middle columns with zeros. */
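/* For instance, with TARGET_INSN_START_WORDS == 1 each row is just two
   sleb128 values: the delta of the guest pc and the delta of the host
   code offset at which the insn's generated code ends. */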
216 static int encode_search(TranslationBlock *tb, uint8_t *block)
218 uint8_t *highwater = tcg_ctx.code_gen_highwater;
219 uint8_t *p = block;
220 int i, j, n;
222 tb->tc_search = block;
224 for (i = 0, n = tb->icount; i < n; ++i) {
225 target_ulong prev;
227 for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
228 if (i == 0) {
229 prev = (j == 0 ? tb->pc : 0);
230 } else {
231 prev = tcg_ctx.gen_insn_data[i - 1][j];
233 p = encode_sleb128(p, tcg_ctx.gen_insn_data[i][j] - prev);
235 prev = (i == 0 ? 0 : tcg_ctx.gen_insn_end_off[i - 1]);
236 p = encode_sleb128(p, tcg_ctx.gen_insn_end_off[i] - prev);
238 /* Test for (pending) buffer overflow. The assumption is that any
239 one row beginning below the high water mark cannot overrun
240 the buffer completely. Thus we can test for overflow after
241 encoding a row without having to check during encoding. */
242 if (unlikely(p > highwater)) {
243 return -1;
247 return p - block;
250 /* The cpu state corresponding to 'searched_pc' is restored. */
251 static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
252 uintptr_t searched_pc)
254 target_ulong data[TARGET_INSN_START_WORDS] = { tb->pc };
255 uintptr_t host_pc = (uintptr_t)tb->tc_ptr;
256 CPUArchState *env = cpu->env_ptr;
257 uint8_t *p = tb->tc_search;
258 int i, j, num_insns = tb->icount;
259 #ifdef CONFIG_PROFILER
260 int64_t ti = profile_getclock();
261 #endif
263 if (searched_pc < host_pc) {
264 return -1;
267 /* Reconstruct the stored insn data while looking for the point at
268 which the end of the insn exceeds the searched_pc. */
269 for (i = 0; i < num_insns; ++i) {
270 for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
271 data[j] += decode_sleb128(&p);
273 host_pc += decode_sleb128(&p);
274 if (host_pc > searched_pc) {
275 goto found;
278 return -1;
280 found:
281 if (tb->cflags & CF_USE_ICOUNT) {
282 assert(use_icount);
283 /* Reset the cycle counter to the start of the block. */
284 cpu->icount_decr.u16.low += num_insns;
285 /* Clear the IO flag. */
286 cpu->can_do_io = 0;
288 cpu->icount_decr.u16.low -= i;
289 restore_state_to_opc(env, tb, data);
291 #ifdef CONFIG_PROFILER
292 tcg_ctx.restore_time += profile_getclock() - ti;
293 tcg_ctx.restore_count++;
294 #endif
295 return 0;
298 bool cpu_restore_state(CPUState *cpu, uintptr_t retaddr)
300 TranslationBlock *tb;
302 tb = tb_find_pc(retaddr);
303 if (tb) {
304 cpu_restore_state_from_tb(cpu, tb, retaddr);
305 if (tb->cflags & CF_NOCACHE) {
306 /* one-shot translation, invalidate it immediately */
307 cpu->current_tb = NULL;
308 tb_phys_invalidate(tb, -1);
309 tb_free(tb);
311 return true;
313 return false;
316 void page_size_init(void)
318 /* NOTE: we can always suppose that qemu_host_page_size >=
319 TARGET_PAGE_SIZE */
320 qemu_real_host_page_size = getpagesize();
321 qemu_real_host_page_mask = -(intptr_t)qemu_real_host_page_size;
322 if (qemu_host_page_size == 0) {
323 qemu_host_page_size = qemu_real_host_page_size;
325 if (qemu_host_page_size < TARGET_PAGE_SIZE) {
326 qemu_host_page_size = TARGET_PAGE_SIZE;
328 qemu_host_page_mask = -(intptr_t)qemu_host_page_size;
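/* e.g. with a 4 KiB host page the mask is ...fffff000, so
   "addr & qemu_host_page_mask" rounds an address down to the start of
   its host page. */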
331 static void page_init(void)
333 page_size_init();
334 #if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
336 #ifdef HAVE_KINFO_GETVMMAP
337 struct kinfo_vmentry *freep;
338 int i, cnt;
340 freep = kinfo_getvmmap(getpid(), &cnt);
341 if (freep) {
342 mmap_lock();
343 for (i = 0; i < cnt; i++) {
344 unsigned long startaddr, endaddr;
346 startaddr = freep[i].kve_start;
347 endaddr = freep[i].kve_end;
348 if (h2g_valid(startaddr)) {
349 startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
351 if (h2g_valid(endaddr)) {
352 endaddr = h2g(endaddr);
353 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
354 } else {
355 #if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
356 endaddr = ~0ul;
357 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
358 #endif
362 free(freep);
363 mmap_unlock();
365 #else
366 FILE *f;
368 last_brk = (unsigned long)sbrk(0);
370 f = fopen("/compat/linux/proc/self/maps", "r");
371 if (f) {
372 mmap_lock();
374 do {
375 unsigned long startaddr, endaddr;
376 int n;
378 n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);
380 if (n == 2 && h2g_valid(startaddr)) {
381 startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
383 if (h2g_valid(endaddr)) {
384 endaddr = h2g(endaddr);
385 } else {
386 endaddr = ~0ul;
388 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
390 } while (!feof(f));
392 fclose(f);
393 mmap_unlock();
395 #endif
397 #endif
400 /* If alloc=1:
401 * Called with mmap_lock held for user-mode emulation.
403 static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
405 PageDesc *pd;
406 void **lp;
407 int i;
409 /* Level 1. Always allocated. */
410 lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));
412 /* Level 2..N-1. */
413 for (i = V_L1_SHIFT / V_L2_BITS - 1; i > 0; i--) {
414 void **p = atomic_rcu_read(lp);
416 if (p == NULL) {
417 if (!alloc) {
418 return NULL;
420 p = g_new0(void *, V_L2_SIZE);
421 atomic_rcu_set(lp, p);
424 lp = p + ((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1));
427 pd = atomic_rcu_read(lp);
428 if (pd == NULL) {
429 if (!alloc) {
430 return NULL;
432 pd = g_new0(PageDesc, V_L2_SIZE);
433 atomic_rcu_set(lp, pd);
436 return pd + (index & (V_L2_SIZE - 1));
439 static inline PageDesc *page_find(tb_page_addr_t index)
441 return page_find_alloc(index, 0);
444 #if defined(CONFIG_USER_ONLY)
445 /* Currently it is not recommended to allocate big chunks of data in
446 user mode. It will change once a dedicated libc is used. */
447 /* ??? 64-bit hosts ought to have no problem mmaping data outside the
448 region in which the guest needs to run. Revisit this. */
449 #define USE_STATIC_CODE_GEN_BUFFER
450 #endif
452 /* Minimum size of the code gen buffer. This number is randomly chosen,
453 but not so small that we can't have a fair number of TB's live. */
454 #define MIN_CODE_GEN_BUFFER_SIZE (1024u * 1024)
456 /* Maximum size of the code gen buffer we'd like to use. Unless otherwise
457 indicated, this is constrained by the range of direct branches on the
458 host cpu, as used by the TCG implementation of goto_tb. */
459 #if defined(__x86_64__)
460 # define MAX_CODE_GEN_BUFFER_SIZE (2ul * 1024 * 1024 * 1024)
461 #elif defined(__sparc__)
462 # define MAX_CODE_GEN_BUFFER_SIZE (2ul * 1024 * 1024 * 1024)
463 #elif defined(__powerpc64__)
464 # define MAX_CODE_GEN_BUFFER_SIZE (2ul * 1024 * 1024 * 1024)
465 #elif defined(__powerpc__)
466 # define MAX_CODE_GEN_BUFFER_SIZE (32u * 1024 * 1024)
467 #elif defined(__aarch64__)
468 # define MAX_CODE_GEN_BUFFER_SIZE (128ul * 1024 * 1024)
469 #elif defined(__arm__)
470 # define MAX_CODE_GEN_BUFFER_SIZE (16u * 1024 * 1024)
471 #elif defined(__s390x__)
472 /* We have a +- 4GB range on the branches; leave some slop. */
473 # define MAX_CODE_GEN_BUFFER_SIZE (3ul * 1024 * 1024 * 1024)
474 #elif defined(__mips__)
475 /* We have a 256MB branch region, but leave room to make sure the
476 main executable is also within that region. */
477 # define MAX_CODE_GEN_BUFFER_SIZE (128ul * 1024 * 1024)
478 #else
479 # define MAX_CODE_GEN_BUFFER_SIZE ((size_t)-1)
480 #endif
482 #define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32u * 1024 * 1024)
484 #define DEFAULT_CODE_GEN_BUFFER_SIZE \
485 (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
486 ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)
488 static inline size_t size_code_gen_buffer(size_t tb_size)
490 /* Size the buffer. */
491 if (tb_size == 0) {
492 #ifdef USE_STATIC_CODE_GEN_BUFFER
493 tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
494 #else
495 /* ??? Needs adjustments. */
496 /* ??? If we relax the requirement that CONFIG_USER_ONLY use the
497 static buffer, we could size this on RESERVED_VA, on the text
498 segment size of the executable, or continue to use the default. */
499 tb_size = (unsigned long)(ram_size / 4);
500 #endif
502 if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
503 tb_size = MIN_CODE_GEN_BUFFER_SIZE;
505 if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
506 tb_size = MAX_CODE_GEN_BUFFER_SIZE;
508 return tb_size;
511 #ifdef __mips__
512 /* In order to use J and JAL within the code_gen_buffer, we require
513 that the buffer not cross a 256MB boundary. */
514 static inline bool cross_256mb(void *addr, size_t size)
516 return ((uintptr_t)addr ^ ((uintptr_t)addr + size)) & ~0x0ffffffful;
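/* e.g. addr = 0x0fff0000 and size = 0x20000: 0x0fff0000 ^ 0x10010000
   has bit 28 set, so the result is non-zero and the region is reported
   as crossing the 256MB boundary at 0x10000000. */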
519 /* We weren't able to allocate a buffer without crossing that boundary,
520 so make do with the larger portion of the buffer that doesn't cross.
521 Returns the new base of the buffer, and adjusts code_gen_buffer_size. */
522 static inline void *split_cross_256mb(void *buf1, size_t size1)
524 void *buf2 = (void *)(((uintptr_t)buf1 + size1) & ~0x0ffffffful);
525 size_t size2 = buf1 + size1 - buf2;
527 size1 = buf2 - buf1;
528 if (size1 < size2) {
529 size1 = size2;
530 buf1 = buf2;
533 tcg_ctx.code_gen_buffer_size = size1;
534 return buf1;
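/* For instance a 16MB buffer starting at 0x0ff00000 splits at
   0x10000000 into a 1MB and a 15MB piece; the 15MB piece is kept and
   code_gen_buffer_size shrinks to match. */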
536 #endif
538 #ifdef USE_STATIC_CODE_GEN_BUFFER
539 static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
540 __attribute__((aligned(CODE_GEN_ALIGN)));
542 # ifdef _WIN32
543 static inline void do_protect(void *addr, long size, int prot)
545 DWORD old_protect;
546 VirtualProtect(addr, size, prot, &old_protect);
549 static inline void map_exec(void *addr, long size)
551 do_protect(addr, size, PAGE_EXECUTE_READWRITE);
554 static inline void map_none(void *addr, long size)
556 do_protect(addr, size, PAGE_NOACCESS);
558 # else
559 static inline void do_protect(void *addr, long size, int prot)
561 uintptr_t start, end;
563 start = (uintptr_t)addr;
564 start &= qemu_real_host_page_mask;
566 end = (uintptr_t)addr + size;
567 end = ROUND_UP(end, qemu_real_host_page_size);
569 mprotect((void *)start, end - start, prot);
572 static inline void map_exec(void *addr, long size)
574 do_protect(addr, size, PROT_READ | PROT_WRITE | PROT_EXEC);
577 static inline void map_none(void *addr, long size)
579 do_protect(addr, size, PROT_NONE);
581 # endif /* WIN32 */
583 static inline void *alloc_code_gen_buffer(void)
585 void *buf = static_code_gen_buffer;
586 size_t full_size, size;
588 /* The size of the buffer, rounded down to end on a page boundary. */
589 full_size = (((uintptr_t)buf + sizeof(static_code_gen_buffer))
590 & qemu_real_host_page_mask) - (uintptr_t)buf;
592 /* Reserve a guard page. */
593 size = full_size - qemu_real_host_page_size;
595 /* Honor a command-line option limiting the size of the buffer. */
596 if (size > tcg_ctx.code_gen_buffer_size) {
597 size = (((uintptr_t)buf + tcg_ctx.code_gen_buffer_size)
598 & qemu_real_host_page_mask) - (uintptr_t)buf;
600 tcg_ctx.code_gen_buffer_size = size;
602 #ifdef __mips__
603 if (cross_256mb(buf, size)) {
604 buf = split_cross_256mb(buf, size);
605 size = tcg_ctx.code_gen_buffer_size;
607 #endif
609 map_exec(buf, size);
610 map_none(buf + size, qemu_real_host_page_size);
611 qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);
613 return buf;
615 #elif defined(_WIN32)
616 static inline void *alloc_code_gen_buffer(void)
618 size_t size = tcg_ctx.code_gen_buffer_size;
619 void *buf1, *buf2;
621 /* Perform the allocation in two steps, so that the guard page
622 is reserved but uncommitted. */
623 buf1 = VirtualAlloc(NULL, size + qemu_real_host_page_size,
624 MEM_RESERVE, PAGE_NOACCESS);
625 if (buf1 != NULL) {
626 buf2 = VirtualAlloc(buf1, size, MEM_COMMIT, PAGE_EXECUTE_READWRITE);
627 assert(buf1 == buf2);
630 return buf1;
632 #else
633 static inline void *alloc_code_gen_buffer(void)
635 int flags = MAP_PRIVATE | MAP_ANONYMOUS;
636 uintptr_t start = 0;
637 size_t size = tcg_ctx.code_gen_buffer_size;
638 void *buf;
640 /* Constrain the position of the buffer based on the host cpu.
641 Note that these addresses are chosen in concert with the
642 addresses assigned in the relevant linker script file. */
643 # if defined(__PIE__) || defined(__PIC__)
644 /* Don't bother setting a preferred location if we're building
645 a position-independent executable. We're more likely to get
646 an address near the main executable if we let the kernel
647 choose the address. */
648 # elif defined(__x86_64__) && defined(MAP_32BIT)
649 /* Force the memory down into low memory with the executable.
650 Leave the choice of exact location with the kernel. */
651 flags |= MAP_32BIT;
652 /* Cannot expect to map more than 800MB in low memory. */
653 if (size > 800u * 1024 * 1024) {
654 tcg_ctx.code_gen_buffer_size = size = 800u * 1024 * 1024;
656 # elif defined(__sparc__)
657 start = 0x40000000ul;
658 # elif defined(__s390x__)
659 start = 0x90000000ul;
660 # elif defined(__mips__)
661 # if _MIPS_SIM == _ABI64
662 start = 0x128000000ul;
663 # else
664 start = 0x08000000ul;
665 # endif
666 # endif
668 buf = mmap((void *)start, size + qemu_real_host_page_size,
669 PROT_NONE, flags, -1, 0);
670 if (buf == MAP_FAILED) {
671 return NULL;
674 #ifdef __mips__
675 if (cross_256mb(buf, size)) {
676 /* Try again, with the original still mapped, to avoid re-acquiring
677 that 256mb crossing. This time don't specify an address. */
678 size_t size2;
679 void *buf2 = mmap(NULL, size + qemu_real_host_page_size,
680 PROT_NONE, flags, -1, 0);
681 switch (buf2 != MAP_FAILED) {
682 case 1:
683 if (!cross_256mb(buf2, size)) {
684 /* Success! Use the new buffer. */
685 munmap(buf, size + qemu_real_host_page_size);
686 break;
688 /* Failure. Work with what we had. */
689 munmap(buf2, size + qemu_real_host_page_size);
690 /* fallthru */
691 default:
692 /* Split the original buffer. Free the smaller half. */
693 buf2 = split_cross_256mb(buf, size);
694 size2 = tcg_ctx.code_gen_buffer_size;
695 if (buf == buf2) {
696 munmap(buf + size2 + qemu_real_host_page_size, size - size2);
697 } else {
698 munmap(buf, size - size2);
700 size = size2;
701 break;
703 buf = buf2;
705 #endif
707 /* Make the final buffer accessible. The guard page at the end
708 will remain inaccessible with PROT_NONE. */
709 mprotect(buf, size, PROT_WRITE | PROT_READ | PROT_EXEC);
711 /* Request large pages for the buffer. */
712 qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);
714 return buf;
716 #endif /* USE_STATIC_CODE_GEN_BUFFER, WIN32, POSIX */
718 static inline void code_gen_alloc(size_t tb_size)
720 tcg_ctx.code_gen_buffer_size = size_code_gen_buffer(tb_size);
721 tcg_ctx.code_gen_buffer = alloc_code_gen_buffer();
722 if (tcg_ctx.code_gen_buffer == NULL) {
723 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
724 exit(1);
727 /* Estimate a good size for the number of TBs we can support. We
728 still haven't deducted the prologue from the buffer size here,
729 but that's minimal and won't affect the estimate much. */
730 tcg_ctx.code_gen_max_blocks
731 = tcg_ctx.code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
732 tcg_ctx.tb_ctx.tbs = g_new(TranslationBlock, tcg_ctx.code_gen_max_blocks);
734 qemu_mutex_init(&tcg_ctx.tb_ctx.tb_lock);
737 /* Must be called before using the QEMU cpus. 'tb_size' is the size
738 (in bytes) allocated to the translation buffer. Zero means default
739 size. */
740 void tcg_exec_init(unsigned long tb_size)
742 cpu_gen_init();
743 page_init();
744 code_gen_alloc(tb_size);
745 #if defined(CONFIG_SOFTMMU)
746 /* There's no guest base to take into account, so go ahead and
747 initialize the prologue now. */
748 tcg_prologue_init(&tcg_ctx);
749 #endif
752 bool tcg_enabled(void)
754 return tcg_ctx.code_gen_buffer != NULL;
757 /* Allocate a new translation block. Flush the translation buffer if
758 too many translation blocks or too much generated code. */
759 static TranslationBlock *tb_alloc(target_ulong pc)
761 TranslationBlock *tb;
763 if (tcg_ctx.tb_ctx.nb_tbs >= tcg_ctx.code_gen_max_blocks) {
764 return NULL;
766 tb = &tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs++];
767 tb->pc = pc;
768 tb->cflags = 0;
769 return tb;
772 void tb_free(TranslationBlock *tb)
774 /* In practice this is mostly used for single-use temporary TBs.
775 Ignore the hard cases and just back up if this TB happens to
776 be the last one generated. */
777 if (tcg_ctx.tb_ctx.nb_tbs > 0 &&
778 tb == &tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs - 1]) {
779 tcg_ctx.code_gen_ptr = tb->tc_ptr;
780 tcg_ctx.tb_ctx.nb_tbs--;
784 static inline void invalidate_page_bitmap(PageDesc *p)
786 g_free(p->code_bitmap);
787 p->code_bitmap = NULL;
788 p->code_write_count = 0;
791 /* Set to NULL all the 'first_tb' fields in all PageDescs. */
792 static void page_flush_tb_1(int level, void **lp)
794 int i;
796 if (*lp == NULL) {
797 return;
799 if (level == 0) {
800 PageDesc *pd = *lp;
802 for (i = 0; i < V_L2_SIZE; ++i) {
803 pd[i].first_tb = NULL;
804 invalidate_page_bitmap(pd + i);
806 } else {
807 void **pp = *lp;
809 for (i = 0; i < V_L2_SIZE; ++i) {
810 page_flush_tb_1(level - 1, pp + i);
815 static void page_flush_tb(void)
817 int i;
819 for (i = 0; i < V_L1_SIZE; i++) {
820 page_flush_tb_1(V_L1_SHIFT / V_L2_BITS - 1, l1_map + i);
824 /* flush all the translation blocks */
825 /* XXX: tb_flush is currently not thread safe */
826 void tb_flush(CPUState *cpu)
828 #if defined(DEBUG_FLUSH)
829 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
830 (unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer),
831 tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.tb_ctx.nb_tbs > 0 ?
832 ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)) /
833 tcg_ctx.tb_ctx.nb_tbs : 0);
834 #endif
835 if ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)
836 > tcg_ctx.code_gen_buffer_size) {
837 cpu_abort(cpu, "Internal error: code buffer overflow\n");
839 tcg_ctx.tb_ctx.nb_tbs = 0;
841 CPU_FOREACH(cpu) {
842 memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));
845 memset(tcg_ctx.tb_ctx.tb_phys_hash, 0, sizeof(tcg_ctx.tb_ctx.tb_phys_hash));
846 page_flush_tb();
848 tcg_ctx.code_gen_ptr = tcg_ctx.code_gen_buffer;
849 /* XXX: flush processor icache at this point if cache flush is
850 expensive */
851 tcg_ctx.tb_ctx.tb_flush_count++;
854 #ifdef DEBUG_TB_CHECK
856 static void tb_invalidate_check(target_ulong address)
858 TranslationBlock *tb;
859 int i;
861 address &= TARGET_PAGE_MASK;
862 for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
863 for (tb = tcg_ctx.tb_ctx.tb_phys_hash[i]; tb != NULL;
864 tb = tb->phys_hash_next) {
865 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
866 address >= tb->pc + tb->size)) {
867 printf("ERROR invalidate: address=" TARGET_FMT_lx
868 " PC=%08lx size=%04x\n",
869 address, (long)tb->pc, tb->size);
875 /* verify that all the pages have correct rights for code */
876 static void tb_page_check(void)
878 TranslationBlock *tb;
879 int i, flags1, flags2;
881 for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
882 for (tb = tcg_ctx.tb_ctx.tb_phys_hash[i]; tb != NULL;
883 tb = tb->phys_hash_next) {
884 flags1 = page_get_flags(tb->pc);
885 flags2 = page_get_flags(tb->pc + tb->size - 1);
886 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
887 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
888 (long)tb->pc, tb->size, flags1, flags2);
894 #endif
896 static inline void tb_hash_remove(TranslationBlock **ptb, TranslationBlock *tb)
898 TranslationBlock *tb1;
900 for (;;) {
901 tb1 = *ptb;
902 if (tb1 == tb) {
903 *ptb = tb1->phys_hash_next;
904 break;
906 ptb = &tb1->phys_hash_next;
910 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
912 TranslationBlock *tb1;
913 unsigned int n1;
915 for (;;) {
916 tb1 = *ptb;
917 n1 = (uintptr_t)tb1 & 3;
918 tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
919 if (tb1 == tb) {
920 *ptb = tb1->page_next[n1];
921 break;
923 ptb = &tb1->page_next[n1];
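/* Note: pointers in the per-page TB lists carry the index of the page
   (0 or 1) they belong to in their low two bits, which is why the tag
   extracted above selects which page_next[] slot to follow; the tag is
   applied in tb_alloc_page(). */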
927 /* remove the TB from a list of TBs jumping to the n-th jump target of the TB */
928 static inline void tb_remove_from_jmp_list(TranslationBlock *tb, int n)
930 TranslationBlock *tb1;
931 uintptr_t *ptb, ntb;
932 unsigned int n1;
934 ptb = &tb->jmp_list_next[n];
935 if (*ptb) {
936 /* find tb(n) in circular list */
937 for (;;) {
938 ntb = *ptb;
939 n1 = ntb & 3;
940 tb1 = (TranslationBlock *)(ntb & ~3);
941 if (n1 == n && tb1 == tb) {
942 break;
944 if (n1 == 2) {
945 ptb = &tb1->jmp_list_first;
946 } else {
947 ptb = &tb1->jmp_list_next[n1];
950 /* now we can remove tb(n) from the list */
951 *ptb = tb->jmp_list_next[n];
953 tb->jmp_list_next[n] = (uintptr_t)NULL;
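/* Note on the encoding used above: each TB heads a circular list of
   the TBs whose direct jumps target it.  Entries are tagged pointers:
   tags 0 and 1 name the jump slot used by the referencing TB (whose
   link lives in jmp_list_next[tag]), while tag 2 denotes the
   destination TB itself, whose link is jmp_list_first and which closes
   the circle. */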
957 /* reset the jump entry 'n' of a TB so that it is not chained to
958 another TB */
959 static inline void tb_reset_jump(TranslationBlock *tb, int n)
961 uintptr_t addr = (uintptr_t)(tb->tc_ptr + tb->jmp_reset_offset[n]);
962 tb_set_jmp_target(tb, n, addr);
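/* jmp_reset_offset[n] is the offset just past jump n in the generated
   code, so a reset jump effectively falls through to the code that
   exits back to the execution loop. */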
965 /* invalidate one TB */
966 void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
968 CPUState *cpu;
969 PageDesc *p;
970 unsigned int h, n1;
971 tb_page_addr_t phys_pc;
972 uintptr_t tb1, tb2;
974 /* remove the TB from the hash list */
975 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
976 h = tb_phys_hash_func(phys_pc);
977 tb_hash_remove(&tcg_ctx.tb_ctx.tb_phys_hash[h], tb);
979 /* remove the TB from the page list */
980 if (tb->page_addr[0] != page_addr) {
981 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
982 tb_page_remove(&p->first_tb, tb);
983 invalidate_page_bitmap(p);
985 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
986 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
987 tb_page_remove(&p->first_tb, tb);
988 invalidate_page_bitmap(p);
991 tcg_ctx.tb_ctx.tb_invalidated_flag = 1;
993 /* remove the TB from the hash list */
994 h = tb_jmp_cache_hash_func(tb->pc);
995 CPU_FOREACH(cpu) {
996 if (cpu->tb_jmp_cache[h] == tb) {
997 cpu->tb_jmp_cache[h] = NULL;
1001 /* remove this TB from the two jump lists */
1002 tb_remove_from_jmp_list(tb, 0);
1003 tb_remove_from_jmp_list(tb, 1);
1005 /* remove any remaining jumps to this TB */
1006 tb1 = tb->jmp_list_first;
1007 for (;;) {
1008 TranslationBlock *tmp_tb;
1009 n1 = tb1 & 3;
1010 if (n1 == 2) {
1011 break;
1013 tmp_tb = (TranslationBlock *)(tb1 & ~3);
1014 tb2 = tmp_tb->jmp_list_next[n1];
1015 tb_reset_jump(tmp_tb, n1);
1016 tmp_tb->jmp_list_next[n1] = (uintptr_t)NULL;
1017 tb1 = tb2;
1020 assert(((uintptr_t)tb & 3) == 0);
1021 tb->jmp_list_first = (uintptr_t)tb | 2; /* fail safe */
1023 tcg_ctx.tb_ctx.tb_phys_invalidate_count++;
1026 static void build_page_bitmap(PageDesc *p)
1028 int n, tb_start, tb_end;
1029 TranslationBlock *tb;
1031 p->code_bitmap = bitmap_new(TARGET_PAGE_SIZE);
1033 tb = p->first_tb;
1034 while (tb != NULL) {
1035 n = (uintptr_t)tb & 3;
1036 tb = (TranslationBlock *)((uintptr_t)tb & ~3);
1037 /* NOTE: this is subtle as a TB may span two physical pages */
1038 if (n == 0) {
1039 /* NOTE: tb_end may be after the end of the page, but
1040 it is not a problem */
1041 tb_start = tb->pc & ~TARGET_PAGE_MASK;
1042 tb_end = tb_start + tb->size;
1043 if (tb_end > TARGET_PAGE_SIZE) {
1044 tb_end = TARGET_PAGE_SIZE;
1046 } else {
1047 tb_start = 0;
1048 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1050 bitmap_set(p->code_bitmap, tb_start, tb_end - tb_start);
1051 tb = tb->page_next[n];
1055 /* add the TB to the target page and protect it if necessary
1057 * Called with mmap_lock held for user-mode emulation.
1059 static inline void tb_alloc_page(TranslationBlock *tb,
1060 unsigned int n, tb_page_addr_t page_addr)
1062 PageDesc *p;
1063 #ifndef CONFIG_USER_ONLY
1064 bool page_already_protected;
1065 #endif
1067 tb->page_addr[n] = page_addr;
1068 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
1069 tb->page_next[n] = p->first_tb;
1070 #ifndef CONFIG_USER_ONLY
1071 page_already_protected = p->first_tb != NULL;
1072 #endif
1073 p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);
1074 invalidate_page_bitmap(p);
1076 #if defined(CONFIG_USER_ONLY)
1077 if (p->flags & PAGE_WRITE) {
1078 target_ulong addr;
1079 PageDesc *p2;
1080 int prot;
1082 /* force the host page as non writable (writes will have a
1083 page fault + mprotect overhead) */
1084 page_addr &= qemu_host_page_mask;
1085 prot = 0;
1086 for (addr = page_addr; addr < page_addr + qemu_host_page_size;
1087 addr += TARGET_PAGE_SIZE) {
1089 p2 = page_find(addr >> TARGET_PAGE_BITS);
1090 if (!p2) {
1091 continue;
1093 prot |= p2->flags;
1094 p2->flags &= ~PAGE_WRITE;
1096 mprotect(g2h(page_addr), qemu_host_page_size,
1097 (prot & PAGE_BITS) & ~PAGE_WRITE);
1098 #ifdef DEBUG_TB_INVALIDATE
1099 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1100 page_addr);
1101 #endif
1103 #else
1104 /* if some code is already present, then the page is already
1105 protected, so we only need to protect it when the first TB is
1106 allocated in a physical page */
1107 if (!page_already_protected) {
1108 tlb_protect_code(page_addr);
1110 #endif
1113 /* add a new TB and link it to the physical page tables. phys_page2 is
1114 * (-1) to indicate that only one page contains the TB.
1116 * Called with mmap_lock held for user-mode emulation.
1118 static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
1119 tb_page_addr_t phys_page2)
1121 unsigned int h;
1122 TranslationBlock **ptb;
1124 /* add in the physical hash table */
1125 h = tb_phys_hash_func(phys_pc);
1126 ptb = &tcg_ctx.tb_ctx.tb_phys_hash[h];
1127 tb->phys_hash_next = *ptb;
1128 *ptb = tb;
1130 /* add in the page list */
1131 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1132 if (phys_page2 != -1) {
1133 tb_alloc_page(tb, 1, phys_page2);
1134 } else {
1135 tb->page_addr[1] = -1;
1138 #ifdef DEBUG_TB_CHECK
1139 tb_page_check();
1140 #endif
1143 /* Called with mmap_lock held for user mode emulation. */
1144 TranslationBlock *tb_gen_code(CPUState *cpu,
1145 target_ulong pc, target_ulong cs_base,
1146 uint32_t flags, int cflags)
1148 CPUArchState *env = cpu->env_ptr;
1149 TranslationBlock *tb;
1150 tb_page_addr_t phys_pc, phys_page2;
1151 target_ulong virt_page2;
1152 tcg_insn_unit *gen_code_buf;
1153 int gen_code_size, search_size;
1154 #ifdef CONFIG_PROFILER
1155 int64_t ti;
1156 #endif
1158 phys_pc = get_page_addr_code(env, pc);
1159 if (use_icount && !(cflags & CF_IGNORE_ICOUNT)) {
1160 cflags |= CF_USE_ICOUNT;
1163 tb = tb_alloc(pc);
1164 if (unlikely(!tb)) {
1165 buffer_overflow:
1166 /* flush must be done */
1167 tb_flush(cpu);
1168 /* cannot fail at this point */
1169 tb = tb_alloc(pc);
1170 assert(tb != NULL);
1171 /* Don't forget to invalidate previous TB info. */
1172 tcg_ctx.tb_ctx.tb_invalidated_flag = 1;
1175 gen_code_buf = tcg_ctx.code_gen_ptr;
1176 tb->tc_ptr = gen_code_buf;
1177 tb->cs_base = cs_base;
1178 tb->flags = flags;
1179 tb->cflags = cflags;
1181 #ifdef CONFIG_PROFILER
1182 tcg_ctx.tb_count1++; /* includes aborted translations because of
1183 exceptions */
1184 ti = profile_getclock();
1185 #endif
1187 tcg_func_start(&tcg_ctx);
1189 gen_intermediate_code(env, tb);
1191 trace_translate_block(tb, tb->pc, tb->tc_ptr);
1193 /* generate machine code */
1194 tb->jmp_reset_offset[0] = TB_JMP_RESET_OFFSET_INVALID;
1195 tb->jmp_reset_offset[1] = TB_JMP_RESET_OFFSET_INVALID;
1196 tcg_ctx.tb_jmp_reset_offset = tb->jmp_reset_offset;
1197 #ifdef USE_DIRECT_JUMP
1198 tcg_ctx.tb_jmp_insn_offset = tb->jmp_insn_offset;
1199 tcg_ctx.tb_jmp_target_addr = NULL;
1200 #else
1201 tcg_ctx.tb_jmp_insn_offset = NULL;
1202 tcg_ctx.tb_jmp_target_addr = tb->jmp_target_addr;
1203 #endif
1205 #ifdef CONFIG_PROFILER
1206 tcg_ctx.tb_count++;
1207 tcg_ctx.interm_time += profile_getclock() - ti;
1208 tcg_ctx.code_time -= profile_getclock();
1209 #endif
1211 /* ??? Overflow could be handled better here. In particular, we
1212 don't need to re-do gen_intermediate_code, nor should we re-do
1213 the tcg optimization currently hidden inside tcg_gen_code. All
1214 that should be required is to flush the TBs, allocate a new TB,
1215 re-initialize it per above, and re-do the actual code generation. */
1216 gen_code_size = tcg_gen_code(&tcg_ctx, tb);
1217 if (unlikely(gen_code_size < 0)) {
1218 goto buffer_overflow;
1220 search_size = encode_search(tb, (void *)gen_code_buf + gen_code_size);
1221 if (unlikely(search_size < 0)) {
1222 goto buffer_overflow;
1225 #ifdef CONFIG_PROFILER
1226 tcg_ctx.code_time += profile_getclock();
1227 tcg_ctx.code_in_len += tb->size;
1228 tcg_ctx.code_out_len += gen_code_size;
1229 tcg_ctx.search_out_len += search_size;
1230 #endif
1232 #ifdef DEBUG_DISAS
1233 if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM) &&
1234 qemu_log_in_addr_range(tb->pc)) {
1235 qemu_log("OUT: [size=%d]\n", gen_code_size);
1236 log_disas(tb->tc_ptr, gen_code_size);
1237 qemu_log("\n");
1238 qemu_log_flush();
1240 #endif
1242 tcg_ctx.code_gen_ptr = (void *)
1243 ROUND_UP((uintptr_t)gen_code_buf + gen_code_size + search_size,
1244 CODE_GEN_ALIGN);
1246 /* init jump list */
1247 assert(((uintptr_t)tb & 3) == 0);
1248 tb->jmp_list_first = (uintptr_t)tb | 2;
1249 tb->jmp_list_next[0] = (uintptr_t)NULL;
1250 tb->jmp_list_next[1] = (uintptr_t)NULL;
1252 /* init original jump addresses which have been set during tcg_gen_code() */
1253 if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
1254 tb_reset_jump(tb, 0);
1256 if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
1257 tb_reset_jump(tb, 1);
1260 /* check next page if needed */
1261 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
1262 phys_page2 = -1;
1263 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
1264 phys_page2 = get_page_addr_code(env, virt_page2);
1266 /* As long as consistency of the TB stuff is provided by tb_lock in user
1267 * mode and is implicit in single-threaded softmmu emulation, no explicit
1268 * memory barrier is required before tb_link_page() makes the TB visible
1269 * through the physical hash table and physical page list.
1271 tb_link_page(tb, phys_pc, phys_page2);
1272 return tb;
1276 * Invalidate all TBs which intersect with the target physical address range
1277 * [start;end[. NOTE: start and end may refer to *different* physical pages.
1282 * Called with mmap_lock held for user-mode emulation
1284 void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
1286 while (start < end) {
1287 tb_invalidate_phys_page_range(start, end, 0);
1288 start &= TARGET_PAGE_MASK;
1289 start += TARGET_PAGE_SIZE;
1294 * Invalidate all TBs which intersect with the target physical address range
1295 * [start;end[. NOTE: start and end must refer to the *same* physical page.
1296 * 'is_cpu_write_access' should be true if called from a real cpu write
1297 * access: the virtual CPU will exit the current TB if code is modified inside
1298 * this TB.
1300 * Called with mmap_lock held for user-mode emulation
1302 void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
1303 int is_cpu_write_access)
1305 TranslationBlock *tb, *tb_next, *saved_tb;
1306 CPUState *cpu = current_cpu;
1307 #if defined(TARGET_HAS_PRECISE_SMC)
1308 CPUArchState *env = NULL;
1309 #endif
1310 tb_page_addr_t tb_start, tb_end;
1311 PageDesc *p;
1312 int n;
1313 #ifdef TARGET_HAS_PRECISE_SMC
1314 int current_tb_not_found = is_cpu_write_access;
1315 TranslationBlock *current_tb = NULL;
1316 int current_tb_modified = 0;
1317 target_ulong current_pc = 0;
1318 target_ulong current_cs_base = 0;
1319 uint32_t current_flags = 0;
1320 #endif /* TARGET_HAS_PRECISE_SMC */
1322 p = page_find(start >> TARGET_PAGE_BITS);
1323 if (!p) {
1324 return;
1326 #if defined(TARGET_HAS_PRECISE_SMC)
1327 if (cpu != NULL) {
1328 env = cpu->env_ptr;
1330 #endif
1332 /* we remove all the TBs in the range [start, end[ */
1333 /* XXX: see if in some cases it could be faster to invalidate all
1334 the code */
1335 tb = p->first_tb;
1336 while (tb != NULL) {
1337 n = (uintptr_t)tb & 3;
1338 tb = (TranslationBlock *)((uintptr_t)tb & ~3);
1339 tb_next = tb->page_next[n];
1340 /* NOTE: this is subtle as a TB may span two physical pages */
1341 if (n == 0) {
1342 /* NOTE: tb_end may be after the end of the page, but
1343 it is not a problem */
1344 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1345 tb_end = tb_start + tb->size;
1346 } else {
1347 tb_start = tb->page_addr[1];
1348 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1350 if (!(tb_end <= start || tb_start >= end)) {
1351 #ifdef TARGET_HAS_PRECISE_SMC
1352 if (current_tb_not_found) {
1353 current_tb_not_found = 0;
1354 current_tb = NULL;
1355 if (cpu->mem_io_pc) {
1356 /* now we have a real cpu fault */
1357 current_tb = tb_find_pc(cpu->mem_io_pc);
1360 if (current_tb == tb &&
1361 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1362 /* If we are modifying the current TB, we must stop
1363 its execution. We could be more precise by checking
1364 that the modification is after the current PC, but it
1365 would require a specialized function to partially
1366 restore the CPU state */
1368 current_tb_modified = 1;
1369 cpu_restore_state_from_tb(cpu, current_tb, cpu->mem_io_pc);
1370 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1371 &current_flags);
1373 #endif /* TARGET_HAS_PRECISE_SMC */
1374 /* we need to do that to handle the case where a signal
1375 occurs while doing tb_phys_invalidate() */
1376 saved_tb = NULL;
1377 if (cpu != NULL) {
1378 saved_tb = cpu->current_tb;
1379 cpu->current_tb = NULL;
1381 tb_phys_invalidate(tb, -1);
1382 if (cpu != NULL) {
1383 cpu->current_tb = saved_tb;
1384 if (cpu->interrupt_request && cpu->current_tb) {
1385 cpu_interrupt(cpu, cpu->interrupt_request);
1389 tb = tb_next;
1391 #if !defined(CONFIG_USER_ONLY)
1392 /* if no code remaining, no need to continue to use slow writes */
1393 if (!p->first_tb) {
1394 invalidate_page_bitmap(p);
1395 tlb_unprotect_code(start);
1397 #endif
1398 #ifdef TARGET_HAS_PRECISE_SMC
1399 if (current_tb_modified) {
1400 /* we generate a block containing just the instruction
1401 modifying the memory. It will ensure that it cannot modify
1402 itself */
1403 cpu->current_tb = NULL;
1404 tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1);
1405 cpu_resume_from_signal(cpu, NULL);
1407 #endif
1410 /* len must be <= 8 and start must be a multiple of len */
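/* (Called from the memory-write slow path when a write hits a page
   that contains translated code; len is the size of the access.) */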
1411 void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
1413 PageDesc *p;
1415 #if 0
1416 if (1) {
1417 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1418 cpu_single_env->mem_io_vaddr, len,
1419 cpu_single_env->eip,
1420 cpu_single_env->eip +
1421 (intptr_t)cpu_single_env->segs[R_CS].base);
1423 #endif
1424 p = page_find(start >> TARGET_PAGE_BITS);
1425 if (!p) {
1426 return;
1428 if (!p->code_bitmap &&
1429 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD) {
1430 /* build code bitmap */
1431 build_page_bitmap(p);
1433 if (p->code_bitmap) {
1434 unsigned int nr;
1435 unsigned long b;
1437 nr = start & ~TARGET_PAGE_MASK;
1438 b = p->code_bitmap[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG - 1));
1439 if (b & ((1 << len) - 1)) {
1440 goto do_invalidate;
1442 } else {
1443 do_invalidate:
1444 tb_invalidate_phys_page_range(start, start + len, 1);
1448 #if !defined(CONFIG_SOFTMMU)
1449 /* Called with mmap_lock held. */
1450 static void tb_invalidate_phys_page(tb_page_addr_t addr,
1451 uintptr_t pc, void *puc,
1452 bool locked)
1454 TranslationBlock *tb;
1455 PageDesc *p;
1456 int n;
1457 #ifdef TARGET_HAS_PRECISE_SMC
1458 TranslationBlock *current_tb = NULL;
1459 CPUState *cpu = current_cpu;
1460 CPUArchState *env = NULL;
1461 int current_tb_modified = 0;
1462 target_ulong current_pc = 0;
1463 target_ulong current_cs_base = 0;
1464 uint32_t current_flags = 0;
1465 #endif
1467 addr &= TARGET_PAGE_MASK;
1468 p = page_find(addr >> TARGET_PAGE_BITS);
1469 if (!p) {
1470 return;
1472 tb = p->first_tb;
1473 #ifdef TARGET_HAS_PRECISE_SMC
1474 if (tb && pc != 0) {
1475 current_tb = tb_find_pc(pc);
1477 if (cpu != NULL) {
1478 env = cpu->env_ptr;
1480 #endif
1481 while (tb != NULL) {
1482 n = (uintptr_t)tb & 3;
1483 tb = (TranslationBlock *)((uintptr_t)tb & ~3);
1484 #ifdef TARGET_HAS_PRECISE_SMC
1485 if (current_tb == tb &&
1486 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1487 /* If we are modifying the current TB, we must stop
1488 its execution. We could be more precise by checking
1489 that the modification is after the current PC, but it
1490 would require a specialized function to partially
1491 restore the CPU state */
1493 current_tb_modified = 1;
1494 cpu_restore_state_from_tb(cpu, current_tb, pc);
1495 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1496 &current_flags);
1498 #endif /* TARGET_HAS_PRECISE_SMC */
1499 tb_phys_invalidate(tb, addr);
1500 tb = tb->page_next[n];
1502 p->first_tb = NULL;
1503 #ifdef TARGET_HAS_PRECISE_SMC
1504 if (current_tb_modified) {
1505 /* we generate a block containing just the instruction
1506 modifying the memory. It will ensure that it cannot modify
1507 itself */
1508 cpu->current_tb = NULL;
1509 tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1);
1510 if (locked) {
1511 mmap_unlock();
1513 cpu_resume_from_signal(cpu, puc);
1515 #endif
1517 #endif
1519 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1520 tb[1].tc_ptr. Return NULL if not found */
1521 static TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
1523 int m_min, m_max, m;
1524 uintptr_t v;
1525 TranslationBlock *tb;
1527 if (tcg_ctx.tb_ctx.nb_tbs <= 0) {
1528 return NULL;
1530 if (tc_ptr < (uintptr_t)tcg_ctx.code_gen_buffer ||
1531 tc_ptr >= (uintptr_t)tcg_ctx.code_gen_ptr) {
1532 return NULL;
1534 /* binary search (cf Knuth) */
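/* This relies on tbs[] being filled in order of increasing tc_ptr,
   which holds because tb_alloc() carves blocks sequentially out of
   code_gen_buffer. */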
1535 m_min = 0;
1536 m_max = tcg_ctx.tb_ctx.nb_tbs - 1;
1537 while (m_min <= m_max) {
1538 m = (m_min + m_max) >> 1;
1539 tb = &tcg_ctx.tb_ctx.tbs[m];
1540 v = (uintptr_t)tb->tc_ptr;
1541 if (v == tc_ptr) {
1542 return tb;
1543 } else if (tc_ptr < v) {
1544 m_max = m - 1;
1545 } else {
1546 m_min = m + 1;
1549 return &tcg_ctx.tb_ctx.tbs[m_max];
1552 #if !defined(CONFIG_USER_ONLY)
1553 void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr)
1555 ram_addr_t ram_addr;
1556 MemoryRegion *mr;
1557 hwaddr l = 1;
1559 rcu_read_lock();
1560 mr = address_space_translate(as, addr, &addr, &l, false);
1561 if (!(memory_region_is_ram(mr)
1562 || memory_region_is_romd(mr))) {
1563 rcu_read_unlock();
1564 return;
1566 ram_addr = (memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK)
1567 + addr;
1568 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1569 rcu_read_unlock();
1571 #endif /* !defined(CONFIG_USER_ONLY) */
1573 void tb_check_watchpoint(CPUState *cpu)
1575 TranslationBlock *tb;
1577 tb = tb_find_pc(cpu->mem_io_pc);
1578 if (tb) {
1579 /* We can use retranslation to find the PC. */
1580 cpu_restore_state_from_tb(cpu, tb, cpu->mem_io_pc);
1581 tb_phys_invalidate(tb, -1);
1582 } else {
1583 /* The exception probably happened in a helper. The CPU state should
1584 have been saved before calling it. Fetch the PC from there. */
1585 CPUArchState *env = cpu->env_ptr;
1586 target_ulong pc, cs_base;
1587 tb_page_addr_t addr;
1588 uint32_t flags;
1590 cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
1591 addr = get_page_addr_code(env, pc);
1592 tb_invalidate_phys_range(addr, addr + 1);
1596 #ifndef CONFIG_USER_ONLY
1597 /* in deterministic execution mode, instructions doing device I/Os
1598 must be at the end of the TB */
1599 void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
1601 #if defined(TARGET_MIPS) || defined(TARGET_SH4)
1602 CPUArchState *env = cpu->env_ptr;
1603 #endif
1604 TranslationBlock *tb;
1605 uint32_t n, cflags;
1606 target_ulong pc, cs_base;
1607 uint32_t flags;
1609 tb = tb_find_pc(retaddr);
1610 if (!tb) {
1611 cpu_abort(cpu, "cpu_io_recompile: could not find TB for pc=%p",
1612 (void *)retaddr);
1614 n = cpu->icount_decr.u16.low + tb->icount;
1615 cpu_restore_state_from_tb(cpu, tb, retaddr);
1616 /* Calculate how many instructions had been executed before the fault
1617 occurred. */
1618 n = n - cpu->icount_decr.u16.low;
1619 /* Generate a new TB ending on the I/O insn. */
1620 n++;
1621 /* On MIPS and SH, delay slot instructions can only be restarted if
1622 they were already the first instruction in the TB. If this is not
1623 the first instruction in a TB then re-execute the preceding
1624 branch. */
1625 #if defined(TARGET_MIPS)
1626 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
1627 env->active_tc.PC -= (env->hflags & MIPS_HFLAG_B16 ? 2 : 4);
1628 cpu->icount_decr.u16.low++;
1629 env->hflags &= ~MIPS_HFLAG_BMASK;
1631 #elif defined(TARGET_SH4)
1632 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
1633 && n > 1) {
1634 env->pc -= 2;
1635 cpu->icount_decr.u16.low++;
1636 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
1638 #endif
1639 /* This should never happen. */
1640 if (n > CF_COUNT_MASK) {
1641 cpu_abort(cpu, "TB too big during recompile");
1644 cflags = n | CF_LAST_IO;
1645 pc = tb->pc;
1646 cs_base = tb->cs_base;
1647 flags = tb->flags;
1648 tb_phys_invalidate(tb, -1);
1649 if (tb->cflags & CF_NOCACHE) {
1650 if (tb->orig_tb) {
1651 /* Invalidate original TB if this TB was generated in
1652 * cpu_exec_nocache() */
1653 tb_phys_invalidate(tb->orig_tb, -1);
1655 tb_free(tb);
1657 /* FIXME: In theory this could raise an exception. In practice
1658 we have already translated the block once so it's probably ok. */
1659 tb_gen_code(cpu, pc, cs_base, flags, cflags);
1660 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
1661 the first in the TB) then we end up generating a whole new TB and
1662 repeating the fault, which is horribly inefficient.
1663 Better would be to execute just this insn uncached, or generate a
1664 second new TB. */
1665 cpu_resume_from_signal(cpu, NULL);
1668 void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr)
1670 unsigned int i;
1672 /* Discard jump cache entries for any tb which might potentially
1673 overlap the flushed page. */
1674 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1675 memset(&cpu->tb_jmp_cache[i], 0,
1676 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1678 i = tb_jmp_cache_hash_page(addr);
1679 memset(&cpu->tb_jmp_cache[i], 0,
1680 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1683 void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
1685 int i, target_code_size, max_target_code_size;
1686 int direct_jmp_count, direct_jmp2_count, cross_page;
1687 TranslationBlock *tb;
1689 target_code_size = 0;
1690 max_target_code_size = 0;
1691 cross_page = 0;
1692 direct_jmp_count = 0;
1693 direct_jmp2_count = 0;
1694 for (i = 0; i < tcg_ctx.tb_ctx.nb_tbs; i++) {
1695 tb = &tcg_ctx.tb_ctx.tbs[i];
1696 target_code_size += tb->size;
1697 if (tb->size > max_target_code_size) {
1698 max_target_code_size = tb->size;
1700 if (tb->page_addr[1] != -1) {
1701 cross_page++;
1703 if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
1704 direct_jmp_count++;
1705 if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
1706 direct_jmp2_count++;
1710 /* XXX: avoid using doubles ? */
1711 cpu_fprintf(f, "Translation buffer state:\n");
1712 cpu_fprintf(f, "gen code size %td/%zd\n",
1713 tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer,
1714 tcg_ctx.code_gen_highwater - tcg_ctx.code_gen_buffer);
1715 cpu_fprintf(f, "TB count %d/%d\n",
1716 tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.code_gen_max_blocks);
1717 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
1718 tcg_ctx.tb_ctx.nb_tbs ? target_code_size /
1719 tcg_ctx.tb_ctx.nb_tbs : 0,
1720 max_target_code_size);
1721 cpu_fprintf(f, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
1722 tcg_ctx.tb_ctx.nb_tbs ? (tcg_ctx.code_gen_ptr -
1723 tcg_ctx.code_gen_buffer) /
1724 tcg_ctx.tb_ctx.nb_tbs : 0,
1725 target_code_size ? (double) (tcg_ctx.code_gen_ptr -
1726 tcg_ctx.code_gen_buffer) /
1727 target_code_size : 0);
1728 cpu_fprintf(f, "cross page TB count %d (%d%%)\n", cross_page,
1729 tcg_ctx.tb_ctx.nb_tbs ? (cross_page * 100) /
1730 tcg_ctx.tb_ctx.nb_tbs : 0);
1731 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
1732 direct_jmp_count,
1733 tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp_count * 100) /
1734 tcg_ctx.tb_ctx.nb_tbs : 0,
1735 direct_jmp2_count,
1736 tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp2_count * 100) /
1737 tcg_ctx.tb_ctx.nb_tbs : 0);
1738 cpu_fprintf(f, "\nStatistics:\n");
1739 cpu_fprintf(f, "TB flush count %d\n", tcg_ctx.tb_ctx.tb_flush_count);
1740 cpu_fprintf(f, "TB invalidate count %d\n",
1741 tcg_ctx.tb_ctx.tb_phys_invalidate_count);
1742 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
1743 tcg_dump_info(f, cpu_fprintf);
1746 void dump_opcount_info(FILE *f, fprintf_function cpu_fprintf)
1748 tcg_dump_op_count(f, cpu_fprintf);
1751 #else /* CONFIG_USER_ONLY */
1753 void cpu_interrupt(CPUState *cpu, int mask)
1755 cpu->interrupt_request |= mask;
1756 cpu->tcg_exit_req = 1;
1760 * Walks guest process memory "regions" one by one
1761 * and calls callback function 'fn' for each region.
1763 struct walk_memory_regions_data {
1764 walk_memory_regions_fn fn;
1765 void *priv;
1766 target_ulong start;
1767 int prot;
1770 static int walk_memory_regions_end(struct walk_memory_regions_data *data,
1771 target_ulong end, int new_prot)
1773 if (data->start != -1u) {
1774 int rc = data->fn(data->priv, data->start, end, data->prot);
1775 if (rc != 0) {
1776 return rc;
1780 data->start = (new_prot ? end : -1u);
1781 data->prot = new_prot;
1783 return 0;
1786 static int walk_memory_regions_1(struct walk_memory_regions_data *data,
1787 target_ulong base, int level, void **lp)
1789 target_ulong pa;
1790 int i, rc;
1792 if (*lp == NULL) {
1793 return walk_memory_regions_end(data, base, 0);
1796 if (level == 0) {
1797 PageDesc *pd = *lp;
1799 for (i = 0; i < V_L2_SIZE; ++i) {
1800 int prot = pd[i].flags;
1802 pa = base | (i << TARGET_PAGE_BITS);
1803 if (prot != data->prot) {
1804 rc = walk_memory_regions_end(data, pa, prot);
1805 if (rc != 0) {
1806 return rc;
1810 } else {
1811 void **pp = *lp;
1813 for (i = 0; i < V_L2_SIZE; ++i) {
1814 pa = base | ((target_ulong)i <<
1815 (TARGET_PAGE_BITS + V_L2_BITS * level));
1816 rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
1817 if (rc != 0) {
1818 return rc;
1823 return 0;
1826 int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
1828 struct walk_memory_regions_data data;
1829 uintptr_t i;
1831 data.fn = fn;
1832 data.priv = priv;
1833 data.start = -1u;
1834 data.prot = 0;
1836 for (i = 0; i < V_L1_SIZE; i++) {
1837 int rc = walk_memory_regions_1(&data, (target_ulong)i << (V_L1_SHIFT + TARGET_PAGE_BITS),
1838 V_L1_SHIFT / V_L2_BITS - 1, l1_map + i);
1839 if (rc != 0) {
1840 return rc;
1844 return walk_memory_regions_end(&data, 0, 0);
1847 static int dump_region(void *priv, target_ulong start,
1848 target_ulong end, unsigned long prot)
1850 FILE *f = (FILE *)priv;
1852 (void) fprintf(f, TARGET_FMT_lx"-"TARGET_FMT_lx
1853 " "TARGET_FMT_lx" %c%c%c\n",
1854 start, end, end - start,
1855 ((prot & PAGE_READ) ? 'r' : '-'),
1856 ((prot & PAGE_WRITE) ? 'w' : '-'),
1857 ((prot & PAGE_EXEC) ? 'x' : '-'));
1859 return 0;
1862 /* dump memory mappings */
1863 void page_dump(FILE *f)
1865 const int length = sizeof(target_ulong) * 2;
1866 (void) fprintf(f, "%-*s %-*s %-*s %s\n",
1867 length, "start", length, "end", length, "size", "prot");
1868 walk_memory_regions(f, dump_region);
1871 int page_get_flags(target_ulong address)
1873 PageDesc *p;
1875 p = page_find(address >> TARGET_PAGE_BITS);
1876 if (!p) {
1877 return 0;
1879 return p->flags;
1882 /* Modify the flags of a page and invalidate the code if necessary.
1883 The flag PAGE_WRITE_ORG is set automatically based
1884 on PAGE_WRITE. The mmap_lock should already be held. */
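/* (PAGE_WRITE_ORG records that the guest mapping is logically
   writable even while PAGE_WRITE has been dropped temporarily to trap
   writes to pages containing translated code; see page_unprotect().) */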
1885 void page_set_flags(target_ulong start, target_ulong end, int flags)
1887 target_ulong addr, len;
1889 /* This function should never be called with addresses outside the
1890 guest address space. If this assert fires, it probably indicates
1891 a missing call to h2g_valid. */
1892 #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
1893 assert(end < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
1894 #endif
1895 assert(start < end);
1897 start = start & TARGET_PAGE_MASK;
1898 end = TARGET_PAGE_ALIGN(end);
1900 if (flags & PAGE_WRITE) {
1901 flags |= PAGE_WRITE_ORG;
1904 for (addr = start, len = end - start;
1905 len != 0;
1906 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
1907 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
1909 /* If the write protection bit is set, then we invalidate
1910 the code inside. */
1911 if (!(p->flags & PAGE_WRITE) &&
1912 (flags & PAGE_WRITE) &&
1913 p->first_tb) {
1914 tb_invalidate_phys_page(addr, 0, NULL, false);
1916 p->flags = flags;
1920 int page_check_range(target_ulong start, target_ulong len, int flags)
1922 PageDesc *p;
1923 target_ulong end;
1924 target_ulong addr;
1926 /* This function should never be called with addresses outside the
1927 guest address space. If this assert fires, it probably indicates
1928 a missing call to h2g_valid. */
1929 #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
1930 assert(start < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
1931 #endif
1933 if (len == 0) {
1934 return 0;
1936 if (start + len - 1 < start) {
1937 /* We've wrapped around. */
1938 return -1;
1941 /* must do this before we lose bits in the next step */
1942 end = TARGET_PAGE_ALIGN(start + len);
1943 start = start & TARGET_PAGE_MASK;
1945 for (addr = start, len = end - start;
1946 len != 0;
1947 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
1948 p = page_find(addr >> TARGET_PAGE_BITS);
1949 if (!p) {
1950 return -1;
1952 if (!(p->flags & PAGE_VALID)) {
1953 return -1;
1956 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) {
1957 return -1;
1959 if (flags & PAGE_WRITE) {
1960 if (!(p->flags & PAGE_WRITE_ORG)) {
1961 return -1;
1963 /* unprotect the page if it was made read-only because it
1964 contains translated code */
1965 if (!(p->flags & PAGE_WRITE)) {
1966 if (!page_unprotect(addr, 0, NULL)) {
1967 return -1;
1972 return 0;
1975 /* called from signal handler: invalidate the code and unprotect the
1976 page. Return TRUE if the fault was successfully handled. */
1977 int page_unprotect(target_ulong address, uintptr_t pc, void *puc)
1979 unsigned int prot;
1980 PageDesc *p;
1981 target_ulong host_start, host_end, addr;
1983 /* Technically this isn't safe inside a signal handler. However we
1984 know this only ever happens in a synchronous SEGV handler, so in
1985 practice it seems to be ok. */
1986 mmap_lock();
1988 p = page_find(address >> TARGET_PAGE_BITS);
1989 if (!p) {
1990 mmap_unlock();
1991 return 0;
1994 /* if the page was really writable, then we change its
1995 protection back to writable */
1996 if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
1997 host_start = address & qemu_host_page_mask;
1998 host_end = host_start + qemu_host_page_size;
2000 prot = 0;
2001 for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
2002 p = page_find(addr >> TARGET_PAGE_BITS);
2003 p->flags |= PAGE_WRITE;
2004 prot |= p->flags;
2006 /* and since the content will be modified, we must invalidate
2007 the corresponding translated code. */
2008 tb_invalidate_phys_page(addr, pc, puc, true);
2009 #ifdef DEBUG_TB_CHECK
2010 tb_invalidate_check(addr);
2011 #endif
2013 mprotect((void *)g2h(host_start), qemu_host_page_size,
2014 prot & PAGE_BITS);
2016 mmap_unlock();
2017 return 1;
2019 mmap_unlock();
2020 return 0;
2022 #endif /* CONFIG_USER_ONLY */