tcg: consistently access cpu->tb_jmp_cache atomically
[qemu/ar7.git] / accel/tcg/translate-all.c
blob 93fb9230ba94c65c961bb7b964e171a4dca9efb8
1 /*
2 * Host code generation
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 #ifdef _WIN32
20 #include <windows.h>
21 #endif
22 #include "qemu/osdep.h"
25 #include "qemu-common.h"
26 #define NO_CPU_IO_DEFS
27 #include "cpu.h"
28 #include "trace.h"
29 #include "disas/disas.h"
30 #include "exec/exec-all.h"
31 #include "tcg.h"
32 #if defined(CONFIG_USER_ONLY)
33 #include "qemu.h"
34 #include "exec/exec-all.h"
35 #if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
36 #include <sys/param.h>
37 #if __FreeBSD_version >= 700104
38 #define HAVE_KINFO_GETVMMAP
39 #define sigqueue sigqueue_freebsd /* avoid redefinition */
40 #include <sys/proc.h>
41 #include <machine/profile.h>
42 #define _KERNEL
43 #include <sys/user.h>
44 #undef _KERNEL
45 #undef sigqueue
46 #include <libutil.h>
47 #endif
48 #endif
49 #else
50 #include "exec/address-spaces.h"
51 #endif
53 #include "exec/cputlb.h"
54 #include "exec/tb-hash.h"
55 #include "translate-all.h"
56 #include "qemu/bitmap.h"
57 #include "qemu/timer.h"
58 #include "qemu/main-loop.h"
59 #include "exec/log.h"
60 #include "sysemu/cpus.h"
62 /* #define DEBUG_TB_INVALIDATE */
63 /* #define DEBUG_TB_FLUSH */
64 /* make various TB consistency checks */
65 /* #define DEBUG_TB_CHECK */
67 #if !defined(CONFIG_USER_ONLY)
68 /* TB consistency checks only implemented for usermode emulation. */
69 #undef DEBUG_TB_CHECK
70 #endif
72 /* Access to the various translation structures needs to be serialised via locks
73 * for consistency. This is automatic for SoftMMU-based system
74 * emulation due to its single-threaded nature. In user-mode emulation,
75 * access to the memory-related structures is protected by the
76 * mmap_lock.
78 #ifdef CONFIG_SOFTMMU
79 #define assert_memory_lock() tcg_debug_assert(have_tb_lock)
80 #else
81 #define assert_memory_lock() tcg_debug_assert(have_mmap_lock())
82 #endif
84 #define SMC_BITMAP_USE_THRESHOLD 10
86 typedef struct PageDesc {
87 /* list of TBs intersecting this ram page */
88 TranslationBlock *first_tb;
89 #ifdef CONFIG_SOFTMMU
90 /* in order to optimize self-modifying code, we count the number
91 of writes to a given page so that we can switch to using a bitmap */
92 unsigned int code_write_count;
93 unsigned long *code_bitmap;
94 #else
95 unsigned long flags;
96 #endif
97 } PageDesc;
99 /* In system mode we want L1_MAP to be based on ram offsets,
100 while in user mode we want it to be based on virtual addresses. */
101 #if !defined(CONFIG_USER_ONLY)
102 #if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
103 # define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS
104 #else
105 # define L1_MAP_ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS
106 #endif
107 #else
108 # define L1_MAP_ADDR_SPACE_BITS TARGET_VIRT_ADDR_SPACE_BITS
109 #endif
111 /* Size of the L2 (and L3, etc) page tables. */
112 #define V_L2_BITS 10
113 #define V_L2_SIZE (1 << V_L2_BITS)
115 uintptr_t qemu_host_page_size;
116 intptr_t qemu_host_page_mask;
119 * L1 Mapping properties
121 static int v_l1_size;
122 static int v_l1_shift;
123 static int v_l2_levels;
125 /* The bottom level has pointers to PageDesc, and is indexed by
126 * anything from 4 to (V_L2_BITS + 3) bits, depending on target page size.
128 #define V_L1_MIN_BITS 4
129 #define V_L1_MAX_BITS (V_L2_BITS + 3)
130 #define V_L1_MAX_SIZE (1 << V_L1_MAX_BITS)
132 static void *l1_map[V_L1_MAX_SIZE];
134 /* code generation context */
135 TCGContext tcg_ctx;
136 bool parallel_cpus;
138 /* translation block context */
139 __thread int have_tb_lock;
141 static void page_table_config_init(void)
143 uint32_t v_l1_bits;
145 assert(TARGET_PAGE_BITS);
146 /* The bits remaining after N lower levels of page tables. */
147 v_l1_bits = (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % V_L2_BITS;
148 if (v_l1_bits < V_L1_MIN_BITS) {
149 v_l1_bits += V_L2_BITS;
152 v_l1_size = 1 << v_l1_bits;
153 v_l1_shift = L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - v_l1_bits;
154 v_l2_levels = v_l1_shift / V_L2_BITS - 1;
156 assert(v_l1_bits <= V_L1_MAX_BITS);
157 assert(v_l1_shift % V_L2_BITS == 0);
158 assert(v_l2_levels >= 0);
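/* Illustrative sketch: the arithmetic above worked through for one
 * hypothetical configuration (the constants and function name below are
 * made up for the example).  With a 36-bit mapped address space,
 * 12-bit pages and the 10-bit V_L2_BITS defined above, the remaining
 * 24 bits split into a 16-entry L1 table, one intermediate 10-bit
 * level and the 10-bit bottom (PageDesc) level.
 */
static void example_page_table_config(void)
{
    const int addr_bits = 36;      /* hypothetical L1_MAP_ADDR_SPACE_BITS */
    const int page_bits = 12;      /* hypothetical TARGET_PAGE_BITS */
    int l1_bits, l1_size, l1_shift, l2_levels;

    l1_bits = (addr_bits - page_bits) % V_L2_BITS;     /* 24 % 10 == 4 */
    if (l1_bits < V_L1_MIN_BITS) {
        l1_bits += V_L2_BITS;
    }
    l1_size = 1 << l1_bits;                            /* 16 entries */
    l1_shift = addr_bits - page_bits - l1_bits;        /* 20 bits below L1 */
    l2_levels = l1_shift / V_L2_BITS - 1;              /* 1 intermediate level */

    assert(l1_size == 16 && l1_shift == 20 && l2_levels == 1);
}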
161 #define assert_tb_locked() tcg_debug_assert(have_tb_lock)
162 #define assert_tb_unlocked() tcg_debug_assert(!have_tb_lock)
164 void tb_lock(void)
166 assert_tb_unlocked();
167 qemu_mutex_lock(&tcg_ctx.tb_ctx.tb_lock);
168 have_tb_lock++;
171 void tb_unlock(void)
173 assert_tb_locked();
174 have_tb_lock--;
175 qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
178 void tb_lock_reset(void)
180 if (have_tb_lock) {
181 qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
182 have_tb_lock = 0;
186 static TranslationBlock *tb_find_pc(uintptr_t tc_ptr);
188 void cpu_gen_init(void)
190 tcg_context_init(&tcg_ctx);
193 /* Encode VAL as a signed leb128 sequence at P.
194 Return P incremented past the encoded value. */
195 static uint8_t *encode_sleb128(uint8_t *p, target_long val)
197 int more, byte;
199 do {
200 byte = val & 0x7f;
201 val >>= 7;
202 more = !((val == 0 && (byte & 0x40) == 0)
203 || (val == -1 && (byte & 0x40) != 0));
204 if (more) {
205 byte |= 0x80;
207 *p++ = byte;
208 } while (more);
210 return p;
213 /* Decode a signed leb128 sequence at *PP; increment *PP past the
214 decoded value. Return the decoded value. */
215 static target_long decode_sleb128(uint8_t **pp)
217 uint8_t *p = *pp;
218 target_long val = 0;
219 int byte, shift = 0;
221 do {
222 byte = *p++;
223 val |= (target_ulong)(byte & 0x7f) << shift;
224 shift += 7;
225 } while (byte & 0x80);
226 if (shift < TARGET_LONG_BITS && (byte & 0x40)) {
227 val |= -(target_ulong)1 << shift;
230 *pp = p;
231 return val;
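/* Illustrative sketch: a round trip through the two helpers above.  A
 * small negative delta such as -5 fits, together with its sign bit, in
 * seven bits, so it encodes to the single byte 0x7b and decodes back to
 * the same value.  (Function name is made up for the example.)
 */
static void example_sleb128_round_trip(void)
{
    uint8_t buf[16];
    uint8_t *end = encode_sleb128(buf, -5);
    uint8_t *p = buf;
    target_long val = decode_sleb128(&p);

    assert(end == buf + 1);     /* -5 needs only one sleb128 byte */
    assert(p == end);           /* decoding consumed exactly that byte */
    assert(val == -5);
}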
234 /* Encode the data collected about the instructions while compiling TB.
235 Place the data at BLOCK, and return the number of bytes consumed.
237 The logical table consists of TARGET_INSN_START_WORDS target_ulong's,
238 which come from the target's insn_start data, followed by a uintptr_t
239 which comes from the host pc of the end of the code implementing the insn.
241 Each line of the table is encoded as sleb128 deltas from the previous
242 line. The seed for the first line is { tb->pc, 0..., tb->tc_ptr }.
243 That is, the first column is seeded with the guest pc, the last column
244 with the host pc, and the middle columns with zeros. */
246 static int encode_search(TranslationBlock *tb, uint8_t *block)
248 uint8_t *highwater = tcg_ctx.code_gen_highwater;
249 uint8_t *p = block;
250 int i, j, n;
252 tb->tc_search = block;
254 for (i = 0, n = tb->icount; i < n; ++i) {
255 target_ulong prev;
257 for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
258 if (i == 0) {
259 prev = (j == 0 ? tb->pc : 0);
260 } else {
261 prev = tcg_ctx.gen_insn_data[i - 1][j];
263 p = encode_sleb128(p, tcg_ctx.gen_insn_data[i][j] - prev);
265 prev = (i == 0 ? 0 : tcg_ctx.gen_insn_end_off[i - 1]);
266 p = encode_sleb128(p, tcg_ctx.gen_insn_end_off[i] - prev);
268 /* Test for (pending) buffer overflow. The assumption is that any
269 one row beginning below the high water mark cannot overrun
270 the buffer completely. Thus we can test for overflow after
271 encoding a row without having to check during encoding. */
272 if (unlikely(p > highwater)) {
273 return -1;
277 return p - block;
280 /* The cpu state corresponding to 'searched_pc' is restored.
281 * Called with tb_lock held.
283 static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
284 uintptr_t searched_pc)
286 target_ulong data[TARGET_INSN_START_WORDS] = { tb->pc };
287 uintptr_t host_pc = (uintptr_t)tb->tc_ptr;
288 CPUArchState *env = cpu->env_ptr;
289 uint8_t *p = tb->tc_search;
290 int i, j, num_insns = tb->icount;
291 #ifdef CONFIG_PROFILER
292 int64_t ti = profile_getclock();
293 #endif
295 searched_pc -= GETPC_ADJ;
297 if (searched_pc < host_pc) {
298 return -1;
301 /* Reconstruct the stored insn data while looking for the point at
302 which the end of the insn exceeds the searched_pc. */
303 for (i = 0; i < num_insns; ++i) {
304 for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
305 data[j] += decode_sleb128(&p);
307 host_pc += decode_sleb128(&p);
308 if (host_pc > searched_pc) {
309 goto found;
312 return -1;
314 found:
315 if (tb->cflags & CF_USE_ICOUNT) {
316 assert(use_icount);
317 /* Reset the cycle counter to the start of the block. */
318 cpu->icount_decr.u16.low += num_insns;
319 /* Clear the IO flag. */
320 cpu->can_do_io = 0;
322 cpu->icount_decr.u16.low -= i;
323 restore_state_to_opc(env, tb, data);
325 #ifdef CONFIG_PROFILER
326 tcg_ctx.restore_time += profile_getclock() - ti;
327 tcg_ctx.restore_count++;
328 #endif
329 return 0;
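/* Illustrative sketch of the reconstruction loop above, reduced to the
 * host-pc column only (names and types are made up for the example).
 * If three guest insns end at host offsets 8, 20 and 32, the stored
 * deltas are 8, 12 and 12; summing deltas until the running offset
 * passes the searched offset identifies the insn whose translation
 * covers the faulting host pc.
 */
static int example_find_insn(const int *host_end_delta, int num_insns,
                             int searched_off)
{
    int host_off = 0;
    int i;

    for (i = 0; i < num_insns; i++) {
        host_off += host_end_delta[i];     /* undo the delta encoding */
        if (host_off > searched_off) {
            return i;                      /* insn covering the offset */
        }
    }
    return -1;                             /* offset is past the last insn */
}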
332 bool cpu_restore_state(CPUState *cpu, uintptr_t retaddr)
334 TranslationBlock *tb;
335 bool r = false;
337 /* A retaddr of zero is invalid so we really shouldn't have ended
338 * up here. The target code has likely forgotten to check retaddr
339 * != 0 before attempting to restore state. We return early to
340 * avoid blowing up on a recursive tb_lock(). The target must have
341 * previously survived a failed cpu_restore_state because
342 * tb_find_pc(0) would have failed anyway. It still should be
343 * fixed though.
346 if (!retaddr) {
347 return r;
350 tb_lock();
351 tb = tb_find_pc(retaddr);
352 if (tb) {
353 cpu_restore_state_from_tb(cpu, tb, retaddr);
354 if (tb->cflags & CF_NOCACHE) {
355 /* one-shot translation, invalidate it immediately */
356 tb_phys_invalidate(tb, -1);
357 tb_free(tb);
359 r = true;
361 tb_unlock();
363 return r;
366 void page_size_init(void)
368 /* NOTE: we can always suppose that qemu_host_page_size >=
369 TARGET_PAGE_SIZE */
370 qemu_real_host_page_size = getpagesize();
371 qemu_real_host_page_mask = -(intptr_t)qemu_real_host_page_size;
372 if (qemu_host_page_size == 0) {
373 qemu_host_page_size = qemu_real_host_page_size;
375 if (qemu_host_page_size < TARGET_PAGE_SIZE) {
376 qemu_host_page_size = TARGET_PAGE_SIZE;
378 qemu_host_page_mask = -(intptr_t)qemu_host_page_size;
381 static void page_init(void)
383 page_size_init();
384 page_table_config_init();
386 #if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
388 #ifdef HAVE_KINFO_GETVMMAP
389 struct kinfo_vmentry *freep;
390 int i, cnt;
392 freep = kinfo_getvmmap(getpid(), &cnt);
393 if (freep) {
394 mmap_lock();
395 for (i = 0; i < cnt; i++) {
396 unsigned long startaddr, endaddr;
398 startaddr = freep[i].kve_start;
399 endaddr = freep[i].kve_end;
400 if (h2g_valid(startaddr)) {
401 startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
403 if (h2g_valid(endaddr)) {
404 endaddr = h2g(endaddr);
405 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
406 } else {
407 #if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
408 endaddr = ~0ul;
409 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
410 #endif
414 free(freep);
415 mmap_unlock();
417 #else
418 FILE *f;
420 last_brk = (unsigned long)sbrk(0);
422 f = fopen("/compat/linux/proc/self/maps", "r");
423 if (f) {
424 mmap_lock();
426 do {
427 unsigned long startaddr, endaddr;
428 int n;
430 n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);
432 if (n == 2 && h2g_valid(startaddr)) {
433 startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
435 if (h2g_valid(endaddr)) {
436 endaddr = h2g(endaddr);
437 } else {
438 endaddr = ~0ul;
440 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
442 } while (!feof(f));
444 fclose(f);
445 mmap_unlock();
447 #endif
449 #endif
452 /* If alloc=1:
453 * Called with tb_lock held for system emulation.
454 * Called with mmap_lock held for user-mode emulation.
456 static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
458 PageDesc *pd;
459 void **lp;
460 int i;
462 if (alloc) {
463 assert_memory_lock();
466 /* Level 1. Always allocated. */
467 lp = l1_map + ((index >> v_l1_shift) & (v_l1_size - 1));
469 /* Level 2..N-1. */
470 for (i = v_l2_levels; i > 0; i--) {
471 void **p = atomic_rcu_read(lp);
473 if (p == NULL) {
474 if (!alloc) {
475 return NULL;
477 p = g_new0(void *, V_L2_SIZE);
478 atomic_rcu_set(lp, p);
481 lp = p + ((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1));
484 pd = atomic_rcu_read(lp);
485 if (pd == NULL) {
486 if (!alloc) {
487 return NULL;
489 pd = g_new0(PageDesc, V_L2_SIZE);
490 atomic_rcu_set(lp, pd);
493 return pd + (index & (V_L2_SIZE - 1));
496 static inline PageDesc *page_find(tb_page_addr_t index)
498 return page_find_alloc(index, 0);
501 #if defined(CONFIG_USER_ONLY)
502 /* Currently it is not recommended to allocate big chunks of data in
503 user mode. This will change when a dedicated libc is used. */
504 /* ??? 64-bit hosts ought to have no problem mmaping data outside the
505 region in which the guest needs to run. Revisit this. */
506 #define USE_STATIC_CODE_GEN_BUFFER
507 #endif
509 /* Minimum size of the code gen buffer. This number is arbitrarily chosen,
510 but not so small that we can't have a fair number of TBs live. */
511 #define MIN_CODE_GEN_BUFFER_SIZE (1024u * 1024)
513 /* Maximum size of the code gen buffer we'd like to use. Unless otherwise
514 indicated, this is constrained by the range of direct branches on the
515 host cpu, as used by the TCG implementation of goto_tb. */
516 #if defined(__x86_64__)
517 # define MAX_CODE_GEN_BUFFER_SIZE (2ul * 1024 * 1024 * 1024)
518 #elif defined(__sparc__)
519 # define MAX_CODE_GEN_BUFFER_SIZE (2ul * 1024 * 1024 * 1024)
520 #elif defined(__powerpc64__)
521 # define MAX_CODE_GEN_BUFFER_SIZE (2ul * 1024 * 1024 * 1024)
522 #elif defined(__powerpc__)
523 # define MAX_CODE_GEN_BUFFER_SIZE (32u * 1024 * 1024)
524 #elif defined(__aarch64__)
525 # define MAX_CODE_GEN_BUFFER_SIZE (128ul * 1024 * 1024)
526 #elif defined(__s390x__)
527 /* We have a +- 4GB range on the branches; leave some slop. */
528 # define MAX_CODE_GEN_BUFFER_SIZE (3ul * 1024 * 1024 * 1024)
529 #elif defined(__mips__)
530 /* We have a 256MB branch region, but leave room to make sure the
531 main executable is also within that region. */
532 # define MAX_CODE_GEN_BUFFER_SIZE (128ul * 1024 * 1024)
533 #else
534 # define MAX_CODE_GEN_BUFFER_SIZE ((size_t)-1)
535 #endif
537 #define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32u * 1024 * 1024)
539 #define DEFAULT_CODE_GEN_BUFFER_SIZE \
540 (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
541 ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)
543 static inline size_t size_code_gen_buffer(size_t tb_size)
545 /* Size the buffer. */
546 if (tb_size == 0) {
547 #ifdef USE_STATIC_CODE_GEN_BUFFER
548 tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
549 #else
550 /* ??? Needs adjustments. */
551 /* ??? If we relax the requirement that CONFIG_USER_ONLY use the
552 static buffer, we could size this on RESERVED_VA, on the text
553 segment size of the executable, or continue to use the default. */
554 tb_size = (unsigned long)(ram_size / 4);
555 #endif
557 if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
558 tb_size = MIN_CODE_GEN_BUFFER_SIZE;
560 if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
561 tb_size = MAX_CODE_GEN_BUFFER_SIZE;
563 return tb_size;
566 #ifdef __mips__
567 /* In order to use J and JAL within the code_gen_buffer, we require
568 that the buffer not cross a 256MB boundary. */
569 static inline bool cross_256mb(void *addr, size_t size)
571 return ((uintptr_t)addr ^ ((uintptr_t)addr + size)) & ~0x0ffffffful;
574 /* We weren't able to allocate a buffer without crossing that boundary,
575 so make do with the larger portion of the buffer that doesn't cross.
576 Returns the new base of the buffer, and adjusts code_gen_buffer_size. */
577 static inline void *split_cross_256mb(void *buf1, size_t size1)
579 void *buf2 = (void *)(((uintptr_t)buf1 + size1) & ~0x0ffffffful);
580 size_t size2 = buf1 + size1 - buf2;
582 size1 = buf2 - buf1;
583 if (size1 < size2) {
584 size1 = size2;
585 buf1 = buf2;
588 tcg_ctx.code_gen_buffer_size = size1;
589 return buf1;
591 #endif
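/* Illustrative sketch: the boundary arithmetic above with concrete
 * numbers (the values and function name are made up for the example).
 * A 4MB buffer starting at 0x0ff00000 straddles the 256MB mark at
 * 0x10000000; the 3MB piece above the mark is the larger one, so that
 * is the half split_cross_256mb() would keep.
 */
static void example_split_256mb(void)
{
    uintptr_t buf = 0x0ff00000ul;
    size_t size = 4 * 1024 * 1024;
    uintptr_t buf2;
    size_t size1, size2;

    assert(((buf ^ (buf + size)) & ~0x0ffffffful) != 0);   /* crosses */

    buf2 = (buf + size) & ~0x0ffffffful;    /* 0x10000000 */
    size2 = buf + size - buf2;              /* 3MB above the boundary */
    size1 = buf2 - buf;                     /* 1MB below the boundary */

    assert(size1 < size2);                  /* keep the larger, upper part */
}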
593 #ifdef USE_STATIC_CODE_GEN_BUFFER
594 static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
595 __attribute__((aligned(CODE_GEN_ALIGN)));
597 # ifdef _WIN32
598 static inline void do_protect(void *addr, long size, int prot)
600 DWORD old_protect;
601 VirtualProtect(addr, size, prot, &old_protect);
604 static inline void map_exec(void *addr, long size)
606 do_protect(addr, size, PAGE_EXECUTE_READWRITE);
609 static inline void map_none(void *addr, long size)
611 do_protect(addr, size, PAGE_NOACCESS);
613 # else
614 static inline void do_protect(void *addr, long size, int prot)
616 uintptr_t start, end;
618 start = (uintptr_t)addr;
619 start &= qemu_real_host_page_mask;
621 end = (uintptr_t)addr + size;
622 end = ROUND_UP(end, qemu_real_host_page_size);
624 mprotect((void *)start, end - start, prot);
627 static inline void map_exec(void *addr, long size)
629 do_protect(addr, size, PROT_READ | PROT_WRITE | PROT_EXEC);
632 static inline void map_none(void *addr, long size)
634 do_protect(addr, size, PROT_NONE);
636 # endif /* WIN32 */
638 static inline void *alloc_code_gen_buffer(void)
640 void *buf = static_code_gen_buffer;
641 size_t full_size, size;
643 /* The size of the buffer, rounded down to end on a page boundary. */
644 full_size = (((uintptr_t)buf + sizeof(static_code_gen_buffer))
645 & qemu_real_host_page_mask) - (uintptr_t)buf;
647 /* Reserve a guard page. */
648 size = full_size - qemu_real_host_page_size;
650 /* Honor a command-line option limiting the size of the buffer. */
651 if (size > tcg_ctx.code_gen_buffer_size) {
652 size = (((uintptr_t)buf + tcg_ctx.code_gen_buffer_size)
653 & qemu_real_host_page_mask) - (uintptr_t)buf;
655 tcg_ctx.code_gen_buffer_size = size;
657 #ifdef __mips__
658 if (cross_256mb(buf, size)) {
659 buf = split_cross_256mb(buf, size);
660 size = tcg_ctx.code_gen_buffer_size;
662 #endif
664 map_exec(buf, size);
665 map_none(buf + size, qemu_real_host_page_size);
666 qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);
668 return buf;
670 #elif defined(_WIN32)
671 static inline void *alloc_code_gen_buffer(void)
673 size_t size = tcg_ctx.code_gen_buffer_size;
674 void *buf1, *buf2;
676 /* Perform the allocation in two steps, so that the guard page
677 is reserved but uncommitted. */
678 buf1 = VirtualAlloc(NULL, size + qemu_real_host_page_size,
679 MEM_RESERVE, PAGE_NOACCESS);
680 if (buf1 != NULL) {
681 buf2 = VirtualAlloc(buf1, size, MEM_COMMIT, PAGE_EXECUTE_READWRITE);
682 assert(buf1 == buf2);
685 return buf1;
687 #else
688 static inline void *alloc_code_gen_buffer(void)
690 int flags = MAP_PRIVATE | MAP_ANONYMOUS;
691 uintptr_t start = 0;
692 size_t size = tcg_ctx.code_gen_buffer_size;
693 void *buf;
695 /* Constrain the position of the buffer based on the host cpu.
696 Note that these addresses are chosen in concert with the
697 addresses assigned in the relevant linker script file. */
698 # if defined(__PIE__) || defined(__PIC__)
699 /* Don't bother setting a preferred location if we're building
700 a position-independent executable. We're more likely to get
701 an address near the main executable if we let the kernel
702 choose the address. */
703 # elif defined(__x86_64__) && defined(MAP_32BIT)
704 /* Force the memory down into low memory with the executable.
705 Leave the choice of exact location with the kernel. */
706 flags |= MAP_32BIT;
707 /* Cannot expect to map more than 800MB in low memory. */
708 if (size > 800u * 1024 * 1024) {
709 tcg_ctx.code_gen_buffer_size = size = 800u * 1024 * 1024;
711 # elif defined(__sparc__)
712 start = 0x40000000ul;
713 # elif defined(__s390x__)
714 start = 0x90000000ul;
715 # elif defined(__mips__)
716 # if _MIPS_SIM == _ABI64
717 start = 0x128000000ul;
718 # else
719 start = 0x08000000ul;
720 # endif
721 # endif
723 buf = mmap((void *)start, size + qemu_real_host_page_size,
724 PROT_NONE, flags, -1, 0);
725 if (buf == MAP_FAILED) {
726 return NULL;
729 #ifdef __mips__
730 if (cross_256mb(buf, size)) {
731 /* Try again, with the original still mapped, to avoid re-acquiring
732 that 256mb crossing. This time don't specify an address. */
733 size_t size2;
734 void *buf2 = mmap(NULL, size + qemu_real_host_page_size,
735 PROT_NONE, flags, -1, 0);
736 switch ((int)(buf2 != MAP_FAILED)) {
737 case 1:
738 if (!cross_256mb(buf2, size)) {
739 /* Success! Use the new buffer. */
740 munmap(buf, size + qemu_real_host_page_size);
741 break;
743 /* Failure. Work with what we had. */
744 munmap(buf2, size + qemu_real_host_page_size);
745 /* fallthru */
746 default:
747 /* Split the original buffer. Free the smaller half. */
748 buf2 = split_cross_256mb(buf, size);
749 size2 = tcg_ctx.code_gen_buffer_size;
750 if (buf == buf2) {
751 munmap(buf + size2 + qemu_real_host_page_size, size - size2);
752 } else {
753 munmap(buf, size - size2);
755 size = size2;
756 break;
758 buf = buf2;
760 #endif
762 /* Make the final buffer accessible. The guard page at the end
763 will remain inaccessible with PROT_NONE. */
764 mprotect(buf, size, PROT_WRITE | PROT_READ | PROT_EXEC);
766 /* Request large pages for the buffer. */
767 qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);
769 return buf;
771 #endif /* USE_STATIC_CODE_GEN_BUFFER, WIN32, POSIX */
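/* Illustrative sketch (POSIX only): the guard-page pattern used by the
 * mmap-based allocator above, reduced to its essentials.  One extra
 * page past the usable region stays PROT_NONE, so a code generator
 * that runs off the end faults immediately instead of corrupting
 * unrelated memory.  Function name is made up for the example.
 */
#ifndef _WIN32
static void *example_alloc_with_guard(size_t size, size_t pagesize)
{
    void *buf = mmap(NULL, size + pagesize, PROT_NONE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

    if (buf == MAP_FAILED) {
        return NULL;
    }
    /* Open up everything except the trailing guard page. */
    if (mprotect(buf, size, PROT_READ | PROT_WRITE | PROT_EXEC) != 0) {
        munmap(buf, size + pagesize);
        return NULL;
    }
    return buf;
}
#endif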
773 static inline void code_gen_alloc(size_t tb_size)
775 tcg_ctx.code_gen_buffer_size = size_code_gen_buffer(tb_size);
776 tcg_ctx.code_gen_buffer = alloc_code_gen_buffer();
777 if (tcg_ctx.code_gen_buffer == NULL) {
778 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
779 exit(1);
782 /* size this conservatively -- realloc later if needed */
783 tcg_ctx.tb_ctx.tbs_size =
784 tcg_ctx.code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE / 8;
785 if (unlikely(!tcg_ctx.tb_ctx.tbs_size)) {
786 tcg_ctx.tb_ctx.tbs_size = 64 * 1024;
788 tcg_ctx.tb_ctx.tbs = g_new(TranslationBlock *, tcg_ctx.tb_ctx.tbs_size);
790 qemu_mutex_init(&tcg_ctx.tb_ctx.tb_lock);
793 static void tb_htable_init(void)
795 unsigned int mode = QHT_MODE_AUTO_RESIZE;
797 qht_init(&tcg_ctx.tb_ctx.htable, CODE_GEN_HTABLE_SIZE, mode);
800 /* Must be called before using the QEMU cpus. 'tb_size' is the size
801 (in bytes) allocated to the translation buffer. Zero means default
802 size. */
803 void tcg_exec_init(unsigned long tb_size)
805 cpu_gen_init();
806 page_init();
807 tb_htable_init();
808 code_gen_alloc(tb_size);
809 #if defined(CONFIG_SOFTMMU)
810 /* There's no guest base to take into account, so go ahead and
811 initialize the prologue now. */
812 tcg_prologue_init(&tcg_ctx);
813 #endif
816 bool tcg_enabled(void)
818 return tcg_ctx.code_gen_buffer != NULL;
822 * Allocate a new translation block. Flush the translation buffer if
823 * there are too many translation blocks or too much generated code.
825 * Called with tb_lock held.
827 static TranslationBlock *tb_alloc(target_ulong pc)
829 TranslationBlock *tb;
830 TBContext *ctx;
832 assert_tb_locked();
834 tb = tcg_tb_alloc(&tcg_ctx);
835 if (unlikely(tb == NULL)) {
836 return NULL;
838 ctx = &tcg_ctx.tb_ctx;
839 if (unlikely(ctx->nb_tbs == ctx->tbs_size)) {
840 ctx->tbs_size *= 2;
841 ctx->tbs = g_renew(TranslationBlock *, ctx->tbs, ctx->tbs_size);
843 ctx->tbs[ctx->nb_tbs++] = tb;
844 return tb;
847 /* Called with tb_lock held. */
848 void tb_free(TranslationBlock *tb)
850 assert_tb_locked();
852 /* In practice this is mostly used for single-use temporary TBs.
853 Ignore the hard cases and just back up if this TB happens to
854 be the last one generated. */
855 if (tcg_ctx.tb_ctx.nb_tbs > 0 &&
856 tb == tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs - 1]) {
857 size_t struct_size = ROUND_UP(sizeof(*tb), qemu_icache_linesize);
859 tcg_ctx.code_gen_ptr = tb->tc_ptr - struct_size;
860 tcg_ctx.tb_ctx.nb_tbs--;
864 static inline void invalidate_page_bitmap(PageDesc *p)
866 #ifdef CONFIG_SOFTMMU
867 g_free(p->code_bitmap);
868 p->code_bitmap = NULL;
869 p->code_write_count = 0;
870 #endif
873 /* Set to NULL all the 'first_tb' fields in all PageDescs. */
874 static void page_flush_tb_1(int level, void **lp)
876 int i;
878 if (*lp == NULL) {
879 return;
881 if (level == 0) {
882 PageDesc *pd = *lp;
884 for (i = 0; i < V_L2_SIZE; ++i) {
885 pd[i].first_tb = NULL;
886 invalidate_page_bitmap(pd + i);
888 } else {
889 void **pp = *lp;
891 for (i = 0; i < V_L2_SIZE; ++i) {
892 page_flush_tb_1(level - 1, pp + i);
897 static void page_flush_tb(void)
899 int i, l1_sz = v_l1_size;
901 for (i = 0; i < l1_sz; i++) {
902 page_flush_tb_1(v_l2_levels, l1_map + i);
906 /* flush all the translation blocks */
907 static void do_tb_flush(CPUState *cpu, run_on_cpu_data tb_flush_count)
909 tb_lock();
911 /* If it has already been done on request of another CPU,
912 * just retry.
914 if (tcg_ctx.tb_ctx.tb_flush_count != tb_flush_count.host_int) {
915 goto done;
918 #if defined(DEBUG_TB_FLUSH)
919 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
920 (unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer),
921 tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.tb_ctx.nb_tbs > 0 ?
922 ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)) /
923 tcg_ctx.tb_ctx.nb_tbs : 0);
924 #endif
925 if ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)
926 > tcg_ctx.code_gen_buffer_size) {
927 cpu_abort(cpu, "Internal error: code buffer overflow\n");
930 CPU_FOREACH(cpu) {
931 cpu_tb_jmp_cache_clear(cpu);
934 tcg_ctx.tb_ctx.nb_tbs = 0;
935 qht_reset_size(&tcg_ctx.tb_ctx.htable, CODE_GEN_HTABLE_SIZE);
936 page_flush_tb();
938 tcg_ctx.code_gen_ptr = tcg_ctx.code_gen_buffer;
939 /* XXX: flush processor icache at this point if cache flush is
940 expensive */
941 atomic_mb_set(&tcg_ctx.tb_ctx.tb_flush_count,
942 tcg_ctx.tb_ctx.tb_flush_count + 1);
944 done:
945 tb_unlock();
948 void tb_flush(CPUState *cpu)
950 if (tcg_enabled()) {
951 unsigned tb_flush_count = atomic_mb_read(&tcg_ctx.tb_ctx.tb_flush_count);
952 async_safe_run_on_cpu(cpu, do_tb_flush,
953 RUN_ON_CPU_HOST_INT(tb_flush_count));
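/* Illustrative sketch: the flush handshake above in miniature (names
 * are made up for the example).  Each requester snapshots a generation
 * counter and queues the work with that value; the deferred worker only
 * acts if nobody has bumped the generation since the request was made,
 * so several vCPUs asking for a flush at once trigger exactly one.
 */
static unsigned example_generation;        /* protected by tb_lock */

static void example_flush_worker(unsigned requested_generation)
{
    tb_lock();
    if (example_generation == requested_generation) {
        /* ... perform the expensive, one-shot work here ... */
        example_generation++;
    }
    tb_unlock();
}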
957 #ifdef DEBUG_TB_CHECK
959 static void
960 do_tb_invalidate_check(struct qht *ht, void *p, uint32_t hash, void *userp)
962 TranslationBlock *tb = p;
963 target_ulong addr = *(target_ulong *)userp;
965 if (!(addr + TARGET_PAGE_SIZE <= tb->pc || addr >= tb->pc + tb->size)) {
966 printf("ERROR invalidate: address=" TARGET_FMT_lx
967 " PC=%08lx size=%04x\n", addr, (long)tb->pc, tb->size);
971 /* verify that no TB still intersects the invalidated page
973 * Called with tb_lock held.
975 static void tb_invalidate_check(target_ulong address)
977 address &= TARGET_PAGE_MASK;
978 qht_iter(&tcg_ctx.tb_ctx.htable, do_tb_invalidate_check, &address);
981 static void
982 do_tb_page_check(struct qht *ht, void *p, uint32_t hash, void *userp)
984 TranslationBlock *tb = p;
985 int flags1, flags2;
987 flags1 = page_get_flags(tb->pc);
988 flags2 = page_get_flags(tb->pc + tb->size - 1);
989 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
990 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
991 (long)tb->pc, tb->size, flags1, flags2);
995 /* verify that all the pages have correct rights for code */
996 static void tb_page_check(void)
998 qht_iter(&tcg_ctx.tb_ctx.htable, do_tb_page_check, NULL);
1001 #endif
1003 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
1005 TranslationBlock *tb1;
1006 unsigned int n1;
1008 for (;;) {
1009 tb1 = *ptb;
1010 n1 = (uintptr_t)tb1 & 3;
1011 tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
1012 if (tb1 == tb) {
1013 *ptb = tb1->page_next[n1];
1014 break;
1016 ptb = &tb1->page_next[n1];
1020 /* remove the TB from a list of TBs jumping to the n-th jump target of the TB */
1021 static inline void tb_remove_from_jmp_list(TranslationBlock *tb, int n)
1023 TranslationBlock *tb1;
1024 uintptr_t *ptb, ntb;
1025 unsigned int n1;
1027 ptb = &tb->jmp_list_next[n];
1028 if (*ptb) {
1029 /* find tb(n) in circular list */
1030 for (;;) {
1031 ntb = *ptb;
1032 n1 = ntb & 3;
1033 tb1 = (TranslationBlock *)(ntb & ~3);
1034 if (n1 == n && tb1 == tb) {
1035 break;
1037 if (n1 == 2) {
1038 ptb = &tb1->jmp_list_first;
1039 } else {
1040 ptb = &tb1->jmp_list_next[n1];
1043 /* now we can remove tb(n) from the list */
1044 *ptb = tb->jmp_list_next[n];
1046 tb->jmp_list_next[n] = (uintptr_t)NULL;
1050 /* reset the jump entry 'n' of a TB so that it is not chained to
1051 another TB */
1052 static inline void tb_reset_jump(TranslationBlock *tb, int n)
1054 uintptr_t addr = (uintptr_t)(tb->tc_ptr + tb->jmp_reset_offset[n]);
1055 tb_set_jmp_target(tb, n, addr);
1058 /* remove any jumps to the TB */
1059 static inline void tb_jmp_unlink(TranslationBlock *tb)
1061 TranslationBlock *tb1;
1062 uintptr_t *ptb, ntb;
1063 unsigned int n1;
1065 ptb = &tb->jmp_list_first;
1066 for (;;) {
1067 ntb = *ptb;
1068 n1 = ntb & 3;
1069 tb1 = (TranslationBlock *)(ntb & ~3);
1070 if (n1 == 2) {
1071 break;
1073 tb_reset_jump(tb1, n1);
1074 *ptb = tb1->jmp_list_next[n1];
1075 tb1->jmp_list_next[n1] = (uintptr_t)NULL;
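/* Illustrative sketch: the pointer-tagging convention used by the jump
 * lists above (helper names are made up for the example).  The low two
 * bits of each list word carry the jump slot (0 or 1) inside the TB the
 * word belongs to, and the value 2 tags the list head stored in
 * jmp_list_first; masking with ~3 recovers the TB pointer.
 */
static inline uintptr_t example_jmp_list_entry(TranslationBlock *tb, int n)
{
    return (uintptr_t)tb | n;                   /* n is 0, 1 or 2 */
}

static inline TranslationBlock *example_jmp_list_tb(uintptr_t entry)
{
    return (TranslationBlock *)(entry & ~3);    /* strip the tag */
}

static inline int example_jmp_list_slot(uintptr_t entry)
{
    return entry & 3;                           /* recover the tag */
}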
1079 /* invalidate one TB
1081 * Called with tb_lock held.
1083 void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
1085 CPUState *cpu;
1086 PageDesc *p;
1087 uint32_t h;
1088 tb_page_addr_t phys_pc;
1090 assert_tb_locked();
1092 atomic_set(&tb->invalid, true);
1094 /* remove the TB from the hash list */
1095 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1096 h = tb_hash_func(phys_pc, tb->pc, tb->flags);
1097 qht_remove(&tcg_ctx.tb_ctx.htable, tb, h);
1099 /* remove the TB from the page list */
1100 if (tb->page_addr[0] != page_addr) {
1101 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
1102 tb_page_remove(&p->first_tb, tb);
1103 invalidate_page_bitmap(p);
1105 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
1106 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
1107 tb_page_remove(&p->first_tb, tb);
1108 invalidate_page_bitmap(p);
1111 /* remove the TB from each CPU's tb_jmp_cache */
1112 h = tb_jmp_cache_hash_func(tb->pc);
1113 CPU_FOREACH(cpu) {
1114 if (atomic_read(&cpu->tb_jmp_cache[h]) == tb) {
1115 atomic_set(&cpu->tb_jmp_cache[h], NULL);
1119 /* remove this TB from the two jump lists */
1120 tb_remove_from_jmp_list(tb, 0);
1121 tb_remove_from_jmp_list(tb, 1);
1123 /* suppress any remaining jumps to this TB */
1124 tb_jmp_unlink(tb);
1126 tcg_ctx.tb_ctx.tb_phys_invalidate_count++;
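/* Illustrative sketch: the reader side that the atomic_set() above
 * pairs with (function name is made up for the example).  Lookups in
 * cpu->tb_jmp_cache are expected to go through atomic_read(), so that
 * clearing an entry here can neither be torn nor reordered against a
 * concurrent lookup on another thread.
 */
static inline TranslationBlock *example_jmp_cache_lookup(CPUState *cpu,
                                                         target_ulong pc)
{
    uint32_t hash = tb_jmp_cache_hash_func(pc);

    return atomic_read(&cpu->tb_jmp_cache[hash]);
}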
1129 #ifdef CONFIG_SOFTMMU
1130 static void build_page_bitmap(PageDesc *p)
1132 int n, tb_start, tb_end;
1133 TranslationBlock *tb;
1135 p->code_bitmap = bitmap_new(TARGET_PAGE_SIZE);
1137 tb = p->first_tb;
1138 while (tb != NULL) {
1139 n = (uintptr_t)tb & 3;
1140 tb = (TranslationBlock *)((uintptr_t)tb & ~3);
1141 /* NOTE: this is subtle as a TB may span two physical pages */
1142 if (n == 0) {
1143 /* NOTE: tb_end may be after the end of the page, but
1144 it is not a problem */
1145 tb_start = tb->pc & ~TARGET_PAGE_MASK;
1146 tb_end = tb_start + tb->size;
1147 if (tb_end > TARGET_PAGE_SIZE) {
1148 tb_end = TARGET_PAGE_SIZE;
1150 } else {
1151 tb_start = 0;
1152 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1154 bitmap_set(p->code_bitmap, tb_start, tb_end - tb_start);
1155 tb = tb->page_next[n];
1158 #endif
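/* Illustrative sketch: how a bitmap built above is consulted on a later
 * write (see tb_invalidate_phys_page_fast() below; the function name
 * here is made up for the example).  For a small write of len bytes at
 * page offset nr, the word containing bit nr is shifted so that bit 0
 * lines up with nr; if any of the low len bits are set, the write
 * overlaps translated code and the page must be invalidated.
 */
static inline bool example_write_hits_code(const unsigned long *code_bitmap,
                                           unsigned int nr, int len)
{
    unsigned long b = code_bitmap[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG - 1));

    return (b & ((1ul << len) - 1)) != 0;
}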
1160 /* add the tb to the target page and protect it if necessary
1162 * Called with mmap_lock held for user-mode emulation.
1164 static inline void tb_alloc_page(TranslationBlock *tb,
1165 unsigned int n, tb_page_addr_t page_addr)
1167 PageDesc *p;
1168 #ifndef CONFIG_USER_ONLY
1169 bool page_already_protected;
1170 #endif
1172 assert_memory_lock();
1174 tb->page_addr[n] = page_addr;
1175 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
1176 tb->page_next[n] = p->first_tb;
1177 #ifndef CONFIG_USER_ONLY
1178 page_already_protected = p->first_tb != NULL;
1179 #endif
1180 p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);
1181 invalidate_page_bitmap(p);
1183 #if defined(CONFIG_USER_ONLY)
1184 if (p->flags & PAGE_WRITE) {
1185 target_ulong addr;
1186 PageDesc *p2;
1187 int prot;
1189 /* force the host page to be non-writable (writes will take a
1190 page fault + mprotect overhead) */
1191 page_addr &= qemu_host_page_mask;
1192 prot = 0;
1193 for (addr = page_addr; addr < page_addr + qemu_host_page_size;
1194 addr += TARGET_PAGE_SIZE) {
1196 p2 = page_find(addr >> TARGET_PAGE_BITS);
1197 if (!p2) {
1198 continue;
1200 prot |= p2->flags;
1201 p2->flags &= ~PAGE_WRITE;
1203 mprotect(g2h(page_addr), qemu_host_page_size,
1204 (prot & PAGE_BITS) & ~PAGE_WRITE);
1205 #ifdef DEBUG_TB_INVALIDATE
1206 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1207 page_addr);
1208 #endif
1210 #else
1211 /* if some code is already present, then the pages are already
1212 protected. So we handle the case where only the first TB is
1213 allocated in a physical page */
1214 if (!page_already_protected) {
1215 tlb_protect_code(page_addr);
1217 #endif
1220 /* add a new TB and link it to the physical page tables. phys_page2 is
1221 * (-1) to indicate that only one page contains the TB.
1223 * Called with mmap_lock held for user-mode emulation.
1225 static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
1226 tb_page_addr_t phys_page2)
1228 uint32_t h;
1230 assert_memory_lock();
1232 /* add in the page list */
1233 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1234 if (phys_page2 != -1) {
1235 tb_alloc_page(tb, 1, phys_page2);
1236 } else {
1237 tb->page_addr[1] = -1;
1240 /* add in the hash table */
1241 h = tb_hash_func(phys_pc, tb->pc, tb->flags);
1242 qht_insert(&tcg_ctx.tb_ctx.htable, tb, h);
1244 #ifdef DEBUG_TB_CHECK
1245 tb_page_check();
1246 #endif
1249 /* Called with mmap_lock held for user mode emulation. */
1250 TranslationBlock *tb_gen_code(CPUState *cpu,
1251 target_ulong pc, target_ulong cs_base,
1252 uint32_t flags, int cflags)
1254 CPUArchState *env = cpu->env_ptr;
1255 TranslationBlock *tb;
1256 tb_page_addr_t phys_pc, phys_page2;
1257 target_ulong virt_page2;
1258 tcg_insn_unit *gen_code_buf;
1259 int gen_code_size, search_size;
1260 #ifdef CONFIG_PROFILER
1261 int64_t ti;
1262 #endif
1263 assert_memory_lock();
1265 phys_pc = get_page_addr_code(env, pc);
1266 if (use_icount && !(cflags & CF_IGNORE_ICOUNT)) {
1267 cflags |= CF_USE_ICOUNT;
1270 tb = tb_alloc(pc);
1271 if (unlikely(!tb)) {
1272 buffer_overflow:
1273 /* flush must be done */
1274 tb_flush(cpu);
1275 mmap_unlock();
1276 /* Make the execution loop process the flush as soon as possible. */
1277 cpu->exception_index = EXCP_INTERRUPT;
1278 cpu_loop_exit(cpu);
1281 gen_code_buf = tcg_ctx.code_gen_ptr;
1282 tb->tc_ptr = gen_code_buf;
1283 tb->pc = pc;
1284 tb->cs_base = cs_base;
1285 tb->flags = flags;
1286 tb->cflags = cflags;
1287 tb->invalid = false;
1289 #ifdef CONFIG_PROFILER
1290 tcg_ctx.tb_count1++; /* includes aborted translations because of
1291 exceptions */
1292 ti = profile_getclock();
1293 #endif
1295 tcg_func_start(&tcg_ctx);
1297 tcg_ctx.cpu = ENV_GET_CPU(env);
1298 gen_intermediate_code(env, tb);
1299 tcg_ctx.cpu = NULL;
1301 trace_translate_block(tb, tb->pc, tb->tc_ptr);
1303 /* generate machine code */
1304 tb->jmp_reset_offset[0] = TB_JMP_RESET_OFFSET_INVALID;
1305 tb->jmp_reset_offset[1] = TB_JMP_RESET_OFFSET_INVALID;
1306 tcg_ctx.tb_jmp_reset_offset = tb->jmp_reset_offset;
1307 #ifdef USE_DIRECT_JUMP
1308 tcg_ctx.tb_jmp_insn_offset = tb->jmp_insn_offset;
1309 tcg_ctx.tb_jmp_target_addr = NULL;
1310 #else
1311 tcg_ctx.tb_jmp_insn_offset = NULL;
1312 tcg_ctx.tb_jmp_target_addr = tb->jmp_target_addr;
1313 #endif
1315 #ifdef CONFIG_PROFILER
1316 tcg_ctx.tb_count++;
1317 tcg_ctx.interm_time += profile_getclock() - ti;
1318 tcg_ctx.code_time -= profile_getclock();
1319 #endif
1321 /* ??? Overflow could be handled better here. In particular, we
1322 don't need to re-do gen_intermediate_code, nor should we re-do
1323 the tcg optimization currently hidden inside tcg_gen_code. All
1324 that should be required is to flush the TBs, allocate a new TB,
1325 re-initialize it per above, and re-do the actual code generation. */
1326 gen_code_size = tcg_gen_code(&tcg_ctx, tb);
1327 if (unlikely(gen_code_size < 0)) {
1328 goto buffer_overflow;
1330 search_size = encode_search(tb, (void *)gen_code_buf + gen_code_size);
1331 if (unlikely(search_size < 0)) {
1332 goto buffer_overflow;
1335 #ifdef CONFIG_PROFILER
1336 tcg_ctx.code_time += profile_getclock();
1337 tcg_ctx.code_in_len += tb->size;
1338 tcg_ctx.code_out_len += gen_code_size;
1339 tcg_ctx.search_out_len += search_size;
1340 #endif
1342 #ifdef DEBUG_DISAS
1343 if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM) &&
1344 qemu_log_in_addr_range(tb->pc)) {
1345 qemu_log_lock();
1346 qemu_log("OUT: [size=%d]\n", gen_code_size);
1347 log_disas(tb->tc_ptr, gen_code_size);
1348 qemu_log("\n");
1349 qemu_log_flush();
1350 qemu_log_unlock();
1352 #endif
1354 tcg_ctx.code_gen_ptr = (void *)
1355 ROUND_UP((uintptr_t)gen_code_buf + gen_code_size + search_size,
1356 CODE_GEN_ALIGN);
1358 /* init jump list */
1359 assert(((uintptr_t)tb & 3) == 0);
1360 tb->jmp_list_first = (uintptr_t)tb | 2;
1361 tb->jmp_list_next[0] = (uintptr_t)NULL;
1362 tb->jmp_list_next[1] = (uintptr_t)NULL;
1364 /* init original jump addresses which have been set during tcg_gen_code() */
1365 if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
1366 tb_reset_jump(tb, 0);
1368 if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
1369 tb_reset_jump(tb, 1);
1372 /* check next page if needed */
1373 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
1374 phys_page2 = -1;
1375 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
1376 phys_page2 = get_page_addr_code(env, virt_page2);
1378 /* As long as consistency of the TB stuff is provided by tb_lock in user
1379 * mode and is implicit in single-threaded softmmu emulation, no explicit
1380 * memory barrier is required before tb_link_page() makes the TB visible
1381 * through the physical hash table and physical page list.
1383 tb_link_page(tb, phys_pc, phys_page2);
1384 return tb;
1388 * Invalidate all TBs which intersect with the target physical address range
1389 * [start;end[. NOTE: start and end may refer to *different* physical pages.
1390 * 'is_cpu_write_access' should be true if called from a real cpu write
1391 * access: the virtual CPU will exit the current TB if code is modified inside
1392 * this TB.
1394 * Called with mmap_lock held for user-mode emulation, grabs tb_lock
1395 * Called with tb_lock held for system-mode emulation
1397 static void tb_invalidate_phys_range_1(tb_page_addr_t start, tb_page_addr_t end)
1399 while (start < end) {
1400 tb_invalidate_phys_page_range(start, end, 0);
1401 start &= TARGET_PAGE_MASK;
1402 start += TARGET_PAGE_SIZE;
1406 #ifdef CONFIG_SOFTMMU
1407 void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
1409 assert_tb_locked();
1410 tb_invalidate_phys_range_1(start, end);
1412 #else
1413 void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
1415 assert_memory_lock();
1416 tb_lock();
1417 tb_invalidate_phys_range_1(start, end);
1418 tb_unlock();
1420 #endif
1422 * Invalidate all TBs which intersect with the target physical address range
1423 * [start;end[. NOTE: start and end must refer to the *same* physical page.
1424 * 'is_cpu_write_access' should be true if called from a real cpu write
1425 * access: the virtual CPU will exit the current TB if code is modified inside
1426 * this TB.
1428 * Called with tb_lock/mmap_lock held for user-mode emulation
1429 * Called with tb_lock held for system-mode emulation
1431 void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
1432 int is_cpu_write_access)
1434 TranslationBlock *tb, *tb_next;
1435 #if defined(TARGET_HAS_PRECISE_SMC)
1436 CPUState *cpu = current_cpu;
1437 CPUArchState *env = NULL;
1438 #endif
1439 tb_page_addr_t tb_start, tb_end;
1440 PageDesc *p;
1441 int n;
1442 #ifdef TARGET_HAS_PRECISE_SMC
1443 int current_tb_not_found = is_cpu_write_access;
1444 TranslationBlock *current_tb = NULL;
1445 int current_tb_modified = 0;
1446 target_ulong current_pc = 0;
1447 target_ulong current_cs_base = 0;
1448 uint32_t current_flags = 0;
1449 #endif /* TARGET_HAS_PRECISE_SMC */
1451 assert_memory_lock();
1452 assert_tb_locked();
1454 p = page_find(start >> TARGET_PAGE_BITS);
1455 if (!p) {
1456 return;
1458 #if defined(TARGET_HAS_PRECISE_SMC)
1459 if (cpu != NULL) {
1460 env = cpu->env_ptr;
1462 #endif
1464 /* we remove all the TBs in the range [start, end[ */
1465 /* XXX: see if in some cases it could be faster to invalidate all
1466 the code */
1467 tb = p->first_tb;
1468 while (tb != NULL) {
1469 n = (uintptr_t)tb & 3;
1470 tb = (TranslationBlock *)((uintptr_t)tb & ~3);
1471 tb_next = tb->page_next[n];
1472 /* NOTE: this is subtle as a TB may span two physical pages */
1473 if (n == 0) {
1474 /* NOTE: tb_end may be after the end of the page, but
1475 it is not a problem */
1476 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1477 tb_end = tb_start + tb->size;
1478 } else {
1479 tb_start = tb->page_addr[1];
1480 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1482 if (!(tb_end <= start || tb_start >= end)) {
1483 #ifdef TARGET_HAS_PRECISE_SMC
1484 if (current_tb_not_found) {
1485 current_tb_not_found = 0;
1486 current_tb = NULL;
1487 if (cpu->mem_io_pc) {
1488 /* now we have a real cpu fault */
1489 current_tb = tb_find_pc(cpu->mem_io_pc);
1492 if (current_tb == tb &&
1493 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1494 /* If we are modifying the current TB, we must stop
1495 its execution. We could be more precise by checking
1496 that the modification is after the current PC, but it
1497 would require a specialized function to partially
1498 restore the CPU state */
1500 current_tb_modified = 1;
1501 cpu_restore_state_from_tb(cpu, current_tb, cpu->mem_io_pc);
1502 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1503 &current_flags);
1505 #endif /* TARGET_HAS_PRECISE_SMC */
1506 tb_phys_invalidate(tb, -1);
1508 tb = tb_next;
1510 #if !defined(CONFIG_USER_ONLY)
1511 /* if no code remaining, no need to continue to use slow writes */
1512 if (!p->first_tb) {
1513 invalidate_page_bitmap(p);
1514 tlb_unprotect_code(start);
1516 #endif
1517 #ifdef TARGET_HAS_PRECISE_SMC
1518 if (current_tb_modified) {
1519 /* we generate a block containing just the instruction
1520 modifying the memory. This ensures that it cannot modify
1521 itself */
1522 tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1);
1523 cpu_loop_exit_noexc(cpu);
1525 #endif
1528 #ifdef CONFIG_SOFTMMU
1529 /* len must be <= 8 and start must be a multiple of len.
1530 * Called via softmmu_template.h when code areas are written to,
1531 * with the iothread mutex not held.
1533 void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
1535 PageDesc *p;
1537 #if 0
1538 if (1) {
1539 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1540 cpu_single_env->mem_io_vaddr, len,
1541 cpu_single_env->eip,
1542 cpu_single_env->eip +
1543 (intptr_t)cpu_single_env->segs[R_CS].base);
1545 #endif
1546 assert_memory_lock();
1548 p = page_find(start >> TARGET_PAGE_BITS);
1549 if (!p) {
1550 return;
1552 if (!p->code_bitmap &&
1553 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD) {
1554 /* build code bitmap. FIXME: writes should be protected by
1555 * tb_lock, reads by tb_lock or RCU.
1557 build_page_bitmap(p);
1559 if (p->code_bitmap) {
1560 unsigned int nr;
1561 unsigned long b;
1563 nr = start & ~TARGET_PAGE_MASK;
1564 b = p->code_bitmap[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG - 1));
1565 if (b & ((1 << len) - 1)) {
1566 goto do_invalidate;
1568 } else {
1569 do_invalidate:
1570 tb_invalidate_phys_page_range(start, start + len, 1);
1573 #else
1574 /* Called with mmap_lock held. If pc is not 0 then it indicates the
1575 * host PC of the faulting store instruction that caused this invalidate.
1576 * Returns true if the caller needs to abort execution of the current
1577 * TB (because it was modified by this store and the guest CPU has
1578 * precise-SMC semantics).
1580 static bool tb_invalidate_phys_page(tb_page_addr_t addr, uintptr_t pc)
1582 TranslationBlock *tb;
1583 PageDesc *p;
1584 int n;
1585 #ifdef TARGET_HAS_PRECISE_SMC
1586 TranslationBlock *current_tb = NULL;
1587 CPUState *cpu = current_cpu;
1588 CPUArchState *env = NULL;
1589 int current_tb_modified = 0;
1590 target_ulong current_pc = 0;
1591 target_ulong current_cs_base = 0;
1592 uint32_t current_flags = 0;
1593 #endif
1595 assert_memory_lock();
1597 addr &= TARGET_PAGE_MASK;
1598 p = page_find(addr >> TARGET_PAGE_BITS);
1599 if (!p) {
1600 return false;
1603 tb_lock();
1604 tb = p->first_tb;
1605 #ifdef TARGET_HAS_PRECISE_SMC
1606 if (tb && pc != 0) {
1607 current_tb = tb_find_pc(pc);
1609 if (cpu != NULL) {
1610 env = cpu->env_ptr;
1612 #endif
1613 while (tb != NULL) {
1614 n = (uintptr_t)tb & 3;
1615 tb = (TranslationBlock *)((uintptr_t)tb & ~3);
1616 #ifdef TARGET_HAS_PRECISE_SMC
1617 if (current_tb == tb &&
1618 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1619 /* If we are modifying the current TB, we must stop
1620 its execution. We could be more precise by checking
1621 that the modification is after the current PC, but it
1622 would require a specialized function to partially
1623 restore the CPU state */
1625 current_tb_modified = 1;
1626 cpu_restore_state_from_tb(cpu, current_tb, pc);
1627 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1628 &current_flags);
1630 #endif /* TARGET_HAS_PRECISE_SMC */
1631 tb_phys_invalidate(tb, addr);
1632 tb = tb->page_next[n];
1634 p->first_tb = NULL;
1635 #ifdef TARGET_HAS_PRECISE_SMC
1636 if (current_tb_modified) {
1637 /* we generate a block containing just the instruction
1638 modifying the memory. This ensures that it cannot modify
1639 itself */
1640 tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1);
1641 /* tb_lock will be reset after cpu_loop_exit_noexc longjmps
1642 * back into the cpu_exec loop. */
1643 return true;
1645 #endif
1646 tb_unlock();
1648 return false;
1650 #endif
1652 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1653 tb[1].tc_ptr. Return NULL if not found */
1654 static TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
1656 int m_min, m_max, m;
1657 uintptr_t v;
1658 TranslationBlock *tb;
1660 if (tcg_ctx.tb_ctx.nb_tbs <= 0) {
1661 return NULL;
1663 if (tc_ptr < (uintptr_t)tcg_ctx.code_gen_buffer ||
1664 tc_ptr >= (uintptr_t)tcg_ctx.code_gen_ptr) {
1665 return NULL;
1667 /* binary search (cf Knuth) */
1668 m_min = 0;
1669 m_max = tcg_ctx.tb_ctx.nb_tbs - 1;
1670 while (m_min <= m_max) {
1671 m = (m_min + m_max) >> 1;
1672 tb = tcg_ctx.tb_ctx.tbs[m];
1673 v = (uintptr_t)tb->tc_ptr;
1674 if (v == tc_ptr) {
1675 return tb;
1676 } else if (tc_ptr < v) {
1677 m_max = m - 1;
1678 } else {
1679 m_min = m + 1;
1682 return tcg_ctx.tb_ctx.tbs[m_max];
1685 #if !defined(CONFIG_USER_ONLY)
1686 void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr)
1688 ram_addr_t ram_addr;
1689 MemoryRegion *mr;
1690 hwaddr l = 1;
1692 rcu_read_lock();
1693 mr = address_space_translate(as, addr, &addr, &l, false);
1694 if (!(memory_region_is_ram(mr)
1695 || memory_region_is_romd(mr))) {
1696 rcu_read_unlock();
1697 return;
1699 ram_addr = memory_region_get_ram_addr(mr) + addr;
1700 tb_lock();
1701 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1702 tb_unlock();
1703 rcu_read_unlock();
1705 #endif /* !defined(CONFIG_USER_ONLY) */
1707 /* Called with tb_lock held. */
1708 void tb_check_watchpoint(CPUState *cpu)
1710 TranslationBlock *tb;
1712 tb = tb_find_pc(cpu->mem_io_pc);
1713 if (tb) {
1714 /* We can use retranslation to find the PC. */
1715 cpu_restore_state_from_tb(cpu, tb, cpu->mem_io_pc);
1716 tb_phys_invalidate(tb, -1);
1717 } else {
1718 /* The exception probably happened in a helper. The CPU state should
1719 have been saved before calling it. Fetch the PC from there. */
1720 CPUArchState *env = cpu->env_ptr;
1721 target_ulong pc, cs_base;
1722 tb_page_addr_t addr;
1723 uint32_t flags;
1725 cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
1726 addr = get_page_addr_code(env, pc);
1727 tb_invalidate_phys_range(addr, addr + 1);
1731 #ifndef CONFIG_USER_ONLY
1732 /* in deterministic execution mode, instructions doing device I/Os
1733 * must be at the end of the TB.
1735 * Called by softmmu_template.h, with iothread mutex not held.
1737 void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
1739 #if defined(TARGET_MIPS) || defined(TARGET_SH4)
1740 CPUArchState *env = cpu->env_ptr;
1741 #endif
1742 TranslationBlock *tb;
1743 uint32_t n, cflags;
1744 target_ulong pc, cs_base;
1745 uint32_t flags;
1747 tb_lock();
1748 tb = tb_find_pc(retaddr);
1749 if (!tb) {
1750 cpu_abort(cpu, "cpu_io_recompile: could not find TB for pc=%p",
1751 (void *)retaddr);
1753 n = cpu->icount_decr.u16.low + tb->icount;
1754 cpu_restore_state_from_tb(cpu, tb, retaddr);
1755 /* Calculate how many instructions had been executed before the fault
1756 occurred. */
1757 n = n - cpu->icount_decr.u16.low;
1758 /* Generate a new TB ending on the I/O insn. */
1759 n++;
1760 /* On MIPS and SH, delay slot instructions can only be restarted if
1761 they were already the first instruction in the TB. If this is not
1762 the first instruction in a TB then re-execute the preceding
1763 branch. */
1764 #if defined(TARGET_MIPS)
1765 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
1766 env->active_tc.PC -= (env->hflags & MIPS_HFLAG_B16 ? 2 : 4);
1767 cpu->icount_decr.u16.low++;
1768 env->hflags &= ~MIPS_HFLAG_BMASK;
1770 #elif defined(TARGET_SH4)
1771 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
1772 && n > 1) {
1773 env->pc -= 2;
1774 cpu->icount_decr.u16.low++;
1775 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
1777 #endif
1778 /* This should never happen. */
1779 if (n > CF_COUNT_MASK) {
1780 cpu_abort(cpu, "TB too big during recompile");
1783 cflags = n | CF_LAST_IO;
1784 pc = tb->pc;
1785 cs_base = tb->cs_base;
1786 flags = tb->flags;
1787 tb_phys_invalidate(tb, -1);
1788 if (tb->cflags & CF_NOCACHE) {
1789 if (tb->orig_tb) {
1790 /* Invalidate original TB if this TB was generated in
1791 * cpu_exec_nocache() */
1792 tb_phys_invalidate(tb->orig_tb, -1);
1794 tb_free(tb);
1796 /* FIXME: In theory this could raise an exception. In practice
1797 we have already translated the block once so it's probably ok. */
1798 tb_gen_code(cpu, pc, cs_base, flags, cflags);
1800 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
1801 * the first in the TB) then we end up generating a whole new TB and
1802 * repeating the fault, which is horribly inefficient.
1803 * Better would be to execute just this insn uncached, or generate a
1804 * second new TB.
1806 * cpu_loop_exit_noexc will longjmp back to cpu_exec where the
1807 * tb_lock gets reset.
1809 cpu_loop_exit_noexc(cpu);
1812 static void tb_jmp_cache_clear_page(CPUState *cpu, target_ulong page_addr)
1814 unsigned int i, i0 = tb_jmp_cache_hash_page(page_addr);
1816 for (i = 0; i < TB_JMP_PAGE_SIZE; i++) {
1817 atomic_set(&cpu->tb_jmp_cache[i0 + i], NULL);
1821 void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr)
1823 /* Discard jump cache entries for any tb which might potentially
1824 overlap the flushed page. */
1825 tb_jmp_cache_clear_page(cpu, addr - TARGET_PAGE_SIZE);
1826 tb_jmp_cache_clear_page(cpu, addr);
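/* Illustrative sketch: flushing the jump cache for a multi-page span,
 * built on the single-page helper above (function name is made up for
 * the example).  The page before the span is cleared as well, for the
 * same reason tb_flush_jmp_cache() clears addr minus one page: a TB is
 * indexed by its starting pc, so one that begins on the previous page
 * may still extend into the flushed range.
 */
static void example_flush_jmp_cache_range(CPUState *cpu, target_ulong start,
                                          target_ulong end)
{
    target_ulong addr = start & TARGET_PAGE_MASK;

    tb_jmp_cache_clear_page(cpu, addr - TARGET_PAGE_SIZE);
    for (; addr < end; addr += TARGET_PAGE_SIZE) {
        tb_jmp_cache_clear_page(cpu, addr);
    }
}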
1829 static void print_qht_statistics(FILE *f, fprintf_function cpu_fprintf,
1830 struct qht_stats hst)
1832 uint32_t hgram_opts;
1833 size_t hgram_bins;
1834 char *hgram;
1836 if (!hst.head_buckets) {
1837 return;
1839 cpu_fprintf(f, "TB hash buckets %zu/%zu (%0.2f%% head buckets used)\n",
1840 hst.used_head_buckets, hst.head_buckets,
1841 (double)hst.used_head_buckets / hst.head_buckets * 100);
1843 hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS;
1844 hgram_opts |= QDIST_PR_100X | QDIST_PR_PERCENT;
1845 if (qdist_xmax(&hst.occupancy) - qdist_xmin(&hst.occupancy) == 1) {
1846 hgram_opts |= QDIST_PR_NODECIMAL;
1848 hgram = qdist_pr(&hst.occupancy, 10, hgram_opts);
1849 cpu_fprintf(f, "TB hash occupancy %0.2f%% avg chain occ. Histogram: %s\n",
1850 qdist_avg(&hst.occupancy) * 100, hgram);
1851 g_free(hgram);
1853 hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS;
1854 hgram_bins = qdist_xmax(&hst.chain) - qdist_xmin(&hst.chain);
1855 if (hgram_bins > 10) {
1856 hgram_bins = 10;
1857 } else {
1858 hgram_bins = 0;
1859 hgram_opts |= QDIST_PR_NODECIMAL | QDIST_PR_NOBINRANGE;
1861 hgram = qdist_pr(&hst.chain, hgram_bins, hgram_opts);
1862 cpu_fprintf(f, "TB hash avg chain %0.3f buckets. Histogram: %s\n",
1863 qdist_avg(&hst.chain), hgram);
1864 g_free(hgram);
1867 void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
1869 int i, target_code_size, max_target_code_size;
1870 int direct_jmp_count, direct_jmp2_count, cross_page;
1871 TranslationBlock *tb;
1872 struct qht_stats hst;
1874 tb_lock();
1876 target_code_size = 0;
1877 max_target_code_size = 0;
1878 cross_page = 0;
1879 direct_jmp_count = 0;
1880 direct_jmp2_count = 0;
1881 for (i = 0; i < tcg_ctx.tb_ctx.nb_tbs; i++) {
1882 tb = tcg_ctx.tb_ctx.tbs[i];
1883 target_code_size += tb->size;
1884 if (tb->size > max_target_code_size) {
1885 max_target_code_size = tb->size;
1887 if (tb->page_addr[1] != -1) {
1888 cross_page++;
1890 if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
1891 direct_jmp_count++;
1892 if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
1893 direct_jmp2_count++;
1897 /* XXX: avoid using doubles ? */
1898 cpu_fprintf(f, "Translation buffer state:\n");
1899 cpu_fprintf(f, "gen code size %td/%zd\n",
1900 tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer,
1901 tcg_ctx.code_gen_highwater - tcg_ctx.code_gen_buffer);
1902 cpu_fprintf(f, "TB count %d\n", tcg_ctx.tb_ctx.nb_tbs);
1903 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
1904 tcg_ctx.tb_ctx.nb_tbs ? target_code_size /
1905 tcg_ctx.tb_ctx.nb_tbs : 0,
1906 max_target_code_size);
1907 cpu_fprintf(f, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
1908 tcg_ctx.tb_ctx.nb_tbs ? (tcg_ctx.code_gen_ptr -
1909 tcg_ctx.code_gen_buffer) /
1910 tcg_ctx.tb_ctx.nb_tbs : 0,
1911 target_code_size ? (double) (tcg_ctx.code_gen_ptr -
1912 tcg_ctx.code_gen_buffer) /
1913 target_code_size : 0);
1914 cpu_fprintf(f, "cross page TB count %d (%d%%)\n", cross_page,
1915 tcg_ctx.tb_ctx.nb_tbs ? (cross_page * 100) /
1916 tcg_ctx.tb_ctx.nb_tbs : 0);
1917 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
1918 direct_jmp_count,
1919 tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp_count * 100) /
1920 tcg_ctx.tb_ctx.nb_tbs : 0,
1921 direct_jmp2_count,
1922 tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp2_count * 100) /
1923 tcg_ctx.tb_ctx.nb_tbs : 0);
1925 qht_statistics_init(&tcg_ctx.tb_ctx.htable, &hst);
1926 print_qht_statistics(f, cpu_fprintf, hst);
1927 qht_statistics_destroy(&hst);
1929 cpu_fprintf(f, "\nStatistics:\n");
1930 cpu_fprintf(f, "TB flush count %u\n",
1931 atomic_read(&tcg_ctx.tb_ctx.tb_flush_count));
1932 cpu_fprintf(f, "TB invalidate count %d\n",
1933 tcg_ctx.tb_ctx.tb_phys_invalidate_count);
1934 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
1935 tcg_dump_info(f, cpu_fprintf);
1937 tb_unlock();
1940 void dump_opcount_info(FILE *f, fprintf_function cpu_fprintf)
1942 tcg_dump_op_count(f, cpu_fprintf);
1945 #else /* CONFIG_USER_ONLY */
1947 void cpu_interrupt(CPUState *cpu, int mask)
1949 g_assert(qemu_mutex_iothread_locked());
1950 cpu->interrupt_request |= mask;
1951 cpu->icount_decr.u16.high = -1;
1955 * Walks guest process memory "regions" one by one
1956 * and calls callback function 'fn' for each region.
1958 struct walk_memory_regions_data {
1959 walk_memory_regions_fn fn;
1960 void *priv;
1961 target_ulong start;
1962 int prot;
1965 static int walk_memory_regions_end(struct walk_memory_regions_data *data,
1966 target_ulong end, int new_prot)
1968 if (data->start != -1u) {
1969 int rc = data->fn(data->priv, data->start, end, data->prot);
1970 if (rc != 0) {
1971 return rc;
1975 data->start = (new_prot ? end : -1u);
1976 data->prot = new_prot;
1978 return 0;
1981 static int walk_memory_regions_1(struct walk_memory_regions_data *data,
1982 target_ulong base, int level, void **lp)
1984 target_ulong pa;
1985 int i, rc;
1987 if (*lp == NULL) {
1988 return walk_memory_regions_end(data, base, 0);
1991 if (level == 0) {
1992 PageDesc *pd = *lp;
1994 for (i = 0; i < V_L2_SIZE; ++i) {
1995 int prot = pd[i].flags;
1997 pa = base | (i << TARGET_PAGE_BITS);
1998 if (prot != data->prot) {
1999 rc = walk_memory_regions_end(data, pa, prot);
2000 if (rc != 0) {
2001 return rc;
2005 } else {
2006 void **pp = *lp;
2008 for (i = 0; i < V_L2_SIZE; ++i) {
2009 pa = base | ((target_ulong)i <<
2010 (TARGET_PAGE_BITS + V_L2_BITS * level));
2011 rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
2012 if (rc != 0) {
2013 return rc;
2018 return 0;
2021 int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
2023 struct walk_memory_regions_data data;
2024 uintptr_t i, l1_sz = v_l1_size;
2026 data.fn = fn;
2027 data.priv = priv;
2028 data.start = -1u;
2029 data.prot = 0;
2031 for (i = 0; i < l1_sz; i++) {
2032 target_ulong base = i << (v_l1_shift + TARGET_PAGE_BITS);
2033 int rc = walk_memory_regions_1(&data, base, v_l2_levels, l1_map + i);
2034 if (rc != 0) {
2035 return rc;
2039 return walk_memory_regions_end(&data, 0, 0);
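/* Illustrative sketch: using walk_memory_regions() with a trivial
 * callback that totals the bytes currently mapped executable (names are
 * made up for the example; the callback has the same signature as
 * dump_region() below).
 */
static int example_count_exec(void *priv, target_ulong start,
                              target_ulong end, unsigned long prot)
{
    target_ulong *total = priv;

    if (prot & PAGE_EXEC) {
        *total += end - start;
    }
    return 0;                   /* a non-zero return stops the walk */
}

static target_ulong example_exec_bytes(void)
{
    target_ulong total = 0;

    walk_memory_regions(&total, example_count_exec);
    return total;
}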
2042 static int dump_region(void *priv, target_ulong start,
2043 target_ulong end, unsigned long prot)
2045 FILE *f = (FILE *)priv;
2047 (void) fprintf(f, TARGET_FMT_lx"-"TARGET_FMT_lx
2048 " "TARGET_FMT_lx" %c%c%c\n",
2049 start, end, end - start,
2050 ((prot & PAGE_READ) ? 'r' : '-'),
2051 ((prot & PAGE_WRITE) ? 'w' : '-'),
2052 ((prot & PAGE_EXEC) ? 'x' : '-'));
2054 return 0;
2057 /* dump memory mappings */
2058 void page_dump(FILE *f)
2060 const int length = sizeof(target_ulong) * 2;
2061 (void) fprintf(f, "%-*s %-*s %-*s %s\n",
2062 length, "start", length, "end", length, "size", "prot");
2063 walk_memory_regions(f, dump_region);
2066 int page_get_flags(target_ulong address)
2068 PageDesc *p;
2070 p = page_find(address >> TARGET_PAGE_BITS);
2071 if (!p) {
2072 return 0;
2074 return p->flags;
2077 /* Modify the flags of a page and invalidate the code if necessary.
2078 The flag PAGE_WRITE_ORG is set automatically depending
2079 on PAGE_WRITE. The mmap_lock should already be held. */
2080 void page_set_flags(target_ulong start, target_ulong end, int flags)
2082 target_ulong addr, len;
2084 /* This function should never be called with addresses outside the
2085 guest address space. If this assert fires, it probably indicates
2086 a missing call to h2g_valid. */
2087 #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2088 assert(end < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
2089 #endif
2090 assert(start < end);
2091 assert_memory_lock();
2093 start = start & TARGET_PAGE_MASK;
2094 end = TARGET_PAGE_ALIGN(end);
2096 if (flags & PAGE_WRITE) {
2097 flags |= PAGE_WRITE_ORG;
2100 for (addr = start, len = end - start;
2101 len != 0;
2102 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2103 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2105 /* If the write protection bit is set, then we invalidate
2106 the code inside. */
2107 if (!(p->flags & PAGE_WRITE) &&
2108 (flags & PAGE_WRITE) &&
2109 p->first_tb) {
2110 tb_invalidate_phys_page(addr, 0);
2112 p->flags = flags;
2116 int page_check_range(target_ulong start, target_ulong len, int flags)
2118 PageDesc *p;
2119 target_ulong end;
2120 target_ulong addr;
2122 /* This function should never be called with addresses outside the
2123 guest address space. If this assert fires, it probably indicates
2124 a missing call to h2g_valid. */
2125 #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2126 assert(start < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
2127 #endif
2129 if (len == 0) {
2130 return 0;
2132 if (start + len - 1 < start) {
2133 /* We've wrapped around. */
2134 return -1;
2137 /* must do this before we lose bits in the next step */
2138 end = TARGET_PAGE_ALIGN(start + len);
2139 start = start & TARGET_PAGE_MASK;
2141 for (addr = start, len = end - start;
2142 len != 0;
2143 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2144 p = page_find(addr >> TARGET_PAGE_BITS);
2145 if (!p) {
2146 return -1;
2148 if (!(p->flags & PAGE_VALID)) {
2149 return -1;
2152 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) {
2153 return -1;
2155 if (flags & PAGE_WRITE) {
2156 if (!(p->flags & PAGE_WRITE_ORG)) {
2157 return -1;
2159 /* unprotect the page if it was made read-only because it
2160 contains translated code */
2161 if (!(p->flags & PAGE_WRITE)) {
2162 if (!page_unprotect(addr, 0)) {
2163 return -1;
2168 return 0;
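/* Illustrative sketch: how a caller typically uses page_check_range()
 * to validate a guest buffer before touching it, e.g. when emulating a
 * syscall that reads guest memory (function name is made up for the
 * example).  A return value of 0 means every page in the range carries
 * the requested permissions.
 */
static bool example_guest_buffer_readable(target_ulong guest_addr,
                                          target_ulong len)
{
    return page_check_range(guest_addr, len, PAGE_READ) == 0;
}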
2171 /* called from signal handler: invalidate the code and unprotect the
2172 * page. Return 0 if the fault was not handled, 1 if it was handled,
2173 * and 2 if it was handled but the caller must cause the TB to be
2174 * immediately exited. (We can only return 2 if the 'pc' argument is
2175 * non-zero.)
2177 int page_unprotect(target_ulong address, uintptr_t pc)
2179 unsigned int prot;
2180 bool current_tb_invalidated;
2181 PageDesc *p;
2182 target_ulong host_start, host_end, addr;
2184 /* Technically this isn't safe inside a signal handler. However we
2185 know this only ever happens in a synchronous SEGV handler, so in
2186 practice it seems to be ok. */
2187 mmap_lock();
2189 p = page_find(address >> TARGET_PAGE_BITS);
2190 if (!p) {
2191 mmap_unlock();
2192 return 0;
2195 /* if the page was really writable, then we change its
2196 protection back to writable */
2197 if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
2198 host_start = address & qemu_host_page_mask;
2199 host_end = host_start + qemu_host_page_size;
2201 prot = 0;
2202 current_tb_invalidated = false;
2203 for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
2204 p = page_find(addr >> TARGET_PAGE_BITS);
2205 p->flags |= PAGE_WRITE;
2206 prot |= p->flags;
2208 /* and since the content will be modified, we must invalidate
2209 the corresponding translated code. */
2210 current_tb_invalidated |= tb_invalidate_phys_page(addr, pc);
2211 #ifdef DEBUG_TB_CHECK
2212 tb_invalidate_check(addr);
2213 #endif
2215 mprotect((void *)g2h(host_start), qemu_host_page_size,
2216 prot & PAGE_BITS);
2218 mmap_unlock();
2219 /* If the current TB was invalidated, return to the main loop */
2220 return current_tb_invalidated ? 2 : 1;
2222 mmap_unlock();
2223 return 0;
2225 #endif /* CONFIG_USER_ONLY */