1 /*
2 * Host code generation
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 #ifdef _WIN32
20 #include <windows.h>
21 #endif
22 #include "qemu/osdep.h"
24 #include "qemu-common.h"
25 #define NO_CPU_IO_DEFS
26 #include "cpu.h"
27 #include "trace-root.h"
28 #include "disas/disas.h"
29 #include "exec/exec-all.h"
30 #include "tcg.h"
31 #if defined(CONFIG_USER_ONLY)
32 #include "qemu.h"
33 #if defined(TARGET_X86_64)
34 #include "vsyscall.h"
35 #endif
36 #if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
37 #include <sys/param.h>
38 #if __FreeBSD_version >= 700104
39 #define HAVE_KINFO_GETVMMAP
40 #define sigqueue sigqueue_freebsd /* avoid redefinition */
41 #include <sys/proc.h>
42 #include <machine/profile.h>
43 #define _KERNEL
44 #include <sys/user.h>
45 #undef _KERNEL
46 #undef sigqueue
47 #include <libutil.h>
48 #endif
49 #endif
50 #else
51 #include "exec/address-spaces.h"
52 #endif
54 #include "exec/cputlb.h"
55 #include "exec/tb-hash.h"
56 #include "translate-all.h"
57 #include "qemu/bitmap.h"
58 #include "qemu/timer.h"
59 #include "qemu/main-loop.h"
60 #include "exec/log.h"
62 /* #define DEBUG_TB_INVALIDATE */
63 /* #define DEBUG_TB_FLUSH */
64 /* make various TB consistency checks */
65 /* #define DEBUG_TB_CHECK */
67 #if !defined(CONFIG_USER_ONLY)
68 /* TB consistency checks only implemented for usermode emulation. */
69 #undef DEBUG_TB_CHECK
70 #endif
72 /* Access to the various translation structures needs to be serialised via
73 * locks for consistency. This is automatic for SoftMMU-based system
74 * emulation due to its single-threaded nature. In user-mode emulation,
75 * access to the memory-related structures is protected by the
76 * mmap_lock.
78 #ifdef CONFIG_SOFTMMU
79 #define assert_memory_lock() tcg_debug_assert(have_tb_lock)
80 #else
81 #define assert_memory_lock() tcg_debug_assert(have_mmap_lock())
82 #endif
84 #define SMC_BITMAP_USE_THRESHOLD 10
86 typedef struct PageDesc {
87 /* list of TBs intersecting this ram page */
88 TranslationBlock *first_tb;
89 #ifdef CONFIG_SOFTMMU
90 /* in order to optimize self-modifying code, we count the number of code
91 writes to a given page and switch to a bitmap past a threshold */
92 unsigned int code_write_count;
93 unsigned long *code_bitmap;
94 #else
95 unsigned long flags;
96 #endif
97 } PageDesc;
99 /* In system mode we want L1_MAP to be based on ram offsets,
100 while in user mode we want it to be based on virtual addresses. */
101 #if !defined(CONFIG_USER_ONLY)
102 #if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
103 # define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS
104 #else
105 # define L1_MAP_ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS
106 #endif
107 #else
108 # define L1_MAP_ADDR_SPACE_BITS TARGET_VIRT_ADDR_SPACE_BITS
109 #endif
111 /* Size of the L2 (and L3, etc) page tables. */
112 #define V_L2_BITS 10
113 #define V_L2_SIZE (1 << V_L2_BITS)
115 uintptr_t qemu_host_page_size;
116 intptr_t qemu_host_page_mask;
119 /* L1 Mapping properties */
121 static int v_l1_size;
122 static int v_l1_shift;
123 static int v_l2_levels;
125 /* The bottom level has pointers to PageDesc, and is indexed by
126 * anything from 4 to (V_L2_BITS + 3) bits, depending on target page size.
128 #define V_L1_MIN_BITS 4
129 #define V_L1_MAX_BITS (V_L2_BITS + 3)
130 #define V_L1_MAX_SIZE (1 << V_L1_MAX_BITS)
132 static void *l1_map[V_L1_MAX_SIZE];
134 /* code generation context */
135 TCGContext tcg_ctx;
136 bool parallel_cpus;
138 /* translation block context */
139 __thread int have_tb_lock;
141 static void page_table_config_init(void)
143 uint32_t v_l1_bits;
145 assert(TARGET_PAGE_BITS);
146 /* The bits remaining after N lower levels of page tables. */
147 v_l1_bits = (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % V_L2_BITS;
148 if (v_l1_bits < V_L1_MIN_BITS) {
149 v_l1_bits += V_L2_BITS;
152 v_l1_size = 1 << v_l1_bits;
153 v_l1_shift = L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - v_l1_bits;
154 v_l2_levels = v_l1_shift / V_L2_BITS - 1;
156 assert(v_l1_bits <= V_L1_MAX_BITS);
157 assert(v_l1_shift % V_L2_BITS == 0);
158 assert(v_l2_levels >= 0);
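/* Illustrative sketch (not in the original source): the values the function
 * above computes for a hypothetical configuration with
 * L1_MAP_ADDR_SPACE_BITS == 64 and TARGET_PAGE_BITS == 12 (4 KiB pages).
 * 64 - 12 = 52 bits of page index remain; 52 % 10 == 2 is below
 * V_L1_MIN_BITS, so the L1 table absorbs 2 + 10 = 12 bits. */
#if 0
assert(v_l1_size == 1 << 12);        /* 4096 L1 entries */
assert(v_l1_shift == 52 - 12);       /* 40 index bits below the L1 table */
assert(v_l2_levels == 40 / 10 - 1);  /* 3 intermediate levels of 10 bits */
#endif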
161 #define assert_tb_locked() tcg_debug_assert(have_tb_lock)
162 #define assert_tb_unlocked() tcg_debug_assert(!have_tb_lock)
164 void tb_lock(void)
166 assert_tb_unlocked();
167 qemu_mutex_lock(&tcg_ctx.tb_ctx.tb_lock);
168 have_tb_lock++;
171 void tb_unlock(void)
173 assert_tb_locked();
174 have_tb_lock--;
175 qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
178 void tb_lock_reset(void)
180 if (have_tb_lock) {
181 qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
182 have_tb_lock = 0;
186 static TranslationBlock *tb_find_pc(uintptr_t tc_ptr);
188 void cpu_gen_init(void)
190 tcg_context_init(&tcg_ctx);
193 /* Encode VAL as a signed leb128 sequence at P.
194 Return P incremented past the encoded value. */
195 static uint8_t *encode_sleb128(uint8_t *p, target_long val)
197 int more, byte;
199 do {
200 byte = val & 0x7f;
201 val >>= 7;
202 more = !((val == 0 && (byte & 0x40) == 0)
203 || (val == -1 && (byte & 0x40) != 0));
204 if (more) {
205 byte |= 0x80;
207 *p++ = byte;
208 } while (more);
210 return p;
213 /* Decode a signed leb128 sequence at *PP; increment *PP past the
214 decoded value. Return the decoded value. */
215 static target_long decode_sleb128(uint8_t **pp)
217 uint8_t *p = *pp;
218 target_long val = 0;
219 int byte, shift = 0;
221 do {
222 byte = *p++;
223 val |= (target_ulong)(byte & 0x7f) << shift;
224 shift += 7;
225 } while (byte & 0x80);
226 if (shift < TARGET_LONG_BITS && (byte & 0x40)) {
227 val |= -(target_ulong)1 << shift;
230 *pp = p;
231 return val;
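/* Illustrative sketch (not in the original source): a round trip through the
 * two helpers above.  encode_sleb128(-129) emits two bytes, 0xff then 0x7e;
 * decode_sleb128() sign-extends from bit 6 of the final byte and recovers the
 * value.  The function name is hypothetical. */
#if 0
static void sleb128_round_trip(void)
{
    uint8_t buf[2 * sizeof(target_long)];
    uint8_t *end = encode_sleb128(buf, -129);   /* buf[] == { 0xff, 0x7e } */
    uint8_t *p = buf;
    target_long val = decode_sleb128(&p);

    assert(end == buf + 2);
    assert(p == end);
    assert(val == -129);
}
#endif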
234 /* Encode the data collected about the instructions while compiling TB.
235 Place the data at BLOCK, and return the number of bytes consumed.
237 The logical table consists of TARGET_INSN_START_WORDS target_ulong values,
238 which come from the target's insn_start data, followed by a uintptr_t
239 which comes from the host pc of the end of the code implementing the insn.
241 Each line of the table is encoded as sleb128 deltas from the previous
242 line. The seed for the first line is { tb->pc, 0..., tb->tc_ptr }.
243 That is, the first column is seeded with the guest pc, the last column
244 with the host pc, and the middle columns with zeros. */
246 static int encode_search(TranslationBlock *tb, uint8_t *block)
248 uint8_t *highwater = tcg_ctx.code_gen_highwater;
249 uint8_t *p = block;
250 int i, j, n;
252 tb->tc_search = block;
254 for (i = 0, n = tb->icount; i < n; ++i) {
255 target_ulong prev;
257 for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
258 if (i == 0) {
259 prev = (j == 0 ? tb->pc : 0);
260 } else {
261 prev = tcg_ctx.gen_insn_data[i - 1][j];
263 p = encode_sleb128(p, tcg_ctx.gen_insn_data[i][j] - prev);
265 prev = (i == 0 ? 0 : tcg_ctx.gen_insn_end_off[i - 1]);
266 p = encode_sleb128(p, tcg_ctx.gen_insn_end_off[i] - prev);
268 /* Test for (pending) buffer overflow. The assumption is that any
269 one row beginning below the high water mark cannot overrun
270 the buffer completely. Thus we can test for overflow after
271 encoding a row without having to check during encoding. */
272 if (unlikely(p > highwater)) {
273 return -1;
277 return p - block;
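/* Worked example (not in the original source), assuming a hypothetical target
 * with TARGET_INSN_START_WORDS == 1: for a two-insn TB at guest pc 0x400000
 * whose insns start at 0x400000 and 0x400004, and whose host code ends at
 * offsets 0x20 and 0x38 from tc_ptr, the encoded rows of deltas are
 *     row 0: { 0x400000 - 0x400000, 0x20 - 0x00 } = { 0, 0x20 }
 *     row 1: { 0x400004 - 0x400000, 0x38 - 0x20 } = { 4, 0x18 }
 * each value emitted with encode_sleb128() above and walked back by
 * cpu_restore_state_from_tb() below. */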
280 /* The cpu state corresponding to 'searched_pc' is restored.
281 * Called with tb_lock held.
283 static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
284 uintptr_t searched_pc)
286 target_ulong data[TARGET_INSN_START_WORDS] = { tb->pc };
287 uintptr_t host_pc = (uintptr_t)tb->tc_ptr;
288 CPUArchState *env = cpu->env_ptr;
289 uint8_t *p = tb->tc_search;
290 int i, j, num_insns = tb->icount;
291 #ifdef CONFIG_PROFILER
292 int64_t ti = profile_getclock();
293 #endif
295 searched_pc -= GETPC_ADJ;
297 if (searched_pc < host_pc) {
298 return -1;
301 /* Reconstruct the stored insn data while looking for the point at
302 which the end of the insn exceeds the searched_pc. */
303 for (i = 0; i < num_insns; ++i) {
304 for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
305 data[j] += decode_sleb128(&p);
307 host_pc += decode_sleb128(&p);
308 if (host_pc > searched_pc) {
309 goto found;
312 return -1;
314 found:
315 if (tb->cflags & CF_USE_ICOUNT) {
316 assert(use_icount);
317 /* Reset the cycle counter to the start of the block. */
318 cpu->icount_decr.u16.low += num_insns;
319 /* Clear the IO flag. */
320 cpu->can_do_io = 0;
322 cpu->icount_decr.u16.low -= i;
323 restore_state_to_opc(env, tb, data);
325 #ifdef CONFIG_PROFILER
326 tcg_ctx.restore_time += profile_getclock() - ti;
327 tcg_ctx.restore_count++;
328 #endif
329 return 0;
332 bool cpu_restore_state(CPUState *cpu, uintptr_t retaddr)
334 TranslationBlock *tb;
335 bool r = false;
337 tb_lock();
338 tb = tb_find_pc(retaddr);
339 if (tb) {
340 cpu_restore_state_from_tb(cpu, tb, retaddr);
341 if (tb->cflags & CF_NOCACHE) {
342 /* one-shot translation, invalidate it immediately */
343 tb_phys_invalidate(tb, -1);
344 tb_free(tb);
346 r = true;
348 tb_unlock();
350 return r;
353 void page_size_init(void)
355 /* NOTE: we can always assume that qemu_host_page_size >=
356 TARGET_PAGE_SIZE */
357 qemu_real_host_page_size = getpagesize();
358 qemu_real_host_page_mask = -(intptr_t)qemu_real_host_page_size;
359 if (qemu_host_page_size == 0) {
360 qemu_host_page_size = qemu_real_host_page_size;
362 if (qemu_host_page_size < TARGET_PAGE_SIZE) {
363 qemu_host_page_size = TARGET_PAGE_SIZE;
365 qemu_host_page_mask = -(intptr_t)qemu_host_page_size;
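/* Illustrative sketch (not in the original source): with 4 KiB host pages the
 * mask computed above is -(intptr_t)4096, i.e. ~0xfff, so truncating an
 * address to its host page is a single AND. */
#if 0
uintptr_t addr = 0x12345;
uintptr_t page_start = addr & qemu_host_page_mask;   /* 0x12000 */
#endif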
368 static void page_init(void)
370 page_size_init();
371 page_table_config_init();
373 #if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
375 #ifdef HAVE_KINFO_GETVMMAP
376 struct kinfo_vmentry *freep;
377 int i, cnt;
379 freep = kinfo_getvmmap(getpid(), &cnt);
380 if (freep) {
381 mmap_lock();
382 for (i = 0; i < cnt; i++) {
383 unsigned long startaddr, endaddr;
385 startaddr = freep[i].kve_start;
386 endaddr = freep[i].kve_end;
387 if (h2g_valid(startaddr)) {
388 startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
390 if (h2g_valid(endaddr)) {
391 endaddr = h2g(endaddr);
392 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
393 } else {
394 #if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
395 endaddr = ~0ul;
396 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
397 #endif
401 free(freep);
402 mmap_unlock();
404 #else
405 FILE *f;
407 last_brk = (unsigned long)sbrk(0);
409 f = fopen("/compat/linux/proc/self/maps", "r");
410 if (f) {
411 mmap_lock();
413 do {
414 unsigned long startaddr, endaddr;
415 int n;
417 n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);
419 if (n == 2 && h2g_valid(startaddr)) {
420 startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
422 if (h2g_valid(endaddr)) {
423 endaddr = h2g(endaddr);
424 } else {
425 endaddr = ~0ul;
427 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
429 } while (!feof(f));
431 fclose(f);
432 mmap_unlock();
434 #endif
436 #endif
439 /* If alloc=1:
440 * Called with tb_lock held for system emulation.
441 * Called with mmap_lock held for user-mode emulation.
443 static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
445 PageDesc *pd;
446 void **lp;
447 int i;
449 if (alloc) {
450 assert_memory_lock();
453 /* Level 1. Always allocated. */
454 lp = l1_map + ((index >> v_l1_shift) & (v_l1_size - 1));
456 /* Level 2..N-1. */
457 for (i = v_l2_levels; i > 0; i--) {
458 void **p = atomic_rcu_read(lp);
460 if (p == NULL) {
461 if (!alloc) {
462 return NULL;
464 p = g_new0(void *, V_L2_SIZE);
465 atomic_rcu_set(lp, p);
468 lp = p + ((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1));
471 pd = atomic_rcu_read(lp);
472 if (pd == NULL) {
473 if (!alloc) {
474 return NULL;
476 pd = g_new0(PageDesc, V_L2_SIZE);
477 atomic_rcu_set(lp, pd);
480 return pd + (index & (V_L2_SIZE - 1));
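/* Illustrative sketch (not in the original source): with v_l1_shift == 40 and
 * v_l2_levels == 3 (the 64-bit / 4 KiB configuration sketched after
 * page_table_config_init above), the lookup consumes the page index
 * (address >> TARGET_PAGE_BITS) from the top down:
 *     L1 entry : (index >> 40) & (v_l1_size - 1)   -- 12 bits
 *     level 3  : (index >> 30) & (V_L2_SIZE - 1)   -- 10 bits
 *     level 2  : (index >> 20) & (V_L2_SIZE - 1)   -- 10 bits
 *     level 1  : (index >> 10) & (V_L2_SIZE - 1)   -- 10 bits
 *     PageDesc :  index        & (V_L2_SIZE - 1)   -- bottom 10 bits
 */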
483 static inline PageDesc *page_find(tb_page_addr_t index)
485 return page_find_alloc(index, 0);
488 #if defined(CONFIG_USER_ONLY)
489 /* Currently it is not recommended to allocate big chunks of data in
490 user mode. This will change when a dedicated libc is used. */
491 /* ??? 64-bit hosts ought to have no problem mmapping data outside the
492 region in which the guest needs to run. Revisit this. */
493 #define USE_STATIC_CODE_GEN_BUFFER
494 #endif
496 /* Minimum size of the code gen buffer. This number is arbitrarily chosen,
497 but not so small that we can't have a fair number of TBs live. */
498 #define MIN_CODE_GEN_BUFFER_SIZE (1024u * 1024)
500 /* Maximum size of the code gen buffer we'd like to use. Unless otherwise
501 indicated, this is constrained by the range of direct branches on the
502 host cpu, as used by the TCG implementation of goto_tb. */
503 #if defined(__x86_64__)
504 # define MAX_CODE_GEN_BUFFER_SIZE (2ul * 1024 * 1024 * 1024)
505 #elif defined(__sparc__)
506 # define MAX_CODE_GEN_BUFFER_SIZE (2ul * 1024 * 1024 * 1024)
507 #elif defined(__powerpc64__)
508 # define MAX_CODE_GEN_BUFFER_SIZE (2ul * 1024 * 1024 * 1024)
509 #elif defined(__powerpc__)
510 # define MAX_CODE_GEN_BUFFER_SIZE (32u * 1024 * 1024)
511 #elif defined(__aarch64__)
512 # define MAX_CODE_GEN_BUFFER_SIZE (128ul * 1024 * 1024)
513 #elif defined(__arm__)
514 # define MAX_CODE_GEN_BUFFER_SIZE (16u * 1024 * 1024)
515 #elif defined(__s390x__)
516 /* We have a +- 4GB range on the branches; leave some slop. */
517 # define MAX_CODE_GEN_BUFFER_SIZE (3ul * 1024 * 1024 * 1024)
518 #elif defined(__mips__)
519 /* We have a 256MB branch region, but leave room to make sure the
520 main executable is also within that region. */
521 # define MAX_CODE_GEN_BUFFER_SIZE (128ul * 1024 * 1024)
522 #else
523 # define MAX_CODE_GEN_BUFFER_SIZE ((size_t)-1)
524 #endif
526 #define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32u * 1024 * 1024)
528 #define DEFAULT_CODE_GEN_BUFFER_SIZE \
529 (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
530 ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)
532 static inline size_t size_code_gen_buffer(size_t tb_size)
534 /* Size the buffer. */
535 if (tb_size == 0) {
536 #ifdef USE_STATIC_CODE_GEN_BUFFER
537 tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
538 #else
539 /* ??? Needs adjustments. */
540 /* ??? If we relax the requirement that CONFIG_USER_ONLY use the
541 static buffer, we could size this on RESERVED_VA, on the text
542 segment size of the executable, or continue to use the default. */
543 tb_size = (unsigned long)(ram_size / 4);
544 #endif
546 if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
547 tb_size = MIN_CODE_GEN_BUFFER_SIZE;
549 if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
550 tb_size = MAX_CODE_GEN_BUFFER_SIZE;
552 return tb_size;
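/* Illustrative sketch (not in the original source), assuming
 * USE_STATIC_CODE_GEN_BUFFER and an x86-64 host (2 GB cap): a request of 0
 * falls back to the 32 MB default, and explicit requests are clamped to the
 * [MIN, MAX] range defined above. */
#if 0
assert(size_code_gen_buffer(0) == DEFAULT_CODE_GEN_BUFFER_SIZE);
assert(size_code_gen_buffer(4 * 1024) == MIN_CODE_GEN_BUFFER_SIZE);
assert(size_code_gen_buffer(4ul * 1024 * 1024 * 1024) == MAX_CODE_GEN_BUFFER_SIZE);
#endif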
555 #ifdef __mips__
556 /* In order to use J and JAL within the code_gen_buffer, we require
557 that the buffer not cross a 256MB boundary. */
558 static inline bool cross_256mb(void *addr, size_t size)
560 return ((uintptr_t)addr ^ ((uintptr_t)addr + size)) & ~0x0ffffffful;
563 /* We weren't able to allocate a buffer without crossing that boundary,
564 so make do with the larger portion of the buffer that doesn't cross.
565 Returns the new base of the buffer, and adjusts code_gen_buffer_size. */
566 static inline void *split_cross_256mb(void *buf1, size_t size1)
568 void *buf2 = (void *)(((uintptr_t)buf1 + size1) & ~0x0ffffffful);
569 size_t size2 = buf1 + size1 - buf2;
571 size1 = buf2 - buf1;
572 if (size1 < size2) {
573 size1 = size2;
574 buf1 = buf2;
577 tcg_ctx.code_gen_buffer_size = size1;
578 return buf1;
580 #endif
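/* Worked example (not in the original source): a hypothetical 3 MB buffer at
 * 0x0fe00000 ends at 0x10100000 and thus crosses the 256 MB boundary at
 * 0x10000000, so cross_256mb() above returns true.  split_cross_256mb() then
 * keeps the larger piece on one side of the boundary: the low 2 MB
 * [0x0fe00000, 0x10000000) beats the high 1 MB, so the original base is
 * returned and code_gen_buffer_size shrinks to 2 MB. */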
582 #ifdef USE_STATIC_CODE_GEN_BUFFER
583 static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
584 __attribute__((aligned(CODE_GEN_ALIGN)));
586 # ifdef _WIN32
587 static inline void do_protect(void *addr, long size, int prot)
589 DWORD old_protect;
590 VirtualProtect(addr, size, prot, &old_protect);
593 static inline void map_exec(void *addr, long size)
595 do_protect(addr, size, PAGE_EXECUTE_READWRITE);
598 static inline void map_none(void *addr, long size)
600 do_protect(addr, size, PAGE_NOACCESS);
602 # else
603 static inline void do_protect(void *addr, long size, int prot)
605 uintptr_t start, end;
607 start = (uintptr_t)addr;
608 start &= qemu_real_host_page_mask;
610 end = (uintptr_t)addr + size;
611 end = ROUND_UP(end, qemu_real_host_page_size);
613 mprotect((void *)start, end - start, prot);
616 static inline void map_exec(void *addr, long size)
618 do_protect(addr, size, PROT_READ | PROT_WRITE | PROT_EXEC);
621 static inline void map_none(void *addr, long size)
623 do_protect(addr, size, PROT_NONE);
625 # endif /* WIN32 */
627 static inline void *alloc_code_gen_buffer(void)
629 void *buf = static_code_gen_buffer;
630 size_t full_size, size;
632 /* The size of the buffer, rounded down to end on a page boundary. */
633 full_size = (((uintptr_t)buf + sizeof(static_code_gen_buffer))
634 & qemu_real_host_page_mask) - (uintptr_t)buf;
636 /* Reserve a guard page. */
637 size = full_size - qemu_real_host_page_size;
639 /* Honor a command-line option limiting the size of the buffer. */
640 if (size > tcg_ctx.code_gen_buffer_size) {
641 size = (((uintptr_t)buf + tcg_ctx.code_gen_buffer_size)
642 & qemu_real_host_page_mask) - (uintptr_t)buf;
644 tcg_ctx.code_gen_buffer_size = size;
646 #ifdef __mips__
647 if (cross_256mb(buf, size)) {
648 buf = split_cross_256mb(buf, size);
649 size = tcg_ctx.code_gen_buffer_size;
651 #endif
653 map_exec(buf, size);
654 map_none(buf + size, qemu_real_host_page_size);
655 qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);
657 return buf;
659 #elif defined(_WIN32)
660 static inline void *alloc_code_gen_buffer(void)
662 size_t size = tcg_ctx.code_gen_buffer_size;
663 void *buf1, *buf2;
665 /* Perform the allocation in two steps, so that the guard page
666 is reserved but uncommitted. */
667 buf1 = VirtualAlloc(NULL, size + qemu_real_host_page_size,
668 MEM_RESERVE, PAGE_NOACCESS);
669 if (buf1 != NULL) {
670 buf2 = VirtualAlloc(buf1, size, MEM_COMMIT, PAGE_EXECUTE_READWRITE);
671 assert(buf1 == buf2);
674 return buf1;
676 #else
677 static inline void *alloc_code_gen_buffer(void)
679 int flags = MAP_PRIVATE | MAP_ANONYMOUS;
680 uintptr_t start = 0;
681 size_t size = tcg_ctx.code_gen_buffer_size;
682 void *buf;
684 /* Constrain the position of the buffer based on the host cpu.
685 Note that these addresses are chosen in concert with the
686 addresses assigned in the relevant linker script file. */
687 # if defined(__PIE__) || defined(__PIC__)
688 /* Don't bother setting a preferred location if we're building
689 a position-independent executable. We're more likely to get
690 an address near the main executable if we let the kernel
691 choose the address. */
692 # elif defined(__x86_64__) && defined(MAP_32BIT)
693 /* Force the memory down into low memory with the executable.
694 Leave the choice of exact location with the kernel. */
695 flags |= MAP_32BIT;
696 /* Cannot expect to map more than 800MB in low memory. */
697 if (size > 800u * 1024 * 1024) {
698 tcg_ctx.code_gen_buffer_size = size = 800u * 1024 * 1024;
700 # elif defined(__sparc__)
701 start = 0x40000000ul;
702 # elif defined(__s390x__)
703 start = 0x90000000ul;
704 # elif defined(__mips__)
705 # if _MIPS_SIM == _ABI64
706 start = 0x128000000ul;
707 # else
708 start = 0x08000000ul;
709 # endif
710 # endif
712 buf = mmap((void *)start, size + qemu_real_host_page_size,
713 PROT_NONE, flags, -1, 0);
714 if (buf == MAP_FAILED) {
715 return NULL;
718 #ifdef __mips__
719 if (cross_256mb(buf, size)) {
720 /* Try again, with the original still mapped, to avoid re-acquiring
721 that 256mb crossing. This time don't specify an address. */
722 size_t size2;
723 void *buf2 = mmap(NULL, size + qemu_real_host_page_size,
724 PROT_NONE, flags, -1, 0);
725 switch ((int)(buf2 != MAP_FAILED)) {
726 case 1:
727 if (!cross_256mb(buf2, size)) {
728 /* Success! Use the new buffer. */
729 munmap(buf, size + qemu_real_host_page_size);
730 break;
732 /* Failure. Work with what we had. */
733 munmap(buf2, size + qemu_real_host_page_size);
734 /* fallthru */
735 default:
736 /* Split the original buffer. Free the smaller half. */
737 buf2 = split_cross_256mb(buf, size);
738 size2 = tcg_ctx.code_gen_buffer_size;
739 if (buf == buf2) {
740 munmap(buf + size2 + qemu_real_host_page_size, size - size2);
741 } else {
742 munmap(buf, size - size2);
744 size = size2;
745 break;
747 buf = buf2;
749 #endif
751 /* Make the final buffer accessible. The guard page at the end
752 will remain inaccessible with PROT_NONE. */
753 mprotect(buf, size, PROT_WRITE | PROT_READ | PROT_EXEC);
755 /* Request large pages for the buffer. */
756 qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);
758 return buf;
760 #endif /* USE_STATIC_CODE_GEN_BUFFER, WIN32, POSIX */
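/* Illustrative sketch (not in the original source): the reserve-then-enable
 * pattern used by the POSIX allocator above, in isolation.  One extra host
 * page is mapped PROT_NONE and never made accessible, so a runaway code
 * generator faults instead of scribbling past the buffer.  The size is
 * hypothetical. */
#if 0
size_t sz = 16 * 1024 * 1024;
void *p = mmap(NULL, sz + qemu_real_host_page_size, PROT_NONE,
               MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if (p != MAP_FAILED) {
    /* everything except the trailing guard page becomes RWX */
    mprotect(p, sz, PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif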
762 static inline void code_gen_alloc(size_t tb_size)
764 tcg_ctx.code_gen_buffer_size = size_code_gen_buffer(tb_size);
765 tcg_ctx.code_gen_buffer = alloc_code_gen_buffer();
766 if (tcg_ctx.code_gen_buffer == NULL) {
767 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
768 exit(1);
771 /* Estimate a good size for the number of TBs we can support. We
772 still haven't deducted the prologue from the buffer size here,
773 but that's minimal and won't affect the estimate much. */
774 tcg_ctx.code_gen_max_blocks
775 = tcg_ctx.code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
776 tcg_ctx.tb_ctx.tbs = g_new(TranslationBlock, tcg_ctx.code_gen_max_blocks);
778 qemu_mutex_init(&tcg_ctx.tb_ctx.tb_lock);
781 static void tb_htable_init(void)
783 unsigned int mode = QHT_MODE_AUTO_RESIZE;
785 qht_init(&tcg_ctx.tb_ctx.htable, CODE_GEN_HTABLE_SIZE, mode);
788 /* Must be called before using the QEMU cpus. 'tb_size' is the size
789 (in bytes) allocated to the translation buffer. Zero means default
790 size. */
791 void tcg_exec_init(uintptr_t tb_size)
793 cpu_gen_init();
794 page_init();
795 tb_htable_init();
796 code_gen_alloc(tb_size);
797 #if defined(CONFIG_SOFTMMU)
798 /* There's no guest base to take into account, so go ahead and
799 initialize the prologue now. */
800 tcg_prologue_init(&tcg_ctx);
801 #endif
804 bool tcg_enabled(void)
806 return tcg_ctx.code_gen_buffer != NULL;
810 * Allocate a new translation block. Flush the translation buffer if
811 * too many translation blocks or too much generated code.
813 * Called with tb_lock held.
815 static TranslationBlock *tb_alloc(target_ulong pc)
817 TranslationBlock *tb;
819 assert_tb_locked();
821 if (tcg_ctx.tb_ctx.nb_tbs >= tcg_ctx.code_gen_max_blocks) {
822 return NULL;
824 tb = &tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs++];
825 tb->pc = pc;
826 tb->cflags = 0;
827 tb->invalid = false;
828 return tb;
831 /* Called with tb_lock held. */
832 void tb_free(TranslationBlock *tb)
834 assert_tb_locked();
836 /* In practice this is mostly used for single-use temporary TBs.
837 Ignore the hard cases and just back up if this TB happens to
838 be the last one generated. */
839 if (tcg_ctx.tb_ctx.nb_tbs > 0 &&
840 tb == &tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs - 1]) {
841 tcg_ctx.code_gen_ptr = tb->tc_ptr;
842 tcg_ctx.tb_ctx.nb_tbs--;
846 static inline void invalidate_page_bitmap(PageDesc *p)
848 #ifdef CONFIG_SOFTMMU
849 g_free(p->code_bitmap);
850 p->code_bitmap = NULL;
851 p->code_write_count = 0;
852 #endif
855 /* Set to NULL all the 'first_tb' fields in all PageDescs. */
856 static void page_flush_tb_1(int level, void **lp)
858 int i;
860 if (*lp == NULL) {
861 return;
863 if (level == 0) {
864 PageDesc *pd = *lp;
866 for (i = 0; i < V_L2_SIZE; ++i) {
867 pd[i].first_tb = NULL;
868 invalidate_page_bitmap(pd + i);
870 } else {
871 void **pp = *lp;
873 for (i = 0; i < V_L2_SIZE; ++i) {
874 page_flush_tb_1(level - 1, pp + i);
879 static void page_flush_tb(void)
881 int i, l1_sz = v_l1_size;
883 for (i = 0; i < l1_sz; i++) {
884 page_flush_tb_1(v_l2_levels, l1_map + i);
888 /* flush all the translation blocks */
889 static void do_tb_flush(CPUState *cpu, run_on_cpu_data tb_flush_count)
891 tb_lock();
893 /* If it has already been done on request of another CPU,
894 * just retry.
896 if (tcg_ctx.tb_ctx.tb_flush_count != tb_flush_count.host_int) {
897 goto done;
900 #if defined(DEBUG_TB_FLUSH)
901 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
902 (unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer),
903 tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.tb_ctx.nb_tbs > 0 ?
904 ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)) /
905 tcg_ctx.tb_ctx.nb_tbs : 0);
906 #endif
907 if ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)
908 > tcg_ctx.code_gen_buffer_size) {
909 cpu_abort(cpu, "Internal error: code buffer overflow\n");
912 CPU_FOREACH(cpu) {
913 int i;
915 for (i = 0; i < TB_JMP_CACHE_SIZE; ++i) {
916 atomic_set(&cpu->tb_jmp_cache[i], NULL);
920 tcg_ctx.tb_ctx.nb_tbs = 0;
921 qht_reset_size(&tcg_ctx.tb_ctx.htable, CODE_GEN_HTABLE_SIZE);
922 page_flush_tb();
924 tcg_ctx.code_gen_ptr = tcg_ctx.code_gen_buffer;
925 /* XXX: flush processor icache at this point if cache flush is
926 expensive */
927 atomic_mb_set(&tcg_ctx.tb_ctx.tb_flush_count,
928 tcg_ctx.tb_ctx.tb_flush_count + 1);
930 done:
931 tb_unlock();
934 void tb_flush(CPUState *cpu)
936 if (tcg_enabled()) {
937 unsigned tb_flush_count = atomic_mb_read(&tcg_ctx.tb_ctx.tb_flush_count);
938 async_safe_run_on_cpu(cpu, do_tb_flush,
939 RUN_ON_CPU_HOST_INT(tb_flush_count));
943 #ifdef DEBUG_TB_CHECK
945 static void
946 do_tb_invalidate_check(struct qht *ht, void *p, uint32_t hash, void *userp)
948 TranslationBlock *tb = p;
949 target_ulong addr = *(target_ulong *)userp;
951 if (!(addr + TARGET_PAGE_SIZE <= tb->pc || addr >= tb->pc + tb->size)) {
952 printf("ERROR invalidate: address=" TARGET_FMT_lx
953 " PC=%08lx size=%04x\n", addr, (long)tb->pc, tb->size);
957 /* verify that all the pages have correct rights for code
959 * Called with tb_lock held.
961 static void tb_invalidate_check(target_ulong address)
963 address &= TARGET_PAGE_MASK;
964 qht_iter(&tcg_ctx.tb_ctx.htable, do_tb_invalidate_check, &address);
967 static void
968 do_tb_page_check(struct qht *ht, void *p, uint32_t hash, void *userp)
970 TranslationBlock *tb = p;
971 int flags1, flags2;
973 flags1 = page_get_flags(tb->pc);
974 flags2 = page_get_flags(tb->pc + tb->size - 1);
975 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
976 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
977 (long)tb->pc, tb->size, flags1, flags2);
981 /* verify that all the pages have correct rights for code */
982 static void tb_page_check(void)
984 qht_iter(&tcg_ctx.tb_ctx.htable, do_tb_page_check, NULL);
987 #endif
989 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
991 TranslationBlock *tb1;
992 unsigned int n1;
994 for (;;) {
995 tb1 = *ptb;
996 n1 = (uintptr_t)tb1 & 3;
997 tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
998 if (tb1 == tb) {
999 *ptb = tb1->page_next[n1];
1000 break;
1002 ptb = &tb1->page_next[n1];
1006 /* remove the TB from a list of TBs jumping to the n-th jump target of the TB */
1007 static inline void tb_remove_from_jmp_list(TranslationBlock *tb, int n)
1009 TranslationBlock *tb1;
1010 uintptr_t *ptb, ntb;
1011 unsigned int n1;
1013 ptb = &tb->jmp_list_next[n];
1014 if (*ptb) {
1015 /* find tb(n) in circular list */
1016 for (;;) {
1017 ntb = *ptb;
1018 n1 = ntb & 3;
1019 tb1 = (TranslationBlock *)(ntb & ~3);
1020 if (n1 == n && tb1 == tb) {
1021 break;
1023 if (n1 == 2) {
1024 ptb = &tb1->jmp_list_first;
1025 } else {
1026 ptb = &tb1->jmp_list_next[n1];
1029 /* now we can remove tb(n) from the list */
1030 *ptb = tb->jmp_list_next[n];
1032 tb->jmp_list_next[n] = (uintptr_t)NULL;
1036 /* reset the jump entry 'n' of a TB so that it is not chained to
1037 another TB */
1038 static inline void tb_reset_jump(TranslationBlock *tb, int n)
1040 uintptr_t addr = (uintptr_t)(tb->tc_ptr + tb->jmp_reset_offset[n]);
1041 tb_set_jmp_target(tb, n, addr);
1044 /* remove any jumps to the TB */
1045 static inline void tb_jmp_unlink(TranslationBlock *tb)
1047 TranslationBlock *tb1;
1048 uintptr_t *ptb, ntb;
1049 unsigned int n1;
1051 ptb = &tb->jmp_list_first;
1052 for (;;) {
1053 ntb = *ptb;
1054 n1 = ntb & 3;
1055 tb1 = (TranslationBlock *)(ntb & ~3);
1056 if (n1 == 2) {
1057 break;
1059 tb_reset_jump(tb1, n1);
1060 *ptb = tb1->jmp_list_next[n1];
1061 tb1->jmp_list_next[n1] = (uintptr_t)NULL;
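/* Illustrative sketch (not in the original source): entries in the jump lists
 * walked above pack a TranslationBlock pointer and a 2-bit tag into one word.
 * Tags 0 and 1 name the outgoing jump slot of the stored TB (the list then
 * continues at that TB's jmp_list_next[slot]); tag 2 is the terminator, which
 * tb_gen_code() below installs with "(uintptr_t)tb | 2".  'dest' is
 * hypothetical. */
#if 0
uintptr_t entry = dest->jmp_list_first;
while ((entry & 3) != 2) {
    TranslationBlock *from = (TranslationBlock *)(entry & ~3);
    unsigned slot = entry & 3;     /* which outgoing jump of 'from' hits dest */
    entry = from->jmp_list_next[slot];
}
#endif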
1065 /* invalidate one TB
1067 * Called with tb_lock held.
1069 void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
1071 CPUState *cpu;
1072 PageDesc *p;
1073 uint32_t h;
1074 tb_page_addr_t phys_pc;
1076 assert_tb_locked();
1078 atomic_set(&tb->invalid, true);
1080 /* remove the TB from the hash list */
1081 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1082 h = tb_hash_func(phys_pc, tb->pc, tb->flags);
1083 qht_remove(&tcg_ctx.tb_ctx.htable, tb, h);
1085 /* remove the TB from the page list */
1086 if (tb->page_addr[0] != page_addr) {
1087 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
1088 tb_page_remove(&p->first_tb, tb);
1089 invalidate_page_bitmap(p);
1091 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
1092 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
1093 tb_page_remove(&p->first_tb, tb);
1094 invalidate_page_bitmap(p);
1097 /* remove the TB from each CPU's jump cache */
1098 h = tb_jmp_cache_hash_func(tb->pc);
1099 CPU_FOREACH(cpu) {
1100 if (atomic_read(&cpu->tb_jmp_cache[h]) == tb) {
1101 atomic_set(&cpu->tb_jmp_cache[h], NULL);
1105 /* remove this TB from the two jump lists */
1106 tb_remove_from_jmp_list(tb, 0);
1107 tb_remove_from_jmp_list(tb, 1);
1109 /* remove any remaining jumps to this TB */
1110 tb_jmp_unlink(tb);
1112 tcg_ctx.tb_ctx.tb_phys_invalidate_count++;
1115 #ifdef CONFIG_SOFTMMU
1116 static void build_page_bitmap(PageDesc *p)
1118 int n, tb_start, tb_end;
1119 TranslationBlock *tb;
1121 p->code_bitmap = bitmap_new(TARGET_PAGE_SIZE);
1123 tb = p->first_tb;
1124 while (tb != NULL) {
1125 n = (uintptr_t)tb & 3;
1126 tb = (TranslationBlock *)((uintptr_t)tb & ~3);
1127 /* NOTE: this is subtle as a TB may span two physical pages */
1128 if (n == 0) {
1129 /* NOTE: tb_end may be after the end of the page, but
1130 it is not a problem */
1131 tb_start = tb->pc & ~TARGET_PAGE_MASK;
1132 tb_end = tb_start + tb->size;
1133 if (tb_end > TARGET_PAGE_SIZE) {
1134 tb_end = TARGET_PAGE_SIZE;
1136 } else {
1137 tb_start = 0;
1138 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1140 bitmap_set(p->code_bitmap, tb_start, tb_end - tb_start);
1141 tb = tb->page_next[n];
1144 #endif
1146 /* add the TB to the target page and protect it if necessary
1148 * Called with mmap_lock held for user-mode emulation.
1150 static inline void tb_alloc_page(TranslationBlock *tb,
1151 unsigned int n, tb_page_addr_t page_addr)
1153 PageDesc *p;
1154 #ifndef CONFIG_USER_ONLY
1155 bool page_already_protected;
1156 #endif
1158 assert_memory_lock();
1160 tb->page_addr[n] = page_addr;
1161 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
1162 tb->page_next[n] = p->first_tb;
1163 #ifndef CONFIG_USER_ONLY
1164 page_already_protected = p->first_tb != NULL;
1165 #endif
1166 p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);
1167 invalidate_page_bitmap(p);
1169 #if defined(CONFIG_USER_ONLY)
1170 if (p->flags & PAGE_WRITE) {
1171 target_ulong addr;
1172 PageDesc *p2;
1173 int prot;
1175 /* force the host page to be non-writable (writes will incur a
1176 page fault + mprotect overhead) */
1177 page_addr &= qemu_host_page_mask;
1178 prot = 0;
1179 for (addr = page_addr; addr < page_addr + qemu_host_page_size;
1180 addr += TARGET_PAGE_SIZE) {
1182 p2 = page_find(addr >> TARGET_PAGE_BITS);
1183 if (!p2) {
1184 continue;
1186 prot |= p2->flags;
1187 p2->flags &= ~PAGE_WRITE;
1189 mprotect(g2h(page_addr), qemu_host_page_size,
1190 (prot & PAGE_BITS) & ~PAGE_WRITE);
1191 #ifdef DEBUG_TB_INVALIDATE
1192 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1193 page_addr);
1194 #endif
1196 #else
1197 /* if some code is already present, then the page is already
1198 protected, so we only need to protect it when the first TB is
1199 allocated in a physical page */
1200 if (!page_already_protected) {
1201 tlb_protect_code(page_addr);
1203 #endif
1206 /* add a new TB and link it to the physical page tables. phys_page2 is
1207 * (-1) to indicate that only one page contains the TB.
1209 * Called with mmap_lock held for user-mode emulation.
1211 static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
1212 tb_page_addr_t phys_page2)
1214 uint32_t h;
1216 assert_memory_lock();
1218 /* add in the page list */
1219 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1220 if (phys_page2 != -1) {
1221 tb_alloc_page(tb, 1, phys_page2);
1222 } else {
1223 tb->page_addr[1] = -1;
1226 /* add in the hash table */
1227 h = tb_hash_func(phys_pc, tb->pc, tb->flags);
1228 qht_insert(&tcg_ctx.tb_ctx.htable, tb, h);
1230 #ifdef DEBUG_TB_CHECK
1231 tb_page_check();
1232 #endif
1235 /* Called with mmap_lock held for user mode emulation. */
1236 TranslationBlock *tb_gen_code(CPUState *cpu,
1237 target_ulong pc, target_ulong cs_base,
1238 uint32_t flags, int cflags)
1240 CPUArchState *env = cpu->env_ptr;
1241 TranslationBlock *tb;
1242 tb_page_addr_t phys_pc, phys_page2;
1243 target_ulong virt_page2;
1244 tcg_insn_unit *gen_code_buf;
1245 int gen_code_size, search_size;
1246 #ifdef CONFIG_PROFILER
1247 int64_t ti;
1248 #endif
1249 assert_memory_lock();
1251 phys_pc = get_page_addr_code(env, pc);
1252 if (use_icount && !(cflags & CF_IGNORE_ICOUNT)) {
1253 cflags |= CF_USE_ICOUNT;
1256 tb = tb_alloc(pc);
1257 if (unlikely(!tb)) {
1258 buffer_overflow:
1259 /* flush must be done */
1260 tb_flush(cpu);
1261 mmap_unlock();
1262 /* Make the execution loop process the flush as soon as possible. */
1263 cpu->exception_index = EXCP_INTERRUPT;
1264 cpu_loop_exit(cpu);
1267 gen_code_buf = tcg_ctx.code_gen_ptr;
1268 tb->tc_ptr = gen_code_buf;
1269 tb->cs_base = cs_base;
1270 tb->flags = flags;
1271 tb->cflags = cflags;
1273 #ifdef CONFIG_PROFILER
1274 tcg_ctx.tb_count1++; /* includes aborted translations because of
1275 exceptions */
1276 ti = profile_getclock();
1277 #endif
1279 tcg_func_start(&tcg_ctx);
1281 tcg_ctx.cpu = ENV_GET_CPU(env);
1282 gen_intermediate_code(env, tb);
1283 tcg_ctx.cpu = NULL;
1285 trace_translate_block(tb, tb->pc, tb->tc_ptr);
1287 /* generate machine code */
1288 tb->jmp_reset_offset[0] = TB_JMP_RESET_OFFSET_INVALID;
1289 tb->jmp_reset_offset[1] = TB_JMP_RESET_OFFSET_INVALID;
1290 tcg_ctx.tb_jmp_reset_offset = tb->jmp_reset_offset;
1291 #ifdef USE_DIRECT_JUMP
1292 tcg_ctx.tb_jmp_insn_offset = tb->jmp_insn_offset;
1293 tcg_ctx.tb_jmp_target_addr = NULL;
1294 #else
1295 tcg_ctx.tb_jmp_insn_offset = NULL;
1296 tcg_ctx.tb_jmp_target_addr = tb->jmp_target_addr;
1297 #endif
1299 #ifdef CONFIG_PROFILER
1300 tcg_ctx.tb_count++;
1301 tcg_ctx.interm_time += profile_getclock() - ti;
1302 tcg_ctx.code_time -= profile_getclock();
1303 #endif
1305 /* ??? Overflow could be handled better here. In particular, we
1306 don't need to re-do gen_intermediate_code, nor should we re-do
1307 the tcg optimization currently hidden inside tcg_gen_code. All
1308 that should be required is to flush the TBs, allocate a new TB,
1309 re-initialize it per above, and re-do the actual code generation. */
1310 gen_code_size = tcg_gen_code(&tcg_ctx, tb);
1311 if (unlikely(gen_code_size < 0)) {
1312 goto buffer_overflow;
1314 search_size = encode_search(tb, (void *)gen_code_buf + gen_code_size);
1315 if (unlikely(search_size < 0)) {
1316 goto buffer_overflow;
1319 #ifdef CONFIG_PROFILER
1320 tcg_ctx.code_time += profile_getclock();
1321 tcg_ctx.code_in_len += tb->size;
1322 tcg_ctx.code_out_len += gen_code_size;
1323 tcg_ctx.search_out_len += search_size;
1324 #endif
1326 #ifdef DEBUG_DISAS
1327 if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM) &&
1328 qemu_log_in_addr_range(tb->pc)) {
1329 qemu_log_lock();
1330 qemu_log("OUT: [size=%d]\n", gen_code_size);
1331 log_disas(tb->tc_ptr, gen_code_size);
1332 qemu_log("\n");
1333 qemu_log_flush();
1334 qemu_log_unlock();
1336 #endif
1338 tcg_ctx.code_gen_ptr = (void *)
1339 ROUND_UP((uintptr_t)gen_code_buf + gen_code_size + search_size,
1340 CODE_GEN_ALIGN);
1342 #if defined(CONFIG_USER_ONLY) && defined(TARGET_X86_64)
1343 /* if we are doing vsyscall, don't link the page, as it lies in high memory
1344 and tb_alloc_page will abort due to page_l1_map returning NULL */
1345 if (unlikely(phys_pc >= TARGET_VSYSCALL_START
1346 && phys_pc < TARGET_VSYSCALL_END))
1347 return tb;
1348 #endif
1350 /* init jump list */
1351 assert(((uintptr_t)tb & 3) == 0);
1352 tb->jmp_list_first = (uintptr_t)tb | 2;
1353 tb->jmp_list_next[0] = (uintptr_t)NULL;
1354 tb->jmp_list_next[1] = (uintptr_t)NULL;
1356 /* init original jump addresses which have been set during tcg_gen_code() */
1357 if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
1358 tb_reset_jump(tb, 0);
1360 if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
1361 tb_reset_jump(tb, 1);
1364 /* check next page if needed */
1365 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
1366 phys_page2 = -1;
1367 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
1368 phys_page2 = get_page_addr_code(env, virt_page2);
1370 /* As long as consistency of the TB stuff is provided by tb_lock in user
1371 * mode and is implicit in single-threaded softmmu emulation, no explicit
1372 * memory barrier is required before tb_link_page() makes the TB visible
1373 * through the physical hash table and physical page list.
1375 tb_link_page(tb, phys_pc, phys_page2);
1376 return tb;
1380 * Invalidate all TBs which intersect with the target physical address range
1381 * [start;end[. NOTE: start and end may refer to *different* physical pages.
1382 * 'is_cpu_write_access' should be true if called from a real cpu write
1383 * access: the virtual CPU will exit the current TB if code is modified inside
1384 * this TB.
1386 * Called with mmap_lock held for user-mode emulation, grabs tb_lock
1387 * Called with tb_lock held for system-mode emulation
1389 static void tb_invalidate_phys_range_1(tb_page_addr_t start, tb_page_addr_t end)
1391 while (start < end) {
1392 tb_invalidate_phys_page_range(start, end, 0);
1393 start &= TARGET_PAGE_MASK;
1394 start += TARGET_PAGE_SIZE;
1398 #ifdef CONFIG_SOFTMMU
1399 void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
1401 assert_tb_locked();
1402 tb_invalidate_phys_range_1(start, end);
1404 #else
1405 void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
1407 assert_memory_lock();
1408 tb_lock();
1409 tb_invalidate_phys_range_1(start, end);
1410 tb_unlock();
1412 #endif
1414 * Invalidate all TBs which intersect with the target physical address range
1415 * [start;end[. NOTE: start and end must refer to the *same* physical page.
1416 * 'is_cpu_write_access' should be true if called from a real cpu write
1417 * access: the virtual CPU will exit the current TB if code is modified inside
1418 * this TB.
1420 * Called with tb_lock/mmap_lock held for user-mode emulation
1421 * Called with tb_lock held for system-mode emulation
1423 void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
1424 int is_cpu_write_access)
1426 TranslationBlock *tb, *tb_next;
1427 #if defined(TARGET_HAS_PRECISE_SMC)
1428 CPUState *cpu = current_cpu;
1429 CPUArchState *env = NULL;
1430 #endif
1431 tb_page_addr_t tb_start, tb_end;
1432 PageDesc *p;
1433 int n;
1434 #ifdef TARGET_HAS_PRECISE_SMC
1435 int current_tb_not_found = is_cpu_write_access;
1436 TranslationBlock *current_tb = NULL;
1437 int current_tb_modified = 0;
1438 target_ulong current_pc = 0;
1439 target_ulong current_cs_base = 0;
1440 uint32_t current_flags = 0;
1441 #endif /* TARGET_HAS_PRECISE_SMC */
1443 assert_memory_lock();
1444 assert_tb_locked();
1446 p = page_find(start >> TARGET_PAGE_BITS);
1447 if (!p) {
1448 return;
1450 #if defined(TARGET_HAS_PRECISE_SMC)
1451 if (cpu != NULL) {
1452 env = cpu->env_ptr;
1454 #endif
1456 /* we remove all the TBs in the range [start, end[ */
1457 /* XXX: see if in some cases it could be faster to invalidate all
1458 the code */
1459 tb = p->first_tb;
1460 while (tb != NULL) {
1461 n = (uintptr_t)tb & 3;
1462 tb = (TranslationBlock *)((uintptr_t)tb & ~3);
1463 tb_next = tb->page_next[n];
1464 /* NOTE: this is subtle as a TB may span two physical pages */
1465 if (n == 0) {
1466 /* NOTE: tb_end may be after the end of the page, but
1467 it is not a problem */
1468 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1469 tb_end = tb_start + tb->size;
1470 } else {
1471 tb_start = tb->page_addr[1];
1472 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1474 if (!(tb_end <= start || tb_start >= end)) {
1475 #ifdef TARGET_HAS_PRECISE_SMC
1476 if (current_tb_not_found) {
1477 current_tb_not_found = 0;
1478 current_tb = NULL;
1479 if (cpu->mem_io_pc) {
1480 /* now we have a real cpu fault */
1481 current_tb = tb_find_pc(cpu->mem_io_pc);
1484 if (current_tb == tb &&
1485 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1486 /* If we are modifying the current TB, we must stop
1487 its execution. We could be more precise by checking
1488 that the modification is after the current PC, but it
1489 would require a specialized function to partially
1490 restore the CPU state */
1492 current_tb_modified = 1;
1493 cpu_restore_state_from_tb(cpu, current_tb, cpu->mem_io_pc);
1494 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1495 &current_flags);
1497 #endif /* TARGET_HAS_PRECISE_SMC */
1498 tb_phys_invalidate(tb, -1);
1500 tb = tb_next;
1502 #if !defined(CONFIG_USER_ONLY)
1503 /* if no code remaining, no need to continue to use slow writes */
1504 if (!p->first_tb) {
1505 invalidate_page_bitmap(p);
1506 tlb_unprotect_code(start);
1508 #endif
1509 #ifdef TARGET_HAS_PRECISE_SMC
1510 if (current_tb_modified) {
1511 /* we generate a block containing just the instruction that
1512 modifies the memory; this ensures that the block cannot
1513 modify itself */
1514 tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1);
1515 cpu_loop_exit_noexc(cpu);
1517 #endif
1520 #ifdef CONFIG_SOFTMMU
1521 /* len must be <= 8 and start must be a multiple of len.
1522 * Called via softmmu_template.h when code areas are written to with
1523 * iothread mutex not held.
1525 void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
1527 PageDesc *p;
1529 #if 0
1530 if (1) {
1531 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1532 cpu_single_env->mem_io_vaddr, len,
1533 cpu_single_env->eip,
1534 cpu_single_env->eip +
1535 (intptr_t)cpu_single_env->segs[R_CS].base);
1537 #endif
1538 assert_memory_lock();
1540 p = page_find(start >> TARGET_PAGE_BITS);
1541 if (!p) {
1542 return;
1544 if (!p->code_bitmap &&
1545 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD) {
1546 /* build code bitmap. FIXME: writes should be protected by
1547 * tb_lock, reads by tb_lock or RCU.
1549 build_page_bitmap(p);
1551 if (p->code_bitmap) {
1552 unsigned int nr;
1553 unsigned long b;
1555 nr = start & ~TARGET_PAGE_MASK;
1556 b = p->code_bitmap[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG - 1));
1557 if (b & ((1 << len) - 1)) {
1558 goto do_invalidate;
1560 } else {
1561 do_invalidate:
1562 tb_invalidate_phys_page_range(start, start + len, 1);
1565 #else
1566 /* Called with mmap_lock held. If pc is not 0 then it indicates the
1567 * host PC of the faulting store instruction that caused this invalidate.
1568 * Returns true if the caller needs to abort execution of the current
1569 * TB (because it was modified by this store and the guest CPU has
1570 * precise-SMC semantics).
1572 static bool tb_invalidate_phys_page(tb_page_addr_t addr, uintptr_t pc)
1574 TranslationBlock *tb;
1575 PageDesc *p;
1576 int n;
1577 #ifdef TARGET_HAS_PRECISE_SMC
1578 TranslationBlock *current_tb = NULL;
1579 CPUState *cpu = current_cpu;
1580 CPUArchState *env = NULL;
1581 int current_tb_modified = 0;
1582 target_ulong current_pc = 0;
1583 target_ulong current_cs_base = 0;
1584 uint32_t current_flags = 0;
1585 #endif
1587 assert_memory_lock();
1589 addr &= TARGET_PAGE_MASK;
1590 p = page_find(addr >> TARGET_PAGE_BITS);
1591 if (!p) {
1592 return false;
1595 tb_lock();
1596 tb = p->first_tb;
1597 #ifdef TARGET_HAS_PRECISE_SMC
1598 if (tb && pc != 0) {
1599 current_tb = tb_find_pc(pc);
1601 if (cpu != NULL) {
1602 env = cpu->env_ptr;
1604 #endif
1605 while (tb != NULL) {
1606 n = (uintptr_t)tb & 3;
1607 tb = (TranslationBlock *)((uintptr_t)tb & ~3);
1608 #ifdef TARGET_HAS_PRECISE_SMC
1609 if (current_tb == tb &&
1610 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1611 /* If we are modifying the current TB, we must stop
1612 its execution. We could be more precise by checking
1613 that the modification is after the current PC, but it
1614 would require a specialized function to partially
1615 restore the CPU state */
1617 current_tb_modified = 1;
1618 cpu_restore_state_from_tb(cpu, current_tb, pc);
1619 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1620 &current_flags);
1622 #endif /* TARGET_HAS_PRECISE_SMC */
1623 tb_phys_invalidate(tb, addr);
1624 tb = tb->page_next[n];
1626 p->first_tb = NULL;
1627 #ifdef TARGET_HAS_PRECISE_SMC
1628 if (current_tb_modified) {
1629 /* we generate a block containing just the instruction that
1630 modifies the memory; this ensures that the block cannot
1631 modify itself */
1632 tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1);
1633 /* tb_lock will be reset after cpu_loop_exit_noexc longjmps
1634 * back into the cpu_exec loop. */
1635 return true;
1637 #endif
1638 tb_unlock();
1640 return false;
1642 #endif
1644 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1645 tb[1].tc_ptr. Return NULL if not found */
1646 static TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
1648 int m_min, m_max, m;
1649 uintptr_t v;
1650 TranslationBlock *tb;
1652 if (tcg_ctx.tb_ctx.nb_tbs <= 0) {
1653 return NULL;
1655 if (tc_ptr < (uintptr_t)tcg_ctx.code_gen_buffer ||
1656 tc_ptr >= (uintptr_t)tcg_ctx.code_gen_ptr) {
1657 return NULL;
1659 /* binary search (cf Knuth) */
1660 m_min = 0;
1661 m_max = tcg_ctx.tb_ctx.nb_tbs - 1;
1662 while (m_min <= m_max) {
1663 m = (m_min + m_max) >> 1;
1664 tb = &tcg_ctx.tb_ctx.tbs[m];
1665 v = (uintptr_t)tb->tc_ptr;
1666 if (v == tc_ptr) {
1667 return tb;
1668 } else if (tc_ptr < v) {
1669 m_max = m - 1;
1670 } else {
1671 m_min = m + 1;
1674 return &tcg_ctx.tb_ctx.tbs[m_max];
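/* Note (not in the original source): tbs[] is filled in allocation order and
 * each block's code is laid out after the previous one, so tc_ptr values are
 * monotonically increasing.  When the loop exits without an exact match,
 * tbs[m_max] is therefore the last TB starting at or before tc_ptr, i.e. the
 * block that contains the searched host address. */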
1677 #if !defined(CONFIG_USER_ONLY)
1678 void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr)
1680 ram_addr_t ram_addr;
1681 MemoryRegion *mr;
1682 hwaddr l = 1;
1684 rcu_read_lock();
1685 mr = address_space_translate(as, addr, &addr, &l, false);
1686 if (!(memory_region_is_ram(mr)
1687 || memory_region_is_romd(mr))) {
1688 rcu_read_unlock();
1689 return;
1691 ram_addr = memory_region_get_ram_addr(mr) + addr;
1692 tb_lock();
1693 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1694 tb_unlock();
1695 rcu_read_unlock();
1697 #endif /* !defined(CONFIG_USER_ONLY) */
1699 /* Called with tb_lock held. */
1700 void tb_check_watchpoint(CPUState *cpu)
1702 TranslationBlock *tb;
1704 tb = tb_find_pc(cpu->mem_io_pc);
1705 if (tb) {
1706 /* We can use retranslation to find the PC. */
1707 cpu_restore_state_from_tb(cpu, tb, cpu->mem_io_pc);
1708 tb_phys_invalidate(tb, -1);
1709 } else {
1710 /* The exception probably happened in a helper. The CPU state should
1711 have been saved before calling it. Fetch the PC from there. */
1712 CPUArchState *env = cpu->env_ptr;
1713 target_ulong pc, cs_base;
1714 tb_page_addr_t addr;
1715 uint32_t flags;
1717 cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
1718 addr = get_page_addr_code(env, pc);
1719 tb_invalidate_phys_range(addr, addr + 1);
1723 #ifndef CONFIG_USER_ONLY
1724 /* in deterministic execution mode, instructions doing device I/Os
1725 * must be at the end of the TB.
1727 * Called by softmmu_template.h, with iothread mutex not held.
1729 void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
1731 #if defined(TARGET_MIPS) || defined(TARGET_SH4)
1732 CPUArchState *env = cpu->env_ptr;
1733 #endif
1734 TranslationBlock *tb;
1735 uint32_t n, cflags;
1736 target_ulong pc, cs_base;
1737 uint32_t flags;
1739 tb_lock();
1740 tb = tb_find_pc(retaddr);
1741 if (!tb) {
1742 cpu_abort(cpu, "cpu_io_recompile: could not find TB for pc=%p",
1743 (void *)retaddr);
1745 n = cpu->icount_decr.u16.low + tb->icount;
1746 cpu_restore_state_from_tb(cpu, tb, retaddr);
1747 /* Calculate how many instructions had been executed before the fault
1748 occurred. */
1749 n = n - cpu->icount_decr.u16.low;
1750 /* Generate a new TB ending on the I/O insn. */
1751 n++;
1752 /* On MIPS and SH, delay slot instructions can only be restarted if
1753 they were already the first instruction in the TB. If this is not
1754 the first instruction in a TB then re-execute the preceding
1755 branch. */
1756 #if defined(TARGET_MIPS)
1757 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
1758 env->active_tc.PC -= (env->hflags & MIPS_HFLAG_B16 ? 2 : 4);
1759 cpu->icount_decr.u16.low++;
1760 env->hflags &= ~MIPS_HFLAG_BMASK;
1762 #elif defined(TARGET_SH4)
1763 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
1764 && n > 1) {
1765 env->pc -= 2;
1766 cpu->icount_decr.u16.low++;
1767 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
1769 #endif
1770 /* This should never happen. */
1771 if (n > CF_COUNT_MASK) {
1772 cpu_abort(cpu, "TB too big during recompile");
1775 cflags = n | CF_LAST_IO;
1776 pc = tb->pc;
1777 cs_base = tb->cs_base;
1778 flags = tb->flags;
1779 tb_phys_invalidate(tb, -1);
1780 if (tb->cflags & CF_NOCACHE) {
1781 if (tb->orig_tb) {
1782 /* Invalidate original TB if this TB was generated in
1783 * cpu_exec_nocache() */
1784 tb_phys_invalidate(tb->orig_tb, -1);
1786 tb_free(tb);
1788 /* FIXME: In theory this could raise an exception. In practice
1789 we have already translated the block once so it's probably ok. */
1790 tb_gen_code(cpu, pc, cs_base, flags, cflags);
1792 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
1793 * the first in the TB) then we end up generating a whole new TB and
1794 * repeating the fault, which is horribly inefficient.
1795 * Better would be to execute just this insn uncached, or generate a
1796 * second new TB.
1798 * cpu_loop_exit_noexc will longjmp back to cpu_exec where the
1799 * tb_lock gets reset.
1801 cpu_loop_exit_noexc(cpu);
1804 void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr)
1806 unsigned int i;
1808 /* Discard jump cache entries for any tb which might potentially
1809 overlap the flushed page. */
1810 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1811 memset(&cpu->tb_jmp_cache[i], 0,
1812 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1814 i = tb_jmp_cache_hash_page(addr);
1815 memset(&cpu->tb_jmp_cache[i], 0,
1816 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1819 static void print_qht_statistics(FILE *f, fprintf_function cpu_fprintf,
1820 struct qht_stats hst)
1822 uint32_t hgram_opts;
1823 size_t hgram_bins;
1824 char *hgram;
1826 if (!hst.head_buckets) {
1827 return;
1829 cpu_fprintf(f, "TB hash buckets %zu/%zu (%0.2f%% head buckets used)\n",
1830 hst.used_head_buckets, hst.head_buckets,
1831 (double)hst.used_head_buckets / hst.head_buckets * 100);
1833 hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS;
1834 hgram_opts |= QDIST_PR_100X | QDIST_PR_PERCENT;
1835 if (qdist_xmax(&hst.occupancy) - qdist_xmin(&hst.occupancy) == 1) {
1836 hgram_opts |= QDIST_PR_NODECIMAL;
1838 hgram = qdist_pr(&hst.occupancy, 10, hgram_opts);
1839 cpu_fprintf(f, "TB hash occupancy %0.2f%% avg chain occ. Histogram: %s\n",
1840 qdist_avg(&hst.occupancy) * 100, hgram);
1841 g_free(hgram);
1843 hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS;
1844 hgram_bins = qdist_xmax(&hst.chain) - qdist_xmin(&hst.chain);
1845 if (hgram_bins > 10) {
1846 hgram_bins = 10;
1847 } else {
1848 hgram_bins = 0;
1849 hgram_opts |= QDIST_PR_NODECIMAL | QDIST_PR_NOBINRANGE;
1851 hgram = qdist_pr(&hst.chain, hgram_bins, hgram_opts);
1852 cpu_fprintf(f, "TB hash avg chain %0.3f buckets. Histogram: %s\n",
1853 qdist_avg(&hst.chain), hgram);
1854 g_free(hgram);
1857 void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
1859 int i, target_code_size, max_target_code_size;
1860 int direct_jmp_count, direct_jmp2_count, cross_page;
1861 TranslationBlock *tb;
1862 struct qht_stats hst;
1864 tb_lock();
1866 target_code_size = 0;
1867 max_target_code_size = 0;
1868 cross_page = 0;
1869 direct_jmp_count = 0;
1870 direct_jmp2_count = 0;
1871 for (i = 0; i < tcg_ctx.tb_ctx.nb_tbs; i++) {
1872 tb = &tcg_ctx.tb_ctx.tbs[i];
1873 target_code_size += tb->size;
1874 if (tb->size > max_target_code_size) {
1875 max_target_code_size = tb->size;
1877 if (tb->page_addr[1] != -1) {
1878 cross_page++;
1880 if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
1881 direct_jmp_count++;
1882 if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
1883 direct_jmp2_count++;
1887 /* XXX: avoid using doubles ? */
1888 cpu_fprintf(f, "Translation buffer state:\n");
1889 cpu_fprintf(f, "gen code size %td/%zd\n",
1890 tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer,
1891 tcg_ctx.code_gen_highwater - tcg_ctx.code_gen_buffer);
1892 cpu_fprintf(f, "TB count %d/%d\n",
1893 tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.code_gen_max_blocks);
1894 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
1895 tcg_ctx.tb_ctx.nb_tbs ? target_code_size /
1896 tcg_ctx.tb_ctx.nb_tbs : 0,
1897 max_target_code_size);
1898 cpu_fprintf(f, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
1899 tcg_ctx.tb_ctx.nb_tbs ? (tcg_ctx.code_gen_ptr -
1900 tcg_ctx.code_gen_buffer) /
1901 tcg_ctx.tb_ctx.nb_tbs : 0,
1902 target_code_size ? (double) (tcg_ctx.code_gen_ptr -
1903 tcg_ctx.code_gen_buffer) /
1904 target_code_size : 0);
1905 cpu_fprintf(f, "cross page TB count %d (%d%%)\n", cross_page,
1906 tcg_ctx.tb_ctx.nb_tbs ? (cross_page * 100) /
1907 tcg_ctx.tb_ctx.nb_tbs : 0);
1908 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
1909 direct_jmp_count,
1910 tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp_count * 100) /
1911 tcg_ctx.tb_ctx.nb_tbs : 0,
1912 direct_jmp2_count,
1913 tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp2_count * 100) /
1914 tcg_ctx.tb_ctx.nb_tbs : 0);
1916 qht_statistics_init(&tcg_ctx.tb_ctx.htable, &hst);
1917 print_qht_statistics(f, cpu_fprintf, hst);
1918 qht_statistics_destroy(&hst);
1920 cpu_fprintf(f, "\nStatistics:\n");
1921 cpu_fprintf(f, "TB flush count %u\n",
1922 atomic_read(&tcg_ctx.tb_ctx.tb_flush_count));
1923 cpu_fprintf(f, "TB invalidate count %d\n",
1924 tcg_ctx.tb_ctx.tb_phys_invalidate_count);
1925 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
1926 tcg_dump_info(f, cpu_fprintf);
1928 tb_unlock();
1931 void dump_opcount_info(FILE *f, fprintf_function cpu_fprintf)
1933 tcg_dump_op_count(f, cpu_fprintf);
1936 #else /* CONFIG_USER_ONLY */
1938 void cpu_interrupt(CPUState *cpu, int mask)
1940 g_assert(qemu_mutex_iothread_locked());
1941 cpu->interrupt_request |= mask;
1942 cpu->icount_decr.u16.high = -1;
1946 * Walks guest process memory "regions" one by one
1947 * and calls callback function 'fn' for each region.
1949 struct walk_memory_regions_data {
1950 walk_memory_regions_fn fn;
1951 void *priv;
1952 target_ulong start;
1953 int prot;
1956 static int walk_memory_regions_end(struct walk_memory_regions_data *data,
1957 target_ulong end, int new_prot)
1959 if (data->start != -1u) {
1960 int rc = data->fn(data->priv, data->start, end, data->prot);
1961 if (rc != 0) {
1962 return rc;
1966 data->start = (new_prot ? end : -1u);
1967 data->prot = new_prot;
1969 return 0;
1972 static int walk_memory_regions_1(struct walk_memory_regions_data *data,
1973 target_ulong base, int level, void **lp)
1975 target_ulong pa;
1976 int i, rc;
1978 if (*lp == NULL) {
1979 return walk_memory_regions_end(data, base, 0);
1982 if (level == 0) {
1983 PageDesc *pd = *lp;
1985 for (i = 0; i < V_L2_SIZE; ++i) {
1986 int prot = pd[i].flags;
1988 pa = base | (i << TARGET_PAGE_BITS);
1989 if (prot != data->prot) {
1990 rc = walk_memory_regions_end(data, pa, prot);
1991 if (rc != 0) {
1992 return rc;
1996 } else {
1997 void **pp = *lp;
1999 for (i = 0; i < V_L2_SIZE; ++i) {
2000 pa = base | ((target_ulong)i <<
2001 (TARGET_PAGE_BITS + V_L2_BITS * level));
2002 rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
2003 if (rc != 0) {
2004 return rc;
2009 return 0;
2012 int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
2014 struct walk_memory_regions_data data;
2015 uintptr_t i, l1_sz = v_l1_size;
2017 data.fn = fn;
2018 data.priv = priv;
2019 data.start = -1u;
2020 data.prot = 0;
2022 for (i = 0; i < l1_sz; i++) {
2023 target_ulong base = i << (v_l1_shift + TARGET_PAGE_BITS);
2024 int rc = walk_memory_regions_1(&data, base, v_l2_levels, l1_map + i);
2025 if (rc != 0) {
2026 return rc;
2030 return walk_memory_regions_end(&data, 0, 0);
2033 static int dump_region(void *priv, target_ulong start,
2034 target_ulong end, abi_ulong prot)
2036 FILE *f = (FILE *)priv;
2038 (void) fprintf(f, TARGET_FMT_lx"-"TARGET_FMT_lx
2039 " "TARGET_FMT_lx" %c%c%c\n",
2040 start, end, end - start,
2041 ((prot & PAGE_READ) ? 'r' : '-'),
2042 ((prot & PAGE_WRITE) ? 'w' : '-'),
2043 ((prot & PAGE_EXEC) ? 'x' : '-'));
2045 return 0;
2048 /* dump memory mappings */
2049 void page_dump(FILE *f)
2051 const int length = sizeof(target_ulong) * 2;
2052 (void) fprintf(f, "%-*s %-*s %-*s %s\n",
2053 length, "start", length, "end", length, "size", "prot");
2054 walk_memory_regions(f, dump_region);
2057 int page_get_flags(target_ulong address)
2059 PageDesc *p;
2061 p = page_find(address >> TARGET_PAGE_BITS);
2062 if (!p) {
2063 return 0;
2065 return p->flags;
2068 /* Modify the flags of a page and invalidate the code if necessary.
2069 The flag PAGE_WRITE_ORG is set automatically depending
2070 on PAGE_WRITE. The mmap_lock should already be held. */
2071 void page_set_flags(target_ulong start, target_ulong end, int flags)
2073 target_ulong addr, len;
2075 /* This function should never be called with addresses outside the
2076 guest address space. If this assert fires, it probably indicates
2077 a missing call to h2g_valid. */
2078 #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2079 assert(end < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
2080 #endif
2081 assert(start < end);
2082 assert_memory_lock();
2084 start = start & TARGET_PAGE_MASK;
2085 end = TARGET_PAGE_ALIGN(end);
2087 if (flags & PAGE_WRITE) {
2088 flags |= PAGE_WRITE_ORG;
2091 for (addr = start, len = end - start;
2092 len != 0;
2093 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2094 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2096 /* If the write protection bit is set, then we invalidate
2097 the code inside. */
2098 if (!(p->flags & PAGE_WRITE) &&
2099 (flags & PAGE_WRITE) &&
2100 p->first_tb) {
2101 tb_invalidate_phys_page(addr, 0);
2103 p->flags = flags;
2107 int page_check_range(target_ulong start, target_ulong len, int flags)
2109 PageDesc *p;
2110 target_ulong end;
2111 target_ulong addr;
2113 /* This function should never be called with addresses outside the
2114 guest address space. If this assert fires, it probably indicates
2115 a missing call to h2g_valid. */
2116 #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2117 assert(start < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
2118 #endif
2120 if (len == 0) {
2121 return 0;
2123 if (start + len - 1 < start) {
2124 /* We've wrapped around. */
2125 return -1;
2128 /* must be done before we lose bits in the next step */
2129 end = TARGET_PAGE_ALIGN(start + len);
2130 start = start & TARGET_PAGE_MASK;
2132 for (addr = start, len = end - start;
2133 len != 0;
2134 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2135 p = page_find(addr >> TARGET_PAGE_BITS);
2136 if (!p) {
2137 return -1;
2139 if (!(p->flags & PAGE_VALID)) {
2140 return -1;
2143 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) {
2144 return -1;
2146 if (flags & PAGE_WRITE) {
2147 if (!(p->flags & PAGE_WRITE_ORG)) {
2148 return -1;
2150 /* unprotect the page if it was made read-only because it
2151 contains translated code */
2152 if (!(p->flags & PAGE_WRITE)) {
2153 if (!page_unprotect(addr, 0)) {
2154 return -1;
2159 return 0;
2162 /* called from signal handler: invalidate the code and unprotect the
2163 * page. Return 0 if the fault was not handled, 1 if it was handled,
2164 * and 2 if it was handled but the caller must cause the TB to be
2165 * immediately exited. (We can only return 2 if the 'pc' argument is
2166 * non-zero.)
2168 int page_unprotect(target_ulong address, uintptr_t pc)
2170 unsigned int prot;
2171 bool current_tb_invalidated;
2172 PageDesc *p;
2173 target_ulong host_start, host_end, addr;
2175 /* Technically this isn't safe inside a signal handler. However we
2176 know this only ever happens in a synchronous SEGV handler, so in
2177 practice it seems to be ok. */
2178 mmap_lock();
2180 p = page_find(address >> TARGET_PAGE_BITS);
2181 if (!p) {
2182 mmap_unlock();
2183 return 0;
2186 /* if the page was really writable, then we change its
2187 protection back to writable */
2188 if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
2189 host_start = address & qemu_host_page_mask;
2190 host_end = host_start + qemu_host_page_size;
2192 prot = 0;
2193 current_tb_invalidated = false;
2194 for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
2195 p = page_find(addr >> TARGET_PAGE_BITS);
2196 p->flags |= PAGE_WRITE;
2197 prot |= p->flags;
2199 /* and since the content will be modified, we must invalidate
2200 the corresponding translated code. */
2201 current_tb_invalidated |= tb_invalidate_phys_page(addr, pc);
2202 #ifdef DEBUG_TB_CHECK
2203 tb_invalidate_check(addr);
2204 #endif
2206 mprotect((void *)g2h(host_start), qemu_host_page_size,
2207 prot & PAGE_BITS);
2209 mmap_unlock();
2210 /* If the current TB was invalidated, return to the main loop */
2211 return current_tb_invalidated ? 2 : 1;
2213 mmap_unlock();
2214 return 0;
2216 #endif /* CONFIG_USER_ONLY */