/*
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"

#include "qemu-common.h"
#define NO_CPU_IO_DEFS
#include "trace-root.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#if defined(CONFIG_USER_ONLY)
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <machine/profile.h>
#endif
#endif
#else
#include "exec/address-spaces.h"
#endif
#include "exec/cputlb.h"
#include "exec/tb-hash.h"
#include "translate-all.h"
#include "qemu/bitmap.h"
#include "qemu/timer.h"
#include "qemu/main-loop.h"
#include "sysemu/cpus.h"
/* #define DEBUG_TB_INVALIDATE */
/* #define DEBUG_TB_FLUSH */
/* make various TB consistency checks */
/* #define DEBUG_TB_CHECK */

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

/* Access to the various translation structures needs to be serialised
 * via locks for consistency.  This is automatic for SoftMMU based
 * system emulation due to its single-threaded nature.  In user-mode
 * emulation, access to the memory-related structures is protected with
 * the mmap_lock.
 */
#ifdef CONFIG_SOFTMMU
#define assert_memory_lock() tcg_debug_assert(have_tb_lock)
#else
#define assert_memory_lock() tcg_debug_assert(have_mmap_lock())
#endif
#define SMC_BITMAP_USE_THRESHOLD 10

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
#ifdef CONFIG_SOFTMMU
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    unsigned long *code_bitmap;
#else
    unsigned long flags;
#endif
} PageDesc;
/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define V_L2_BITS 10
#define V_L2_SIZE (1 << V_L2_BITS)
uintptr_t qemu_host_page_size;
intptr_t qemu_host_page_mask;

/*
 * L1 Mapping properties
 */
static int v_l1_size;
static int v_l1_shift;
static int v_l2_levels;
/* The bottom level has pointers to PageDesc, and is indexed by
 * anything from 4 to (V_L2_BITS + 3) bits, depending on target page size.
 */
#define V_L1_MIN_BITS 4
#define V_L1_MAX_BITS (V_L2_BITS + 3)
#define V_L1_MAX_SIZE (1 << V_L1_MAX_BITS)

static void *l1_map[V_L1_MAX_SIZE];
/* code generation context */
TCGContext tcg_ctx;

/* translation block context */
__thread int have_tb_lock;
static void page_table_config_init(void)
{
    uint32_t v_l1_bits;

    assert(TARGET_PAGE_BITS);
    /* The bits remaining after N lower levels of page tables.  */
    v_l1_bits = (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % V_L2_BITS;
    if (v_l1_bits < V_L1_MIN_BITS) {
        v_l1_bits += V_L2_BITS;
    }

    v_l1_size = 1 << v_l1_bits;
    v_l1_shift = L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - v_l1_bits;
    v_l2_levels = v_l1_shift / V_L2_BITS - 1;

    assert(v_l1_bits <= V_L1_MAX_BITS);
    assert(v_l1_shift % V_L2_BITS == 0);
    assert(v_l2_levels >= 0);
}
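
#if 0   /* Illustrative sketch, not part of the original file: the level
         * geometry for hypothetical parameters L1_MAP_ADDR_SPACE_BITS = 32,
         * TARGET_PAGE_BITS = 12 and V_L2_BITS = 10.  (32 - 12) % 10 == 0
         * falls below V_L1_MIN_BITS, so the L1 table is widened by one
         * whole level and no intermediate L2 levels remain. */
static void example_page_table_geometry(void)
{
    int v_l1_bits = (32 - 12) % 10;     /* 0: too narrow for an L1 index */
    if (v_l1_bits < V_L1_MIN_BITS) {
        v_l1_bits += 10;                /* widen the L1 level to 10 bits */
    }
    assert((1 << v_l1_bits) == 1024);   /* v_l1_size */
    assert(32 - 12 - v_l1_bits == 10);  /* v_l1_shift */
    assert(10 / 10 - 1 == 0);           /* v_l2_levels: bottom level suffices */
}
#endif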
#define assert_tb_locked() tcg_debug_assert(have_tb_lock)
#define assert_tb_unlocked() tcg_debug_assert(!have_tb_lock)

void tb_lock(void)
{
    assert_tb_unlocked();
    qemu_mutex_lock(&tcg_ctx.tb_ctx.tb_lock);
    have_tb_lock++;
}

void tb_unlock(void)
{
    assert_tb_locked();
    have_tb_lock--;
    qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
}

void tb_lock_reset(void)
{
    if (have_tb_lock) {
        have_tb_lock = 0;
        qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
    }
}

static TranslationBlock *tb_find_pc(uintptr_t tc_ptr);

void cpu_gen_init(void)
{
    tcg_context_init(&tcg_ctx);
}
/* Encode VAL as a signed leb128 sequence at P.
   Return P incremented past the encoded value.  */
static uint8_t *encode_sleb128(uint8_t *p, target_long val)
{
    int more, byte;

    do {
        byte = val & 0x7f;
        val >>= 7;
        more = !((val == 0 && (byte & 0x40) == 0)
                 || (val == -1 && (byte & 0x40) != 0));
        if (more) {
            byte |= 0x80;
        }
        *p++ = byte;
    } while (more);

    return p;
}
/* Decode a signed leb128 sequence at *PP; increment *PP past the
   decoded value.  Return the decoded value.  */
static target_long decode_sleb128(uint8_t **pp)
{
    uint8_t *p = *pp;
    target_long val = 0;
    int byte, shift = 0;

    do {
        byte = *p++;
        val |= (target_ulong)(byte & 0x7f) << shift;
        shift += 7;
    } while (byte & 0x80);
    if (shift < TARGET_LONG_BITS && (byte & 0x40)) {
        val |= -(target_ulong)1 << shift;
    }

    *pp = p;
    return val;
}
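
#if 0   /* Illustrative sketch, not part of the original file: a round-trip
         * check for the sleb128 helpers above.  -1 fits in a single 0x7f
         * byte, while +64 needs a second byte so bit 6 (the sign bit of
         * the last byte) is not misread as negative. */
static void example_sleb128_round_trip(void)
{
    uint8_t buf[16], *end, *cursor;

    end = encode_sleb128(buf, -1);
    assert(end - buf == 1 && buf[0] == 0x7f);

    end = encode_sleb128(buf, 64);
    assert(end - buf == 2 && buf[0] == 0xc0 && buf[1] == 0x00);

    cursor = buf;
    assert(decode_sleb128(&cursor) == 64 && cursor == end);
}
#endif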
/* Encode the data collected about the instructions while compiling TB.
   Place the data at BLOCK, and return the number of bytes consumed.

   The logical table consists of TARGET_INSN_START_WORDS target_ulong's,
   which come from the target's insn_start data, followed by a uintptr_t
   which comes from the host pc of the end of the code implementing the insn.

   Each line of the table is encoded as sleb128 deltas from the previous
   line.  The seed for the first line is { tb->pc, 0..., tb->tc_ptr }.
   That is, the first column is seeded with the guest pc, the last column
   with the host pc, and the middle columns with zeros.  */

static int encode_search(TranslationBlock *tb, uint8_t *block)
{
    uint8_t *highwater = tcg_ctx.code_gen_highwater;
    uint8_t *p = block;
    int i, j, n;

    tb->tc_search = block;

    for (i = 0, n = tb->icount; i < n; ++i) {
        target_ulong prev;

        for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
            if (i == 0) {
                prev = (j == 0 ? tb->pc : 0);
            } else {
                prev = tcg_ctx.gen_insn_data[i - 1][j];
            }
            p = encode_sleb128(p, tcg_ctx.gen_insn_data[i][j] - prev);
        }
        prev = (i == 0 ? 0 : tcg_ctx.gen_insn_end_off[i - 1]);
        p = encode_sleb128(p, tcg_ctx.gen_insn_end_off[i] - prev);

        /* Test for (pending) buffer overflow.  The assumption is that any
           one row beginning below the high water mark cannot overrun
           the buffer completely.  Thus we can test for overflow after
           encoding a row without having to check during encoding.  */
        if (unlikely(p > highwater)) {
            return -1;
        }
    }

    return p - block;
}
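
/*
 * Worked example (illustrative only, not part of the original file):
 * for a hypothetical 2-insn TB with TARGET_INSN_START_WORDS == 1, the
 * logical table is { guest pc, host end offset } per insn, and only
 * sleb128 deltas against the previous row (seeded with { tb->pc, 0 })
 * are stored:
 *
 *   insn 0: { pc + 0, end0 }  ->  sleb128(0), sleb128(end0)
 *   insn 1: { pc + 4, end1 }  ->  sleb128(4), sleb128(end1 - end0)
 *
 * Small deltas dominate, so most entries fit in a single byte.
 */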
/* The cpu state corresponding to 'searched_pc' is restored.
 * Called with tb_lock held.
 */
static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
                                     uintptr_t searched_pc)
{
    target_ulong data[TARGET_INSN_START_WORDS] = { tb->pc };
    uintptr_t host_pc = (uintptr_t)tb->tc_ptr;
    CPUArchState *env = cpu->env_ptr;
    uint8_t *p = tb->tc_search;
    int i, j, num_insns = tb->icount;
#ifdef CONFIG_PROFILER
    int64_t ti = profile_getclock();
#endif

    searched_pc -= GETPC_ADJ;

    if (searched_pc < host_pc) {
        return -1;
    }

    /* Reconstruct the stored insn data while looking for the point at
       which the end of the insn exceeds the searched_pc.  */
    for (i = 0; i < num_insns; ++i) {
        for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
            data[j] += decode_sleb128(&p);
        }
        host_pc += decode_sleb128(&p);
        if (host_pc > searched_pc) {
            goto found;
        }
    }
    return -1;

 found:
    if (tb->cflags & CF_USE_ICOUNT) {
        assert(use_icount);
        /* Reset the cycle counter to the start of the block.  */
        cpu->icount_decr.u16.low += num_insns;
        /* Clear the IO flag.  */
        cpu->can_do_io = 0;
    }
    cpu->icount_decr.u16.low -= i;
    restore_state_to_opc(env, tb, data);

#ifdef CONFIG_PROFILER
    tcg_ctx.restore_time += profile_getclock() - ti;
    tcg_ctx.restore_count++;
#endif
    return 0;
}
bool cpu_restore_state(CPUState *cpu, uintptr_t retaddr)
{
    TranslationBlock *tb;
    bool r = false;

    /* A retaddr of zero is invalid so we really shouldn't have ended
     * up here. The target code has likely forgotten to check retaddr
     * != 0 before attempting to restore state. We return early to
     * avoid blowing up on a recursive tb_lock(). The target must have
     * previously survived a failed cpu_restore_state because
     * tb_find_pc(0) would have failed anyway. It still should be
     * fixed though.
     */
    if (!retaddr) {
        return r;
    }

    tb_lock();
    tb = tb_find_pc(retaddr);
    if (tb) {
        cpu_restore_state_from_tb(cpu, tb, retaddr);
        if (tb->cflags & CF_NOCACHE) {
            /* one-shot translation, invalidate it immediately */
            tb_phys_invalidate(tb, -1);
            tb_free(tb);
        }
        r = true;
    }
    tb_unlock();

    return r;
}
void page_size_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
    qemu_real_host_page_size = getpagesize();
    qemu_real_host_page_mask = -(intptr_t)qemu_real_host_page_size;
    if (qemu_host_page_size == 0) {
        qemu_host_page_size = qemu_real_host_page_size;
    }
    if (qemu_host_page_size < TARGET_PAGE_SIZE) {
        qemu_host_page_size = TARGET_PAGE_SIZE;
    }
    qemu_host_page_mask = -(intptr_t)qemu_host_page_size;
}
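
#if 0   /* Illustrative sketch, not part of the original file: the mask is
         * simply the two's-complement negation of the (power-of-two) page
         * size, so ANDing an address with it rounds down to a page
         * boundary.  Assumes a hypothetical 4K page. */
static void example_page_mask(void)
{
    uintptr_t addr = 0x12345;           /* arbitrary address */
    intptr_t mask = -(intptr_t)4096;    /* ...f000: clears the low 12 bits */

    assert((addr & mask) == 0x12000);   /* rounded down to the page start */
}
#endif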
static void page_init(void)
{
    page_size_init();
    page_table_config_init();

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}
/* If alloc=1:
 * Called with tb_lock held for system emulation.
 * Called with mmap_lock held for user-mode emulation.
 */
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

    if (alloc) {
        assert_memory_lock();
    }

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> v_l1_shift) & (v_l1_size - 1));

    /* Level 2..N-1.  */
    for (i = v_l2_levels; i > 0; i--) {
        void **p = atomic_rcu_read(lp);

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            p = g_new0(void *, V_L2_SIZE);
            atomic_rcu_set(lp, p);
        }

        lp = p + ((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1));
    }

    pd = atomic_rcu_read(lp);
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        pd = g_new0(PageDesc, V_L2_SIZE);
        atomic_rcu_set(lp, pd);
    }

    return pd + (index & (V_L2_SIZE - 1));
}

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}
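
#if 0   /* Illustrative sketch, not part of the original file: a typical
         * read-only lookup.  Readers walk the radix tree through
         * atomic_rcu_read() and need no lock; only alloc=1 callers must
         * hold mmap_lock (user mode) or tb_lock (system mode). */
static bool example_page_has_code(tb_page_addr_t addr)
{
    PageDesc *p = page_find(addr >> TARGET_PAGE_BITS);

    return p != NULL && p->first_tb != NULL;
}
#endif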
#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc will be used. */
/* ??? 64-bit hosts ought to have no problem mmaping data outside the
   region in which the guest needs to run.  Revisit this.  */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

/* Minimum size of the code gen buffer.  This number is randomly chosen,
   but not so small that we can't have a fair number of TB's live.  */
#define MIN_CODE_GEN_BUFFER_SIZE     (1024u * 1024)

/* Maximum size of the code gen buffer we'd like to use.  Unless otherwise
   indicated, this is constrained by the range of direct branches on the
   host cpu, as used by the TCG implementation of goto_tb.  */
#if defined(__x86_64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__sparc__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__powerpc64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__powerpc__)
# define MAX_CODE_GEN_BUFFER_SIZE  (32u * 1024 * 1024)
#elif defined(__aarch64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (128ul * 1024 * 1024)
#elif defined(__s390x__)
  /* We have a +- 4GB range on the branches; leave some slop.  */
# define MAX_CODE_GEN_BUFFER_SIZE  (3ul * 1024 * 1024 * 1024)
#elif defined(__mips__)
  /* We have a 256MB branch region, but leave room to make sure the
     main executable is also within that region.  */
# define MAX_CODE_GEN_BUFFER_SIZE  (128ul * 1024 * 1024)
#else
# define MAX_CODE_GEN_BUFFER_SIZE  ((size_t)-1)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32u * 1024 * 1024)

#define DEFAULT_CODE_GEN_BUFFER_SIZE \
  (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
   ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)
static inline size_t size_code_gen_buffer(size_t tb_size)
{
    /* Size the buffer.  */
    if (tb_size == 0) {
#ifdef USE_STATIC_CODE_GEN_BUFFER
        tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* ??? Needs adjustments.  */
        /* ??? If we relax the requirement that CONFIG_USER_ONLY use the
           static buffer, we could size this on RESERVED_VA, on the text
           segment size of the executable, or continue to use the default.  */
        tb_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
        tb_size = MIN_CODE_GEN_BUFFER_SIZE;
    }
    if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
        tb_size = MAX_CODE_GEN_BUFFER_SIZE;
    }
    return tb_size;
}
#ifdef __mips__
/* In order to use J and JAL within the code_gen_buffer, we require
   that the buffer not cross a 256MB boundary.  */
static inline bool cross_256mb(void *addr, size_t size)
{
    return ((uintptr_t)addr ^ ((uintptr_t)addr + size)) & ~0x0ffffffful;
}

/* We weren't able to allocate a buffer without crossing that boundary,
   so make do with the larger portion of the buffer that doesn't cross.
   Returns the new base of the buffer, and adjusts code_gen_buffer_size.  */
static inline void *split_cross_256mb(void *buf1, size_t size1)
{
    void *buf2 = (void *)(((uintptr_t)buf1 + size1) & ~0x0ffffffful);
    size_t size2 = buf1 + size1 - buf2;

    size1 = buf2 - buf1;
    if (size1 < size2) {
        size1 = size2;
        buf1 = buf2;
    }

    tcg_ctx.code_gen_buffer_size = size1;
    return buf1;
}
#endif
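
#if 0   /* Illustrative sketch, not part of the original file: the XOR trick
         * in cross_256mb().  If the start and end of the buffer differ in
         * any bit above bit 27, the buffer spans a 256MB (0x10000000)
         * boundary.  Addresses below are hypothetical. */
static void example_cross_256mb(void)
{
    assert(!cross_256mb((void *)0x10000000ul, 0x0800000));  /* stays inside  */
    assert(cross_256mb((void *)0x1ff00000ul, 0x0200000));   /* crosses 0x20000000 */
}
#endif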
#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
    __attribute__((aligned(CODE_GEN_ALIGN)));
# ifdef _WIN32
static inline void do_protect(void *addr, long size, int prot)
{
    DWORD old_protect;
    VirtualProtect(addr, size, prot, &old_protect);
}

static inline void map_exec(void *addr, long size)
{
    do_protect(addr, size, PAGE_EXECUTE_READWRITE);
}

static inline void map_none(void *addr, long size)
{
    do_protect(addr, size, PAGE_NOACCESS);
}
# else
static inline void do_protect(void *addr, long size, int prot)
{
    uintptr_t start, end;

    start = (uintptr_t)addr;
    start &= qemu_real_host_page_mask;

    end = (uintptr_t)addr + size;
    end = ROUND_UP(end, qemu_real_host_page_size);

    mprotect((void *)start, end - start, prot);
}

static inline void map_exec(void *addr, long size)
{
    do_protect(addr, size, PROT_READ | PROT_WRITE | PROT_EXEC);
}

static inline void map_none(void *addr, long size)
{
    do_protect(addr, size, PROT_NONE);
}
# endif
static inline void *alloc_code_gen_buffer(void)
{
    void *buf = static_code_gen_buffer;
    size_t full_size, size;

    /* The size of the buffer, rounded down to end on a page boundary.  */
    full_size = (((uintptr_t)buf + sizeof(static_code_gen_buffer))
                 & qemu_real_host_page_mask) - (uintptr_t)buf;

    /* Reserve a guard page.  */
    size = full_size - qemu_real_host_page_size;

    /* Honor a command-line option limiting the size of the buffer.  */
    if (size > tcg_ctx.code_gen_buffer_size) {
        size = (((uintptr_t)buf + tcg_ctx.code_gen_buffer_size)
                & qemu_real_host_page_mask) - (uintptr_t)buf;
    }
    tcg_ctx.code_gen_buffer_size = size;

#ifdef __mips__
    if (cross_256mb(buf, size)) {
        buf = split_cross_256mb(buf, size);
        size = tcg_ctx.code_gen_buffer_size;
    }
#endif

    map_exec(buf, size);
    map_none(buf + size, qemu_real_host_page_size);
    qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);

    return buf;
}
#elif defined(_WIN32)
static inline void *alloc_code_gen_buffer(void)
{
    size_t size = tcg_ctx.code_gen_buffer_size;
    void *buf1, *buf2;

    /* Perform the allocation in two steps, so that the guard page
       is reserved but uncommitted.  */
    buf1 = VirtualAlloc(NULL, size + qemu_real_host_page_size,
                        MEM_RESERVE, PAGE_NOACCESS);
    if (buf1 != NULL) {
        buf2 = VirtualAlloc(buf1, size, MEM_COMMIT, PAGE_EXECUTE_READWRITE);
        assert(buf1 == buf2);
    }

    return buf1;
}
#else
static inline void *alloc_code_gen_buffer(void)
{
    int flags = MAP_PRIVATE | MAP_ANONYMOUS;
    uintptr_t start = 0;
    size_t size = tcg_ctx.code_gen_buffer_size;
    void *buf;

    /* Constrain the position of the buffer based on the host cpu.
       Note that these addresses are chosen in concert with the
       addresses assigned in the relevant linker script file.  */
# if defined(__PIE__) || defined(__PIC__)
    /* Don't bother setting a preferred location if we're building
       a position-independent executable.  We're more likely to get
       an address near the main executable if we let the kernel
       choose the address.  */
# elif defined(__x86_64__) && defined(MAP_32BIT)
    /* Force the memory down into low memory with the executable.
       Leave the choice of exact location with the kernel.  */
    flags |= MAP_32BIT;
    /* Cannot expect to map more than 800MB in low memory.  */
    if (size > 800u * 1024 * 1024) {
        tcg_ctx.code_gen_buffer_size = size = 800u * 1024 * 1024;
    }
# elif defined(__sparc__)
    start = 0x40000000ul;
# elif defined(__s390x__)
    start = 0x90000000ul;
# elif defined(__mips__)
#  if _MIPS_SIM == _ABI64
    start = 0x128000000ul;
#  else
    start = 0x08000000ul;
#  endif
# endif

    buf = mmap((void *)start, size + qemu_real_host_page_size,
               PROT_NONE, flags, -1, 0);
    if (buf == MAP_FAILED) {
        return NULL;
    }

#ifdef __mips__
    if (cross_256mb(buf, size)) {
        /* Try again, with the original still mapped, to avoid re-acquiring
           that 256mb crossing.  This time don't specify an address.  */
        size_t size2;
        void *buf2 = mmap(NULL, size + qemu_real_host_page_size,
                          PROT_NONE, flags, -1, 0);
        switch ((int)(buf2 != MAP_FAILED)) {
        case 1:
            if (!cross_256mb(buf2, size)) {
                /* Success!  Use the new buffer.  */
                munmap(buf, size + qemu_real_host_page_size);
                break;
            }
            /* Failure.  Work with what we had.  */
            munmap(buf2, size + qemu_real_host_page_size);
            /* fallthru */
        default:
            /* Split the original buffer.  Free the smaller half.  */
            buf2 = split_cross_256mb(buf, size);
            size2 = tcg_ctx.code_gen_buffer_size;
            if (buf == buf2) {
                munmap(buf + size2 + qemu_real_host_page_size, size - size2);
            } else {
                munmap(buf, size - size2);
            }
            size = size2;
            break;
        }
        buf = buf2;
    }
#endif

    /* Make the final buffer accessible.  The guard page at the end
       will remain inaccessible with PROT_NONE.  */
    mprotect(buf, size, PROT_WRITE | PROT_READ | PROT_EXEC);

    /* Request large pages for the buffer.  */
    qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);

    return buf;
}
#endif /* USE_STATIC_CODE_GEN_BUFFER, WIN32, POSIX */
static inline void code_gen_alloc(size_t tb_size)
{
    tcg_ctx.code_gen_buffer_size = size_code_gen_buffer(tb_size);
    tcg_ctx.code_gen_buffer = alloc_code_gen_buffer();
    if (tcg_ctx.code_gen_buffer == NULL) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
        exit(1);
    }

    /* size this conservatively -- realloc later if needed */
    tcg_ctx.tb_ctx.tbs_size =
        tcg_ctx.code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE / 8;
    if (unlikely(!tcg_ctx.tb_ctx.tbs_size)) {
        tcg_ctx.tb_ctx.tbs_size = 64 * 1024;
    }
    tcg_ctx.tb_ctx.tbs = g_new(TranslationBlock *, tcg_ctx.tb_ctx.tbs_size);

    qemu_mutex_init(&tcg_ctx.tb_ctx.tb_lock);
}
static void tb_htable_init(void)
{
    unsigned int mode = QHT_MODE_AUTO_RESIZE;

    qht_init(&tcg_ctx.tb_ctx.htable, CODE_GEN_HTABLE_SIZE, mode);
}

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size)
{
    cpu_gen_init();
    page_init();
    tb_htable_init();
    code_gen_alloc(tb_size);
#if defined(CONFIG_SOFTMMU)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(&tcg_ctx);
#endif
}

bool tcg_enabled(void)
{
    return tcg_ctx.code_gen_buffer != NULL;
}
/*
 * Allocate a new translation block. Flush the translation buffer if
 * too many translation blocks or too much generated code.
 *
 * Called with tb_lock held.
 */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;
    TBContext *ctx;

    assert_tb_locked();

    tb = tcg_tb_alloc(&tcg_ctx);
    if (unlikely(tb == NULL)) {
        return NULL;
    }
    ctx = &tcg_ctx.tb_ctx;
    if (unlikely(ctx->nb_tbs == ctx->tbs_size)) {
        ctx->tbs_size *= 2;
        ctx->tbs = g_renew(TranslationBlock *, ctx->tbs, ctx->tbs_size);
    }
    ctx->tbs[ctx->nb_tbs++] = tb;
    return tb;
}
/* Called with tb_lock held.  */
void tb_free(TranslationBlock *tb)
{
    assert_tb_locked();

    /* In practice this is mostly used for single-use temporary TBs.
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (tcg_ctx.tb_ctx.nb_tbs > 0 &&
            tb == tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs - 1]) {
        size_t struct_size = ROUND_UP(sizeof(*tb), qemu_icache_linesize);

        tcg_ctx.code_gen_ptr = tb->tc_ptr - struct_size;
        tcg_ctx.tb_ctx.nb_tbs--;
    }
}
static inline void invalidate_page_bitmap(PageDesc *p)
{
#ifdef CONFIG_SOFTMMU
    g_free(p->code_bitmap);
    p->code_bitmap = NULL;
    p->code_write_count = 0;
#endif
}
/* Set to NULL all the 'first_tb' fields in all PageDescs. */
static void page_flush_tb_1(int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            page_flush_tb_1(level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i, l1_sz = v_l1_size;

    for (i = 0; i < l1_sz; i++) {
        page_flush_tb_1(v_l2_levels, l1_map + i);
    }
}
/* flush all the translation blocks */
static void do_tb_flush(CPUState *cpu, run_on_cpu_data tb_flush_count)
{
    tb_lock();

    /* If it has already been done on request of another CPU,
     * just retry.
     */
    if (tcg_ctx.tb_ctx.tb_flush_count != tb_flush_count.host_int) {
        goto done;
    }

#if defined(DEBUG_TB_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer),
           tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.tb_ctx.nb_tbs > 0 ?
           ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)) /
           tcg_ctx.tb_ctx.nb_tbs : 0);
#endif
    if ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)
        > tcg_ctx.code_gen_buffer_size) {
        cpu_abort(cpu, "Internal error: code buffer overflow\n");
    }

    CPU_FOREACH(cpu) {
        int i;

        for (i = 0; i < TB_JMP_CACHE_SIZE; ++i) {
            atomic_set(&cpu->tb_jmp_cache[i], NULL);
        }
    }

    tcg_ctx.tb_ctx.nb_tbs = 0;
    qht_reset_size(&tcg_ctx.tb_ctx.htable, CODE_GEN_HTABLE_SIZE);
    page_flush_tb();

    tcg_ctx.code_gen_ptr = tcg_ctx.code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    atomic_mb_set(&tcg_ctx.tb_ctx.tb_flush_count,
                  tcg_ctx.tb_ctx.tb_flush_count + 1);

done:
    tb_unlock();
}
void tb_flush(CPUState *cpu)
{
    if (tcg_enabled()) {
        unsigned tb_flush_count = atomic_mb_read(&tcg_ctx.tb_ctx.tb_flush_count);
        async_safe_run_on_cpu(cpu, do_tb_flush,
                              RUN_ON_CPU_HOST_INT(tb_flush_count));
    }
}
#ifdef DEBUG_TB_CHECK

static void
do_tb_invalidate_check(struct qht *ht, void *p, uint32_t hash, void *userp)
{
    TranslationBlock *tb = p;
    target_ulong addr = *(target_ulong *)userp;

    if (!(addr + TARGET_PAGE_SIZE <= tb->pc || addr >= tb->pc + tb->size)) {
        printf("ERROR invalidate: address=" TARGET_FMT_lx
               " PC=%08lx size=%04x\n", addr, (long)tb->pc, tb->size);
    }
}

/* verify that all the pages have correct rights for code
 *
 * Called with tb_lock held.
 */
static void tb_invalidate_check(target_ulong address)
{
    address &= TARGET_PAGE_MASK;
    qht_iter(&tcg_ctx.tb_ctx.htable, do_tb_invalidate_check, &address);
}

static void
do_tb_page_check(struct qht *ht, void *p, uint32_t hash, void *userp)
{
    TranslationBlock *tb = p;
    int flags1, flags2;

    flags1 = page_get_flags(tb->pc);
    flags2 = page_get_flags(tb->pc + tb->size - 1);
    if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
        printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
               (long)tb->pc, tb->size, flags1, flags2);
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    qht_iter(&tcg_ctx.tb_ctx.htable, do_tb_page_check, NULL);
}

#endif
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for (;;) {
        tb1 = *ptb;
        n1 = (uintptr_t)tb1 & 3;
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}
/* remove the TB from a list of TBs jumping to the n-th jump target of the TB */
static inline void tb_remove_from_jmp_list(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1;
    uintptr_t *ptb, ntb;
    unsigned int n1;

    ptb = &tb->jmp_list_next[n];
    if (*ptb) {
        /* find tb(n) in circular list */
        for (;;) {
            ntb = *ptb;
            n1 = ntb & 3;
            tb1 = (TranslationBlock *)(ntb & ~3);
            if (n1 == n && tb1 == tb) {
                break;
            }
            if (n1 == 2) {
                ptb = &tb1->jmp_list_first;
            } else {
                ptb = &tb1->jmp_list_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_list_next[n];

        tb->jmp_list_next[n] = (uintptr_t)NULL;
    }
}
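
#if 0   /* Illustrative sketch, not part of the original file: the jump
         * lists store tagged pointers.  The low two bits of each entry
         * select which jump slot of the pointed-to TB continues the
         * chain (0 or 1); the tag value 2 marks the list head stored in
         * jmp_list_first. */
static void example_tagged_pointer(uintptr_t entry)
{
    TranslationBlock *tb = (TranslationBlock *)(entry & ~3); /* strip tag */
    unsigned int slot = entry & 3;                           /* 0, 1 or 2 */

    if (slot == 2) {
        /* entry was the jmp_list_first head: end of the chain */
    } else {
        /* chain continues at tb->jmp_list_next[slot] */
    }
    (void)tb;
}
#endif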
/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    uintptr_t addr = (uintptr_t)(tb->tc_ptr + tb->jmp_reset_offset[n]);
    tb_set_jmp_target(tb, n, addr);
}
/* remove any jumps to the TB */
static inline void tb_jmp_unlink(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    uintptr_t *ptb, ntb;
    unsigned int n1;

    ptb = &tb->jmp_list_first;
    for (;;) {
        ntb = *ptb;
        n1 = ntb & 3;
        tb1 = (TranslationBlock *)(ntb & ~3);
        if (n1 == 2) {
            break;
        }
        tb_reset_jump(tb1, n1);
        *ptb = tb1->jmp_list_next[n1];
        tb1->jmp_list_next[n1] = (uintptr_t)NULL;
    }
}
/* invalidate one TB
 *
 * Called with tb_lock held.
 */
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *cpu;
    PageDesc *p;
    uint32_t h;
    tb_page_addr_t phys_pc;

    assert_tb_locked();

    atomic_set(&tb->invalid, true);

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_hash_func(phys_pc, tb->pc, tb->flags);
    qht_remove(&tcg_ctx.tb_ctx.htable, tb, h);

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    /* remove the TB from the per-CPU jump caches */
    h = tb_jmp_cache_hash_func(tb->pc);
    CPU_FOREACH(cpu) {
        if (atomic_read(&cpu->tb_jmp_cache[h]) == tb) {
            atomic_set(&cpu->tb_jmp_cache[h], NULL);
        }
    }

    /* suppress this TB from the two jump lists */
    tb_remove_from_jmp_list(tb, 0);
    tb_remove_from_jmp_list(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb_jmp_unlink(tb);

    tcg_ctx.tb_ctx.tb_phys_invalidate_count++;
}
#ifdef CONFIG_SOFTMMU
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = bitmap_new(TARGET_PAGE_SIZE);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE) {
                tb_end = TARGET_PAGE_SIZE;
            }
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        bitmap_set(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
#endif
/* add the tb in the target page and protect it if necessary
 *
 * Called with mmap_lock held for user-mode emulation.
 */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;
#endif

    assert_memory_lock();

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    page_already_protected = p->first_tb != NULL;
#endif
    p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);
    invalidate_page_bitmap(p);

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for (addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find(addr >> TARGET_PAGE_BITS);
            if (!p2) {
                continue;
            }
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);
    }
#endif
}
/* add a new TB and link it to the physical page tables. phys_page2 is
 * (-1) to indicate that only one page contains the TB.
 *
 * Called with mmap_lock held for user-mode emulation.
 */
static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
                         tb_page_addr_t phys_page2)
{
    uint32_t h;

    assert_memory_lock();

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1) {
        tb_alloc_page(tb, 1, phys_page2);
    } else {
        tb->page_addr[1] = -1;
    }

    /* add in the hash table */
    h = tb_hash_func(phys_pc, tb->pc, tb->flags);
    qht_insert(&tcg_ctx.tb_ctx.htable, tb, h);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
}
/* Called with mmap_lock held for user mode emulation.  */
TranslationBlock *tb_gen_code(CPUState *cpu,
                              target_ulong pc, target_ulong cs_base,
                              uint32_t flags, int cflags)
{
    CPUArchState *env = cpu->env_ptr;
    TranslationBlock *tb;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    tcg_insn_unit *gen_code_buf;
    int gen_code_size, search_size;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif
    assert_memory_lock();

    phys_pc = get_page_addr_code(env, pc);
    if (use_icount && !(cflags & CF_IGNORE_ICOUNT)) {
        cflags |= CF_USE_ICOUNT;
    }

    tb = tb_alloc(pc);
    if (unlikely(!tb)) {
 buffer_overflow:
        /* flush must be done */
        tb_flush(cpu);
        mmap_unlock();
        /* Make the execution loop process the flush as soon as possible.  */
        cpu->exception_index = EXCP_INTERRUPT;
        cpu_loop_exit(cpu);
    }

    gen_code_buf = tcg_ctx.code_gen_ptr;
    tb->tc_ptr = gen_code_buf;
    tb->pc = pc;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    tb->invalid = false;

#ifdef CONFIG_PROFILER
    tcg_ctx.tb_count1++; /* includes aborted translations because of
                            exceptions */
    ti = profile_getclock();
#endif

    tcg_func_start(&tcg_ctx);

    tcg_ctx.cpu = ENV_GET_CPU(env);
    gen_intermediate_code(env, tb);
    tcg_ctx.cpu = NULL;

    trace_translate_block(tb, tb->pc, tb->tc_ptr);

    /* generate machine code */
    tb->jmp_reset_offset[0] = TB_JMP_RESET_OFFSET_INVALID;
    tb->jmp_reset_offset[1] = TB_JMP_RESET_OFFSET_INVALID;
    tcg_ctx.tb_jmp_reset_offset = tb->jmp_reset_offset;
#ifdef USE_DIRECT_JUMP
    tcg_ctx.tb_jmp_insn_offset = tb->jmp_insn_offset;
    tcg_ctx.tb_jmp_target_addr = NULL;
#else
    tcg_ctx.tb_jmp_insn_offset = NULL;
    tcg_ctx.tb_jmp_target_addr = tb->jmp_target_addr;
#endif

#ifdef CONFIG_PROFILER
    tcg_ctx.tb_count++;
    tcg_ctx.interm_time += profile_getclock() - ti;
    tcg_ctx.code_time -= profile_getclock();
#endif

    /* ??? Overflow could be handled better here.  In particular, we
       don't need to re-do gen_intermediate_code, nor should we re-do
       the tcg optimization currently hidden inside tcg_gen_code.  All
       that should be required is to flush the TBs, allocate a new TB,
       re-initialize it per above, and re-do the actual code generation.  */
    gen_code_size = tcg_gen_code(&tcg_ctx, tb);
    if (unlikely(gen_code_size < 0)) {
        goto buffer_overflow;
    }
    search_size = encode_search(tb, (void *)gen_code_buf + gen_code_size);
    if (unlikely(search_size < 0)) {
        goto buffer_overflow;
    }

#ifdef CONFIG_PROFILER
    tcg_ctx.code_time += profile_getclock();
    tcg_ctx.code_in_len += tb->size;
    tcg_ctx.code_out_len += gen_code_size;
    tcg_ctx.search_out_len += search_size;
#endif

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM) &&
        qemu_log_in_addr_range(tb->pc)) {
        qemu_log_lock();
        qemu_log("OUT: [size=%d]\n", gen_code_size);
        log_disas(tb->tc_ptr, gen_code_size);
        qemu_log("\n");
        qemu_log_flush();
        qemu_log_unlock();
    }
#endif

    tcg_ctx.code_gen_ptr = (void *)
        ROUND_UP((uintptr_t)gen_code_buf + gen_code_size + search_size,
                 CODE_GEN_ALIGN);

    /* init jump list */
    assert(((uintptr_t)tb & 3) == 0);
    tb->jmp_list_first = (uintptr_t)tb | 2;
    tb->jmp_list_next[0] = (uintptr_t)NULL;
    tb->jmp_list_next[1] = (uintptr_t)NULL;

    /* init original jump addresses which have been set during tcg_gen_code() */
    if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
        tb_reset_jump(tb, 0);
    }
    if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
        tb_reset_jump(tb, 1);
    }

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    /* As long as consistency of the TB stuff is provided by tb_lock in user
     * mode and is implicit in single-threaded softmmu emulation, no explicit
     * memory barrier is required before tb_link_page() makes the TB visible
     * through the physical hash table and physical page list.
     */
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}
/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[. NOTE: start and end may refer to *different* physical pages.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 *
 * Called with mmap_lock held for user-mode emulation, grabs tb_lock
 * Called with tb_lock held for system-mode emulation
 */
static void tb_invalidate_phys_range_1(tb_page_addr_t start, tb_page_addr_t end)
{
    while (start < end) {
        tb_invalidate_phys_page_range(start, end, 0);
        start &= TARGET_PAGE_MASK;
        start += TARGET_PAGE_SIZE;
    }
}

#ifdef CONFIG_SOFTMMU
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
{
    assert_tb_locked();
    tb_invalidate_phys_range_1(start, end);
}
#else
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
{
    assert_memory_lock();
    tb_lock();
    tb_invalidate_phys_range_1(start, end);
    tb_unlock();
}
#endif
/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[. NOTE: start and end must refer to the *same* physical page.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 *
 * Called with tb_lock/mmap_lock held for user-mode emulation
 * Called with tb_lock held for system-mode emulation
 */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next;
#if defined(TARGET_HAS_PRECISE_SMC)
    CPUState *cpu = current_cpu;
    CPUArchState *env = NULL;
#endif
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    uint32_t current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    assert_memory_lock();
    assert_tb_locked();

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }
#if defined(TARGET_HAS_PRECISE_SMC)
    if (cpu != NULL) {
        env = cpu->env_ptr;
    }
#endif

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all
       the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (cpu->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(cpu->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state_from_tb(cpu, current_tb, cpu->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            tb_phys_invalidate(tb, -1);
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        tlb_unprotect_code(start);
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1);
        cpu_loop_exit_noexc(cpu);
    }
#endif
}
#ifdef CONFIG_SOFTMMU
/* len must be <= 8 and start must be a multiple of len.
 * Called via softmmu_template.h when code areas are written to with
 * iothread mutex not held.
 */
void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;

#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                  cpu_single_env->mem_io_vaddr, len,
                  cpu_single_env->eip,
                  cpu_single_env->eip +
                  (intptr_t)cpu_single_env->segs[R_CS].base);
    }
#endif
    assert_memory_lock();

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD) {
        /* build code bitmap.  FIXME: writes should be protected by
         * tb_lock, reads by tb_lock or RCU.
         */
        build_page_bitmap(p);
    }
    if (p->code_bitmap) {
        unsigned int nr;
        unsigned long b;

        nr = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG - 1));
        if (b & ((1 << len) - 1)) {
            goto do_invalidate;
        }
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
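
#if 0   /* Illustrative sketch, not part of the original file: how the
         * bitmap test above works for a hypothetical 4-byte write at
         * page offset 0x40.  BIT_WORD() selects the long containing that
         * bit, the shift aligns it to bit 0, and the low 'len' bits then
         * say whether any written byte overlaps translated code. */
static bool example_write_hits_code(unsigned long *code_bitmap)
{
    unsigned int nr = 0x40;   /* page offset of the write */
    int len = 4;              /* write length, <= 8 */
    unsigned long b;

    b = code_bitmap[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG - 1));
    return (b & ((1 << len) - 1)) != 0;
}
#endif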
#else
/* Called with mmap_lock held. If pc is not 0 then it indicates the
 * host PC of the faulting store instruction that caused this invalidate.
 * Returns true if the caller needs to abort execution of the current
 * TB (because it was modified by this store and the guest CPU has
 * precise-SMC semantics).
 */
static bool tb_invalidate_phys_page(tb_page_addr_t addr, uintptr_t pc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *cpu = current_cpu;
    CPUArchState *env = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    uint32_t current_flags = 0;
#endif

    assert_memory_lock();

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        return false;
    }

    tb_lock();
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
    if (cpu != NULL) {
        env = cpu->env_ptr;
    }
#endif
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state_from_tb(cpu, current_tb, pc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1);
        /* tb_lock will be reset after cpu_loop_exit_noexc longjmps
         * back into the cpu_exec loop. */
        return true;
    }
#endif
    tb_unlock();

    return false;
}
#endif
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
static TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
{
    int m_min, m_max, m;
    uintptr_t v;
    TranslationBlock *tb;

    if (tcg_ctx.tb_ctx.nb_tbs <= 0) {
        return NULL;
    }
    if (tc_ptr < (uintptr_t)tcg_ctx.code_gen_buffer ||
        tc_ptr >= (uintptr_t)tcg_ctx.code_gen_ptr) {
        return NULL;
    }
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = tcg_ctx.tb_ctx.nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = tcg_ctx.tb_ctx.tbs[m];
        v = (uintptr_t)tb->tc_ptr;
        if (v == tc_ptr) {
            return tb;
        } else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return tcg_ctx.tb_ctx.tbs[m_max];
}
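
#if 0   /* Illustrative sketch, not part of the original file: tb_find_pc()
         * maps a host return address (e.g. from a faulting access inside
         * generated code) back to the TB whose code contains it, which is
         * what cpu_restore_state() above relies on. */
static void example_find_tb_for_fault(uintptr_t retaddr)
{
    TranslationBlock *tb = tb_find_pc(retaddr);

    if (tb != NULL) {
        /* retaddr lies within the host code generated for this TB */
    }
}
#endif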
#if !defined(CONFIG_USER_ONLY)
void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr)
{
    ram_addr_t ram_addr;
    MemoryRegion *mr;
    hwaddr l = 1;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr, &l, false);
    if (!(memory_region_is_ram(mr)
          || memory_region_is_romd(mr))) {
        rcu_read_unlock();
        return;
    }
    ram_addr = memory_region_get_ram_addr(mr) + addr;
    tb_lock();
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
    tb_unlock();
    rcu_read_unlock();
}
#endif /* !defined(CONFIG_USER_ONLY) */
/* Called with tb_lock held.  */
void tb_check_watchpoint(CPUState *cpu)
{
    TranslationBlock *tb;

    tb = tb_find_pc(cpu->mem_io_pc);
    if (tb) {
        /* We can use retranslation to find the PC.  */
        cpu_restore_state_from_tb(cpu, tb, cpu->mem_io_pc);
        tb_phys_invalidate(tb, -1);
    } else {
        /* The exception probably happened in a helper.  The CPU state should
           have been saved before calling it. Fetch the PC from there.  */
        CPUArchState *env = cpu->env_ptr;
        target_ulong pc, cs_base;
        tb_page_addr_t addr;
        uint32_t flags;

        cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
        addr = get_page_addr_code(env, pc);
        tb_invalidate_phys_range(addr, addr + 1);
    }
}
#ifndef CONFIG_USER_ONLY
/* in deterministic execution mode, instructions doing device I/Os
 * must be at the end of the TB.
 *
 * Called by softmmu_template.h, with iothread mutex not held.
 */
void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
{
#if defined(TARGET_MIPS) || defined(TARGET_SH4)
    CPUArchState *env = cpu->env_ptr;
#endif
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint32_t flags;

    tb_lock();
    tb = tb_find_pc(retaddr);
    if (!tb) {
        cpu_abort(cpu, "cpu_io_recompile: could not find TB for pc=%p",
                  (void *)retaddr);
    }
    n = cpu->icount_decr.u16.low + tb->icount;
    cpu_restore_state_from_tb(cpu, tb, retaddr);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - cpu->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= (env->hflags & MIPS_HFLAG_B16 ? 2 : 4);
        cpu->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
        && n > 1) {
        env->pc -= 2;
        cpu->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK) {
        cpu_abort(cpu, "TB too big during recompile");
    }

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    if (tb->cflags & CF_NOCACHE) {
        if (tb->orig_tb) {
            /* Invalidate original TB if this TB was generated in
             * cpu_exec_nocache() */
            tb_phys_invalidate(tb->orig_tb, -1);
        }
        tb_free(tb);
    }
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(cpu, pc, cs_base, flags, cflags);

    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
     * the first in the TB) then we end up generating a whole new TB and
     * repeating the fault, which is horribly inefficient.
     * Better would be to execute just this insn uncached, or generate a
     * second new TB.
     *
     * cpu_loop_exit_noexc will longjmp back to cpu_exec where the
     * tb_lock gets reset.
     */
    cpu_loop_exit_noexc(cpu);
}
void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset(&cpu->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset(&cpu->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}
static void print_qht_statistics(FILE *f, fprintf_function cpu_fprintf,
                                 struct qht_stats hst)
{
    uint32_t hgram_opts;
    size_t hgram_bins;
    char *hgram;

    if (!hst.head_buckets) {
        return;
    }
    cpu_fprintf(f, "TB hash buckets     %zu/%zu (%0.2f%% head buckets used)\n",
                hst.used_head_buckets, hst.head_buckets,
                (double)hst.used_head_buckets / hst.head_buckets * 100);

    hgram_opts =  QDIST_PR_BORDER | QDIST_PR_LABELS;
    hgram_opts |= QDIST_PR_100X   | QDIST_PR_PERCENT;
    if (qdist_xmax(&hst.occupancy) - qdist_xmin(&hst.occupancy) == 1) {
        hgram_opts |= QDIST_PR_NODECIMAL;
    }
    hgram = qdist_pr(&hst.occupancy, 10, hgram_opts);
    cpu_fprintf(f, "TB hash occupancy   %0.2f%% avg chain occ. Histogram: %s\n",
                qdist_avg(&hst.occupancy) * 100, hgram);
    g_free(hgram);

    hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS;
    hgram_bins = qdist_xmax(&hst.chain) - qdist_xmin(&hst.chain);
    if (hgram_bins > 10) {
        hgram_bins = 10;
    } else {
        hgram_bins = 0;
        hgram_opts |= QDIST_PR_NODECIMAL | QDIST_PR_NOBINRANGE;
    }
    hgram = qdist_pr(&hst.chain, hgram_bins, hgram_opts);
    cpu_fprintf(f, "TB hash avg chain   %0.3f buckets. Histogram: %s\n",
                qdist_avg(&hst.chain), hgram);
    g_free(hgram);
}
void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;
    struct qht_stats hst;

    tb_lock();

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for (i = 0; i < tcg_ctx.tb_ctx.nb_tbs; i++) {
        tb = tcg_ctx.tb_ctx.tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size) {
            max_target_code_size = tb->size;
        }
        if (tb->page_addr[1] != -1) {
            cross_page++;
        }
        if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
            direct_jmp_count++;
            if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %td/%zd\n",
                tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer,
                tcg_ctx.code_gen_highwater - tcg_ctx.code_gen_buffer);
    cpu_fprintf(f, "TB count            %d\n", tcg_ctx.tb_ctx.nb_tbs);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                tcg_ctx.tb_ctx.nb_tbs ? target_code_size /
                        tcg_ctx.tb_ctx.nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %td bytes (expansion ratio: %0.1f)\n",
                tcg_ctx.tb_ctx.nb_tbs ? (tcg_ctx.code_gen_ptr -
                                         tcg_ctx.code_gen_buffer) /
                                        tcg_ctx.tb_ctx.nb_tbs : 0,
                target_code_size ? (double) (tcg_ctx.code_gen_ptr -
                                             tcg_ctx.code_gen_buffer) /
                                   target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n", cross_page,
            tcg_ctx.tb_ctx.nb_tbs ? (cross_page * 100) /
                                    tcg_ctx.tb_ctx.nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp_count * 100) /
                        tcg_ctx.tb_ctx.nb_tbs : 0,
                direct_jmp2_count,
                tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp2_count * 100) /
                        tcg_ctx.tb_ctx.nb_tbs : 0);

    qht_statistics_init(&tcg_ctx.tb_ctx.htable, &hst);
    print_qht_statistics(f, cpu_fprintf, hst);
    qht_statistics_destroy(&hst);

    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %u\n",
            atomic_read(&tcg_ctx.tb_ctx.tb_flush_count));
    cpu_fprintf(f, "TB invalidate count %d\n",
            tcg_ctx.tb_ctx.tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);

    tb_unlock();
}
void dump_opcount_info(FILE *f, fprintf_function cpu_fprintf)
{
    tcg_dump_op_count(f, cpu_fprintf);
}

#else /* CONFIG_USER_ONLY */

void cpu_interrupt(CPUState *cpu, int mask)
{
    g_assert(qemu_mutex_iothread_locked());
    cpu->interrupt_request |= mask;
    cpu->icount_decr.u16.high = -1;
}
/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */
struct walk_memory_regions_data {
    walk_memory_regions_fn fn;
    void *priv;
    target_ulong start;
    int prot;
};

static int walk_memory_regions_end(struct walk_memory_regions_data *data,
                                   target_ulong end, int new_prot)
{
    if (data->start != -1u) {
        int rc = data->fn(data->priv, data->start, end, data->prot);
        if (rc != 0) {
            return rc;
        }
    }

    data->start = (new_prot ? end : -1u);
    data->prot = new_prot;

    return 0;
}
static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 target_ulong base, int level, void **lp)
{
    target_ulong pa;
    int i, rc;

    if (*lp == NULL) {
        return walk_memory_regions_end(data, base, 0);
    }

    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            if (prot != data->prot) {
                rc = walk_memory_regions_end(data, pa, prot);
                if (rc != 0) {
                    return rc;
                }
            }
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            pa = base | ((target_ulong)i <<
                (TARGET_PAGE_BITS + V_L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
            if (rc != 0) {
                return rc;
            }
        }
    }

    return 0;
}
int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    struct walk_memory_regions_data data;
    uintptr_t i, l1_sz = v_l1_size;

    data.fn = fn;
    data.priv = priv;
    data.start = -1u;
    data.prot = 0;

    for (i = 0; i < l1_sz; i++) {
        target_ulong base = i << (v_l1_shift + TARGET_PAGE_BITS);
        int rc = walk_memory_regions_1(&data, base, v_l2_levels, l1_map + i);
        if (rc != 0) {
            return rc;
        }
    }

    return walk_memory_regions_end(&data, 0, 0);
}
static int dump_region(void *priv, target_ulong start,
    target_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    (void) fprintf(f, TARGET_FMT_lx"-"TARGET_FMT_lx
        " "TARGET_FMT_lx" %c%c%c\n",
        start, end, end - start,
        ((prot & PAGE_READ) ? 'r' : '-'),
        ((prot & PAGE_WRITE) ? 'w' : '-'),
        ((prot & PAGE_EXEC) ? 'x' : '-'));

    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    const int length = sizeof(target_ulong) * 2;
    (void) fprintf(f, "%-*s %-*s %-*s %s\n",
            length, "start", length, "end", length, "size", "prot");
    walk_memory_regions(f, dump_region);
}
int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        return 0;
    }
    return p->flags;
}
/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is positioned automatically depending
   on PAGE_WRITE.  The mmap_lock should already be held.  */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
    assert(start < end);
    assert_memory_lock();

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        flags |= PAGE_WRITE_ORG;
    }

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate
           the code inside.  */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0);
        }
        p->flags = flags;
    }
}
int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(start < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif

    if (len == 0) {
        return 0;
    }
    if (start + len - 1 < start) {
        /* We've wrapped around.  */
        return -1;
    }

    /* must do before we lose bits in the next step */
    end = TARGET_PAGE_ALIGN(start + len);
    start = start & TARGET_PAGE_MASK;

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p) {
            return -1;
        }
        if (!(p->flags & PAGE_VALID)) {
            return -1;
        }

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) {
            return -1;
        }
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG)) {
                return -1;
            }
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0)) {
                    return -1;
                }
            }
        }
    }
    return 0;
}
/* called from signal handler: invalidate the code and unprotect the
 * page. Return 0 if the fault was not handled, 1 if it was handled,
 * and 2 if it was handled but the caller must cause the TB to be
 * immediately exited. (We can only return 2 if the 'pc' argument is
 * nonzero.)
 */
int page_unprotect(target_ulong address, uintptr_t pc)
{
    unsigned int prot;
    bool current_tb_invalidated;
    PageDesc *p;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
    mmap_lock();

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        mmap_unlock();
        return 0;
    }

    /* if the page was really writable, then we change its
       protection back to writable */
    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
        host_start = address & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;

        prot = 0;
        current_tb_invalidated = false;
        for (addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
            p = page_find(addr >> TARGET_PAGE_BITS);
            p->flags |= PAGE_WRITE;
            prot |= p->flags;

            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            current_tb_invalidated |= tb_invalidate_phys_page(addr, pc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(addr);
#endif
        }
        mprotect((void *)g2h(host_start), qemu_host_page_size,
                 prot & PAGE_BITS);

        mmap_unlock();
        /* If current TB was invalidated return to main loop */
        return current_tb_invalidated ? 2 : 1;
    }
    mmap_unlock();
    return 0;
}
#endif /* CONFIG_USER_ONLY */