/*
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"

#include "qemu-common.h"
#define NO_CPU_IO_DEFS
#include "disas/disas.h"
#include "exec/exec-all.h"
#if defined(CONFIG_USER_ONLY)
#include "qemu.h"
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else
#include "exec/address-spaces.h"
#endif
#include "exec/cputlb.h"
#include "exec/tb-hash.h"
#include "translate-all.h"
#include "qemu/bitmap.h"
#include "qemu/error-report.h"
#include "qemu/timer.h"
#include "qemu/main-loop.h"
#include "sysemu/cpus.h"
/* #define DEBUG_TB_INVALIDATE */
/* #define DEBUG_TB_FLUSH */
/* make various TB consistency checks */
/* #define DEBUG_TB_CHECK */

#ifdef DEBUG_TB_INVALIDATE
#define DEBUG_TB_INVALIDATE_GATE 1
#else
#define DEBUG_TB_INVALIDATE_GATE 0
#endif

#ifdef DEBUG_TB_FLUSH
#define DEBUG_TB_FLUSH_GATE 1
#else
#define DEBUG_TB_FLUSH_GATE 0
#endif

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#ifdef DEBUG_TB_CHECK
#define DEBUG_TB_CHECK_GATE 1
#else
#define DEBUG_TB_CHECK_GATE 0
#endif
/* Access to the various translation structures needs to be serialised
 * via locks for consistency.  This is automatic for SoftMMU based system
 * emulation due to its single threaded nature.  In user-mode emulation
 * access to the memory related structures is protected with the
 * mmap_lock.
 */
#ifdef CONFIG_SOFTMMU
#define assert_memory_lock() tcg_debug_assert(have_tb_lock)
#else
#define assert_memory_lock() tcg_debug_assert(have_mmap_lock())
#endif

#define SMC_BITMAP_USE_THRESHOLD 10
typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
#ifdef CONFIG_SOFTMMU
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    unsigned long *code_bitmap;
#else
    unsigned long flags;
#endif
} PageDesc;
/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define V_L2_BITS 10
#define V_L2_SIZE (1 << V_L2_BITS)
/* Make sure all possible CPU event bits fit in tb->trace_vcpu_dstate */
QEMU_BUILD_BUG_ON(CPU_TRACE_DSTATE_MAX_EVENTS >
                  sizeof(((TranslationBlock *)0)->trace_vcpu_dstate)
                  * BITS_PER_BYTE);

/*
 * L1 Mapping properties
 */
static int v_l1_size;
static int v_l1_shift;
static int v_l2_levels;
/* The bottom level has pointers to PageDesc, and is indexed by
 * anything from 4 to (V_L2_BITS + 3) bits, depending on target page size.
 */
#define V_L1_MIN_BITS 4
#define V_L1_MAX_BITS (V_L2_BITS + 3)
#define V_L1_MAX_SIZE (1 << V_L1_MAX_BITS)

static void *l1_map[V_L1_MAX_SIZE];
/* code generation context */
TCGContext tcg_init_ctx;
__thread TCGContext *tcg_ctx;

/* translation block context */
static __thread int have_tb_lock;
static void page_table_config_init(void)
{
    uint32_t v_l1_bits;

    assert(TARGET_PAGE_BITS);
    /* The bits remaining after N lower levels of page tables.  */
    v_l1_bits = (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % V_L2_BITS;
    if (v_l1_bits < V_L1_MIN_BITS) {
        v_l1_bits += V_L2_BITS;
    }

    v_l1_size = 1 << v_l1_bits;
    v_l1_shift = L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - v_l1_bits;
    v_l2_levels = v_l1_shift / V_L2_BITS - 1;

    assert(v_l1_bits <= V_L1_MAX_BITS);
    assert(v_l1_shift % V_L2_BITS == 0);
    assert(v_l2_levels >= 0);
}
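
/*
 * Worked example (illustrative only; the real values depend on the host
 * and target configuration): with L1_MAP_ADDR_SPACE_BITS = 48,
 * TARGET_PAGE_BITS = 12 and V_L2_BITS = 10, the code above computes
 *   v_l1_bits   = (48 - 12) % 10 = 6   (>= V_L1_MIN_BITS, left as is)
 *   v_l1_size   = 1 << 6 = 64 l1_map entries
 *   v_l1_shift  = 48 - 12 - 6 = 30
 *   v_l2_levels = 30 / 10 - 1 = 2      (two intermediate L2 levels)
 */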
#define assert_tb_locked() tcg_debug_assert(have_tb_lock)
#define assert_tb_unlocked() tcg_debug_assert(!have_tb_lock)

void tb_lock(void)
{
    assert_tb_unlocked();
    qemu_mutex_lock(&tb_ctx.tb_lock);
    have_tb_lock++;
}

void tb_unlock(void)
{
    assert_tb_locked();
    have_tb_lock--;
    qemu_mutex_unlock(&tb_ctx.tb_lock);
}

void tb_lock_reset(void)
{
    if (have_tb_lock) {
        qemu_mutex_unlock(&tb_ctx.tb_lock);
        have_tb_lock = 0;
    }
}
static TranslationBlock *tb_find_pc(uintptr_t tc_ptr);

void cpu_gen_init(void)
{
    tcg_context_init(&tcg_init_ctx);
}
/* Encode VAL as a signed leb128 sequence at P.
   Return P incremented past the encoded value.  */
static uint8_t *encode_sleb128(uint8_t *p, target_long val)
{
    int more, byte;

    do {
        byte = val & 0x7f;
        val >>= 7;
        more = !((val == 0 && (byte & 0x40) == 0)
                 || (val == -1 && (byte & 0x40) != 0));
        if (more) {
            byte |= 0x80;
        }
        *p++ = byte;
    } while (more);

    return p;
}
/* Decode a signed leb128 sequence at *PP; increment *PP past the
   decoded value.  Return the decoded value.  */
static target_long decode_sleb128(uint8_t **pp)
{
    uint8_t *p = *pp;
    target_long val = 0;
    int byte, shift = 0;

    do {
        byte = *p++;
        val |= (target_ulong)(byte & 0x7f) << shift;
        shift += 7;
    } while (byte & 0x80);
    if (shift < TARGET_LONG_BITS && (byte & 0x40)) {
        val |= -(target_ulong)1 << shift;
    }

    *pp = p;
    return val;
}
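
/*
 * Worked example (illustrative): encode_sleb128() emits 300 as the two
 * bytes 0xac 0x02: the low 7 bits are 0x2c and val >>= 7 leaves 2, so
 * the continuation bit is set (0xac); the next 7 bits are 0x02, after
 * which val == 0 with bit 0x40 clear, so emission stops.
 * decode_sleb128() rebuilds 0x2c | (0x02 << 7) = 300; bit 0x40 of the
 * last byte is clear, so no sign extension happens.  A negative value
 * such as -2 encodes to the single byte 0x7e, whose set 0x40 bit
 * triggers the sign extension above.
 */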
/* Encode the data collected about the instructions while compiling TB.
   Place the data at BLOCK, and return the number of bytes consumed.

   The logical table consists of TARGET_INSN_START_WORDS target_ulong's,
   which come from the target's insn_start data, followed by a uintptr_t
   which comes from the host pc of the end of the code implementing the insn.

   Each line of the table is encoded as sleb128 deltas from the previous
   line.  The seed for the first line is { tb->pc, 0..., tb->tc.ptr }.
   That is, the first column is seeded with the guest pc, the last column
   with the host pc, and the middle columns with zeros.  */
static int encode_search(TranslationBlock *tb, uint8_t *block)
{
    uint8_t *highwater = tcg_ctx->code_gen_highwater;
    uint8_t *p = block;
    int i, j, n;

    for (i = 0, n = tb->icount; i < n; ++i) {
        target_ulong prev;

        for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
            if (i == 0) {
                prev = (j == 0 ? tb->pc : 0);
            } else {
                prev = tcg_ctx->gen_insn_data[i - 1][j];
            }
            p = encode_sleb128(p, tcg_ctx->gen_insn_data[i][j] - prev);
        }
        prev = (i == 0 ? 0 : tcg_ctx->gen_insn_end_off[i - 1]);
        p = encode_sleb128(p, tcg_ctx->gen_insn_end_off[i] - prev);

        /* Test for (pending) buffer overflow.  The assumption is that any
           one row beginning below the high water mark cannot overrun
           the buffer completely.  Thus we can test for overflow after
           encoding a row without having to check during encoding.  */
        if (unlikely(p > highwater)) {
            return -1;
        }
    }

    return p - block;
}
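
/*
 * Illustrative row encoding (assuming TARGET_INSN_START_WORDS == 1):
 * for a two-insn TB at pc 0x1000 with insn_start data {0x1000, 0x1004}
 * and host end offsets {0x30, 0x48}, the rows written are
 *   row 0: sleb128(0x1000 - 0x1000) = 0, sleb128(0x30 - 0)    = 0x30
 *   row 1: sleb128(0x1004 - 0x1000) = 4, sleb128(0x48 - 0x30) = 0x18
 * i.e. only small deltas reach the buffer, which keeps the search data
 * compact.
 */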
/* The cpu state corresponding to 'searched_pc' is restored.
 * Called with tb_lock held.
 */
static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
                                     uintptr_t searched_pc)
{
    target_ulong data[TARGET_INSN_START_WORDS] = { tb->pc };
    uintptr_t host_pc = (uintptr_t)tb->tc.ptr;
    CPUArchState *env = cpu->env_ptr;
    uint8_t *p = tb->tc.ptr + tb->tc.size;
    int i, j, num_insns = tb->icount;
#ifdef CONFIG_PROFILER
    TCGProfile *prof = &tcg_ctx->prof;
    int64_t ti = profile_getclock();
#endif

    searched_pc -= GETPC_ADJ;

    if (searched_pc < host_pc) {
        return -1;
    }

    /* Reconstruct the stored insn data while looking for the point at
       which the end of the insn exceeds the searched_pc.  */
    for (i = 0; i < num_insns; ++i) {
        for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
            data[j] += decode_sleb128(&p);
        }
        host_pc += decode_sleb128(&p);
        if (host_pc > searched_pc) {
            goto found;
        }
    }
    return -1;

 found:
    if (tb->cflags & CF_USE_ICOUNT) {
        assert(use_icount);
        /* Reset the cycle counter to the start of the block.  */
        cpu->icount_decr.u16.low += num_insns;
        /* Clear the IO flag.  */
        cpu->can_do_io = 0;
    }
    cpu->icount_decr.u16.low -= i;
    restore_state_to_opc(env, tb, data);

#ifdef CONFIG_PROFILER
    atomic_set(&prof->restore_time,
               prof->restore_time + profile_getclock() - ti);
    atomic_set(&prof->restore_count, prof->restore_count + 1);
#endif
    return 0;
}
bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc)
{
    TranslationBlock *tb;
    bool r = false;
    uintptr_t check_offset;

    /* The host_pc has to be in the region of current code buffer. If
     * it is not we will not be able to resolve it here. The two cases
     * where host_pc will not be correct are:
     *
     *  - fault during translation (instruction fetch)
     *  - fault from helper (not using GETPC() macro)
     *
     * Either way we need to return early to avoid blowing up on a
     * recursive tb_lock() as we can't resolve it here.
     *
     * We are using unsigned arithmetic so if host_pc <
     * tcg_init_ctx.code_gen_buffer check_offset will wrap to way
     * above the code_gen_buffer_size
     */
    check_offset = host_pc - (uintptr_t) tcg_init_ctx.code_gen_buffer;

    if (check_offset < tcg_init_ctx.code_gen_buffer_size) {
        tb_lock();
        tb = tb_find_pc(host_pc);
        if (tb) {
            cpu_restore_state_from_tb(cpu, tb, host_pc);
            if (tb->cflags & CF_NOCACHE) {
                /* one-shot translation, invalidate it immediately */
                tb_phys_invalidate(tb, -1);
                tb_remove(tb);
            }
            r = true;
        }
        tb_unlock();
    }

    return r;
}
static void page_init(void)
{
    page_size_init();
    page_table_config_init();

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}
/* Called with tb_lock held for system emulation.
 * Called with mmap_lock held for user-mode emulation.
 */
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

    if (alloc) {
        assert_memory_lock();
    }

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> v_l1_shift) & (v_l1_size - 1));

    /* Level 2..N-1.  */
    for (i = v_l2_levels; i > 0; i--) {
        void **p = atomic_rcu_read(lp);

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            p = g_new0(void *, V_L2_SIZE);
            atomic_rcu_set(lp, p);
        }

        lp = p + ((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1));
    }

    pd = atomic_rcu_read(lp);
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        pd = g_new0(PageDesc, V_L2_SIZE);
        atomic_rcu_set(lp, pd);
    }

    return pd + (index & (V_L2_SIZE - 1));
}
static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}
#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode.  It will change when a dedicated libc is used.  */
/* ??? 64-bit hosts ought to have no problem mmaping data outside the
   region in which the guest needs to run.  Revisit this.  */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

/* Minimum size of the code gen buffer.  This number is randomly chosen,
   but not so small that we can't have a fair number of TB's live.  */
#define MIN_CODE_GEN_BUFFER_SIZE     (1024u * 1024)
/* Maximum size of the code gen buffer we'd like to use.  Unless otherwise
   indicated, this is constrained by the range of direct branches on the
   host cpu, as used by the TCG implementation of goto_tb.  */
#if defined(__x86_64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__sparc__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__powerpc64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__powerpc__)
# define MAX_CODE_GEN_BUFFER_SIZE  (32u * 1024 * 1024)
#elif defined(__aarch64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__s390x__)
  /* We have a +- 4GB range on the branches; leave some slop.  */
# define MAX_CODE_GEN_BUFFER_SIZE  (3ul * 1024 * 1024 * 1024)
#elif defined(__mips__)
  /* We have a 256MB branch region, but leave room to make sure the
     main executable is also within that region.  */
# define MAX_CODE_GEN_BUFFER_SIZE  (128ul * 1024 * 1024)
#else
# define MAX_CODE_GEN_BUFFER_SIZE  ((size_t)-1)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32u * 1024 * 1024)

#define DEFAULT_CODE_GEN_BUFFER_SIZE \
  (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
   ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)
static inline size_t size_code_gen_buffer(size_t tb_size)
{
    /* Size the buffer.  */
    if (tb_size == 0) {
#ifdef USE_STATIC_CODE_GEN_BUFFER
        tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* ??? Needs adjustments.  */
        /* ??? If we relax the requirement that CONFIG_USER_ONLY use the
           static buffer, we could size this on RESERVED_VA, on the text
           segment size of the executable, or continue to use the default.  */
        tb_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
        tb_size = MIN_CODE_GEN_BUFFER_SIZE;
    }
    if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
        tb_size = MAX_CODE_GEN_BUFFER_SIZE;
    }
    return tb_size;
}
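
/*
 * Illustrative clamping (exact limits depend on the host): a 512 KiB
 * request is raised to the 1 MiB MIN_CODE_GEN_BUFFER_SIZE, while on
 * x86_64 a 4 GiB request is cut down to the 2 GiB
 * MAX_CODE_GEN_BUFFER_SIZE imposed by the +-2 GiB reach of direct
 * branches.
 */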
/* In order to use J and JAL within the code_gen_buffer, we require
   that the buffer not cross a 256MB boundary.  */
static inline bool cross_256mb(void *addr, size_t size)
{
    return ((uintptr_t)addr ^ ((uintptr_t)addr + size)) & ~0x0ffffffful;
}
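
/*
 * Example: cross_256mb((void *)0x0ff00000, 0x200000) is true, since
 * 0x0ff00000 ^ 0x10100000 = 0x1fe00000 keeps bits above the low 28
 * after masking with ~0x0ffffffful, i.e. the buffer would start and
 * end in different 256MB regions.
 */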
/* We weren't able to allocate a buffer without crossing that boundary,
   so make do with the larger portion of the buffer that doesn't cross.
   Returns the new base of the buffer, and adjusts code_gen_buffer_size.  */
static inline void *split_cross_256mb(void *buf1, size_t size1)
{
    void *buf2 = (void *)(((uintptr_t)buf1 + size1) & ~0x0ffffffful);
    size_t size2 = buf1 + size1 - buf2;

    size1 = buf2 - buf1;
    if (size1 < size2) {
        size1 = size2;
        buf1 = buf2;
    }

    tcg_ctx->code_gen_buffer_size = size1;
    return buf1;
}
#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
    __attribute__((aligned(CODE_GEN_ALIGN)));

static inline void *alloc_code_gen_buffer(void)
{
    void *buf = static_code_gen_buffer;
    void *end = static_code_gen_buffer + sizeof(static_code_gen_buffer);
    size_t size;

    /* page-align the beginning and end of the buffer */
    buf = QEMU_ALIGN_PTR_UP(buf, qemu_real_host_page_size);
    end = QEMU_ALIGN_PTR_DOWN(end, qemu_real_host_page_size);

    size = end - buf;

    /* Honor a command-line option limiting the size of the buffer.  */
    if (size > tcg_ctx->code_gen_buffer_size) {
        size = QEMU_ALIGN_DOWN(tcg_ctx->code_gen_buffer_size,
                               qemu_real_host_page_size);
    }
    tcg_ctx->code_gen_buffer_size = size;

#ifdef __mips__
    if (cross_256mb(buf, size)) {
        buf = split_cross_256mb(buf, size);
        size = tcg_ctx->code_gen_buffer_size;
    }
#endif

    if (qemu_mprotect_rwx(buf, size)) {
        abort();
    }
    qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);

    return buf;
}
#elif defined(_WIN32)
static inline void *alloc_code_gen_buffer(void)
{
    size_t size = tcg_ctx->code_gen_buffer_size;
    void *buf;

    buf = VirtualAlloc(NULL, size, MEM_RESERVE | MEM_COMMIT,
                       PAGE_EXECUTE_READWRITE);
    return buf;
}
#else
static inline void *alloc_code_gen_buffer(void)
{
    int prot = PROT_WRITE | PROT_READ | PROT_EXEC;
    int flags = MAP_PRIVATE | MAP_ANONYMOUS;
    uintptr_t start = 0;
    size_t size = tcg_ctx->code_gen_buffer_size;
    void *buf;

    /* Constrain the position of the buffer based on the host cpu.
       Note that these addresses are chosen in concert with the
       addresses assigned in the relevant linker script file.  */
# if defined(__PIE__) || defined(__PIC__)
    /* Don't bother setting a preferred location if we're building
       a position-independent executable.  We're more likely to get
       an address near the main executable if we let the kernel
       choose the address.  */
# elif defined(__x86_64__) && defined(MAP_32BIT)
    /* Force the memory down into low memory with the executable.
       Leave the choice of exact location with the kernel.  */
    flags |= MAP_32BIT;
    /* Cannot expect to map more than 800MB in low memory.  */
    if (size > 800u * 1024 * 1024) {
        tcg_ctx->code_gen_buffer_size = size = 800u * 1024 * 1024;
    }
# elif defined(__sparc__)
    start = 0x40000000ul;
# elif defined(__s390x__)
    start = 0x90000000ul;
# elif defined(__mips__)
#  if _MIPS_SIM == _ABI64
    start = 0x128000000ul;
#  else
    start = 0x08000000ul;
#  endif
# endif

    buf = mmap((void *)start, size, prot, flags, -1, 0);
    if (buf == MAP_FAILED) {
        return NULL;
    }

#ifdef __mips__
    if (cross_256mb(buf, size)) {
        /* Try again, with the original still mapped, to avoid re-acquiring
           that 256mb crossing.  This time don't specify an address.  */
        size_t size2;
        void *buf2 = mmap(NULL, size, prot, flags, -1, 0);
        switch ((int)(buf2 != MAP_FAILED)) {
        case 1:
            if (!cross_256mb(buf2, size)) {
                /* Success!  Use the new buffer.  */
                munmap(buf, size);
                break;
            }
            /* Failure.  Work with what we had.  */
            munmap(buf2, size);
            /* fallthru */
        default:
            /* Split the original buffer.  Free the smaller half.  */
            buf2 = split_cross_256mb(buf, size);
            size2 = tcg_ctx->code_gen_buffer_size;
            if (buf == buf2) {
                munmap(buf + size2, size - size2);
            } else {
                munmap(buf, size - size2);
            }
            size = size2;
            break;
        }
        buf = buf2;
    }
#endif

    /* Request large pages for the buffer.  */
    qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);

    return buf;
}
#endif /* USE_STATIC_CODE_GEN_BUFFER, WIN32, POSIX */
/* compare a pointer @ptr and a tb_tc @s */
static int ptr_cmp_tb_tc(const void *ptr, const struct tb_tc *s)
{
    if (ptr >= s->ptr + s->size) {
        return 1;
    } else if (ptr < s->ptr) {
        return -1;
    }
    return 0;
}
static gint tb_tc_cmp(gconstpointer ap, gconstpointer bp)
{
    const struct tb_tc *a = ap;
    const struct tb_tc *b = bp;

    /*
     * When both sizes are set, we know this isn't a lookup.
     * This is the most likely case: every TB must be inserted; lookups
     * are a lot less frequent.
     */
    if (likely(a->size && b->size)) {
        if (a->ptr > b->ptr) {
            return 1;
        } else if (a->ptr < b->ptr) {
            return -1;
        }
        /* a->ptr == b->ptr should happen only on deletions */
        g_assert(a->size == b->size);
        return 0;
    }
    /*
     * All lookups have their .size field set to 0.
     * From the glib sources we see that @ap is always the lookup key. However
     * the docs provide no guarantee, so we just mark this case as likely.
     */
    if (likely(a->size == 0)) {
        return ptr_cmp_tb_tc(a->ptr, b);
    }
    return ptr_cmp_tb_tc(b->ptr, a);
}
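
/*
 * Concretely (illustrative addresses): a lookup key { .ptr = 0x7f2260,
 * .size = 0 } compares equal to the node { .ptr = 0x7f2200,
 * .size = 0x80 }, because 0x7f2200 <= 0x7f2260 < 0x7f2280; this is the
 * containment test tb_find_pc() relies on.
 */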
static inline void code_gen_alloc(size_t tb_size)
{
    tcg_ctx->code_gen_buffer_size = size_code_gen_buffer(tb_size);
    tcg_ctx->code_gen_buffer = alloc_code_gen_buffer();
    if (tcg_ctx->code_gen_buffer == NULL) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
        exit(1);
    }
    tb_ctx.tb_tree = g_tree_new(tb_tc_cmp);
    qemu_mutex_init(&tb_ctx.tb_lock);
}
static void tb_htable_init(void)
{
    unsigned int mode = QHT_MODE_AUTO_RESIZE;

    qht_init(&tb_ctx.htable, CODE_GEN_HTABLE_SIZE, mode);
}
/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size)
{
    tcg_allowed = true;
    cpu_gen_init();
    page_init();
    tb_htable_init();
    code_gen_alloc(tb_size);
#if defined(CONFIG_SOFTMMU)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(tcg_ctx);
#endif
}
/*
 * Allocate a new translation block. Flush the translation buffer if
 * too many translation blocks or too much generated code.
 *
 * Called with tb_lock held.
 */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    assert_tb_locked();

    tb = tcg_tb_alloc(tcg_ctx);
    if (unlikely(tb == NULL)) {
        return NULL;
    }
    return tb;
}
/* Called with tb_lock held.  */
void tb_remove(TranslationBlock *tb)
{
    assert_tb_locked();

    g_tree_remove(tb_ctx.tb_tree, &tb->tc);
}
static inline void invalidate_page_bitmap(PageDesc *p)
{
#ifdef CONFIG_SOFTMMU
    g_free(p->code_bitmap);
    p->code_bitmap = NULL;
    p->code_write_count = 0;
#endif
}
/* Set to NULL all the 'first_tb' fields in all PageDescs. */
static void page_flush_tb_1(int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            page_flush_tb_1(level - 1, pp + i);
        }
    }
}
static void page_flush_tb(void)
{
    int i, l1_sz = v_l1_size;

    for (i = 0; i < l1_sz; i++) {
        page_flush_tb_1(v_l2_levels, l1_map + i);
    }
}
static gboolean tb_host_size_iter(gpointer key, gpointer value, gpointer data)
{
    const TranslationBlock *tb = value;
    size_t *size = data;

    *size += tb->tc.size;
    return false;
}
/* flush all the translation blocks */
static void do_tb_flush(CPUState *cpu, run_on_cpu_data tb_flush_count)
{
    tb_lock();

    /* If it has already been done on request of another CPU,
     * just retry.
     */
    if (tb_ctx.tb_flush_count != tb_flush_count.host_int) {
        goto done;
    }

    if (DEBUG_TB_FLUSH_GATE) {
        size_t nb_tbs = g_tree_nnodes(tb_ctx.tb_tree);
        size_t host_size = 0;

        g_tree_foreach(tb_ctx.tb_tree, tb_host_size_iter, &host_size);
        printf("qemu: flush code_size=%zu nb_tbs=%zu avg_tb_size=%zu\n",
               tcg_code_size(), nb_tbs, nb_tbs > 0 ? host_size / nb_tbs : 0);
    }

    CPU_FOREACH(cpu) {
        cpu_tb_jmp_cache_clear(cpu);
    }

    /* Increment the refcount first so that destroy acts as a reset */
    g_tree_ref(tb_ctx.tb_tree);
    g_tree_destroy(tb_ctx.tb_tree);

    qht_reset_size(&tb_ctx.htable, CODE_GEN_HTABLE_SIZE);
    page_flush_tb();

    tcg_region_reset_all();
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    atomic_mb_set(&tb_ctx.tb_flush_count, tb_ctx.tb_flush_count + 1);

done:
    tb_unlock();
}
void tb_flush(CPUState *cpu)
{
    if (tcg_enabled()) {
        unsigned tb_flush_count = atomic_mb_read(&tb_ctx.tb_flush_count);
        async_safe_run_on_cpu(cpu, do_tb_flush,
                              RUN_ON_CPU_HOST_INT(tb_flush_count));
    }
}
/*
 * Formerly ifdef DEBUG_TB_CHECK. These debug functions are user-mode-only,
 * so in order to prevent bit rot we compile them unconditionally in user-mode,
 * and let the optimizer get rid of them by wrapping their user-only callers
 * with if (DEBUG_TB_CHECK_GATE).
 */
#ifdef CONFIG_USER_ONLY
static void
do_tb_invalidate_check(struct qht *ht, void *p, uint32_t hash, void *userp)
{
    TranslationBlock *tb = p;
    target_ulong addr = *(target_ulong *)userp;

    if (!(addr + TARGET_PAGE_SIZE <= tb->pc || addr >= tb->pc + tb->size)) {
        printf("ERROR invalidate: address=" TARGET_FMT_lx
               " PC=%08lx size=%04x\n", addr, (long)tb->pc, tb->size);
    }
}
/* verify that all the pages have correct rights for code
 *
 * Called with tb_lock held.
 */
static void tb_invalidate_check(target_ulong address)
{
    address &= TARGET_PAGE_MASK;
    qht_iter(&tb_ctx.htable, do_tb_invalidate_check, &address);
}
static void
do_tb_page_check(struct qht *ht, void *p, uint32_t hash, void *userp)
{
    TranslationBlock *tb = p;
    int flags1, flags2;

    flags1 = page_get_flags(tb->pc);
    flags2 = page_get_flags(tb->pc + tb->size - 1);
    if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
        printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
               (long)tb->pc, tb->size, flags1, flags2);
    }
}
/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    qht_iter(&tb_ctx.htable, do_tb_page_check, NULL);
}

#endif /* CONFIG_USER_ONLY */
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for (;;) {
        tb1 = *ptb;
        n1 = (uintptr_t)tb1 & 3;
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}
/* remove the TB from a list of TBs jumping to the n-th jump target of the TB */
static inline void tb_remove_from_jmp_list(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1;
    uintptr_t *ptb, ntb;
    unsigned int n1;

    ptb = &tb->jmp_list_next[n];
    if (*ptb) {
        /* find tb(n) in circular list */
        for (;;) {
            ntb = *ptb;
            n1 = ntb & 3;
            tb1 = (TranslationBlock *)(ntb & ~3);
            if (n1 == n && tb1 == tb) {
                break;
            }
            if (n1 == 2) {
                ptb = &tb1->jmp_list_first;
            } else {
                ptb = &tb1->jmp_list_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_list_next[n];

        tb->jmp_list_next[n] = (uintptr_t)NULL;
    }
}
/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    uintptr_t addr = (uintptr_t)(tb->tc.ptr + tb->jmp_reset_offset[n]);
    tb_set_jmp_target(tb, n, addr);
}
/* remove any jumps to the TB */
static inline void tb_jmp_unlink(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    uintptr_t *ptb, ntb;
    unsigned int n1;

    ptb = &tb->jmp_list_first;
    for (;;) {
        ntb = *ptb;
        n1 = ntb & 3;
        tb1 = (TranslationBlock *)(ntb & ~3);
        if (n1 == 2) {
            break;
        }
        tb_reset_jump(tb1, n1);
        *ptb = tb1->jmp_list_next[n1];
        tb1->jmp_list_next[n1] = (uintptr_t)NULL;
    }
}
/* invalidate one TB
 *
 * Called with tb_lock held.
 */
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *cpu;
    PageDesc *p;
    uint32_t h;
    tb_page_addr_t phys_pc;

    assert_tb_locked();

    atomic_set(&tb->cflags, tb->cflags | CF_INVALID);

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb->cflags & CF_HASH_MASK,
                     tb->trace_vcpu_dstate);
    if (!qht_remove(&tb_ctx.htable, tb, h)) {
        return;
    }

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    /* remove the TB from the per-CPU jump caches */
    h = tb_jmp_cache_hash_func(tb->pc);
    CPU_FOREACH(cpu) {
        if (atomic_read(&cpu->tb_jmp_cache[h]) == tb) {
            atomic_set(&cpu->tb_jmp_cache[h], NULL);
        }
    }

    /* suppress this TB from the two jump lists */
    tb_remove_from_jmp_list(tb, 0);
    tb_remove_from_jmp_list(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb_jmp_unlink(tb);

    tb_ctx.tb_phys_invalidate_count++;
}
#ifdef CONFIG_SOFTMMU
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = bitmap_new(TARGET_PAGE_SIZE);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE) {
                tb_end = TARGET_PAGE_SIZE;
            }
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        bitmap_set(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
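
/*
 * For instance, a 0x20-byte TB starting at offset 0xff0 of its first
 * page sets bits [0xff0, 0x1000) in that page's bitmap (tb_end is
 * clamped to TARGET_PAGE_SIZE) and bits [0, 0x10) in the bitmap of the
 * second page it spills into.
 */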
/* add the tb in the target page and protect it if necessary
 *
 * Called with mmap_lock held for user-mode emulation.
 */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;
#endif

    assert_memory_lock();

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    page_already_protected = p->first_tb != NULL;
#endif
    p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);
    invalidate_page_bitmap(p);

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for (addr = page_addr; addr < page_addr + qemu_host_page_size;
             addr += TARGET_PAGE_SIZE) {

            p2 = page_find(addr >> TARGET_PAGE_BITS);
            if (!p2) {
                continue;
            }
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
        if (DEBUG_TB_INVALIDATE_GATE) {
            printf("protecting code page: 0x" TB_PAGE_ADDR_FMT "\n", page_addr);
        }
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);
    }
#endif
}
/* add a new TB and link it to the physical page tables. phys_page2 is
 * (-1) to indicate that only one page contains the TB.
 *
 * Called with mmap_lock held for user-mode emulation.
 */
static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
                         tb_page_addr_t phys_page2)
{
    uint32_t h;

    assert_memory_lock();

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1) {
        tb_alloc_page(tb, 1, phys_page2);
    } else {
        tb->page_addr[1] = -1;
    }

    /* add in the hash table */
    h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb->cflags & CF_HASH_MASK,
                     tb->trace_vcpu_dstate);
    qht_insert(&tb_ctx.htable, tb, h);

#ifdef CONFIG_USER_ONLY
    if (DEBUG_TB_CHECK_GATE) {
        tb_page_check();
    }
#endif
}
/* Called with mmap_lock held for user mode emulation.  */
TranslationBlock *tb_gen_code(CPUState *cpu,
                              target_ulong pc, target_ulong cs_base,
                              uint32_t flags, int cflags)
{
    CPUArchState *env = cpu->env_ptr;
    TranslationBlock *tb;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    tcg_insn_unit *gen_code_buf;
    int gen_code_size, search_size;
#ifdef CONFIG_PROFILER
    TCGProfile *prof = &tcg_ctx->prof;
    int64_t ti;
#endif
    assert_memory_lock();

    phys_pc = get_page_addr_code(env, pc);

 buffer_overflow:
    tb = tb_alloc(pc);
    if (unlikely(!tb)) {
        /* flush must be done */
        tb_flush(cpu);
        mmap_unlock();
        /* Make the execution loop process the flush as soon as possible.  */
        cpu->exception_index = EXCP_INTERRUPT;
        cpu_loop_exit(cpu);
    }

    gen_code_buf = tcg_ctx->code_gen_ptr;
    tb->tc.ptr = gen_code_buf;
    tb->pc = pc;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    tb->trace_vcpu_dstate = *cpu->trace_dstate;
    tcg_ctx->tb_cflags = cflags;

#ifdef CONFIG_PROFILER
    /* includes aborted translations because of exceptions */
    atomic_set(&prof->tb_count1, prof->tb_count1 + 1);
    ti = profile_getclock();
#endif

    tcg_func_start(tcg_ctx);

    tcg_ctx->cpu = ENV_GET_CPU(env);
    gen_intermediate_code(cpu, tb);
    tcg_ctx->cpu = NULL;

    trace_translate_block(tb, tb->pc, tb->tc.ptr);

    /* generate machine code */
    tb->jmp_reset_offset[0] = TB_JMP_RESET_OFFSET_INVALID;
    tb->jmp_reset_offset[1] = TB_JMP_RESET_OFFSET_INVALID;
    tcg_ctx->tb_jmp_reset_offset = tb->jmp_reset_offset;
    if (TCG_TARGET_HAS_direct_jump) {
        tcg_ctx->tb_jmp_insn_offset = tb->jmp_target_arg;
        tcg_ctx->tb_jmp_target_addr = NULL;
    } else {
        tcg_ctx->tb_jmp_insn_offset = NULL;
        tcg_ctx->tb_jmp_target_addr = tb->jmp_target_arg;
    }

#ifdef CONFIG_PROFILER
    atomic_set(&prof->tb_count, prof->tb_count + 1);
    atomic_set(&prof->interm_time, prof->interm_time + profile_getclock() - ti);
    ti = profile_getclock();
#endif

    /* ??? Overflow could be handled better here.  In particular, we
       don't need to re-do gen_intermediate_code, nor should we re-do
       the tcg optimization currently hidden inside tcg_gen_code.  All
       that should be required is to flush the TBs, allocate a new TB,
       re-initialize it per above, and re-do the actual code generation.  */
    gen_code_size = tcg_gen_code(tcg_ctx, tb);
    if (unlikely(gen_code_size < 0)) {
        goto buffer_overflow;
    }
    search_size = encode_search(tb, (void *)gen_code_buf + gen_code_size);
    if (unlikely(search_size < 0)) {
        goto buffer_overflow;
    }
    tb->tc.size = gen_code_size;

#ifdef CONFIG_PROFILER
    atomic_set(&prof->code_time, prof->code_time + profile_getclock() - ti);
    atomic_set(&prof->code_in_len, prof->code_in_len + tb->size);
    atomic_set(&prof->code_out_len, prof->code_out_len + gen_code_size);
    atomic_set(&prof->search_out_len, prof->search_out_len + search_size);
#endif

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM) &&
        qemu_log_in_addr_range(tb->pc)) {
        qemu_log_lock();
        qemu_log("OUT: [size=%d]\n", gen_code_size);
        if (tcg_ctx->data_gen_ptr) {
            size_t code_size = tcg_ctx->data_gen_ptr - tb->tc.ptr;
            size_t data_size = gen_code_size - code_size;
            size_t i;

            log_disas(tb->tc.ptr, code_size);

            for (i = 0; i < data_size; i += sizeof(tcg_target_ulong)) {
                if (sizeof(tcg_target_ulong) == 8) {
                    qemu_log("0x%08" PRIxPTR ":  .quad  0x%016" PRIx64 "\n",
                             (uintptr_t)tcg_ctx->data_gen_ptr + i,
                             *(uint64_t *)(tcg_ctx->data_gen_ptr + i));
                } else {
                    qemu_log("0x%08" PRIxPTR ":  .long  0x%08x\n",
                             (uintptr_t)tcg_ctx->data_gen_ptr + i,
                             *(uint32_t *)(tcg_ctx->data_gen_ptr + i));
                }
            }
        } else {
            log_disas(tb->tc.ptr, gen_code_size);
        }
        qemu_log("\n");
        qemu_log_flush();
        qemu_log_unlock();
    }
#endif

    atomic_set(&tcg_ctx->code_gen_ptr, (void *)
        ROUND_UP((uintptr_t)gen_code_buf + gen_code_size + search_size,
                 CODE_GEN_ALIGN));

    /* init jump list */
    assert(((uintptr_t)tb & 3) == 0);
    tb->jmp_list_first = (uintptr_t)tb | 2;
    tb->jmp_list_next[0] = (uintptr_t)NULL;
    tb->jmp_list_next[1] = (uintptr_t)NULL;

    /* init original jump addresses which have been set during tcg_gen_code() */
    if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
        tb_reset_jump(tb, 0);
    }
    if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
        tb_reset_jump(tb, 1);
    }

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    /* As long as consistency of the TB stuff is provided by tb_lock in user
     * mode and is implicit in single-threaded softmmu emulation, no explicit
     * memory barrier is required before tb_link_page() makes the TB visible
     * through the physical hash table and physical page list.
     */
    tb_link_page(tb, phys_pc, phys_page2);
    g_tree_insert(tb_ctx.tb_tree, &tb->tc, tb);
    return tb;
}
/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[. NOTE: start and end may refer to *different* physical pages.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 *
 * Called with mmap_lock held for user-mode emulation, grabs tb_lock
 * Called with tb_lock held for system-mode emulation
 */
static void tb_invalidate_phys_range_1(tb_page_addr_t start, tb_page_addr_t end)
{
    while (start < end) {
        tb_invalidate_phys_page_range(start, end, 0);
        start &= TARGET_PAGE_MASK;
        start += TARGET_PAGE_SIZE;
    }
}
#ifdef CONFIG_SOFTMMU
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
{
    assert_tb_locked();
    tb_invalidate_phys_range_1(start, end);
}
#else
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
{
    assert_memory_lock();
    tb_lock();
    tb_invalidate_phys_range_1(start, end);
    tb_unlock();
}
#endif
/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[. NOTE: start and end must refer to the *same* physical page.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 *
 * Called with tb_lock/mmap_lock held for user-mode emulation
 * Called with tb_lock held for system-mode emulation
 */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next;
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *cpu = current_cpu;
    CPUArchState *env = NULL;
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    uint32_t current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    assert_memory_lock();
    assert_tb_locked();

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }
#if defined(TARGET_HAS_PRECISE_SMC)
    if (cpu != NULL) {
        env = cpu->env_ptr;
    }
#endif

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all
       the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (cpu->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(cpu->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state_from_tb(cpu, current_tb, cpu->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            tb_phys_invalidate(tb, -1);
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        tlb_unprotect_code(start);
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* Force execution of one insn next time.  */
        cpu->cflags_next_tb = 1 | curr_cflags();
        cpu_loop_exit_noexc(cpu);
    }
#endif
}
#ifdef CONFIG_SOFTMMU
/* len must be <= 8 and start must be a multiple of len.
 * Called via softmmu_template.h when code areas are written to with
 * iothread mutex not held.
 */
void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;

#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                  cpu_single_env->mem_io_vaddr, len,
                  cpu_single_env->eip,
                  cpu_single_env->eip +
                  (intptr_t)cpu_single_env->segs[R_CS].base);
    }
#endif
    assert_memory_lock();

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD) {
        /* build code bitmap.  FIXME: writes should be protected by
         * tb_lock, reads by tb_lock or RCU.
         */
        build_page_bitmap(p);
    }
    if (p->code_bitmap) {
        unsigned int nr;
        unsigned long b;

        nr = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG - 1));
        if (b & ((1 << len) - 1)) {
            goto do_invalidate;
        }
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
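
/*
 * Illustrative bitmap test (assuming 64-bit longs): a 4-byte write at
 * page offset 0x40a computes nr = 0x40a, fetches bitmap word
 * 0x40a / 64 = 16, shifts it right by 0x40a % 64 = 10 and tests the low
 * (1 << 4) - 1 bits; any set bit means the write overlaps translated
 * code and the slow invalidation path runs.
 */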
#else
/* Called with mmap_lock held. If pc is not 0 then it indicates the
 * host PC of the faulting store instruction that caused this invalidate.
 * Returns true if the caller needs to abort execution of the current
 * TB (because it was modified by this store and the guest CPU has
 * precise-SMC semantics).
 */
static bool tb_invalidate_phys_page(tb_page_addr_t addr, uintptr_t pc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *cpu = current_cpu;
    CPUArchState *env = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    uint32_t current_flags = 0;
#endif

    assert_memory_lock();

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        return false;
    }

    tb_lock();
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
    if (cpu != NULL) {
        env = cpu->env_ptr;
    }
#endif
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state_from_tb(cpu, current_tb, pc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* Force execution of one insn next time.  */
        cpu->cflags_next_tb = 1 | curr_cflags();
        /* tb_lock will be reset after cpu_loop_exit_noexc longjmps
         * back into the cpu_exec loop. */
        return true;
    }
#endif
    tb_unlock();

    return false;
}
#endif
/*
 * Find the TB 'tb' such that
 * tb->tc.ptr <= tc_ptr < tb->tc.ptr + tb->tc.size
 * Return NULL if not found.
 */
static TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
{
    struct tb_tc s = { .ptr = (void *)tc_ptr };

    return g_tree_lookup(tb_ctx.tb_tree, &s);
}
#if !defined(CONFIG_USER_ONLY)
void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr)
{
    ram_addr_t ram_addr;
    MemoryRegion *mr;
    hwaddr l = 1;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr, &l, false);
    if (!(memory_region_is_ram(mr)
          || memory_region_is_romd(mr))) {
        rcu_read_unlock();
        return;
    }
    ram_addr = memory_region_get_ram_addr(mr) + addr;
    tb_lock();
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
    tb_unlock();
    rcu_read_unlock();
}
#endif /* !defined(CONFIG_USER_ONLY) */
/* Called with tb_lock held.  */
void tb_check_watchpoint(CPUState *cpu)
{
    TranslationBlock *tb;

    tb = tb_find_pc(cpu->mem_io_pc);
    if (tb) {
        /* We can use retranslation to find the PC.  */
        cpu_restore_state_from_tb(cpu, tb, cpu->mem_io_pc);
        tb_phys_invalidate(tb, -1);
    } else {
        /* The exception probably happened in a helper.  The CPU state should
           have been saved before calling it. Fetch the PC from there.  */
        CPUArchState *env = cpu->env_ptr;
        target_ulong pc, cs_base;
        tb_page_addr_t addr;
        uint32_t flags;

        cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
        addr = get_page_addr_code(env, pc);
        tb_invalidate_phys_range(addr, addr + 1);
    }
}
#ifndef CONFIG_USER_ONLY
/* in deterministic execution mode, instructions doing device I/Os
 * must be at the end of the TB.
 *
 * Called by softmmu_template.h, with iothread mutex not held.
 */
void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
{
#if defined(TARGET_MIPS) || defined(TARGET_SH4)
    CPUArchState *env = cpu->env_ptr;
#endif
    TranslationBlock *tb;
    uint32_t n;

    tb_lock();
    tb = tb_find_pc(retaddr);
    if (!tb) {
        cpu_abort(cpu, "cpu_io_recompile: could not find TB for pc=%p",
                  (void *)retaddr);
    }
    n = cpu->icount_decr.u16.low + tb->icount;
    cpu_restore_state_from_tb(cpu, tb, retaddr);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - cpu->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= (env->hflags & MIPS_HFLAG_B16 ? 2 : 4);
        cpu->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
        n = 2;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
        && n > 1) {
        env->pc -= 2;
        cpu->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
        n = 2;
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK) {
        cpu_abort(cpu, "TB too big during recompile");
    }

    /* Adjust the execution state of the next TB.  */
    cpu->cflags_next_tb = curr_cflags() | CF_LAST_IO | n;

    if (tb->cflags & CF_NOCACHE) {
        if (tb->orig_tb) {
            /* Invalidate original TB if this TB was generated in
             * cpu_exec_nocache() */
            tb_phys_invalidate(tb->orig_tb, -1);
        }
        tb_remove(tb);
    }

    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
     * the first in the TB) then we end up generating a whole new TB and
     * repeating the fault, which is horribly inefficient.
     * Better would be to execute just this insn uncached, or generate a
     * second new TB.
     *
     * cpu_loop_exit_noexc will longjmp back to cpu_exec where the
     * tb_lock gets reset.
     */
    cpu_loop_exit_noexc(cpu);
}
static void tb_jmp_cache_clear_page(CPUState *cpu, target_ulong page_addr)
{
    unsigned int i, i0 = tb_jmp_cache_hash_page(page_addr);

    for (i = 0; i < TB_JMP_PAGE_SIZE; i++) {
        atomic_set(&cpu->tb_jmp_cache[i0 + i], NULL);
    }
}
void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr)
{
    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    tb_jmp_cache_clear_page(cpu, addr - TARGET_PAGE_SIZE);
    tb_jmp_cache_clear_page(cpu, addr);
}
static void print_qht_statistics(FILE *f, fprintf_function cpu_fprintf,
                                 struct qht_stats hst)
{
    uint32_t hgram_opts;
    size_t hgram_bins;
    char *hgram;

    if (!hst.head_buckets) {
        return;
    }
    cpu_fprintf(f, "TB hash buckets     %zu/%zu (%0.2f%% head buckets used)\n",
                hst.used_head_buckets, hst.head_buckets,
                (double)hst.used_head_buckets / hst.head_buckets * 100);

    hgram_opts =  QDIST_PR_BORDER | QDIST_PR_LABELS;
    hgram_opts |= QDIST_PR_100X   | QDIST_PR_PERCENT;
    if (qdist_xmax(&hst.occupancy) - qdist_xmin(&hst.occupancy) == 1) {
        hgram_opts |= QDIST_PR_NODECIMAL;
    }
    hgram = qdist_pr(&hst.occupancy, 10, hgram_opts);
    cpu_fprintf(f, "TB hash occupancy   %0.2f%% avg chain occ. Histogram: %s\n",
                qdist_avg(&hst.occupancy) * 100, hgram);
    g_free(hgram);

    hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS;
    hgram_bins = qdist_xmax(&hst.chain) - qdist_xmin(&hst.chain);
    if (hgram_bins > 10) {
        hgram_bins = 10;
    } else {
        hgram_bins = 0;
        hgram_opts |= QDIST_PR_NODECIMAL | QDIST_PR_NOBINRANGE;
    }
    hgram = qdist_pr(&hst.chain, hgram_bins, hgram_opts);
    cpu_fprintf(f, "TB hash avg chain   %0.3f buckets. Histogram: %s\n",
                qdist_avg(&hst.chain), hgram);
    g_free(hgram);
}
struct tb_tree_stats {
    size_t host_size;
    size_t target_size;
    size_t max_target_size;
    size_t direct_jmp_count;
    size_t direct_jmp2_count;
    size_t cross_page;
};
static gboolean tb_tree_stats_iter(gpointer key, gpointer value, gpointer data)
{
    const TranslationBlock *tb = value;
    struct tb_tree_stats *tst = data;

    tst->host_size += tb->tc.size;
    tst->target_size += tb->size;
    if (tb->size > tst->max_target_size) {
        tst->max_target_size = tb->size;
    }
    if (tb->page_addr[1] != -1) {
        tst->cross_page++;
    }
    if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
        tst->direct_jmp_count++;
        if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
            tst->direct_jmp2_count++;
        }
    }
    return false;
}
void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
    struct tb_tree_stats tst = {};
    struct qht_stats hst;
    size_t nb_tbs;

    tb_lock();

    nb_tbs = g_tree_nnodes(tb_ctx.tb_tree);
    g_tree_foreach(tb_ctx.tb_tree, tb_tree_stats_iter, &tst);
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    /*
     * Report total code size including the padding and TB structs;
     * otherwise users might think "-tb-size" is not honoured.
     * For avg host size we use the precise numbers from tb_tree_stats though.
     */
    cpu_fprintf(f, "gen code size       %zu/%zu\n",
                tcg_code_size(), tcg_code_capacity());
    cpu_fprintf(f, "TB count            %zu\n", nb_tbs);
    cpu_fprintf(f, "TB avg target size  %zu max=%zu bytes\n",
                nb_tbs ? tst.target_size / nb_tbs : 0,
                tst.max_target_size);
    cpu_fprintf(f, "TB avg host size    %zu bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? tst.host_size / nb_tbs : 0,
                tst.target_size ? (double)tst.host_size / tst.target_size : 0);
    cpu_fprintf(f, "cross page TB count %zu (%zu%%)\n", tst.cross_page,
                nb_tbs ? (tst.cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %zu (%zu%%) (2 jumps=%zu %zu%%)\n",
                tst.direct_jmp_count,
                nb_tbs ? (tst.direct_jmp_count * 100) / nb_tbs : 0,
                tst.direct_jmp2_count,
                nb_tbs ? (tst.direct_jmp2_count * 100) / nb_tbs : 0);

    qht_statistics_init(&tb_ctx.htable, &hst);
    print_qht_statistics(f, cpu_fprintf, hst);
    qht_statistics_destroy(&hst);

    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %u\n",
                atomic_read(&tb_ctx.tb_flush_count));
    cpu_fprintf(f, "TB invalidate count %d\n", tb_ctx.tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %zu\n", tlb_flush_count());
    tcg_dump_info(f, cpu_fprintf);

    tb_unlock();
}
void dump_opcount_info(FILE *f, fprintf_function cpu_fprintf)
{
    tcg_dump_op_count(f, cpu_fprintf);
}
#else /* CONFIG_USER_ONLY */

void cpu_interrupt(CPUState *cpu, int mask)
{
    g_assert(qemu_mutex_iothread_locked());
    cpu->interrupt_request |= mask;
    cpu->icount_decr.u16.high = -1;
}
/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */
struct walk_memory_regions_data {
    walk_memory_regions_fn fn;
    void *priv;
    target_ulong start;
    int prot;
};
static int walk_memory_regions_end(struct walk_memory_regions_data *data,
                                   target_ulong end, int new_prot)
{
    if (data->start != -1u) {
        int rc = data->fn(data->priv, data->start, end, data->prot);
        if (rc != 0) {
            return rc;
        }
    }

    data->start = (new_prot ? end : -1u);
    data->prot = new_prot;

    return 0;
}
static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 target_ulong base, int level, void **lp)
{
    target_ulong pa;
    int i, rc;

    if (*lp == NULL) {
        return walk_memory_regions_end(data, base, 0);
    }

    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            if (prot != data->prot) {
                rc = walk_memory_regions_end(data, pa, prot);
                if (rc != 0) {
                    return rc;
                }
            }
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            pa = base | ((target_ulong)i <<
                (TARGET_PAGE_BITS + V_L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
            if (rc != 0) {
                return rc;
            }
        }
    }

    return 0;
}
int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    struct walk_memory_regions_data data;
    uintptr_t i, l1_sz = v_l1_size;

    data.fn = fn;
    data.priv = priv;
    data.start = -1u;
    data.prot = 0;

    for (i = 0; i < l1_sz; i++) {
        target_ulong base = i << (v_l1_shift + TARGET_PAGE_BITS);
        int rc = walk_memory_regions_1(&data, base, v_l2_levels, l1_map + i);
        if (rc != 0) {
            return rc;
        }
    }

    return walk_memory_regions_end(&data, 0, 0);
}
static int dump_region(void *priv, target_ulong start,
    target_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    (void) fprintf(f, TARGET_FMT_lx"-"TARGET_FMT_lx
        " "TARGET_FMT_lx" %c%c%c\n",
        start, end, end - start,
        ((prot & PAGE_READ) ? 'r' : '-'),
        ((prot & PAGE_WRITE) ? 'w' : '-'),
        ((prot & PAGE_EXEC) ? 'x' : '-'));

    return 0;
}
/* dump memory mappings */
void page_dump(FILE *f)
{
    const int length = sizeof(target_ulong) * 2;
    (void) fprintf(f, "%-*s %-*s %-*s %s\n",
            length, "start", length, "end", length, "size", "prot");
    walk_memory_regions(f, dump_region);
}
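
/*
 * A line produced through dump_region() looks like (illustrative
 * values): "00400000-00401000 00001000 r-x".
 */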
int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        return 0;
    }
    return p->flags;
}
/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is positioned automatically depending
   on PAGE_WRITE.  The mmap_lock should already be held.  */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end <= ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
    assert(start < end);
    assert_memory_lock();

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        flags |= PAGE_WRITE_ORG;
    }

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate
           the code inside.  */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0);
        }
        p->flags = flags;
    }
}
int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(start < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif

    if (len == 0) {
        return 0;
    }
    if (start + len - 1 < start) {
        /* We've wrapped around.  */
        return -1;
    }

    /* must do before we lose bits in the next step */
    end = TARGET_PAGE_ALIGN(start + len);
    start = start & TARGET_PAGE_MASK;

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p) {
            return -1;
        }
        if (!(p->flags & PAGE_VALID)) {
            return -1;
        }

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) {
            return -1;
        }
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG)) {
                return -1;
            }
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0)) {
                    return -1;
                }
            }
        }
    }
    return 0;
}
/* called from signal handler: invalidate the code and unprotect the
 * page. Return 0 if the fault was not handled, 1 if it was handled,
 * and 2 if it was handled but the caller must cause the TB to be
 * immediately exited. (We can only return 2 if the 'pc' argument is
 * nonzero.)
 */
int page_unprotect(target_ulong address, uintptr_t pc)
{
    unsigned int prot;
    bool current_tb_invalidated;
    PageDesc *p;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
    mmap_lock();

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        mmap_unlock();
        return 0;
    }

    /* if the page was really writable, then we change its
       protection back to writable */
    if (p->flags & PAGE_WRITE_ORG) {
        current_tb_invalidated = false;
        if (p->flags & PAGE_WRITE) {
            /* If the page is actually marked WRITE then assume this is because
             * this thread raced with another one which got here first and
             * set the page to PAGE_WRITE and did the TB invalidate for us.
             */
#ifdef TARGET_HAS_PRECISE_SMC
            TranslationBlock *current_tb = tb_find_pc(pc);
            if (current_tb) {
                current_tb_invalidated = tb_cflags(current_tb) & CF_INVALID;
            }
#endif
        } else {
            host_start = address & qemu_host_page_mask;
            host_end = host_start + qemu_host_page_size;

            prot = 0;
            for (addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
                p = page_find(addr >> TARGET_PAGE_BITS);
                p->flags |= PAGE_WRITE;
                prot |= p->flags;

                /* and since the content will be modified, we must invalidate
                   the corresponding translated code. */
                current_tb_invalidated |= tb_invalidate_phys_page(addr, pc);
#ifdef CONFIG_USER_ONLY
                if (DEBUG_TB_CHECK_GATE) {
                    tb_invalidate_check(addr);
                }
#endif
            }
            mprotect((void *)g2h(host_start), qemu_host_page_size,
                     prot & PAGE_BITS);
        }
        mmap_unlock();
        /* If current TB was invalidated return to main loop */
        return current_tb_invalidated ? 2 : 1;
    }
    mmap_unlock();
    return 0;
}
#endif /* CONFIG_USER_ONLY */

/* This is a wrapper for common code that can not use CONFIG_SOFTMMU */
void tcg_flush_softmmu_tlb(CPUState *cs)
{
#ifdef CONFIG_SOFTMMU
    tlb_flush(cs);
#endif
}