/*
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu-common.h"

#define NO_CPU_IO_DEFS

#include "disas/disas.h"
#include "exec/exec-all.h"
#if defined(CONFIG_USER_ONLY)

#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <machine/profile.h>
#include "exec/ram_addr.h"

#include "exec/cputlb.h"
#include "exec/tb-hash.h"
#include "translate-all.h"
#include "qemu/bitmap.h"
#include "qemu/error-report.h"
#include "qemu/qemu-print.h"
#include "qemu/timer.h"
#include "qemu/main-loop.h"
#include "sysemu/cpus.h"
#include "sysemu/tcg.h"
/* #define DEBUG_TB_INVALIDATE */
/* #define DEBUG_TB_FLUSH */
/* make various TB consistency checks */
/* #define DEBUG_TB_CHECK */

#ifdef DEBUG_TB_INVALIDATE
#define DEBUG_TB_INVALIDATE_GATE 1
#else
#define DEBUG_TB_INVALIDATE_GATE 0
#endif

#ifdef DEBUG_TB_FLUSH
#define DEBUG_TB_FLUSH_GATE 1
#else
#define DEBUG_TB_FLUSH_GATE 0
#endif

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#ifdef DEBUG_TB_CHECK
#define DEBUG_TB_CHECK_GATE 1
#else
#define DEBUG_TB_CHECK_GATE 0
#endif
/* Accesses to the various translation structures need to be serialised
 * via locks for consistency.
 * In user-mode emulation, access to the memory-related structures is
 * protected with mmap_lock.
 * In !user-mode we use per-page locks.
 */
#ifdef CONFIG_SOFTMMU
#define assert_memory_lock()
#else
#define assert_memory_lock() tcg_debug_assert(have_mmap_lock())
#endif

#define SMC_BITMAP_USE_THRESHOLD 10
typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    uintptr_t first_tb;
#ifdef CONFIG_SOFTMMU
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned long *code_bitmap;
    unsigned int code_write_count;
#else
    unsigned long flags;
#endif
#ifndef CONFIG_USER_ONLY
    QemuSpin lock;
#endif
} PageDesc;
/**
 * struct page_entry - page descriptor entry
 * @pd:     pointer to the &struct PageDesc of the page this entry represents
 * @index:  page index of the page
 * @locked: whether the page is locked
 *
 * This struct helps us keep track of the locked state of a page, without
 * bloating &struct PageDesc.
 *
 * A page lock protects accesses to all fields of &struct PageDesc.
 *
 * See also: &struct page_collection.
 */
struct page_entry {
    PageDesc *pd;
    tb_page_addr_t index;
    bool locked;
};
/**
 * struct page_collection - tracks a set of pages (i.e. &struct page_entry's)
 * @tree:   Binary search tree (BST) of the pages, with key == page index
 * @max:    Pointer to the page in @tree with the highest page index
 *
 * To avoid deadlock we lock pages in ascending order of page index.
 * When operating on a set of pages, we need to keep track of them so that
 * we can lock them in order and also unlock them later. For this we collect
 * pages (i.e. &struct page_entry's) in a binary search @tree. Given that the
 * @tree implementation we use does not provide an O(1) operation to obtain the
 * highest-ranked element, we use @max to keep track of the inserted page
 * with the highest index. This is valuable because if a page is not in
 * the tree and its index is higher than @max's, then we can lock it
 * without breaking the locking order rule.
 *
 * Note on naming: 'struct page_set' would be shorter, but we already have a few
 * page_set_*() helpers, so page_collection is used instead to avoid confusion.
 *
 * See also: page_collection_lock().
 */
struct page_collection {
    GTree *tree;
    struct page_entry *max;
};
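/*
 * Illustrative scenario (editor's sketch, not part of the original source):
 * if the collection already holds pages {3, 7} (so @max points at index 7)
 * and we now need page 9, then 9 > 7 guarantees that locking it immediately
 * cannot invert the ascending-index order.  Needing page 5 instead would
 * risk an inversion, so it is only trylock'ed; on contention all locks in
 * the set are dropped and reacquired in index order (see page_trylock_add()
 * and the retry loop in page_collection_lock() below).
 */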
/* list iterators for lists of tagged pointers in TranslationBlock */
#define TB_FOR_EACH_TAGGED(head, tb, n, field)                          \
    for (n = (head) & 1, tb = (TranslationBlock *)((head) & ~1);       \
         tb; tb = (TranslationBlock *)tb->field[n], n = (uintptr_t)tb & 1, \
             tb = (TranslationBlock *)((uintptr_t)tb & ~1))

#define PAGE_FOR_EACH_TB(pagedesc, tb, n)                       \
    TB_FOR_EACH_TAGGED((pagedesc)->first_tb, tb, n, page_next)

#define TB_FOR_EACH_JMP(head_tb, tb, n)                                 \
    TB_FOR_EACH_TAGGED((head_tb)->jmp_list_head, tb, n, jmp_list_next)
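/*
 * Illustrative sketch (not part of the original file): the lists iterated
 * above store "tagged" pointers, i.e. a TranslationBlock pointer with the
 * index of the next link slot to follow (0 or 1) packed into the low bit,
 * which is always clear in an aligned pointer.  A minimal encode/decode
 * pair, with hypothetical helper names, would look like this:
 */
static inline uintptr_t tb_tagged_link_make(TranslationBlock *tb, int n)
{
    /* n selects which of the two per-TB link slots a reader follows next */
    return (uintptr_t)tb | n;
}

static inline TranslationBlock *tb_tagged_link_tb(uintptr_t link, int *n)
{
    *n = link & 1;                                      /* recover slot index */
    return (TranslationBlock *)(link & ~(uintptr_t)1);  /* strip the tag bit */
}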
/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define V_L2_BITS 10
#define V_L2_SIZE (1 << V_L2_BITS)

/* Make sure all possible CPU event bits fit in tb->trace_vcpu_dstate */
QEMU_BUILD_BUG_ON(CPU_TRACE_DSTATE_MAX_EVENTS >
                  sizeof_field(TranslationBlock, trace_vcpu_dstate)
                  * BITS_PER_BYTE);
/*
 * L1 Mapping properties
 */
static int v_l1_size;
static int v_l1_shift;
static int v_l2_levels;

/* The bottom level has pointers to PageDesc, and is indexed by
 * anything from 4 to (V_L2_BITS + 3) bits, depending on target page size.
 */
#define V_L1_MIN_BITS 4
#define V_L1_MAX_BITS (V_L2_BITS + 3)
#define V_L1_MAX_SIZE (1 << V_L1_MAX_BITS)

static void *l1_map[V_L1_MAX_SIZE];
/* code generation context */
TCGContext tcg_init_ctx;
__thread TCGContext *tcg_ctx;
static void page_table_config_init(void)
{
    uint32_t v_l1_bits;

    assert(TARGET_PAGE_BITS);
    /* The bits remaining after N lower levels of page tables.  */
    v_l1_bits = (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % V_L2_BITS;
    if (v_l1_bits < V_L1_MIN_BITS) {
        v_l1_bits += V_L2_BITS;
    }

    v_l1_size = 1 << v_l1_bits;
    v_l1_shift = L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - v_l1_bits;
    v_l2_levels = v_l1_shift / V_L2_BITS - 1;

    assert(v_l1_bits <= V_L1_MAX_BITS);
    assert(v_l1_shift % V_L2_BITS == 0);
    assert(v_l2_levels >= 0);
}
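/*
 * Worked example (editor's illustration, assuming L1_MAP_ADDR_SPACE_BITS == 48,
 * TARGET_PAGE_BITS == 12 and V_L2_BITS == 10): 36 bits remain above the page
 * offset; 36 % 10 == 6 L1 bits (already >= V_L1_MIN_BITS, so no adjustment),
 * giving v_l1_size == 64, v_l1_shift == 48 - 12 - 6 == 30 and
 * v_l2_levels == 30 / 10 - 1 == 2 intermediate levels between the L1 array
 * and the bottom-level PageDesc arrays.
 */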
void cpu_gen_init(void)
{
    tcg_context_init(&tcg_init_ctx);
}
/* Encode VAL as a signed leb128 sequence at P.
   Return P incremented past the encoded value.  */
static uint8_t *encode_sleb128(uint8_t *p, target_long val)
{
    int more, byte;

    do {
        byte = val & 0x7f;
        val >>= 7;
        more = !((val == 0 && (byte & 0x40) == 0)
                 || (val == -1 && (byte & 0x40) != 0));
        if (more) {
            byte |= 0x80;
        }
        *p++ = byte;
    } while (more);

    return p;
}
/* Decode a signed leb128 sequence at *PP; increment *PP past the
   decoded value.  Return the decoded value.  */
static target_long decode_sleb128(uint8_t **pp)
{
    uint8_t *p = *pp;
    target_long val = 0;
    int byte, shift = 0;

    do {
        byte = *p++;
        val |= (target_ulong)(byte & 0x7f) << shift;
        shift += 7;
    } while (byte & 0x80);
    if (shift < TARGET_LONG_BITS && (byte & 0x40)) {
        val |= -(target_ulong)1 << shift;
    }

    *pp = p;
    return val;
}
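/*
 * Worked example (editor's illustration): encoding the value 300 emits two
 * bytes.  The low 7 bits give 0x2c; more payload remains, so bit 7 is set
 * and 0xac is stored.  The remaining bits give 0x02 with the sign bit (0x40)
 * clear and nothing left, so 0x02 terminates the sequence.  decode_sleb128()
 * then reads 0xac 0x02 and reassembles (0x02 << 7) | 0x2c == 300.
 */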
/* Encode the data collected about the instructions while compiling TB.
   Place the data at BLOCK, and return the number of bytes consumed.

   The logical table consists of TARGET_INSN_START_WORDS target_ulong's,
   which come from the target's insn_start data, followed by a uintptr_t
   which comes from the host pc of the end of the code implementing the insn.

   Each line of the table is encoded as sleb128 deltas from the previous
   line.  The seed for the first line is { tb->pc, 0..., tb->tc.ptr }.
   That is, the first column is seeded with the guest pc, the last column
   with the host pc, and the middle columns with zeros.  */
static int encode_search(TranslationBlock *tb, uint8_t *block)
{
    uint8_t *highwater = tcg_ctx->code_gen_highwater;
    uint8_t *p = block;
    int i, j, n;

    for (i = 0, n = tb->icount; i < n; ++i) {
        target_ulong prev;

        for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
            if (i == 0) {
                prev = (j == 0 ? tb->pc : 0);
            } else {
                prev = tcg_ctx->gen_insn_data[i - 1][j];
            }
            p = encode_sleb128(p, tcg_ctx->gen_insn_data[i][j] - prev);
        }
        prev = (i == 0 ? 0 : tcg_ctx->gen_insn_end_off[i - 1]);
        p = encode_sleb128(p, tcg_ctx->gen_insn_end_off[i] - prev);

        /* Test for (pending) buffer overflow.  The assumption is that any
           one row beginning below the high water mark cannot overrun
           the buffer completely.  Thus we can test for overflow after
           encoding a row without having to check during encoding.  */
        if (unlikely(p > highwater)) {
            return -1;
        }
    }

    return p - block;
}
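/*
 * Worked example (editor's illustration): for a TB at guest pc 0x1000 whose
 * first two insns are 4 guest bytes each and occupy 12 and 16 bytes of host
 * code, the logical rows are {0x1000, 12} and {0x1004, 28} (guest pc, host
 * end offset).  With the first row seeded from { tb->pc, ..., 0 }, the stored
 * sleb128 deltas are {0, 12} and {4, 16}; cpu_restore_state_from_tb() below
 * replays these deltas until the accumulated host pc passes searched_pc.
 */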
328 /* The cpu state corresponding to 'searched_pc' is restored.
329 * When reset_icount is true, current TB will be interrupted and
330 * icount should be recalculated.
332 static int cpu_restore_state_from_tb(CPUState
*cpu
, TranslationBlock
*tb
,
333 uintptr_t searched_pc
, bool reset_icount
)
335 target_ulong data
[TARGET_INSN_START_WORDS
] = { tb
->pc
};
336 uintptr_t host_pc
= (uintptr_t)tb
->tc
.ptr
;
337 CPUArchState
*env
= cpu
->env_ptr
;
338 uint8_t *p
= tb
->tc
.ptr
+ tb
->tc
.size
;
339 int i
, j
, num_insns
= tb
->icount
;
340 #ifdef CONFIG_PROFILER
341 TCGProfile
*prof
= &tcg_ctx
->prof
;
342 int64_t ti
= profile_getclock();
345 searched_pc
-= GETPC_ADJ
;
347 if (searched_pc
< host_pc
) {
351 /* Reconstruct the stored insn data while looking for the point at
352 which the end of the insn exceeds the searched_pc. */
353 for (i
= 0; i
< num_insns
; ++i
) {
354 for (j
= 0; j
< TARGET_INSN_START_WORDS
; ++j
) {
355 data
[j
] += decode_sleb128(&p
);
357 host_pc
+= decode_sleb128(&p
);
358 if (host_pc
> searched_pc
) {
365 if (reset_icount
&& (tb_cflags(tb
) & CF_USE_ICOUNT
)) {
        /* Reset the cycle counter to the start of the block
           and shift it to the number of actually executed instructions */
369 cpu_neg(cpu
)->icount_decr
.u16
.low
+= num_insns
- i
;
371 restore_state_to_opc(env
, tb
, data
);
373 #ifdef CONFIG_PROFILER
374 atomic_set(&prof
->restore_time
,
375 prof
->restore_time
+ profile_getclock() - ti
);
376 atomic_set(&prof
->restore_count
, prof
->restore_count
+ 1);
381 bool cpu_restore_state(CPUState
*cpu
, uintptr_t host_pc
, bool will_exit
)
383 TranslationBlock
*tb
;
385 uintptr_t check_offset
;
    /* The host_pc has to be in the region of current code buffer. If
     * it is not we will not be able to resolve it here. The two cases
     * where host_pc will not be correct are:
     *
     *  - fault during translation (instruction fetch)
     *  - fault from helper (not using GETPC() macro)
     *
     * Either way we need to return early as we can't resolve it here.
     *
     * We are using unsigned arithmetic so if host_pc <
     * tcg_init_ctx.code_gen_buffer, check_offset will wrap to way
     * above the code_gen_buffer_size.
     */
400 check_offset
= host_pc
- (uintptr_t) tcg_init_ctx
.code_gen_buffer
;
402 if (check_offset
< tcg_init_ctx
.code_gen_buffer_size
) {
403 tb
= tcg_tb_lookup(host_pc
);
405 cpu_restore_state_from_tb(cpu
, tb
, host_pc
, will_exit
);
406 if (tb_cflags(tb
) & CF_NOCACHE
) {
407 /* one-shot translation, invalidate it immediately */
408 tb_phys_invalidate(tb
, -1);
418 static void page_init(void)
421 page_table_config_init();
423 #if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
425 #ifdef HAVE_KINFO_GETVMMAP
426 struct kinfo_vmentry
*freep
;
429 freep
= kinfo_getvmmap(getpid(), &cnt
);
432 for (i
= 0; i
< cnt
; i
++) {
433 unsigned long startaddr
, endaddr
;
435 startaddr
= freep
[i
].kve_start
;
436 endaddr
= freep
[i
].kve_end
;
437 if (h2g_valid(startaddr
)) {
438 startaddr
= h2g(startaddr
) & TARGET_PAGE_MASK
;
440 if (h2g_valid(endaddr
)) {
441 endaddr
= h2g(endaddr
);
442 page_set_flags(startaddr
, endaddr
, PAGE_RESERVED
);
444 #if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
446 page_set_flags(startaddr
, endaddr
, PAGE_RESERVED
);
457 last_brk
= (unsigned long)sbrk(0);
459 f
= fopen("/compat/linux/proc/self/maps", "r");
464 unsigned long startaddr
, endaddr
;
467 n
= fscanf(f
, "%lx-%lx %*[^\n]\n", &startaddr
, &endaddr
);
469 if (n
== 2 && h2g_valid(startaddr
)) {
470 startaddr
= h2g(startaddr
) & TARGET_PAGE_MASK
;
472 if (h2g_valid(endaddr
)) {
473 endaddr
= h2g(endaddr
);
477 page_set_flags(startaddr
, endaddr
, PAGE_RESERVED
);
489 static PageDesc
*page_find_alloc(tb_page_addr_t index
, int alloc
)
495 /* Level 1. Always allocated. */
496 lp
= l1_map
+ ((index
>> v_l1_shift
) & (v_l1_size
- 1));
499 for (i
= v_l2_levels
; i
> 0; i
--) {
500 void **p
= atomic_rcu_read(lp
);
508 p
= g_new0(void *, V_L2_SIZE
);
509 existing
= atomic_cmpxchg(lp
, NULL
, p
);
510 if (unlikely(existing
)) {
516 lp
= p
+ ((index
>> (i
* V_L2_BITS
)) & (V_L2_SIZE
- 1));
519 pd
= atomic_rcu_read(lp
);
526 pd
= g_new0(PageDesc
, V_L2_SIZE
);
527 #ifndef CONFIG_USER_ONLY
531 for (i
= 0; i
< V_L2_SIZE
; i
++) {
532 qemu_spin_init(&pd
[i
].lock
);
536 existing
= atomic_cmpxchg(lp
, NULL
, pd
);
537 if (unlikely(existing
)) {
543 return pd
+ (index
& (V_L2_SIZE
- 1));
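/*
 * Editor's note (illustrative, assuming v_l1_shift == 30 and V_L2_BITS == 10):
 * the L1 array above is indexed with the top 6 bits of the page index
 * (index >> v_l1_shift), each of the two intermediate levels allocated on
 * demand here consumes the next 10 bits, and the low 10 bits finally select
 * a PageDesc out of a V_L2_SIZE array.
 */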
546 static inline PageDesc
*page_find(tb_page_addr_t index
)
548 return page_find_alloc(index
, 0);
551 static void page_lock_pair(PageDesc
**ret_p1
, tb_page_addr_t phys1
,
552 PageDesc
**ret_p2
, tb_page_addr_t phys2
, int alloc
);
554 /* In user-mode page locks aren't used; mmap_lock is enough */
555 #ifdef CONFIG_USER_ONLY
557 #define assert_page_locked(pd) tcg_debug_assert(have_mmap_lock())
559 static inline void page_lock(PageDesc
*pd
)
562 static inline void page_unlock(PageDesc
*pd
)
565 static inline void page_lock_tb(const TranslationBlock
*tb
)
568 static inline void page_unlock_tb(const TranslationBlock
*tb
)
571 struct page_collection
*
572 page_collection_lock(tb_page_addr_t start
, tb_page_addr_t end
)
577 void page_collection_unlock(struct page_collection
*set
)
579 #else /* !CONFIG_USER_ONLY */
581 #ifdef CONFIG_DEBUG_TCG
583 static __thread GHashTable
*ht_pages_locked_debug
;
585 static void ht_pages_locked_debug_init(void)
587 if (ht_pages_locked_debug
) {
590 ht_pages_locked_debug
= g_hash_table_new(NULL
, NULL
);
593 static bool page_is_locked(const PageDesc
*pd
)
597 ht_pages_locked_debug_init();
598 found
= g_hash_table_lookup(ht_pages_locked_debug
, pd
);
602 static void page_lock__debug(PageDesc
*pd
)
604 ht_pages_locked_debug_init();
605 g_assert(!page_is_locked(pd
));
606 g_hash_table_insert(ht_pages_locked_debug
, pd
, pd
);
609 static void page_unlock__debug(const PageDesc
*pd
)
613 ht_pages_locked_debug_init();
614 g_assert(page_is_locked(pd
));
615 removed
= g_hash_table_remove(ht_pages_locked_debug
, pd
);
620 do_assert_page_locked(const PageDesc
*pd
, const char *file
, int line
)
622 if (unlikely(!page_is_locked(pd
))) {
623 error_report("assert_page_lock: PageDesc %p not locked @ %s:%d",
629 #define assert_page_locked(pd) do_assert_page_locked(pd, __FILE__, __LINE__)
631 void assert_no_pages_locked(void)
633 ht_pages_locked_debug_init();
634 g_assert(g_hash_table_size(ht_pages_locked_debug
) == 0);
637 #else /* !CONFIG_DEBUG_TCG */
639 #define assert_page_locked(pd)
641 static inline void page_lock__debug(const PageDesc
*pd
)
645 static inline void page_unlock__debug(const PageDesc
*pd
)
649 #endif /* CONFIG_DEBUG_TCG */
651 static inline void page_lock(PageDesc
*pd
)
653 page_lock__debug(pd
);
654 qemu_spin_lock(&pd
->lock
);
657 static inline void page_unlock(PageDesc
*pd
)
659 qemu_spin_unlock(&pd
->lock
);
660 page_unlock__debug(pd
);
663 /* lock the page(s) of a TB in the correct acquisition order */
664 static inline void page_lock_tb(const TranslationBlock
*tb
)
666 page_lock_pair(NULL
, tb
->page_addr
[0], NULL
, tb
->page_addr
[1], 0);
669 static inline void page_unlock_tb(const TranslationBlock
*tb
)
671 PageDesc
*p1
= page_find(tb
->page_addr
[0] >> TARGET_PAGE_BITS
);
674 if (unlikely(tb
->page_addr
[1] != -1)) {
675 PageDesc
*p2
= page_find(tb
->page_addr
[1] >> TARGET_PAGE_BITS
);
683 static inline struct page_entry
*
684 page_entry_new(PageDesc
*pd
, tb_page_addr_t index
)
686 struct page_entry
*pe
= g_malloc(sizeof(*pe
));
694 static void page_entry_destroy(gpointer p
)
696 struct page_entry
*pe
= p
;
698 g_assert(pe
->locked
);
703 /* returns false on success */
704 static bool page_entry_trylock(struct page_entry
*pe
)
708 busy
= qemu_spin_trylock(&pe
->pd
->lock
);
710 g_assert(!pe
->locked
);
712 page_lock__debug(pe
->pd
);
717 static void do_page_entry_lock(struct page_entry
*pe
)
720 g_assert(!pe
->locked
);
724 static gboolean
page_entry_lock(gpointer key
, gpointer value
, gpointer data
)
726 struct page_entry
*pe
= value
;
728 do_page_entry_lock(pe
);
732 static gboolean
page_entry_unlock(gpointer key
, gpointer value
, gpointer data
)
734 struct page_entry
*pe
= value
;
744 * Trylock a page, and if successful, add the page to a collection.
745 * Returns true ("busy") if the page could not be locked; false otherwise.
747 static bool page_trylock_add(struct page_collection
*set
, tb_page_addr_t addr
)
749 tb_page_addr_t index
= addr
>> TARGET_PAGE_BITS
;
750 struct page_entry
*pe
;
753 pe
= g_tree_lookup(set
->tree
, &index
);
758 pd
= page_find(index
);
763 pe
= page_entry_new(pd
, index
);
764 g_tree_insert(set
->tree
, &pe
->index
, pe
);
767 * If this is either (1) the first insertion or (2) a page whose index
768 * is higher than any other so far, just lock the page and move on.
770 if (set
->max
== NULL
|| pe
->index
> set
->max
->index
) {
772 do_page_entry_lock(pe
);
776 * Try to acquire out-of-order lock; if busy, return busy so that we acquire
779 return page_entry_trylock(pe
);
782 static gint
tb_page_addr_cmp(gconstpointer ap
, gconstpointer bp
, gpointer udata
)
784 tb_page_addr_t a
= *(const tb_page_addr_t
*)ap
;
785 tb_page_addr_t b
= *(const tb_page_addr_t
*)bp
;
796 * Lock a range of pages ([@start,@end[) as well as the pages of all
798 * Locking order: acquire locks in ascending order of page index.
800 struct page_collection
*
801 page_collection_lock(tb_page_addr_t start
, tb_page_addr_t end
)
803 struct page_collection
*set
= g_malloc(sizeof(*set
));
804 tb_page_addr_t index
;
807 start
>>= TARGET_PAGE_BITS
;
808 end
>>= TARGET_PAGE_BITS
;
809 g_assert(start
<= end
);
811 set
->tree
= g_tree_new_full(tb_page_addr_cmp
, NULL
, NULL
,
814 assert_no_pages_locked();
817 g_tree_foreach(set
->tree
, page_entry_lock
, NULL
);
819 for (index
= start
; index
<= end
; index
++) {
820 TranslationBlock
*tb
;
823 pd
= page_find(index
);
827 if (page_trylock_add(set
, index
<< TARGET_PAGE_BITS
)) {
828 g_tree_foreach(set
->tree
, page_entry_unlock
, NULL
);
831 assert_page_locked(pd
);
832 PAGE_FOR_EACH_TB(pd
, tb
, n
) {
833 if (page_trylock_add(set
, tb
->page_addr
[0]) ||
834 (tb
->page_addr
[1] != -1 &&
835 page_trylock_add(set
, tb
->page_addr
[1]))) {
836 /* drop all locks, and reacquire in order */
837 g_tree_foreach(set
->tree
, page_entry_unlock
, NULL
);
845 void page_collection_unlock(struct page_collection
*set
)
847 /* entries are unlocked and freed via page_entry_destroy */
848 g_tree_destroy(set
->tree
);
852 #endif /* !CONFIG_USER_ONLY */
854 static void page_lock_pair(PageDesc
**ret_p1
, tb_page_addr_t phys1
,
855 PageDesc
**ret_p2
, tb_page_addr_t phys2
, int alloc
)
858 tb_page_addr_t page1
;
859 tb_page_addr_t page2
;
861 assert_memory_lock();
862 g_assert(phys1
!= -1);
864 page1
= phys1
>> TARGET_PAGE_BITS
;
865 page2
= phys2
>> TARGET_PAGE_BITS
;
867 p1
= page_find_alloc(page1
, alloc
);
871 if (likely(phys2
== -1)) {
874 } else if (page1
== page2
) {
881 p2
= page_find_alloc(page2
, alloc
);
#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode.  It will change when a dedicated libc is used.  */
/* ??? 64-bit hosts ought to have no problem mmaping data outside the
   region in which the guest needs to run.  Revisit this.  */
#define USE_STATIC_CODE_GEN_BUFFER
/* Minimum size of the code gen buffer.  This number is randomly chosen,
   but not so small that we can't have a fair number of TB's live.  */
#define MIN_CODE_GEN_BUFFER_SIZE     (1024u * 1024)

/* Maximum size of the code gen buffer we'd like to use.  Unless otherwise
   indicated, this is constrained by the range of direct branches on the
   host cpu, as used by the TCG implementation of goto_tb.  */
#if defined(__x86_64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__sparc__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__powerpc64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__powerpc__)
# define MAX_CODE_GEN_BUFFER_SIZE  (32u * 1024 * 1024)
#elif defined(__aarch64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__s390x__)
  /* We have a +- 4GB range on the branches; leave some slop.  */
# define MAX_CODE_GEN_BUFFER_SIZE  (3ul * 1024 * 1024 * 1024)
#elif defined(__mips__)
  /* We have a 256MB branch region, but leave room to make sure the
     main executable is also within that region.  */
# define MAX_CODE_GEN_BUFFER_SIZE  (128ul * 1024 * 1024)
#else
# define MAX_CODE_GEN_BUFFER_SIZE  ((size_t)-1)
#endif
#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32u * 1024 * 1024)

#define DEFAULT_CODE_GEN_BUFFER_SIZE \
  (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
   ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)
static inline size_t size_code_gen_buffer(size_t tb_size)
{
    /* Size the buffer.  */
    if (tb_size == 0) {
#ifdef USE_STATIC_CODE_GEN_BUFFER
        tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* ??? Needs adjustments.  */
        /* ??? If we relax the requirement that CONFIG_USER_ONLY use the
           static buffer, we could size this on RESERVED_VA, on the text
           segment size of the executable, or continue to use the default.  */
        tb_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
        tb_size = MIN_CODE_GEN_BUFFER_SIZE;
    }
    if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
        tb_size = MAX_CODE_GEN_BUFFER_SIZE;
    }
    return tb_size;
}
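/*
 * Worked example (editor's illustration): a zero tb_size request falls back
 * to DEFAULT_CODE_GEN_BUFFER_SIZE with the static buffer (or ram_size / 4
 * otherwise); an explicit 64KB request is raised to the 1MB
 * MIN_CODE_GEN_BUFFER_SIZE, and a 4GB request on an x86-64 host is capped at
 * the 2GB MAX_CODE_GEN_BUFFER_SIZE imposed by the direct-branch range.
 */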
/* In order to use J and JAL within the code_gen_buffer, we require
   that the buffer not cross a 256MB boundary.  */
static inline bool cross_256mb(void *addr, size_t size)
{
    return ((uintptr_t)addr ^ ((uintptr_t)addr + size)) & ~0x0ffffffful;
}
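/*
 * Worked example (editor's illustration): with addr == 0x0fff0000 and
 * size == 0x20000, addr + size == 0x10010000; XOR-ing the two gives
 * 0x1ffe0000, and masking with ~0x0ffffffful leaves 0x10000000 != 0,
 * i.e. the buffer would straddle two 256MB-aligned regions that a MIPS
 * J/JAL branch could not span.
 */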
/* We weren't able to allocate a buffer without crossing that boundary,
   so make do with the larger portion of the buffer that doesn't cross.
   Returns the new base of the buffer, and adjusts code_gen_buffer_size.  */
static inline void *split_cross_256mb(void *buf1, size_t size1)
{
    void *buf2 = (void *)(((uintptr_t)buf1 + size1) & ~0x0ffffffful);
    size_t size2 = buf1 + size1 - buf2;

    size1 = buf2 - buf1;
    if (size1 < size2) {
        size1 = size2;
        buf1 = buf2;
    }

    tcg_ctx->code_gen_buffer_size = size1;
    return buf1;
}
986 #ifdef USE_STATIC_CODE_GEN_BUFFER
987 static uint8_t static_code_gen_buffer
[DEFAULT_CODE_GEN_BUFFER_SIZE
]
988 __attribute__((aligned(CODE_GEN_ALIGN
)));
990 static inline void *alloc_code_gen_buffer(void)
992 void *buf
= static_code_gen_buffer
;
993 void *end
= static_code_gen_buffer
+ sizeof(static_code_gen_buffer
);
996 /* page-align the beginning and end of the buffer */
997 buf
= QEMU_ALIGN_PTR_UP(buf
, qemu_real_host_page_size
);
998 end
= QEMU_ALIGN_PTR_DOWN(end
, qemu_real_host_page_size
);
1002 /* Honor a command-line option limiting the size of the buffer. */
1003 if (size
> tcg_ctx
->code_gen_buffer_size
) {
1004 size
= QEMU_ALIGN_DOWN(tcg_ctx
->code_gen_buffer_size
,
1005 qemu_real_host_page_size
);
1007 tcg_ctx
->code_gen_buffer_size
= size
;
1010 if (cross_256mb(buf
, size
)) {
1011 buf
= split_cross_256mb(buf
, size
);
1012 size
= tcg_ctx
->code_gen_buffer_size
;
1016 if (qemu_mprotect_rwx(buf
, size
)) {
1019 qemu_madvise(buf
, size
, QEMU_MADV_HUGEPAGE
);
1023 #elif defined(_WIN32)
1024 static inline void *alloc_code_gen_buffer(void)
1026 size_t size
= tcg_ctx
->code_gen_buffer_size
;
1027 return VirtualAlloc(NULL
, size
, MEM_RESERVE
| MEM_COMMIT
,
1028 PAGE_EXECUTE_READWRITE
);
1031 static inline void *alloc_code_gen_buffer(void)
1033 int prot
= PROT_WRITE
| PROT_READ
| PROT_EXEC
;
1034 int flags
= MAP_PRIVATE
| MAP_ANONYMOUS
;
1035 uintptr_t start
= 0;
1036 size_t size
= tcg_ctx
->code_gen_buffer_size
;
1039 /* Constrain the position of the buffer based on the host cpu.
1040 Note that these addresses are chosen in concert with the
1041 addresses assigned in the relevant linker script file. */
1042 # if defined(__PIE__) || defined(__PIC__)
1043 /* Don't bother setting a preferred location if we're building
1044 a position-independent executable. We're more likely to get
1045 an address near the main executable if we let the kernel
1046 choose the address. */
1047 # elif defined(__x86_64__) && defined(MAP_32BIT)
1048 /* Force the memory down into low memory with the executable.
1049 Leave the choice of exact location with the kernel. */
1051 /* Cannot expect to map more than 800MB in low memory. */
1052 if (size
> 800u * 1024 * 1024) {
1053 tcg_ctx
->code_gen_buffer_size
= size
= 800u * 1024 * 1024;
1055 # elif defined(__sparc__)
1056 start
= 0x40000000ul
;
1057 # elif defined(__s390x__)
1058 start
= 0x90000000ul
;
1059 # elif defined(__mips__)
1060 # if _MIPS_SIM == _ABI64
1061 start
= 0x128000000ul
;
1063 start
= 0x08000000ul
;
1067 buf
= mmap((void *)start
, size
, prot
, flags
, -1, 0);
1068 if (buf
== MAP_FAILED
) {
1073 if (cross_256mb(buf
, size
)) {
1074 /* Try again, with the original still mapped, to avoid re-acquiring
1075 that 256mb crossing. This time don't specify an address. */
1077 void *buf2
= mmap(NULL
, size
, prot
, flags
, -1, 0);
1078 switch ((int)(buf2
!= MAP_FAILED
)) {
1080 if (!cross_256mb(buf2
, size
)) {
1081 /* Success! Use the new buffer. */
1085 /* Failure. Work with what we had. */
1089 /* Split the original buffer. Free the smaller half. */
1090 buf2
= split_cross_256mb(buf
, size
);
1091 size2
= tcg_ctx
->code_gen_buffer_size
;
1093 munmap(buf
+ size2
, size
- size2
);
1095 munmap(buf
, size
- size2
);
1104 /* Request large pages for the buffer. */
1105 qemu_madvise(buf
, size
, QEMU_MADV_HUGEPAGE
);
1109 #endif /* USE_STATIC_CODE_GEN_BUFFER, WIN32, POSIX */
1111 static inline void code_gen_alloc(size_t tb_size
)
1113 tcg_ctx
->code_gen_buffer_size
= size_code_gen_buffer(tb_size
);
1114 tcg_ctx
->code_gen_buffer
= alloc_code_gen_buffer();
1115 if (tcg_ctx
->code_gen_buffer
== NULL
) {
1116 fprintf(stderr
, "Could not allocate dynamic translator buffer\n");
1121 static bool tb_cmp(const void *ap
, const void *bp
)
1123 const TranslationBlock
*a
= ap
;
1124 const TranslationBlock
*b
= bp
;
1126 return a
->pc
== b
->pc
&&
1127 a
->cs_base
== b
->cs_base
&&
1128 a
->flags
== b
->flags
&&
1129 (tb_cflags(a
) & CF_HASH_MASK
) == (tb_cflags(b
) & CF_HASH_MASK
) &&
1130 a
->trace_vcpu_dstate
== b
->trace_vcpu_dstate
&&
1131 a
->page_addr
[0] == b
->page_addr
[0] &&
1132 a
->page_addr
[1] == b
->page_addr
[1];
1135 static void tb_htable_init(void)
1137 unsigned int mode
= QHT_MODE_AUTO_RESIZE
;
1139 qht_init(&tb_ctx
.htable
, tb_cmp
, CODE_GEN_HTABLE_SIZE
, mode
);
1142 /* Must be called before using the QEMU cpus. 'tb_size' is the size
1143 (in bytes) allocated to the translation buffer. Zero means default
1145 void tcg_exec_init(unsigned long tb_size
)
1151 code_gen_alloc(tb_size
);
1152 #if defined(CONFIG_SOFTMMU)
1153 /* There's no guest base to take into account, so go ahead and
1154 initialize the prologue now. */
1155 tcg_prologue_init(tcg_ctx
);
1160 * Allocate a new translation block. Flush the translation buffer if
1161 * too many translation blocks or too much generated code.
1163 static TranslationBlock
*tb_alloc(target_ulong pc
)
1165 TranslationBlock
*tb
;
1167 assert_memory_lock();
1169 tb
= tcg_tb_alloc(tcg_ctx
);
1170 if (unlikely(tb
== NULL
)) {
1176 /* call with @p->lock held */
1177 static inline void invalidate_page_bitmap(PageDesc
*p
)
1179 assert_page_locked(p
);
1180 #ifdef CONFIG_SOFTMMU
1181 g_free(p
->code_bitmap
);
1182 p
->code_bitmap
= NULL
;
1183 p
->code_write_count
= 0;
1187 /* Set to NULL all the 'first_tb' fields in all PageDescs. */
1188 static void page_flush_tb_1(int level
, void **lp
)
1198 for (i
= 0; i
< V_L2_SIZE
; ++i
) {
1200 pd
[i
].first_tb
= (uintptr_t)NULL
;
1201 invalidate_page_bitmap(pd
+ i
);
1202 page_unlock(&pd
[i
]);
1207 for (i
= 0; i
< V_L2_SIZE
; ++i
) {
1208 page_flush_tb_1(level
- 1, pp
+ i
);
1213 static void page_flush_tb(void)
1215 int i
, l1_sz
= v_l1_size
;
1217 for (i
= 0; i
< l1_sz
; i
++) {
1218 page_flush_tb_1(v_l2_levels
, l1_map
+ i
);
1222 static gboolean
tb_host_size_iter(gpointer key
, gpointer value
, gpointer data
)
1224 const TranslationBlock
*tb
= value
;
1225 size_t *size
= data
;
1227 *size
+= tb
->tc
.size
;
1231 /* flush all the translation blocks */
1232 static void do_tb_flush(CPUState
*cpu
, run_on_cpu_data tb_flush_count
)
    /* If it has already been done on request of another CPU,
1238 if (tb_ctx
.tb_flush_count
!= tb_flush_count
.host_int
) {
1242 if (DEBUG_TB_FLUSH_GATE
) {
1243 size_t nb_tbs
= tcg_nb_tbs();
1244 size_t host_size
= 0;
1246 tcg_tb_foreach(tb_host_size_iter
, &host_size
);
1247 printf("qemu: flush code_size=%zu nb_tbs=%zu avg_tb_size=%zu\n",
1248 tcg_code_size(), nb_tbs
, nb_tbs
> 0 ? host_size
/ nb_tbs
: 0);
1252 cpu_tb_jmp_cache_clear(cpu
);
1255 qht_reset_size(&tb_ctx
.htable
, CODE_GEN_HTABLE_SIZE
);
1258 tcg_region_reset_all();
1259 /* XXX: flush processor icache at this point if cache flush is
1261 atomic_mb_set(&tb_ctx
.tb_flush_count
, tb_ctx
.tb_flush_count
+ 1);
1267 void tb_flush(CPUState
*cpu
)
1269 if (tcg_enabled()) {
1270 unsigned tb_flush_count
= atomic_mb_read(&tb_ctx
.tb_flush_count
);
1272 if (cpu_in_exclusive_context(cpu
)) {
1273 do_tb_flush(cpu
, RUN_ON_CPU_HOST_INT(tb_flush_count
));
1275 async_safe_run_on_cpu(cpu
, do_tb_flush
,
1276 RUN_ON_CPU_HOST_INT(tb_flush_count
));
1282 * Formerly ifdef DEBUG_TB_CHECK. These debug functions are user-mode-only,
1283 * so in order to prevent bit rot we compile them unconditionally in user-mode,
1284 * and let the optimizer get rid of them by wrapping their user-only callers
1285 * with if (DEBUG_TB_CHECK_GATE).
1287 #ifdef CONFIG_USER_ONLY
1289 static void do_tb_invalidate_check(void *p
, uint32_t hash
, void *userp
)
1291 TranslationBlock
*tb
= p
;
1292 target_ulong addr
= *(target_ulong
*)userp
;
1294 if (!(addr
+ TARGET_PAGE_SIZE
<= tb
->pc
|| addr
>= tb
->pc
+ tb
->size
)) {
1295 printf("ERROR invalidate: address=" TARGET_FMT_lx
1296 " PC=%08lx size=%04x\n", addr
, (long)tb
->pc
, tb
->size
);
1300 /* verify that all the pages have correct rights for code
1302 * Called with mmap_lock held.
1304 static void tb_invalidate_check(target_ulong address
)
1306 address
&= TARGET_PAGE_MASK
;
1307 qht_iter(&tb_ctx
.htable
, do_tb_invalidate_check
, &address
);
1310 static void do_tb_page_check(void *p
, uint32_t hash
, void *userp
)
1312 TranslationBlock
*tb
= p
;
1315 flags1
= page_get_flags(tb
->pc
);
1316 flags2
= page_get_flags(tb
->pc
+ tb
->size
- 1);
1317 if ((flags1
& PAGE_WRITE
) || (flags2
& PAGE_WRITE
)) {
1318 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
1319 (long)tb
->pc
, tb
->size
, flags1
, flags2
);
1323 /* verify that all the pages have correct rights for code */
1324 static void tb_page_check(void)
1326 qht_iter(&tb_ctx
.htable
, do_tb_page_check
, NULL
);
1329 #endif /* CONFIG_USER_ONLY */
1332 * user-mode: call with mmap_lock held
1333 * !user-mode: call with @pd->lock held
1335 static inline void tb_page_remove(PageDesc
*pd
, TranslationBlock
*tb
)
1337 TranslationBlock
*tb1
;
1341 assert_page_locked(pd
);
1342 pprev
= &pd
->first_tb
;
1343 PAGE_FOR_EACH_TB(pd
, tb1
, n1
) {
1345 *pprev
= tb1
->page_next
[n1
];
1348 pprev
= &tb1
->page_next
[n1
];
1350 g_assert_not_reached();
1353 /* remove @orig from its @n_orig-th jump list */
1354 static inline void tb_remove_from_jmp_list(TranslationBlock
*orig
, int n_orig
)
1356 uintptr_t ptr
, ptr_locked
;
1357 TranslationBlock
*dest
;
1358 TranslationBlock
*tb
;
1362 /* mark the LSB of jmp_dest[] so that no further jumps can be inserted */
1363 ptr
= atomic_or_fetch(&orig
->jmp_dest
[n_orig
], 1);
1364 dest
= (TranslationBlock
*)(ptr
& ~1);
1369 qemu_spin_lock(&dest
->jmp_lock
);
1371 * While acquiring the lock, the jump might have been removed if the
1372 * destination TB was invalidated; check again.
1374 ptr_locked
= atomic_read(&orig
->jmp_dest
[n_orig
]);
1375 if (ptr_locked
!= ptr
) {
1376 qemu_spin_unlock(&dest
->jmp_lock
);
1378 * The only possibility is that the jump was unlinked via
1379 * tb_jump_unlink(dest). Seeing here another destination would be a bug,
1380 * because we set the LSB above.
1382 g_assert(ptr_locked
== 1 && dest
->cflags
& CF_INVALID
);
1386 * We first acquired the lock, and since the destination pointer matches,
1387 * we know for sure that @orig is in the jmp list.
1389 pprev
= &dest
->jmp_list_head
;
1390 TB_FOR_EACH_JMP(dest
, tb
, n
) {
1391 if (tb
== orig
&& n
== n_orig
) {
1392 *pprev
= tb
->jmp_list_next
[n
];
1393 /* no need to set orig->jmp_dest[n]; setting the LSB was enough */
1394 qemu_spin_unlock(&dest
->jmp_lock
);
1397 pprev
= &tb
->jmp_list_next
[n
];
1399 g_assert_not_reached();
1402 /* reset the jump entry 'n' of a TB so that it is not chained to
1404 static inline void tb_reset_jump(TranslationBlock
*tb
, int n
)
1406 uintptr_t addr
= (uintptr_t)(tb
->tc
.ptr
+ tb
->jmp_reset_offset
[n
]);
1407 tb_set_jmp_target(tb
, n
, addr
);
1410 /* remove any jumps to the TB */
1411 static inline void tb_jmp_unlink(TranslationBlock
*dest
)
1413 TranslationBlock
*tb
;
1416 qemu_spin_lock(&dest
->jmp_lock
);
1418 TB_FOR_EACH_JMP(dest
, tb
, n
) {
1419 tb_reset_jump(tb
, n
);
1420 atomic_and(&tb
->jmp_dest
[n
], (uintptr_t)NULL
| 1);
1421 /* No need to clear the list entry; setting the dest ptr is enough */
1423 dest
->jmp_list_head
= (uintptr_t)NULL
;
1425 qemu_spin_unlock(&dest
->jmp_lock
);
1429 * In user-mode, call with mmap_lock held.
1430 * In !user-mode, if @rm_from_page_list is set, call with the TB's pages'
1433 static void do_tb_phys_invalidate(TranslationBlock
*tb
, bool rm_from_page_list
)
1438 tb_page_addr_t phys_pc
;
1440 assert_memory_lock();
1442 /* make sure no further incoming jumps will be chained to this TB */
1443 qemu_spin_lock(&tb
->jmp_lock
);
1444 atomic_set(&tb
->cflags
, tb
->cflags
| CF_INVALID
);
1445 qemu_spin_unlock(&tb
->jmp_lock
);
1447 /* remove the TB from the hash list */
1448 phys_pc
= tb
->page_addr
[0] + (tb
->pc
& ~TARGET_PAGE_MASK
);
1449 h
= tb_hash_func(phys_pc
, tb
->pc
, tb
->flags
, tb_cflags(tb
) & CF_HASH_MASK
,
1450 tb
->trace_vcpu_dstate
);
1451 if (!(tb
->cflags
& CF_NOCACHE
) &&
1452 !qht_remove(&tb_ctx
.htable
, tb
, h
)) {
1456 /* remove the TB from the page list */
1457 if (rm_from_page_list
) {
1458 p
= page_find(tb
->page_addr
[0] >> TARGET_PAGE_BITS
);
1459 tb_page_remove(p
, tb
);
1460 invalidate_page_bitmap(p
);
1461 if (tb
->page_addr
[1] != -1) {
1462 p
= page_find(tb
->page_addr
[1] >> TARGET_PAGE_BITS
);
1463 tb_page_remove(p
, tb
);
1464 invalidate_page_bitmap(p
);
1468 /* remove the TB from the hash list */
1469 h
= tb_jmp_cache_hash_func(tb
->pc
);
1471 if (atomic_read(&cpu
->tb_jmp_cache
[h
]) == tb
) {
1472 atomic_set(&cpu
->tb_jmp_cache
[h
], NULL
);
1476 /* suppress this TB from the two jump lists */
1477 tb_remove_from_jmp_list(tb
, 0);
1478 tb_remove_from_jmp_list(tb
, 1);
1480 /* suppress any remaining jumps to this TB */
1483 atomic_set(&tcg_ctx
->tb_phys_invalidate_count
,
1484 tcg_ctx
->tb_phys_invalidate_count
+ 1);
1487 static void tb_phys_invalidate__locked(TranslationBlock
*tb
)
1489 do_tb_phys_invalidate(tb
, true);
1492 /* invalidate one TB
1494 * Called with mmap_lock held in user-mode.
1496 void tb_phys_invalidate(TranslationBlock
*tb
, tb_page_addr_t page_addr
)
1498 if (page_addr
== -1 && tb
->page_addr
[0] != -1) {
1500 do_tb_phys_invalidate(tb
, true);
1503 do_tb_phys_invalidate(tb
, false);
1507 #ifdef CONFIG_SOFTMMU
1508 /* call with @p->lock held */
1509 static void build_page_bitmap(PageDesc
*p
)
1511 int n
, tb_start
, tb_end
;
1512 TranslationBlock
*tb
;
1514 assert_page_locked(p
);
1515 p
->code_bitmap
= bitmap_new(TARGET_PAGE_SIZE
);
1517 PAGE_FOR_EACH_TB(p
, tb
, n
) {
1518 /* NOTE: this is subtle as a TB may span two physical pages */
1520 /* NOTE: tb_end may be after the end of the page, but
1521 it is not a problem */
1522 tb_start
= tb
->pc
& ~TARGET_PAGE_MASK
;
1523 tb_end
= tb_start
+ tb
->size
;
1524 if (tb_end
> TARGET_PAGE_SIZE
) {
1525 tb_end
= TARGET_PAGE_SIZE
;
1529 tb_end
= ((tb
->pc
+ tb
->size
) & ~TARGET_PAGE_MASK
);
1531 bitmap_set(p
->code_bitmap
, tb_start
, tb_end
- tb_start
);
1536 /* add the tb in the target page and protect it if necessary
1538 * Called with mmap_lock held for user-mode emulation.
1539 * Called with @p->lock held in !user-mode.
1541 static inline void tb_page_add(PageDesc
*p
, TranslationBlock
*tb
,
1542 unsigned int n
, tb_page_addr_t page_addr
)
1544 #ifndef CONFIG_USER_ONLY
1545 bool page_already_protected
;
1548 assert_page_locked(p
);
1550 tb
->page_addr
[n
] = page_addr
;
1551 tb
->page_next
[n
] = p
->first_tb
;
1552 #ifndef CONFIG_USER_ONLY
1553 page_already_protected
= p
->first_tb
!= (uintptr_t)NULL
;
1555 p
->first_tb
= (uintptr_t)tb
| n
;
1556 invalidate_page_bitmap(p
);
1558 #if defined(CONFIG_USER_ONLY)
1559 if (p
->flags
& PAGE_WRITE
) {
1564 /* force the host page as non writable (writes will have a
1565 page fault + mprotect overhead) */
1566 page_addr
&= qemu_host_page_mask
;
1568 for (addr
= page_addr
; addr
< page_addr
+ qemu_host_page_size
;
1569 addr
+= TARGET_PAGE_SIZE
) {
1571 p2
= page_find(addr
>> TARGET_PAGE_BITS
);
1576 p2
->flags
&= ~PAGE_WRITE
;
1578 mprotect(g2h(page_addr
), qemu_host_page_size
,
1579 (prot
& PAGE_BITS
) & ~PAGE_WRITE
);
1580 if (DEBUG_TB_INVALIDATE_GATE
) {
1581 printf("protecting code page: 0x" TB_PAGE_ADDR_FMT
"\n", page_addr
);
1585 /* if some code is already present, then the pages are already
1586 protected. So we handle the case where only the first TB is
1587 allocated in a physical page */
1588 if (!page_already_protected
) {
1589 tlb_protect_code(page_addr
);
1594 /* add a new TB and link it to the physical page tables. phys_page2 is
1595 * (-1) to indicate that only one page contains the TB.
1597 * Called with mmap_lock held for user-mode emulation.
1599 * Returns a pointer @tb, or a pointer to an existing TB that matches @tb.
1600 * Note that in !user-mode, another thread might have already added a TB
1601 * for the same block of guest code that @tb corresponds to. In that case,
1602 * the caller should discard the original @tb, and use instead the returned TB.
1604 static TranslationBlock
*
1605 tb_link_page(TranslationBlock
*tb
, tb_page_addr_t phys_pc
,
1606 tb_page_addr_t phys_page2
)
1609 PageDesc
*p2
= NULL
;
1611 assert_memory_lock();
1613 if (phys_pc
== -1) {
1615 * If the TB is not associated with a physical RAM page then
1616 * it must be a temporary one-insn TB, and we have nothing to do
1617 * except fill in the page_addr[] fields.
1619 assert(tb
->cflags
& CF_NOCACHE
);
1620 tb
->page_addr
[0] = tb
->page_addr
[1] = -1;
 * Add the TB to the page list, acquiring first the pages' locks.
1626 * We keep the locks held until after inserting the TB in the hash table,
1627 * so that if the insertion fails we know for sure that the TBs are still
1628 * in the page descriptors.
1629 * Note that inserting into the hash table first isn't an option, since
1630 * we can only insert TBs that are fully initialized.
1632 page_lock_pair(&p
, phys_pc
, &p2
, phys_page2
, 1);
1633 tb_page_add(p
, tb
, 0, phys_pc
& TARGET_PAGE_MASK
);
1635 tb_page_add(p2
, tb
, 1, phys_page2
);
1637 tb
->page_addr
[1] = -1;
1640 if (!(tb
->cflags
& CF_NOCACHE
)) {
1641 void *existing_tb
= NULL
;
1644 /* add in the hash table */
1645 h
= tb_hash_func(phys_pc
, tb
->pc
, tb
->flags
, tb
->cflags
& CF_HASH_MASK
,
1646 tb
->trace_vcpu_dstate
);
1647 qht_insert(&tb_ctx
.htable
, tb
, h
, &existing_tb
);
1649 /* remove TB from the page(s) if we couldn't insert it */
1650 if (unlikely(existing_tb
)) {
1651 tb_page_remove(p
, tb
);
1652 invalidate_page_bitmap(p
);
1654 tb_page_remove(p2
, tb
);
1655 invalidate_page_bitmap(p2
);
1661 if (p2
&& p2
!= p
) {
1666 #ifdef CONFIG_USER_ONLY
1667 if (DEBUG_TB_CHECK_GATE
) {
1674 /* Called with mmap_lock held for user mode emulation. */
1675 TranslationBlock
*tb_gen_code(CPUState
*cpu
,
1676 target_ulong pc
, target_ulong cs_base
,
1677 uint32_t flags
, int cflags
)
1679 CPUArchState
*env
= cpu
->env_ptr
;
1680 TranslationBlock
*tb
, *existing_tb
;
1681 tb_page_addr_t phys_pc
, phys_page2
;
1682 target_ulong virt_page2
;
1683 tcg_insn_unit
*gen_code_buf
;
1684 int gen_code_size
, search_size
, max_insns
;
1685 #ifdef CONFIG_PROFILER
1686 TCGProfile
*prof
= &tcg_ctx
->prof
;
1689 assert_memory_lock();
1691 phys_pc
= get_page_addr_code(env
, pc
);
1693 if (phys_pc
== -1) {
1694 /* Generate a temporary TB with 1 insn in it */
1695 cflags
&= ~CF_COUNT_MASK
;
1696 cflags
|= CF_NOCACHE
| 1;
1699 cflags
&= ~CF_CLUSTER_MASK
;
1700 cflags
|= cpu
->cluster_index
<< CF_CLUSTER_SHIFT
;
1702 max_insns
= cflags
& CF_COUNT_MASK
;
1703 if (max_insns
== 0) {
1704 max_insns
= CF_COUNT_MASK
;
1706 if (max_insns
> TCG_MAX_INSNS
) {
1707 max_insns
= TCG_MAX_INSNS
;
1709 if (cpu
->singlestep_enabled
|| singlestep
) {
1715 if (unlikely(!tb
)) {
1716 /* flush must be done */
1719 /* Make the execution loop process the flush as soon as possible. */
1720 cpu
->exception_index
= EXCP_INTERRUPT
;
1724 gen_code_buf
= tcg_ctx
->code_gen_ptr
;
1725 tb
->tc
.ptr
= gen_code_buf
;
1727 tb
->cs_base
= cs_base
;
1729 tb
->cflags
= cflags
;
1730 tb
->trace_vcpu_dstate
= *cpu
->trace_dstate
;
1731 tcg_ctx
->tb_cflags
= cflags
;
1734 #ifdef CONFIG_PROFILER
1735 /* includes aborted translations because of exceptions */
1736 atomic_set(&prof
->tb_count1
, prof
->tb_count1
+ 1);
1737 ti
= profile_getclock();
1740 tcg_func_start(tcg_ctx
);
1742 tcg_ctx
->cpu
= env_cpu(env
);
1743 gen_intermediate_code(cpu
, tb
, max_insns
);
1744 tcg_ctx
->cpu
= NULL
;
1746 trace_translate_block(tb
, tb
->pc
, tb
->tc
.ptr
);
1748 /* generate machine code */
1749 tb
->jmp_reset_offset
[0] = TB_JMP_RESET_OFFSET_INVALID
;
1750 tb
->jmp_reset_offset
[1] = TB_JMP_RESET_OFFSET_INVALID
;
1751 tcg_ctx
->tb_jmp_reset_offset
= tb
->jmp_reset_offset
;
1752 if (TCG_TARGET_HAS_direct_jump
) {
1753 tcg_ctx
->tb_jmp_insn_offset
= tb
->jmp_target_arg
;
1754 tcg_ctx
->tb_jmp_target_addr
= NULL
;
1756 tcg_ctx
->tb_jmp_insn_offset
= NULL
;
1757 tcg_ctx
->tb_jmp_target_addr
= tb
->jmp_target_arg
;
1760 #ifdef CONFIG_PROFILER
1761 atomic_set(&prof
->tb_count
, prof
->tb_count
+ 1);
1762 atomic_set(&prof
->interm_time
, prof
->interm_time
+ profile_getclock() - ti
);
1763 ti
= profile_getclock();
1766 gen_code_size
= tcg_gen_code(tcg_ctx
, tb
);
1767 if (unlikely(gen_code_size
< 0)) {
1768 switch (gen_code_size
) {
1771 * Overflow of code_gen_buffer, or the current slice of it.
1773 * TODO: We don't need to re-do gen_intermediate_code, nor
1774 * should we re-do the tcg optimization currently hidden
1775 * inside tcg_gen_code. All that should be required is to
1776 * flush the TBs, allocate a new TB, re-initialize it per
1777 * above, and re-do the actual code generation.
1779 goto buffer_overflow
;
1783 * The code generated for the TranslationBlock is too large.
1784 * The maximum size allowed by the unwind info is 64k.
1785 * There may be stricter constraints from relocations
1786 * in the tcg backend.
1788 * Try again with half as many insns as we attempted this time.
1789 * If a single insn overflows, there's a bug somewhere...
1791 max_insns
= tb
->icount
;
1792 assert(max_insns
> 1);
1797 g_assert_not_reached();
1800 search_size
= encode_search(tb
, (void *)gen_code_buf
+ gen_code_size
);
1801 if (unlikely(search_size
< 0)) {
1802 goto buffer_overflow
;
1804 tb
->tc
.size
= gen_code_size
;
1806 #ifdef CONFIG_PROFILER
1807 atomic_set(&prof
->code_time
, prof
->code_time
+ profile_getclock() - ti
);
1808 atomic_set(&prof
->code_in_len
, prof
->code_in_len
+ tb
->size
);
1809 atomic_set(&prof
->code_out_len
, prof
->code_out_len
+ gen_code_size
);
1810 atomic_set(&prof
->search_out_len
, prof
->search_out_len
+ search_size
);
1814 if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM
) &&
1815 qemu_log_in_addr_range(tb
->pc
)) {
1817 qemu_log("OUT: [size=%d]\n", gen_code_size
);
1818 if (tcg_ctx
->data_gen_ptr
) {
1819 size_t code_size
= tcg_ctx
->data_gen_ptr
- tb
->tc
.ptr
;
1820 size_t data_size
= gen_code_size
- code_size
;
1823 log_disas(tb
->tc
.ptr
, code_size
);
1825 for (i
= 0; i
< data_size
; i
+= sizeof(tcg_target_ulong
)) {
1826 if (sizeof(tcg_target_ulong
) == 8) {
1827 qemu_log("0x%08" PRIxPTR
": .quad 0x%016" PRIx64
"\n",
1828 (uintptr_t)tcg_ctx
->data_gen_ptr
+ i
,
1829 *(uint64_t *)(tcg_ctx
->data_gen_ptr
+ i
));
1831 qemu_log("0x%08" PRIxPTR
": .long 0x%08x\n",
1832 (uintptr_t)tcg_ctx
->data_gen_ptr
+ i
,
1833 *(uint32_t *)(tcg_ctx
->data_gen_ptr
+ i
));
1837 log_disas(tb
->tc
.ptr
, gen_code_size
);
1845 atomic_set(&tcg_ctx
->code_gen_ptr
, (void *)
1846 ROUND_UP((uintptr_t)gen_code_buf
+ gen_code_size
+ search_size
,
1849 /* init jump list */
1850 qemu_spin_init(&tb
->jmp_lock
);
1851 tb
->jmp_list_head
= (uintptr_t)NULL
;
1852 tb
->jmp_list_next
[0] = (uintptr_t)NULL
;
1853 tb
->jmp_list_next
[1] = (uintptr_t)NULL
;
1854 tb
->jmp_dest
[0] = (uintptr_t)NULL
;
1855 tb
->jmp_dest
[1] = (uintptr_t)NULL
;
1857 /* init original jump addresses which have been set during tcg_gen_code() */
1858 if (tb
->jmp_reset_offset
[0] != TB_JMP_RESET_OFFSET_INVALID
) {
1859 tb_reset_jump(tb
, 0);
1861 if (tb
->jmp_reset_offset
[1] != TB_JMP_RESET_OFFSET_INVALID
) {
1862 tb_reset_jump(tb
, 1);
1865 /* check next page if needed */
1866 virt_page2
= (pc
+ tb
->size
- 1) & TARGET_PAGE_MASK
;
1868 if ((pc
& TARGET_PAGE_MASK
) != virt_page2
) {
1869 phys_page2
= get_page_addr_code(env
, virt_page2
);
1872 * No explicit memory barrier is required -- tb_link_page() makes the
1873 * TB visible in a consistent state.
1875 existing_tb
= tb_link_page(tb
, phys_pc
, phys_page2
);
1876 /* if the TB already exists, discard what we just translated */
1877 if (unlikely(existing_tb
!= tb
)) {
1878 uintptr_t orig_aligned
= (uintptr_t)gen_code_buf
;
1880 orig_aligned
-= ROUND_UP(sizeof(*tb
), qemu_icache_linesize
);
1881 atomic_set(&tcg_ctx
->code_gen_ptr
, (void *)orig_aligned
);
1889 * @p must be non-NULL.
1890 * user-mode: call with mmap_lock held.
1891 * !user-mode: call with all @pages locked.
1894 tb_invalidate_phys_page_range__locked(struct page_collection
*pages
,
1895 PageDesc
*p
, tb_page_addr_t start
,
1899 TranslationBlock
*tb
;
1900 tb_page_addr_t tb_start
, tb_end
;
1902 #ifdef TARGET_HAS_PRECISE_SMC
1903 CPUState
*cpu
= current_cpu
;
1904 CPUArchState
*env
= NULL
;
1905 bool current_tb_not_found
= retaddr
!= 0;
1906 bool current_tb_modified
= false;
1907 TranslationBlock
*current_tb
= NULL
;
1908 target_ulong current_pc
= 0;
1909 target_ulong current_cs_base
= 0;
1910 uint32_t current_flags
= 0;
1911 #endif /* TARGET_HAS_PRECISE_SMC */
1913 assert_page_locked(p
);
1915 #if defined(TARGET_HAS_PRECISE_SMC)
1921 /* we remove all the TBs in the range [start, end[ */
1922 /* XXX: see if in some cases it could be faster to invalidate all
1924 PAGE_FOR_EACH_TB(p
, tb
, n
) {
1925 assert_page_locked(p
);
1926 /* NOTE: this is subtle as a TB may span two physical pages */
1928 /* NOTE: tb_end may be after the end of the page, but
1929 it is not a problem */
1930 tb_start
= tb
->page_addr
[0] + (tb
->pc
& ~TARGET_PAGE_MASK
);
1931 tb_end
= tb_start
+ tb
->size
;
1933 tb_start
= tb
->page_addr
[1];
1934 tb_end
= tb_start
+ ((tb
->pc
+ tb
->size
) & ~TARGET_PAGE_MASK
);
1936 if (!(tb_end
<= start
|| tb_start
>= end
)) {
1937 #ifdef TARGET_HAS_PRECISE_SMC
1938 if (current_tb_not_found
) {
1939 current_tb_not_found
= false;
1940 /* now we have a real cpu fault */
1941 current_tb
= tcg_tb_lookup(retaddr
);
1943 if (current_tb
== tb
&&
1944 (tb_cflags(current_tb
) & CF_COUNT_MASK
) != 1) {
1946 * If we are modifying the current TB, we must stop
1947 * its execution. We could be more precise by checking
1948 * that the modification is after the current PC, but it
1949 * would require a specialized function to partially
1950 * restore the CPU state.
1952 current_tb_modified
= true;
1953 cpu_restore_state_from_tb(cpu
, current_tb
, retaddr
, true);
1954 cpu_get_tb_cpu_state(env
, ¤t_pc
, ¤t_cs_base
,
1957 #endif /* TARGET_HAS_PRECISE_SMC */
1958 tb_phys_invalidate__locked(tb
);
1961 #if !defined(CONFIG_USER_ONLY)
1962 /* if no code remaining, no need to continue to use slow writes */
1964 invalidate_page_bitmap(p
);
1965 tlb_unprotect_code(start
);
1968 #ifdef TARGET_HAS_PRECISE_SMC
1969 if (current_tb_modified
) {
1970 page_collection_unlock(pages
);
1971 /* Force execution of one insn next time. */
1972 cpu
->cflags_next_tb
= 1 | curr_cflags();
1974 cpu_loop_exit_noexc(cpu
);
1980 * Invalidate all TBs which intersect with the target physical address range
1981 * [start;end[. NOTE: start and end must refer to the *same* physical page.
1982 * 'is_cpu_write_access' should be true if called from a real cpu write
1983 * access: the virtual CPU will exit the current TB if code is modified inside
1986 * Called with mmap_lock held for user-mode emulation
1988 void tb_invalidate_phys_page_range(tb_page_addr_t start
, tb_page_addr_t end
)
1990 struct page_collection
*pages
;
1993 assert_memory_lock();
1995 p
= page_find(start
>> TARGET_PAGE_BITS
);
1999 pages
= page_collection_lock(start
, end
);
2000 tb_invalidate_phys_page_range__locked(pages
, p
, start
, end
, 0);
2001 page_collection_unlock(pages
);
2005 * Invalidate all TBs which intersect with the target physical address range
2006 * [start;end[. NOTE: start and end may refer to *different* physical pages.
2007 * 'is_cpu_write_access' should be true if called from a real cpu write
2008 * access: the virtual CPU will exit the current TB if code is modified inside
2011 * Called with mmap_lock held for user-mode emulation.
2013 #ifdef CONFIG_SOFTMMU
2014 void tb_invalidate_phys_range(ram_addr_t start
, ram_addr_t end
)
2016 void tb_invalidate_phys_range(target_ulong start
, target_ulong end
)
2019 struct page_collection
*pages
;
2020 tb_page_addr_t next
;
2022 assert_memory_lock();
2024 pages
= page_collection_lock(start
, end
);
2025 for (next
= (start
& TARGET_PAGE_MASK
) + TARGET_PAGE_SIZE
;
2027 start
= next
, next
+= TARGET_PAGE_SIZE
) {
2028 PageDesc
*pd
= page_find(start
>> TARGET_PAGE_BITS
);
2029 tb_page_addr_t bound
= MIN(next
, end
);
2034 tb_invalidate_phys_page_range__locked(pages
, pd
, start
, bound
, 0);
2036 page_collection_unlock(pages
);
2039 #ifdef CONFIG_SOFTMMU
2040 /* len must be <= 8 and start must be a multiple of len.
2041 * Called via softmmu_template.h when code areas are written to with
2042 * iothread mutex not held.
2044 * Call with all @pages in the range [@start, @start + len[ locked.
2046 void tb_invalidate_phys_page_fast(struct page_collection
*pages
,
2047 tb_page_addr_t start
, int len
,
2052 assert_memory_lock();
2054 p
= page_find(start
>> TARGET_PAGE_BITS
);
2059 assert_page_locked(p
);
2060 if (!p
->code_bitmap
&&
2061 ++p
->code_write_count
>= SMC_BITMAP_USE_THRESHOLD
) {
2062 build_page_bitmap(p
);
2064 if (p
->code_bitmap
) {
2068 nr
= start
& ~TARGET_PAGE_MASK
;
2069 b
= p
->code_bitmap
[BIT_WORD(nr
)] >> (nr
& (BITS_PER_LONG
- 1));
2070 if (b
& ((1 << len
) - 1)) {
2075 tb_invalidate_phys_page_range__locked(pages
, p
, start
, start
+ len
,
2080 /* Called with mmap_lock held. If pc is not 0 then it indicates the
2081 * host PC of the faulting store instruction that caused this invalidate.
2082 * Returns true if the caller needs to abort execution of the current
2083 * TB (because it was modified by this store and the guest CPU has
2084 * precise-SMC semantics).
2086 static bool tb_invalidate_phys_page(tb_page_addr_t addr
, uintptr_t pc
)
2088 TranslationBlock
*tb
;
2091 #ifdef TARGET_HAS_PRECISE_SMC
2092 TranslationBlock
*current_tb
= NULL
;
2093 CPUState
*cpu
= current_cpu
;
2094 CPUArchState
*env
= NULL
;
2095 int current_tb_modified
= 0;
2096 target_ulong current_pc
= 0;
2097 target_ulong current_cs_base
= 0;
2098 uint32_t current_flags
= 0;
2101 assert_memory_lock();
2103 addr
&= TARGET_PAGE_MASK
;
2104 p
= page_find(addr
>> TARGET_PAGE_BITS
);
2109 #ifdef TARGET_HAS_PRECISE_SMC
2110 if (p
->first_tb
&& pc
!= 0) {
2111 current_tb
= tcg_tb_lookup(pc
);
2117 assert_page_locked(p
);
2118 PAGE_FOR_EACH_TB(p
, tb
, n
) {
2119 #ifdef TARGET_HAS_PRECISE_SMC
2120 if (current_tb
== tb
&&
2121 (tb_cflags(current_tb
) & CF_COUNT_MASK
) != 1) {
2122 /* If we are modifying the current TB, we must stop
2123 its execution. We could be more precise by checking
2124 that the modification is after the current PC, but it
2125 would require a specialized function to partially
2126 restore the CPU state */
2128 current_tb_modified
= 1;
2129 cpu_restore_state_from_tb(cpu
, current_tb
, pc
, true);
2130 cpu_get_tb_cpu_state(env
, ¤t_pc
, ¤t_cs_base
,
2133 #endif /* TARGET_HAS_PRECISE_SMC */
2134 tb_phys_invalidate(tb
, addr
);
2136 p
->first_tb
= (uintptr_t)NULL
;
2137 #ifdef TARGET_HAS_PRECISE_SMC
2138 if (current_tb_modified
) {
2139 /* Force execution of one insn next time. */
2140 cpu
->cflags_next_tb
= 1 | curr_cflags();
2149 /* user-mode: call with mmap_lock held */
2150 void tb_check_watchpoint(CPUState
*cpu
, uintptr_t retaddr
)
2152 TranslationBlock
*tb
;
2154 assert_memory_lock();
2156 tb
= tcg_tb_lookup(retaddr
);
2158 /* We can use retranslation to find the PC. */
2159 cpu_restore_state_from_tb(cpu
, tb
, retaddr
, true);
2160 tb_phys_invalidate(tb
, -1);
2162 /* The exception probably happened in a helper. The CPU state should
2163 have been saved before calling it. Fetch the PC from there. */
2164 CPUArchState
*env
= cpu
->env_ptr
;
2165 target_ulong pc
, cs_base
;
2166 tb_page_addr_t addr
;
2169 cpu_get_tb_cpu_state(env
, &pc
, &cs_base
, &flags
);
2170 addr
= get_page_addr_code(env
, pc
);
2172 tb_invalidate_phys_range(addr
, addr
+ 1);
2177 #ifndef CONFIG_USER_ONLY
2178 /* in deterministic execution mode, instructions doing device I/Os
2179 * must be at the end of the TB.
2181 * Called by softmmu_template.h, with iothread mutex not held.
2183 void cpu_io_recompile(CPUState
*cpu
, uintptr_t retaddr
)
2185 #if defined(TARGET_MIPS) || defined(TARGET_SH4)
2186 CPUArchState
*env
= cpu
->env_ptr
;
2188 TranslationBlock
*tb
;
2191 tb
= tcg_tb_lookup(retaddr
);
2193 cpu_abort(cpu
, "cpu_io_recompile: could not find TB for pc=%p",
2196 cpu_restore_state_from_tb(cpu
, tb
, retaddr
, true);
2198 /* On MIPS and SH, delay slot instructions can only be restarted if
2199 they were already the first instruction in the TB. If this is not
2200 the first instruction in a TB then re-execute the preceding
2203 #if defined(TARGET_MIPS)
2204 if ((env
->hflags
& MIPS_HFLAG_BMASK
) != 0
2205 && env
->active_tc
.PC
!= tb
->pc
) {
2206 env
->active_tc
.PC
-= (env
->hflags
& MIPS_HFLAG_B16
? 2 : 4);
2207 cpu_neg(cpu
)->icount_decr
.u16
.low
++;
2208 env
->hflags
&= ~MIPS_HFLAG_BMASK
;
2211 #elif defined(TARGET_SH4)
2212 if ((env
->flags
& ((DELAY_SLOT
| DELAY_SLOT_CONDITIONAL
))) != 0
2213 && env
->pc
!= tb
->pc
) {
2215 cpu_neg(cpu
)->icount_decr
.u16
.low
++;
2216 env
->flags
&= ~(DELAY_SLOT
| DELAY_SLOT_CONDITIONAL
);
2221 /* Generate a new TB executing the I/O insn. */
2222 cpu
->cflags_next_tb
= curr_cflags() | CF_LAST_IO
| n
;
2224 if (tb_cflags(tb
) & CF_NOCACHE
) {
2226 /* Invalidate original TB if this TB was generated in
2227 * cpu_exec_nocache() */
2228 tb_phys_invalidate(tb
->orig_tb
, -1);
2233 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
2234 * the first in the TB) then we end up generating a whole new TB and
2235 * repeating the fault, which is horribly inefficient.
2236 * Better would be to execute just this insn uncached, or generate a
2239 cpu_loop_exit_noexc(cpu
);
2242 static void tb_jmp_cache_clear_page(CPUState
*cpu
, target_ulong page_addr
)
2244 unsigned int i
, i0
= tb_jmp_cache_hash_page(page_addr
);
2246 for (i
= 0; i
< TB_JMP_PAGE_SIZE
; i
++) {
2247 atomic_set(&cpu
->tb_jmp_cache
[i0
+ i
], NULL
);
2251 void tb_flush_jmp_cache(CPUState
*cpu
, target_ulong addr
)
2253 /* Discard jump cache entries for any tb which might potentially
2254 overlap the flushed page. */
2255 tb_jmp_cache_clear_page(cpu
, addr
- TARGET_PAGE_SIZE
);
2256 tb_jmp_cache_clear_page(cpu
, addr
);
static void print_qht_statistics(struct qht_stats hst)
{
    uint32_t hgram_opts;
    size_t hgram_bins;
    char *hgram;

    if (!hst.head_buckets) {
        return;
    }
    qemu_printf("TB hash buckets     %zu/%zu (%0.2f%% head buckets used)\n",
                hst.used_head_buckets, hst.head_buckets,
                (double)hst.used_head_buckets / hst.head_buckets * 100);

    hgram_opts =  QDIST_PR_BORDER | QDIST_PR_LABELS;
    hgram_opts |= QDIST_PR_100X   | QDIST_PR_PERCENT;
    if (qdist_xmax(&hst.occupancy) - qdist_xmin(&hst.occupancy) == 1) {
        hgram_opts |= QDIST_PR_NODECIMAL;
    }
    hgram = qdist_pr(&hst.occupancy, 10, hgram_opts);
    qemu_printf("TB hash occupancy   %0.2f%% avg chain occ. Histogram: %s\n",
                qdist_avg(&hst.occupancy) * 100, hgram);
    g_free(hgram);

    hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS;
    hgram_bins = qdist_xmax(&hst.chain) - qdist_xmin(&hst.chain);
    if (hgram_bins > 10) {
        hgram_bins = 10;
    } else {
        hgram_bins = 0;
        hgram_opts |= QDIST_PR_NODECIMAL | QDIST_PR_NOBINRANGE;
    }
    hgram = qdist_pr(&hst.chain, hgram_bins, hgram_opts);
    qemu_printf("TB hash avg chain   %0.3f buckets. Histogram: %s\n",
                qdist_avg(&hst.chain), hgram);
    g_free(hgram);
}
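/*
 * Note on print_qht_statistics(): an empty hash table (head_buckets == 0,
 * e.g. before any TB has been translated) returns early to avoid dividing
 * by zero in the bucket-usage percentage.
 */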
struct tb_tree_stats {
    size_t nb_tbs;
    size_t host_size;
    size_t target_size;
    size_t max_target_size;
    size_t direct_jmp_count;
    size_t direct_jmp2_count;
    size_t cross_page;
};

static gboolean tb_tree_stats_iter(gpointer key, gpointer value, gpointer data)
{
    const TranslationBlock *tb = value;
    struct tb_tree_stats *tst = data;

    tst->nb_tbs++;
    tst->host_size += tb->tc.size;
    tst->target_size += tb->size;
    if (tb->size > tst->max_target_size) {
        tst->max_target_size = tb->size;
    }
    if (tb->page_addr[1] != -1) {
        tst->cross_page++;
    }
    if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
        tst->direct_jmp_count++;
        if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
            tst->direct_jmp2_count++;
        }
    }
    return false;
}
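/*
 * Note on tb_tree_stats_iter(): this runs over the tree of translated blocks
 * via tcg_tb_foreach() below; returning false keeps the traversal going so
 * every TB is accounted for.  A cross-page TB is one whose second page
 * address is valid (page_addr[1] != -1), and the "direct jump" counters
 * track TBs that emitted direct-chaining slots (jmp_reset_offset[]).
 */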
void dump_exec_info(void)
{
    struct tb_tree_stats tst = {};
    struct qht_stats hst;
    size_t nb_tbs, flush_full, flush_part, flush_elide;

    tcg_tb_foreach(tb_tree_stats_iter, &tst);
    nb_tbs = tst.nb_tbs;
    /* XXX: avoid using doubles ? */
    qemu_printf("Translation buffer state:\n");
    /*
     * Report total code size including the padding and TB structs;
     * otherwise users might think "-tb-size" is not honoured.
     * For avg host size we use the precise numbers from tb_tree_stats though.
     */
    qemu_printf("gen code size       %zu/%zu\n",
                tcg_code_size(), tcg_code_capacity());
    qemu_printf("TB count            %zu\n", nb_tbs);
    qemu_printf("TB avg target size  %zu max=%zu bytes\n",
                nb_tbs ? tst.target_size / nb_tbs : 0,
                tst.max_target_size);
    qemu_printf("TB avg host size    %zu bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? tst.host_size / nb_tbs : 0,
                tst.target_size ? (double)tst.host_size / tst.target_size : 0);
    qemu_printf("cross page TB count %zu (%zu%%)\n", tst.cross_page,
                nb_tbs ? (tst.cross_page * 100) / nb_tbs : 0);
    qemu_printf("direct jump count   %zu (%zu%%) (2 jumps=%zu %zu%%)\n",
                tst.direct_jmp_count,
                nb_tbs ? (tst.direct_jmp_count * 100) / nb_tbs : 0,
                tst.direct_jmp2_count,
                nb_tbs ? (tst.direct_jmp2_count * 100) / nb_tbs : 0);

    qht_statistics_init(&tb_ctx.htable, &hst);
    print_qht_statistics(hst);
    qht_statistics_destroy(&hst);

    qemu_printf("\nStatistics:\n");
    qemu_printf("TB flush count      %u\n",
                atomic_read(&tb_ctx.tb_flush_count));
    qemu_printf("TB invalidate count %zu\n",
                tcg_tb_phys_invalidate_count());

    tlb_flush_counts(&flush_full, &flush_part, &flush_elide);
    qemu_printf("TLB full flushes    %zu\n", flush_full);
    qemu_printf("TLB partial flushes %zu\n", flush_part);
    qemu_printf("TLB elided flushes  %zu\n", flush_elide);
    tcg_dump_info();
}

void dump_opcount_info(void)
{
    tcg_dump_op_count();
}
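/*
 * Note: dump_exec_info() and dump_opcount_info() back the monitor's
 * "info jit" and "info opcount" commands, which is why all output goes
 * through qemu_printf() rather than straight to stderr.
 */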
#else /* CONFIG_USER_ONLY */

void cpu_interrupt(CPUState *cpu, int mask)
{
    g_assert(qemu_mutex_iothread_locked());
    cpu->interrupt_request |= mask;
    atomic_set(&cpu_neg(cpu)->icount_decr.u16.high, -1);
}
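/*
 * Note on the user-only cpu_interrupt(): writing -1 to icount_decr.u16.high
 * makes the combined decrementer read as negative, which generated code
 * tests at TB boundaries; the vCPU therefore leaves the execution loop and
 * notices interrupt_request promptly.
 */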
/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */
struct walk_memory_regions_data {
    walk_memory_regions_fn fn;
    void *priv;
    target_ulong start;
    int prot;
};

static int walk_memory_regions_end(struct walk_memory_regions_data *data,
                                   target_ulong end, int new_prot)
{
    if (data->start != -1u) {
        int rc = data->fn(data->priv, data->start, end, data->prot);
        if (rc != 0) {
            return rc;
        }
    }

    data->start = (new_prot ? end : -1u);
    data->prot = new_prot;

    return 0;
}
static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 target_ulong base, int level, void **lp)
{
    target_ulong pa;
    int i, rc;

    if (*lp == NULL) {
        return walk_memory_regions_end(data, base, 0);
    }

    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            if (prot != data->prot) {
                rc = walk_memory_regions_end(data, pa, prot);
                if (rc != 0) {
                    return rc;
                }
            }
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            pa = base | ((target_ulong)i <<
                (TARGET_PAGE_BITS + V_L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
            if (rc != 0) {
                return rc;
            }
        }
    }

    return 0;
}

int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    struct walk_memory_regions_data data;
    uintptr_t i, l1_sz = v_l1_size;

    data.fn = fn;
    data.priv = priv;
    data.start = -1u;
    data.prot = 0;

    for (i = 0; i < l1_sz; i++) {
        target_ulong base = i << (v_l1_shift + TARGET_PAGE_BITS);
        int rc = walk_memory_regions_1(&data, base, v_l2_levels, l1_map + i);
        if (rc != 0) {
            return rc;
        }
    }

    return walk_memory_regions_end(&data, 0, 0);
}
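/*
 * walk_memory_regions() recurses through the l1_map radix tree and coalesces
 * runs of pages with identical protection flags into single callbacks.
 * dump_region() below is the in-tree user; as a minimal sketch, a caller
 * could also tally mapped guest memory with a hypothetical callback such as:
 *
 *     static int count_region(void *priv, target_ulong start,
 *                             target_ulong end, unsigned long prot)
 *     {
 *         *(target_ulong *)priv += end - start;
 *         return 0;    (a non-zero return would abort the walk)
 *     }
 *
 *     target_ulong total = 0;
 *     walk_memory_regions(&total, count_region);
 */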
static int dump_region(void *priv, target_ulong start,
    target_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    (void) fprintf(f, TARGET_FMT_lx"-"TARGET_FMT_lx
        " "TARGET_FMT_lx" %c%c%c\n",
        start, end, end - start,
        ((prot & PAGE_READ) ? 'r' : '-'),
        ((prot & PAGE_WRITE) ? 'w' : '-'),
        ((prot & PAGE_EXEC) ? 'x' : '-'));

    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    const int length = sizeof(target_ulong) * 2;
    (void) fprintf(f, "%-*s %-*s %-*s %s\n",
            length, "start", length, "end", length, "size", "prot");
    walk_memory_regions(f, dump_region);
}
int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        return 0;
    }
    return p->flags;
}
/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is positioned automatically depending
   on PAGE_WRITE.  The mmap_lock should already be held. */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid. */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end <= ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
    assert(start < end);
    assert_memory_lock();

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        flags |= PAGE_WRITE_ORG;
    }

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate
           the code inside.  */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0);
        }
        p->flags = flags;
    }
}
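/*
 * Note on page_set_flags(): PAGE_WRITE_ORG remembers that the guest mapping
 * is writable even while PAGE_WRITE is temporarily removed to write-protect
 * pages containing translated code; page_unprotect() below restores
 * PAGE_WRITE from it.  Callers (typically the user-mode mmap/mprotect
 * emulation) must hold mmap_lock, as asserted above.
 */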
int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid. */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(start < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif

    if (len == 0) {
        return 0;
    }
    if (start + len - 1 < start) {
        /* We've wrapped around. */
        return -1;
    }

    /* must do before we lose bits in the next step */
    end = TARGET_PAGE_ALIGN(start + len);
    start = start & TARGET_PAGE_MASK;

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p) {
            return -1;
        }
        if (!(p->flags & PAGE_VALID)) {
            return -1;
        }

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) {
            return -1;
        }
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG)) {
                return -1;
            }
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0)) {
                    return -1;
                }
            }
        }
    }
    return 0;
}
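/*
 * Note on page_check_range(): returns 0 when every page in
 * [start, start + len) is mapped with at least the requested
 * PAGE_READ/PAGE_WRITE permissions and -1 otherwise.  The user-mode
 * syscall layer relies on this (e.g. through access_ok()) to validate
 * guest buffers before touching them.
 */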
/* called from signal handler: invalidate the code and unprotect the
 * page. Return 0 if the fault was not handled, 1 if it was handled,
 * and 2 if it was handled but the caller must cause the TB to be
 * immediately exited. (We can only return 2 if the 'pc' argument is
 * non-zero.)
 */
int page_unprotect(target_ulong address, uintptr_t pc)
{
    PageDesc *p;
    bool current_tb_invalidated;
    int prot;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok. */
    mmap_lock();

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        mmap_unlock();
        return 0;
    }

    /* if the page was really writable, then we change its
       protection back to writable */
    if (p->flags & PAGE_WRITE_ORG) {
        current_tb_invalidated = false;
        if (p->flags & PAGE_WRITE) {
            /* If the page is actually marked WRITE then assume this is because
             * this thread raced with another one which got here first and
             * set the page to PAGE_WRITE and did the TB invalidate for us.
             */
#ifdef TARGET_HAS_PRECISE_SMC
            TranslationBlock *current_tb = tcg_tb_lookup(pc);
            if (current_tb) {
                current_tb_invalidated = tb_cflags(current_tb) & CF_INVALID;
            }
#endif
        } else {
            host_start = address & qemu_host_page_mask;
            host_end = host_start + qemu_host_page_size;

            prot = 0;
            for (addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
                p = page_find(addr >> TARGET_PAGE_BITS);
                p->flags |= PAGE_WRITE;
                prot |= p->flags;

                /* and since the content will be modified, we must invalidate
                   the corresponding translated code. */
                current_tb_invalidated |= tb_invalidate_phys_page(addr, pc);
#ifdef CONFIG_USER_ONLY
                if (DEBUG_TB_CHECK_GATE) {
                    tb_invalidate_check(addr);
                }
#endif
            }
            mprotect((void *)g2h(host_start), qemu_host_page_size,
                     prot & PAGE_BITS);
        }
        mmap_unlock();
        /* If current TB was invalidated return to main loop */
        return current_tb_invalidated ? 2 : 1;
    }
    mmap_unlock();
    return 0;
}
#endif /* CONFIG_USER_ONLY */
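/*
 * Note on page_unprotect(): this is the user-mode self-modifying-code path.
 * When the guest writes to a page that was write-protected because it holds
 * translated code, the resulting SEGV is caught, the fault handler calls
 * page_unprotect(), the stale TBs are invalidated and the write is
 * restarted.  A return value of 2 tells the caller to force an immediate
 * exit from the current TB because that TB invalidated itself.
 */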
/* This is a wrapper for common code that can not use CONFIG_SOFTMMU */
void tcg_flush_softmmu_tlb(CPUState *cs)
{
#ifdef CONFIG_SOFTMMU
    tlb_flush(cs);
#endif
}