 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"

#include "qemu-common.h"
#define NO_CPU_IO_DEFS
#include "disas/disas.h"
#include "exec/exec-all.h"

#if defined(CONFIG_USER_ONLY)
#if defined(TARGET_X86_64)
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <machine/profile.h>

#include "exec/ram_addr.h"

#include "exec/cputlb.h"
#include "exec/tb-hash.h"
#include "translate-all.h"
#include "qemu/bitmap.h"
#include "qemu/error-report.h"
#include "qemu/timer.h"
#include "qemu/main-loop.h"

#include "sysemu/cpus.h"
/* #define DEBUG_TB_INVALIDATE */
/* #define DEBUG_TB_FLUSH */
/* make various TB consistency checks */
/* #define DEBUG_TB_CHECK */

#ifdef DEBUG_TB_INVALIDATE
#define DEBUG_TB_INVALIDATE_GATE 1
#else
#define DEBUG_TB_INVALIDATE_GATE 0
#endif

#ifdef DEBUG_TB_FLUSH
#define DEBUG_TB_FLUSH_GATE 1
#else
#define DEBUG_TB_FLUSH_GATE 0
#endif

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#ifdef DEBUG_TB_CHECK
#define DEBUG_TB_CHECK_GATE 1
#else
#define DEBUG_TB_CHECK_GATE 0
#endif
/* Access to the various translations structures needs to be serialised via locks
 * for consistency.
 *
 * In user-mode emulation, access to the memory-related structures is protected
 * with mmap_lock.
 * In !user-mode we use per-page locks.
 */
#ifdef CONFIG_SOFTMMU
#define assert_memory_lock()
#else
#define assert_memory_lock() tcg_debug_assert(have_mmap_lock())
#endif

#define SMC_BITMAP_USE_THRESHOLD 10
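/*
 * Illustrative summary of how the threshold is used (based on
 * tb_invalidate_phys_page_fast() below): every slow-path write to a page
 * containing translated code increments code_write_count; once it reaches
 * SMC_BITMAP_USE_THRESHOLD, build_page_bitmap() records which bytes of the
 * page are covered by TBs, so that later writes which do not overlap any
 * translated byte can be dismissed without invalidating anything.
 */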
typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    uintptr_t first_tb;
#ifdef CONFIG_SOFTMMU
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned long *code_bitmap;
    unsigned int code_write_count;
#else
    unsigned long flags;
#endif
#ifndef CONFIG_USER_ONLY
    QemuSpin lock;
#endif
} PageDesc;
/**
 * struct page_entry - page descriptor entry
 * @pd:     pointer to the &struct PageDesc of the page this entry represents
 * @index:  page index of the page
 * @locked: whether the page is locked
 *
 * This struct helps us keep track of the locked state of a page, without
 * bloating &struct PageDesc.
 *
 * A page lock protects accesses to all fields of &struct PageDesc.
 *
 * See also: &struct page_collection.
 */
struct page_entry {
    PageDesc *pd;
    tb_page_addr_t index;
    bool locked;
};
/**
 * struct page_collection - tracks a set of pages (i.e. &struct page_entry's)
 * @tree:   Binary search tree (BST) of the pages, with key == page index
 * @max:    Pointer to the page in @tree with the highest page index
 *
 * To avoid deadlock we lock pages in ascending order of page index.
 * When operating on a set of pages, we need to keep track of them so that
 * we can lock them in order and also unlock them later. For this we collect
 * pages (i.e. &struct page_entry's) in a binary search @tree. Given that the
 * @tree implementation we use does not provide an O(1) operation to obtain the
 * highest-ranked element, we use @max to keep track of the inserted page
 * with the highest index. This is valuable because if a page is not in
 * the tree and its index is higher than @max's, then we can lock it
 * without breaking the locking order rule.
 *
 * Note on naming: 'struct page_set' would be shorter, but we already have a few
 * page_set_*() helpers, so page_collection is used instead to avoid confusion.
 *
 * See also: page_collection_lock().
 */
struct page_collection {
    GTree *tree;
    struct page_entry *max;
};
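/*
 * Illustrative sketch of the ordering rule documented above (page indexes
 * are made up): if the collection already holds locked pages { 3, 7 }, then
 * @max points at index 7.  Page 9 can be locked directly, since 9 > 7 keeps
 * the ascending-order invariant.  Page 5, however, may only be trylocked;
 * if that trylock reports "busy", every lock in the set is dropped and the
 * whole collection is re-acquired in ascending index order, which is how
 * page_collection_lock() avoids deadlock.
 */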
/* list iterators for lists of tagged pointers in TranslationBlock */
#define TB_FOR_EACH_TAGGED(head, tb, n, field)                          \
    for (n = (head) & 1, tb = (TranslationBlock *)((head) & ~1);       \
         tb; tb = (TranslationBlock *)tb->field[n], n = (uintptr_t)tb & 1, \
             tb = (TranslationBlock *)((uintptr_t)tb & ~1))

#define PAGE_FOR_EACH_TB(pagedesc, tb, n)                       \
    TB_FOR_EACH_TAGGED((pagedesc)->first_tb, tb, n, page_next)

#define TB_FOR_EACH_JMP(head_tb, tb, n)                                 \
    TB_FOR_EACH_TAGGED((head_tb)->jmp_list_head, tb, n, jmp_list_next)
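/*
 * Worked example of the tagged-pointer scheme above (the values are
 * hypothetical): a TranslationBlock is at least pointer-aligned, so the
 * least significant bit of each list link is free to carry which of the
 * next TB's two per-page (or per-jump) slots continues the chain.
 * Packing and unpacking a link then look like:
 *
 *     uintptr_t ent = (uintptr_t)tb | n;                  // n is 0 or 1
 *     int slot = ent & 1;                                 // recover n
 *     TranslationBlock *next = (TranslationBlock *)(ent & ~1);
 *
 * which is exactly what TB_FOR_EACH_TAGGED() undoes at every step.
 */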
/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define V_L2_BITS 10
#define V_L2_SIZE (1 << V_L2_BITS)
/* Make sure all possible CPU event bits fit in tb->trace_vcpu_dstate */
QEMU_BUILD_BUG_ON(CPU_TRACE_DSTATE_MAX_EVENTS >
                  sizeof_field(TranslationBlock, trace_vcpu_dstate)
                  * BITS_PER_BYTE);
/*
 * L1 Mapping properties
 */
static int v_l1_size;
static int v_l1_shift;
static int v_l2_levels;

/* The bottom level has pointers to PageDesc, and is indexed by
 * anything from 4 to (V_L2_BITS + 3) bits, depending on target page size.
 */
#define V_L1_MIN_BITS 4
#define V_L1_MAX_BITS (V_L2_BITS + 3)
#define V_L1_MAX_SIZE (1 << V_L1_MAX_BITS)

static void *l1_map[V_L1_MAX_SIZE];

/* code generation context */
TCGContext tcg_init_ctx;
__thread TCGContext *tcg_ctx;
static void page_table_config_init(void)
{
    uint32_t v_l1_bits;

    assert(TARGET_PAGE_BITS);
    /* The bits remaining after N lower levels of page tables.  */
    v_l1_bits = (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % V_L2_BITS;
    if (v_l1_bits < V_L1_MIN_BITS) {
        v_l1_bits += V_L2_BITS;
    }

    v_l1_size = 1 << v_l1_bits;
    v_l1_shift = L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - v_l1_bits;
    v_l2_levels = v_l1_shift / V_L2_BITS - 1;

    assert(v_l1_bits <= V_L1_MAX_BITS);
    assert(v_l1_shift % V_L2_BITS == 0);
    assert(v_l2_levels >= 0);
}
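/*
 * Worked example of the sizing above, assuming (purely for illustration)
 * L1_MAP_ADDR_SPACE_BITS == 48, TARGET_PAGE_BITS == 12 and V_L2_BITS == 10:
 * 48 - 12 = 36 bits have to be resolved by the radix tree.  36 % 10 == 6,
 * which is >= V_L1_MIN_BITS, so v_l1_bits = 6, v_l1_size = 64,
 * v_l1_shift = 30 and v_l2_levels = 30 / 10 - 1 = 2: a 64-entry L1 table,
 * two 1024-entry levels of pointers, and a final 1024-entry level of
 * PageDesc (6 + 10 + 10 + 10 = 36 bits in total).
 */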
void cpu_gen_init(void)
{
    tcg_context_init(&tcg_init_ctx);
}
/* Encode VAL as a signed leb128 sequence at P.
   Return P incremented past the encoded value.  */
static uint8_t *encode_sleb128(uint8_t *p, target_long val)
{
    int more, byte;

    do {
        byte = val & 0x7f;
        val >>= 7;
        more = !((val == 0 && (byte & 0x40) == 0)
                 || (val == -1 && (byte & 0x40) != 0));
        if (more) {
            byte |= 0x80;
        }
        *p++ = byte;
    } while (more);

    return p;
}
/* Decode a signed leb128 sequence at *PP; increment *PP past the
   decoded value.  Return the decoded value.  */
static target_long decode_sleb128(uint8_t **pp)
{
    uint8_t *p = *pp;
    target_long val = 0;
    int byte, shift = 0;

    do {
        byte = *p++;
        val |= (target_ulong)(byte & 0x7f) << shift;
        shift += 7;
    } while (byte & 0x80);
    if (shift < TARGET_LONG_BITS && (byte & 0x40)) {
        val |= -(target_ulong)1 << shift;
    }

    *pp = p;
    return val;
}
/* Encode the data collected about the instructions while compiling TB.
   Place the data at BLOCK, and return the number of bytes consumed.

   The logical table consists of TARGET_INSN_START_WORDS target_ulong's,
   which come from the target's insn_start data, followed by a uintptr_t
   which comes from the host pc of the end of the code implementing the insn.

   Each line of the table is encoded as sleb128 deltas from the previous
   line.  The seed for the first line is { tb->pc, 0..., tb->tc.ptr }.
   That is, the first column is seeded with the guest pc, the last column
   with the host pc, and the middle columns with zeros.  */
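/*
 * Illustrative example of the delta encoding (the numbers are made up): for
 * a TB at guest pc 0x1000 whose two insns are 4 guest bytes each and whose
 * generated code ends at host offsets 16 and 36 respectively, the logical
 * table
 *
 *     insn 0: { 0x1000, ... }  host end offset 16
 *     insn 1: { 0x1004, ... }  host end offset 36
 *
 * is stored as the sleb128 deltas { 0, ..., 16 } for the first row and
 * { 4, ..., 20 } for the second, i.e. mostly one-byte values.
 * cpu_restore_state_from_tb() below replays these deltas until the
 * accumulated host pc passes searched_pc.
 */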
static int encode_search(TranslationBlock *tb, uint8_t *block)
{
    uint8_t *highwater = tcg_ctx->code_gen_highwater;
    uint8_t *p = block;
    int i, j, n;

    for (i = 0, n = tb->icount; i < n; ++i) {
        target_ulong prev;

        for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
            if (i == 0) {
                prev = (j == 0 ? tb->pc : 0);
            } else {
                prev = tcg_ctx->gen_insn_data[i - 1][j];
            }
            p = encode_sleb128(p, tcg_ctx->gen_insn_data[i][j] - prev);
        }
        prev = (i == 0 ? 0 : tcg_ctx->gen_insn_end_off[i - 1]);
        p = encode_sleb128(p, tcg_ctx->gen_insn_end_off[i] - prev);

        /* Test for (pending) buffer overflow.  The assumption is that any
           one row beginning below the high water mark cannot overrun
           the buffer completely.  Thus we can test for overflow after
           encoding a row without having to check during encoding.  */
        if (unlikely(p > highwater)) {
            return -1;
        }
    }

    return p - block;
}
/* The cpu state corresponding to 'searched_pc' is restored.
 * When reset_icount is true, current TB will be interrupted and
 * icount should be recalculated.
 */
static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
                                     uintptr_t searched_pc, bool reset_icount)
{
    target_ulong data[TARGET_INSN_START_WORDS] = { tb->pc };
    uintptr_t host_pc = (uintptr_t)tb->tc.ptr;
    CPUArchState *env = cpu->env_ptr;
    uint8_t *p = tb->tc.ptr + tb->tc.size;
    int i, j, num_insns = tb->icount;
#ifdef CONFIG_PROFILER
    TCGProfile *prof = &tcg_ctx->prof;
    int64_t ti = profile_getclock();
#endif

    searched_pc -= GETPC_ADJ;

    if (searched_pc < host_pc) {
        return -1;
    }
    /* Reconstruct the stored insn data while looking for the point at
       which the end of the insn exceeds the searched_pc.  */
    for (i = 0; i < num_insns; ++i) {
        for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
            data[j] += decode_sleb128(&p);
        }
        host_pc += decode_sleb128(&p);
        if (host_pc > searched_pc) {
            goto found;
        }
    }
    return -1;

 found:
    if (reset_icount && (tb_cflags(tb) & CF_USE_ICOUNT)) {
        assert(use_icount);
        /* Reset the cycle counter to the start of the block
           and shift it to the number of actually executed instructions */
        cpu->icount_decr.u16.low += num_insns - i;
    }

    restore_state_to_opc(env, tb, data);
#ifdef CONFIG_PROFILER
    atomic_set(&prof->restore_time,
               prof->restore_time + profile_getclock() - ti);
    atomic_set(&prof->restore_count, prof->restore_count + 1);
#endif
    return 0;
}
bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc, bool will_exit)
{
    TranslationBlock *tb;
    bool r = false;
    uintptr_t check_offset;

    /* The host_pc has to be in the region of current code buffer. If
     * it is not we will not be able to resolve it here. The two cases
     * where host_pc will not be correct are:
     *
     *  - fault during translation (instruction fetch)
     *  - fault from helper (not using GETPC() macro)
     *
     * Either way we need to return early as we can't resolve it here.
     *
     * We are using unsigned arithmetic so if host_pc <
     * tcg_init_ctx.code_gen_buffer check_offset will wrap to way
     * above the code_gen_buffer_size
     */
    check_offset = host_pc - (uintptr_t) tcg_init_ctx.code_gen_buffer;

    if (check_offset < tcg_init_ctx.code_gen_buffer_size) {
        tb = tcg_tb_lookup(host_pc);
        if (tb) {
            cpu_restore_state_from_tb(cpu, tb, host_pc, will_exit);
            if (tb_cflags(tb) & CF_NOCACHE) {
                /* one-shot translation, invalidate it immediately */
                tb_phys_invalidate(tb, -1);
                tcg_tb_remove(tb);
            }
            r = true;
        }
    }

    return r;
}
421 static void page_init(void)
424 page_table_config_init();
426 #if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
428 #ifdef HAVE_KINFO_GETVMMAP
429 struct kinfo_vmentry
*freep
;
432 freep
= kinfo_getvmmap(getpid(), &cnt
);
435 for (i
= 0; i
< cnt
; i
++) {
436 unsigned long startaddr
, endaddr
;
438 startaddr
= freep
[i
].kve_start
;
439 endaddr
= freep
[i
].kve_end
;
440 if (h2g_valid(startaddr
)) {
441 startaddr
= h2g(startaddr
) & TARGET_PAGE_MASK
;
443 if (h2g_valid(endaddr
)) {
444 endaddr
= h2g(endaddr
);
445 page_set_flags(startaddr
, endaddr
, PAGE_RESERVED
);
447 #if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
449 page_set_flags(startaddr
, endaddr
, PAGE_RESERVED
);
460 last_brk
= (unsigned long)sbrk(0);
462 f
= fopen("/compat/linux/proc/self/maps", "r");
467 unsigned long startaddr
, endaddr
;
470 n
= fscanf(f
, "%lx-%lx %*[^\n]\n", &startaddr
, &endaddr
);
472 if (n
== 2 && h2g_valid(startaddr
)) {
473 startaddr
= h2g(startaddr
) & TARGET_PAGE_MASK
;
475 if (h2g_valid(endaddr
)) {
476 endaddr
= h2g(endaddr
);
480 page_set_flags(startaddr
, endaddr
, PAGE_RESERVED
);
492 static PageDesc
*page_find_alloc(tb_page_addr_t index
, int alloc
)
498 /* Level 1. Always allocated. */
499 lp
= l1_map
+ ((index
>> v_l1_shift
) & (v_l1_size
- 1));
502 for (i
= v_l2_levels
; i
> 0; i
--) {
503 void **p
= atomic_rcu_read(lp
);
511 p
= g_new0(void *, V_L2_SIZE
);
512 existing
= atomic_cmpxchg(lp
, NULL
, p
);
513 if (unlikely(existing
)) {
519 lp
= p
+ ((index
>> (i
* V_L2_BITS
)) & (V_L2_SIZE
- 1));
522 pd
= atomic_rcu_read(lp
);
529 pd
= g_new0(PageDesc
, V_L2_SIZE
);
530 #ifndef CONFIG_USER_ONLY
534 for (i
= 0; i
< V_L2_SIZE
; i
++) {
535 qemu_spin_init(&pd
[i
].lock
);
539 existing
= atomic_cmpxchg(lp
, NULL
, pd
);
540 if (unlikely(existing
)) {
546 return pd
+ (index
& (V_L2_SIZE
- 1));
549 static inline PageDesc
*page_find(tb_page_addr_t index
)
551 return page_find_alloc(index
, 0);
554 static void page_lock_pair(PageDesc
**ret_p1
, tb_page_addr_t phys1
,
555 PageDesc
**ret_p2
, tb_page_addr_t phys2
, int alloc
);
557 /* In user-mode page locks aren't used; mmap_lock is enough */
558 #ifdef CONFIG_USER_ONLY
560 #define assert_page_locked(pd) tcg_debug_assert(have_mmap_lock())
562 static inline void page_lock(PageDesc
*pd
)
565 static inline void page_unlock(PageDesc
*pd
)
568 static inline void page_lock_tb(const TranslationBlock
*tb
)
571 static inline void page_unlock_tb(const TranslationBlock
*tb
)
574 struct page_collection
*
575 page_collection_lock(tb_page_addr_t start
, tb_page_addr_t end
)
580 void page_collection_unlock(struct page_collection
*set
)
582 #else /* !CONFIG_USER_ONLY */
584 #ifdef CONFIG_DEBUG_TCG
586 static __thread GHashTable
*ht_pages_locked_debug
;
588 static void ht_pages_locked_debug_init(void)
590 if (ht_pages_locked_debug
) {
593 ht_pages_locked_debug
= g_hash_table_new(NULL
, NULL
);
596 static bool page_is_locked(const PageDesc
*pd
)
600 ht_pages_locked_debug_init();
601 found
= g_hash_table_lookup(ht_pages_locked_debug
, pd
);
605 static void page_lock__debug(PageDesc
*pd
)
607 ht_pages_locked_debug_init();
608 g_assert(!page_is_locked(pd
));
609 g_hash_table_insert(ht_pages_locked_debug
, pd
, pd
);
612 static void page_unlock__debug(const PageDesc
*pd
)
616 ht_pages_locked_debug_init();
617 g_assert(page_is_locked(pd
));
618 removed
= g_hash_table_remove(ht_pages_locked_debug
, pd
);
623 do_assert_page_locked(const PageDesc
*pd
, const char *file
, int line
)
625 if (unlikely(!page_is_locked(pd
))) {
626 error_report("assert_page_lock: PageDesc %p not locked @ %s:%d",
632 #define assert_page_locked(pd) do_assert_page_locked(pd, __FILE__, __LINE__)
634 void assert_no_pages_locked(void)
636 ht_pages_locked_debug_init();
637 g_assert(g_hash_table_size(ht_pages_locked_debug
) == 0);
640 #else /* !CONFIG_DEBUG_TCG */
642 #define assert_page_locked(pd)
644 static inline void page_lock__debug(const PageDesc
*pd
)
648 static inline void page_unlock__debug(const PageDesc
*pd
)
652 #endif /* CONFIG_DEBUG_TCG */
654 static inline void page_lock(PageDesc
*pd
)
656 page_lock__debug(pd
);
657 qemu_spin_lock(&pd
->lock
);
660 static inline void page_unlock(PageDesc
*pd
)
662 qemu_spin_unlock(&pd
->lock
);
663 page_unlock__debug(pd
);
666 /* lock the page(s) of a TB in the correct acquisition order */
667 static inline void page_lock_tb(const TranslationBlock
*tb
)
669 page_lock_pair(NULL
, tb
->page_addr
[0], NULL
, tb
->page_addr
[1], 0);
672 static inline void page_unlock_tb(const TranslationBlock
*tb
)
674 PageDesc
*p1
= page_find(tb
->page_addr
[0] >> TARGET_PAGE_BITS
);
677 if (unlikely(tb
->page_addr
[1] != -1)) {
678 PageDesc
*p2
= page_find(tb
->page_addr
[1] >> TARGET_PAGE_BITS
);
686 static inline struct page_entry
*
687 page_entry_new(PageDesc
*pd
, tb_page_addr_t index
)
689 struct page_entry
*pe
= g_malloc(sizeof(*pe
));
697 static void page_entry_destroy(gpointer p
)
699 struct page_entry
*pe
= p
;
701 g_assert(pe
->locked
);
706 /* returns false on success */
707 static bool page_entry_trylock(struct page_entry
*pe
)
711 busy
= qemu_spin_trylock(&pe
->pd
->lock
);
713 g_assert(!pe
->locked
);
715 page_lock__debug(pe
->pd
);
720 static void do_page_entry_lock(struct page_entry
*pe
)
723 g_assert(!pe
->locked
);
727 static gboolean
page_entry_lock(gpointer key
, gpointer value
, gpointer data
)
729 struct page_entry
*pe
= value
;
731 do_page_entry_lock(pe
);
735 static gboolean
page_entry_unlock(gpointer key
, gpointer value
, gpointer data
)
737 struct page_entry
*pe
= value
;
747 * Trylock a page, and if successful, add the page to a collection.
748 * Returns true ("busy") if the page could not be locked; false otherwise.
750 static bool page_trylock_add(struct page_collection
*set
, tb_page_addr_t addr
)
752 tb_page_addr_t index
= addr
>> TARGET_PAGE_BITS
;
753 struct page_entry
*pe
;
756 pe
= g_tree_lookup(set
->tree
, &index
);
761 pd
= page_find(index
);
766 pe
= page_entry_new(pd
, index
);
767 g_tree_insert(set
->tree
, &pe
->index
, pe
);
770 * If this is either (1) the first insertion or (2) a page whose index
771 * is higher than any other so far, just lock the page and move on.
773 if (set
->max
== NULL
|| pe
->index
> set
->max
->index
) {
775 do_page_entry_lock(pe
);
779 * Try to acquire out-of-order lock; if busy, return busy so that we acquire
782 return page_entry_trylock(pe
);
785 static gint
tb_page_addr_cmp(gconstpointer ap
, gconstpointer bp
, gpointer udata
)
787 tb_page_addr_t a
= *(const tb_page_addr_t
*)ap
;
788 tb_page_addr_t b
= *(const tb_page_addr_t
*)bp
;
799 * Lock a range of pages ([@start,@end[) as well as the pages of all
801 * Locking order: acquire locks in ascending order of page index.
803 struct page_collection
*
804 page_collection_lock(tb_page_addr_t start
, tb_page_addr_t end
)
806 struct page_collection
*set
= g_malloc(sizeof(*set
));
807 tb_page_addr_t index
;
810 start
>>= TARGET_PAGE_BITS
;
811 end
>>= TARGET_PAGE_BITS
;
812 g_assert(start
<= end
);
814 set
->tree
= g_tree_new_full(tb_page_addr_cmp
, NULL
, NULL
,
817 assert_no_pages_locked();
820 g_tree_foreach(set
->tree
, page_entry_lock
, NULL
);
822 for (index
= start
; index
<= end
; index
++) {
823 TranslationBlock
*tb
;
826 pd
= page_find(index
);
830 if (page_trylock_add(set
, index
<< TARGET_PAGE_BITS
)) {
831 g_tree_foreach(set
->tree
, page_entry_unlock
, NULL
);
834 assert_page_locked(pd
);
835 PAGE_FOR_EACH_TB(pd
, tb
, n
) {
836 if (page_trylock_add(set
, tb
->page_addr
[0]) ||
837 (tb
->page_addr
[1] != -1 &&
838 page_trylock_add(set
, tb
->page_addr
[1]))) {
839 /* drop all locks, and reacquire in order */
840 g_tree_foreach(set
->tree
, page_entry_unlock
, NULL
);
848 void page_collection_unlock(struct page_collection
*set
)
850 /* entries are unlocked and freed via page_entry_destroy */
851 g_tree_destroy(set
->tree
);
855 #endif /* !CONFIG_USER_ONLY */
857 static void page_lock_pair(PageDesc
**ret_p1
, tb_page_addr_t phys1
,
858 PageDesc
**ret_p2
, tb_page_addr_t phys2
, int alloc
)
861 tb_page_addr_t page1
;
862 tb_page_addr_t page2
;
864 assert_memory_lock();
865 g_assert(phys1
!= -1);
867 page1
= phys1
>> TARGET_PAGE_BITS
;
868 page2
= phys2
>> TARGET_PAGE_BITS
;
870 p1
= page_find_alloc(page1
, alloc
);
874 if (likely(phys2
== -1)) {
877 } else if (page1
== page2
) {
884 p2
= page_find_alloc(page2
, alloc
);
897 #if defined(CONFIG_USER_ONLY)
898 /* Currently it is not recommended to allocate big chunks of data in
899 user mode. It will change when a dedicated libc will be used. */
900 /* ??? 64-bit hosts ought to have no problem mmaping data outside the
901 region in which the guest needs to run. Revisit this. */
902 #define USE_STATIC_CODE_GEN_BUFFER
/* Minimum size of the code gen buffer.  This number is arbitrarily chosen,
   but not so small that we can't have a fair number of TBs live.  */
907 #define MIN_CODE_GEN_BUFFER_SIZE (1024u * 1024)
909 /* Maximum size of the code gen buffer we'd like to use. Unless otherwise
910 indicated, this is constrained by the range of direct branches on the
911 host cpu, as used by the TCG implementation of goto_tb. */
912 #if defined(__x86_64__)
913 # define MAX_CODE_GEN_BUFFER_SIZE (2ul * 1024 * 1024 * 1024)
914 #elif defined(__sparc__)
915 # define MAX_CODE_GEN_BUFFER_SIZE (2ul * 1024 * 1024 * 1024)
916 #elif defined(__powerpc64__)
917 # define MAX_CODE_GEN_BUFFER_SIZE (2ul * 1024 * 1024 * 1024)
918 #elif defined(__powerpc__)
919 # define MAX_CODE_GEN_BUFFER_SIZE (32u * 1024 * 1024)
920 #elif defined(__aarch64__)
921 # define MAX_CODE_GEN_BUFFER_SIZE (2ul * 1024 * 1024 * 1024)
922 #elif defined(__s390x__)
923 /* We have a +- 4GB range on the branches; leave some slop. */
924 # define MAX_CODE_GEN_BUFFER_SIZE (3ul * 1024 * 1024 * 1024)
925 #elif defined(__mips__)
926 /* We have a 256MB branch region, but leave room to make sure the
927 main executable is also within that region. */
928 # define MAX_CODE_GEN_BUFFER_SIZE (128ul * 1024 * 1024)
930 # define MAX_CODE_GEN_BUFFER_SIZE ((size_t)-1)
933 #define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32u * 1024 * 1024)
935 #define DEFAULT_CODE_GEN_BUFFER_SIZE \
936 (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
937 ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)
939 static inline size_t size_code_gen_buffer(size_t tb_size
)
941 /* Size the buffer. */
943 #ifdef USE_STATIC_CODE_GEN_BUFFER
944 tb_size
= DEFAULT_CODE_GEN_BUFFER_SIZE
;
946 /* ??? Needs adjustments. */
947 /* ??? If we relax the requirement that CONFIG_USER_ONLY use the
948 static buffer, we could size this on RESERVED_VA, on the text
949 segment size of the executable, or continue to use the default. */
950 tb_size
= (unsigned long)(ram_size
/ 4);
953 if (tb_size
< MIN_CODE_GEN_BUFFER_SIZE
) {
954 tb_size
= MIN_CODE_GEN_BUFFER_SIZE
;
956 if (tb_size
> MAX_CODE_GEN_BUFFER_SIZE
) {
957 tb_size
= MAX_CODE_GEN_BUFFER_SIZE
;
/* In order to use J and JAL within the code_gen_buffer, we require
   that the buffer not cross a 256MB boundary.  */
static inline bool cross_256mb(void *addr, size_t size)
{
    return ((uintptr_t)addr ^ ((uintptr_t)addr + size)) & ~0x0ffffffful;
}
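/*
 * Short worked example of the check above (hypothetical addresses): with
 * addr = 0x0fff0000 and size = 0x00020000 the end is 0x10010000; XOR-ing
 * start and end gives 0x1ffe0000, and masking with ~0x0ffffffful leaves
 * 0x10000000, so the buffer crosses a 256MB boundary.  With
 * addr = 0x10000000 and the same size, the XOR is 0x00020000 and the
 * masked result is 0, so the buffer fits within a single 256MB region.
 */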
970 /* We weren't able to allocate a buffer without crossing that boundary,
971 so make do with the larger portion of the buffer that doesn't cross.
972 Returns the new base of the buffer, and adjusts code_gen_buffer_size. */
973 static inline void *split_cross_256mb(void *buf1
, size_t size1
)
975 void *buf2
= (void *)(((uintptr_t)buf1
+ size1
) & ~0x0ffffffful
);
976 size_t size2
= buf1
+ size1
- buf2
;
984 tcg_ctx
->code_gen_buffer_size
= size1
;
989 #ifdef USE_STATIC_CODE_GEN_BUFFER
990 static uint8_t static_code_gen_buffer
[DEFAULT_CODE_GEN_BUFFER_SIZE
]
991 __attribute__((aligned(CODE_GEN_ALIGN
)));
993 static inline void *alloc_code_gen_buffer(void)
995 void *buf
= static_code_gen_buffer
;
996 void *end
= static_code_gen_buffer
+ sizeof(static_code_gen_buffer
);
999 /* page-align the beginning and end of the buffer */
1000 buf
= QEMU_ALIGN_PTR_UP(buf
, qemu_real_host_page_size
);
1001 end
= QEMU_ALIGN_PTR_DOWN(end
, qemu_real_host_page_size
);
1005 /* Honor a command-line option limiting the size of the buffer. */
1006 if (size
> tcg_ctx
->code_gen_buffer_size
) {
1007 size
= QEMU_ALIGN_DOWN(tcg_ctx
->code_gen_buffer_size
,
1008 qemu_real_host_page_size
);
1010 tcg_ctx
->code_gen_buffer_size
= size
;
1013 if (cross_256mb(buf
, size
)) {
1014 buf
= split_cross_256mb(buf
, size
);
1015 size
= tcg_ctx
->code_gen_buffer_size
;
1019 if (qemu_mprotect_rwx(buf
, size
)) {
1022 qemu_madvise(buf
, size
, QEMU_MADV_HUGEPAGE
);
1026 #elif defined(_WIN32)
1027 static inline void *alloc_code_gen_buffer(void)
1029 size_t size
= tcg_ctx
->code_gen_buffer_size
;
1030 return VirtualAlloc(NULL
, size
, MEM_RESERVE
| MEM_COMMIT
,
1031 PAGE_EXECUTE_READWRITE
);
1034 static inline void *alloc_code_gen_buffer(void)
1036 int prot
= PROT_WRITE
| PROT_READ
| PROT_EXEC
;
1037 int flags
= MAP_PRIVATE
| MAP_ANONYMOUS
;
1038 uintptr_t start
= 0;
1039 size_t size
= tcg_ctx
->code_gen_buffer_size
;
1042 /* Constrain the position of the buffer based on the host cpu.
1043 Note that these addresses are chosen in concert with the
1044 addresses assigned in the relevant linker script file. */
1045 # if defined(__PIE__) || defined(__PIC__)
1046 /* Don't bother setting a preferred location if we're building
1047 a position-independent executable. We're more likely to get
1048 an address near the main executable if we let the kernel
1049 choose the address. */
1050 # elif defined(__x86_64__) && defined(MAP_32BIT)
1051 /* Force the memory down into low memory with the executable.
1052 Leave the choice of exact location with the kernel. */
1054 /* Cannot expect to map more than 800MB in low memory. */
1055 if (size
> 800u * 1024 * 1024) {
1056 tcg_ctx
->code_gen_buffer_size
= size
= 800u * 1024 * 1024;
1058 # elif defined(__sparc__)
1059 start
= 0x40000000ul
;
1060 # elif defined(__s390x__)
1061 start
= 0x90000000ul
;
1062 # elif defined(__mips__)
1063 # if _MIPS_SIM == _ABI64
1064 start
= 0x128000000ul
;
1066 start
= 0x08000000ul
;
1070 buf
= mmap((void *)start
, size
, prot
, flags
, -1, 0);
1071 if (buf
== MAP_FAILED
) {
1076 if (cross_256mb(buf
, size
)) {
1077 /* Try again, with the original still mapped, to avoid re-acquiring
1078 that 256mb crossing. This time don't specify an address. */
1080 void *buf2
= mmap(NULL
, size
, prot
, flags
, -1, 0);
1081 switch ((int)(buf2
!= MAP_FAILED
)) {
1083 if (!cross_256mb(buf2
, size
)) {
1084 /* Success! Use the new buffer. */
1088 /* Failure. Work with what we had. */
1092 /* Split the original buffer. Free the smaller half. */
1093 buf2
= split_cross_256mb(buf
, size
);
1094 size2
= tcg_ctx
->code_gen_buffer_size
;
1096 munmap(buf
+ size2
, size
- size2
);
1098 munmap(buf
, size
- size2
);
1107 /* Request large pages for the buffer. */
1108 qemu_madvise(buf
, size
, QEMU_MADV_HUGEPAGE
);
1112 #endif /* USE_STATIC_CODE_GEN_BUFFER, WIN32, POSIX */
1114 static inline void code_gen_alloc(size_t tb_size
)
1116 tcg_ctx
->code_gen_buffer_size
= size_code_gen_buffer(tb_size
);
1117 tcg_ctx
->code_gen_buffer
= alloc_code_gen_buffer();
1118 if (tcg_ctx
->code_gen_buffer
== NULL
) {
1119 fprintf(stderr
, "Could not allocate dynamic translator buffer\n");
1124 static bool tb_cmp(const void *ap
, const void *bp
)
1126 const TranslationBlock
*a
= ap
;
1127 const TranslationBlock
*b
= bp
;
1129 return a
->pc
== b
->pc
&&
1130 a
->cs_base
== b
->cs_base
&&
1131 a
->flags
== b
->flags
&&
1132 (tb_cflags(a
) & CF_HASH_MASK
) == (tb_cflags(b
) & CF_HASH_MASK
) &&
1133 a
->trace_vcpu_dstate
== b
->trace_vcpu_dstate
&&
1134 a
->page_addr
[0] == b
->page_addr
[0] &&
1135 a
->page_addr
[1] == b
->page_addr
[1];
1138 static void tb_htable_init(void)
1140 unsigned int mode
= QHT_MODE_AUTO_RESIZE
;
1142 qht_init(&tb_ctx
.htable
, tb_cmp
, CODE_GEN_HTABLE_SIZE
, mode
);
1145 /* Must be called before using the QEMU cpus. 'tb_size' is the size
1146 (in bytes) allocated to the translation buffer. Zero means default
1148 void tcg_exec_init(uintptr_t tb_size
)
1154 code_gen_alloc(tb_size
);
1155 #if defined(CONFIG_SOFTMMU)
1156 /* There's no guest base to take into account, so go ahead and
1157 initialize the prologue now. */
1158 tcg_prologue_init(tcg_ctx
);
1163 * Allocate a new translation block. Flush the translation buffer if
1164 * too many translation blocks or too much generated code.
1166 static TranslationBlock
*tb_alloc(target_ulong pc
)
1168 TranslationBlock
*tb
;
1170 assert_memory_lock();
1172 tb
= tcg_tb_alloc(tcg_ctx
);
1173 if (unlikely(tb
== NULL
)) {
1179 /* call with @p->lock held */
1180 static inline void invalidate_page_bitmap(PageDesc
*p
)
1182 assert_page_locked(p
);
1183 #ifdef CONFIG_SOFTMMU
1184 g_free(p
->code_bitmap
);
1185 p
->code_bitmap
= NULL
;
1186 p
->code_write_count
= 0;
1190 /* Set to NULL all the 'first_tb' fields in all PageDescs. */
1191 static void page_flush_tb_1(int level
, void **lp
)
1201 for (i
= 0; i
< V_L2_SIZE
; ++i
) {
1203 pd
[i
].first_tb
= (uintptr_t)NULL
;
1204 invalidate_page_bitmap(pd
+ i
);
1205 page_unlock(&pd
[i
]);
1210 for (i
= 0; i
< V_L2_SIZE
; ++i
) {
1211 page_flush_tb_1(level
- 1, pp
+ i
);
1216 static void page_flush_tb(void)
1218 int i
, l1_sz
= v_l1_size
;
1220 for (i
= 0; i
< l1_sz
; i
++) {
1221 page_flush_tb_1(v_l2_levels
, l1_map
+ i
);
1225 static gboolean
tb_host_size_iter(gpointer key
, gpointer value
, gpointer data
)
1227 const TranslationBlock
*tb
= value
;
1228 size_t *size
= data
;
1230 *size
+= tb
->tc
.size
;
1234 /* flush all the translation blocks */
1235 static void do_tb_flush(CPUState
*cpu
, run_on_cpu_data tb_flush_count
)
1238 /* If it is already been done on request of another CPU,
1241 if (tb_ctx
.tb_flush_count
!= tb_flush_count
.host_int
) {
1245 if (DEBUG_TB_FLUSH_GATE
) {
1246 size_t nb_tbs
= tcg_nb_tbs();
1247 size_t host_size
= 0;
1249 tcg_tb_foreach(tb_host_size_iter
, &host_size
);
1250 printf("qemu: flush code_size=%zu nb_tbs=%zu avg_tb_size=%zu\n",
1251 tcg_code_size(), nb_tbs
, nb_tbs
> 0 ? host_size
/ nb_tbs
: 0);
1255 cpu_tb_jmp_cache_clear(cpu
);
1258 qht_reset_size(&tb_ctx
.htable
, CODE_GEN_HTABLE_SIZE
);
1261 tcg_region_reset_all();
1262 /* XXX: flush processor icache at this point if cache flush is
1264 atomic_mb_set(&tb_ctx
.tb_flush_count
, tb_ctx
.tb_flush_count
+ 1);
1270 void tb_flush(CPUState
*cpu
)
1272 if (tcg_enabled()) {
1273 unsigned tb_flush_count
= atomic_mb_read(&tb_ctx
.tb_flush_count
);
1274 async_safe_run_on_cpu(cpu
, do_tb_flush
,
1275 RUN_ON_CPU_HOST_INT(tb_flush_count
));
1280 * Formerly ifdef DEBUG_TB_CHECK. These debug functions are user-mode-only,
1281 * so in order to prevent bit rot we compile them unconditionally in user-mode,
1282 * and let the optimizer get rid of them by wrapping their user-only callers
1283 * with if (DEBUG_TB_CHECK_GATE).
1285 #ifdef CONFIG_USER_ONLY
1288 do_tb_invalidate_check(struct qht
*ht
, void *p
, uint32_t hash
, void *userp
)
1290 TranslationBlock
*tb
= p
;
1291 target_ulong addr
= *(target_ulong
*)userp
;
1293 if (!(addr
+ TARGET_PAGE_SIZE
<= tb
->pc
|| addr
>= tb
->pc
+ tb
->size
)) {
1294 printf("ERROR invalidate: address=" TARGET_FMT_lx
1295 " PC=%08lx size=%04x\n", addr
, (long)tb
->pc
, tb
->size
);
1299 /* verify that all the pages have correct rights for code
1301 * Called with mmap_lock held.
1303 static void tb_invalidate_check(target_ulong address
)
1305 address
&= TARGET_PAGE_MASK
;
1306 qht_iter(&tb_ctx
.htable
, do_tb_invalidate_check
, &address
);
1310 do_tb_page_check(struct qht
*ht
, void *p
, uint32_t hash
, void *userp
)
1312 TranslationBlock
*tb
= p
;
1315 flags1
= page_get_flags(tb
->pc
);
1316 flags2
= page_get_flags(tb
->pc
+ tb
->size
- 1);
1317 if ((flags1
& PAGE_WRITE
) || (flags2
& PAGE_WRITE
)) {
1318 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
1319 (long)tb
->pc
, tb
->size
, flags1
, flags2
);
1323 /* verify that all the pages have correct rights for code */
1324 static void tb_page_check(void)
1326 qht_iter(&tb_ctx
.htable
, do_tb_page_check
, NULL
);
1329 #endif /* CONFIG_USER_ONLY */
1332 * user-mode: call with mmap_lock held
1333 * !user-mode: call with @pd->lock held
1335 static inline void tb_page_remove(PageDesc
*pd
, TranslationBlock
*tb
)
1337 TranslationBlock
*tb1
;
1341 assert_page_locked(pd
);
1342 pprev
= &pd
->first_tb
;
1343 PAGE_FOR_EACH_TB(pd
, tb1
, n1
) {
1345 *pprev
= tb1
->page_next
[n1
];
1348 pprev
= &tb1
->page_next
[n1
];
1350 g_assert_not_reached();
1353 /* remove @orig from its @n_orig-th jump list */
1354 static inline void tb_remove_from_jmp_list(TranslationBlock
*orig
, int n_orig
)
1356 uintptr_t ptr
, ptr_locked
;
1357 TranslationBlock
*dest
;
1358 TranslationBlock
*tb
;
1362 /* mark the LSB of jmp_dest[] so that no further jumps can be inserted */
1363 ptr
= atomic_or_fetch(&orig
->jmp_dest
[n_orig
], 1);
1364 dest
= (TranslationBlock
*)(ptr
& ~1);
1369 qemu_spin_lock(&dest
->jmp_lock
);
1371 * While acquiring the lock, the jump might have been removed if the
1372 * destination TB was invalidated; check again.
1374 ptr_locked
= atomic_read(&orig
->jmp_dest
[n_orig
]);
1375 if (ptr_locked
!= ptr
) {
1376 qemu_spin_unlock(&dest
->jmp_lock
);
1378 * The only possibility is that the jump was unlinked via
1379 * tb_jump_unlink(dest). Seeing here another destination would be a bug,
1380 * because we set the LSB above.
1382 g_assert(ptr_locked
== 1 && dest
->cflags
& CF_INVALID
);
1386 * We first acquired the lock, and since the destination pointer matches,
1387 * we know for sure that @orig is in the jmp list.
1389 pprev
= &dest
->jmp_list_head
;
1390 TB_FOR_EACH_JMP(dest
, tb
, n
) {
1391 if (tb
== orig
&& n
== n_orig
) {
1392 *pprev
= tb
->jmp_list_next
[n
];
1393 /* no need to set orig->jmp_dest[n]; setting the LSB was enough */
1394 qemu_spin_unlock(&dest
->jmp_lock
);
1397 pprev
= &tb
->jmp_list_next
[n
];
1399 g_assert_not_reached();
1402 /* reset the jump entry 'n' of a TB so that it is not chained to
1404 static inline void tb_reset_jump(TranslationBlock
*tb
, int n
)
1406 uintptr_t addr
= (uintptr_t)(tb
->tc
.ptr
+ tb
->jmp_reset_offset
[n
]);
1407 tb_set_jmp_target(tb
, n
, addr
);
1410 /* remove any jumps to the TB */
1411 static inline void tb_jmp_unlink(TranslationBlock
*dest
)
1413 TranslationBlock
*tb
;
1416 qemu_spin_lock(&dest
->jmp_lock
);
1418 TB_FOR_EACH_JMP(dest
, tb
, n
) {
1419 tb_reset_jump(tb
, n
);
1420 atomic_and(&tb
->jmp_dest
[n
], (uintptr_t)NULL
| 1);
1421 /* No need to clear the list entry; setting the dest ptr is enough */
1423 dest
->jmp_list_head
= (uintptr_t)NULL
;
1425 qemu_spin_unlock(&dest
->jmp_lock
);
1429 * In user-mode, call with mmap_lock held.
1430 * In !user-mode, if @rm_from_page_list is set, call with the TB's pages'
1433 static void do_tb_phys_invalidate(TranslationBlock
*tb
, bool rm_from_page_list
)
1438 tb_page_addr_t phys_pc
;
1440 assert_memory_lock();
1442 /* make sure no further incoming jumps will be chained to this TB */
1443 qemu_spin_lock(&tb
->jmp_lock
);
1444 atomic_set(&tb
->cflags
, tb
->cflags
| CF_INVALID
);
1445 qemu_spin_unlock(&tb
->jmp_lock
);
1447 /* remove the TB from the hash list */
1448 phys_pc
= tb
->page_addr
[0] + (tb
->pc
& ~TARGET_PAGE_MASK
);
1449 h
= tb_hash_func(phys_pc
, tb
->pc
, tb
->flags
, tb_cflags(tb
) & CF_HASH_MASK
,
1450 tb
->trace_vcpu_dstate
);
1451 if (!(tb
->cflags
& CF_NOCACHE
) &&
1452 !qht_remove(&tb_ctx
.htable
, tb
, h
)) {
1456 /* remove the TB from the page list */
1457 if (rm_from_page_list
) {
1458 p
= page_find(tb
->page_addr
[0] >> TARGET_PAGE_BITS
);
1459 tb_page_remove(p
, tb
);
1460 invalidate_page_bitmap(p
);
1461 if (tb
->page_addr
[1] != -1) {
1462 p
= page_find(tb
->page_addr
[1] >> TARGET_PAGE_BITS
);
1463 tb_page_remove(p
, tb
);
1464 invalidate_page_bitmap(p
);
1468 /* remove the TB from the hash list */
1469 h
= tb_jmp_cache_hash_func(tb
->pc
);
1471 if (atomic_read(&cpu
->tb_jmp_cache
[h
]) == tb
) {
1472 atomic_set(&cpu
->tb_jmp_cache
[h
], NULL
);
1476 /* suppress this TB from the two jump lists */
1477 tb_remove_from_jmp_list(tb
, 0);
1478 tb_remove_from_jmp_list(tb
, 1);
1480 /* suppress any remaining jumps to this TB */
1483 atomic_set(&tcg_ctx
->tb_phys_invalidate_count
,
1484 tcg_ctx
->tb_phys_invalidate_count
+ 1);
1487 static void tb_phys_invalidate__locked(TranslationBlock
*tb
)
1489 do_tb_phys_invalidate(tb
, true);
1492 /* invalidate one TB
1494 * Called with mmap_lock held in user-mode.
1496 void tb_phys_invalidate(TranslationBlock
*tb
, tb_page_addr_t page_addr
)
1498 if (page_addr
== -1) {
1500 do_tb_phys_invalidate(tb
, true);
1503 do_tb_phys_invalidate(tb
, false);
1507 #ifdef CONFIG_SOFTMMU
1508 /* call with @p->lock held */
1509 static void build_page_bitmap(PageDesc
*p
)
1511 int n
, tb_start
, tb_end
;
1512 TranslationBlock
*tb
;
1514 assert_page_locked(p
);
1515 p
->code_bitmap
= bitmap_new(TARGET_PAGE_SIZE
);
1517 PAGE_FOR_EACH_TB(p
, tb
, n
) {
1518 /* NOTE: this is subtle as a TB may span two physical pages */
1520 /* NOTE: tb_end may be after the end of the page, but
1521 it is not a problem */
1522 tb_start
= tb
->pc
& ~TARGET_PAGE_MASK
;
1523 tb_end
= tb_start
+ tb
->size
;
1524 if (tb_end
> TARGET_PAGE_SIZE
) {
1525 tb_end
= TARGET_PAGE_SIZE
;
1529 tb_end
= ((tb
->pc
+ tb
->size
) & ~TARGET_PAGE_MASK
);
1531 bitmap_set(p
->code_bitmap
, tb_start
, tb_end
- tb_start
);
1536 /* add the tb in the target page and protect it if necessary
1538 * Called with mmap_lock held for user-mode emulation.
1539 * Called with @p->lock held in !user-mode.
1541 static inline void tb_page_add(PageDesc
*p
, TranslationBlock
*tb
,
1542 unsigned int n
, tb_page_addr_t page_addr
)
1544 #ifndef CONFIG_USER_ONLY
1545 bool page_already_protected
;
1548 assert_page_locked(p
);
1550 tb
->page_addr
[n
] = page_addr
;
1551 tb
->page_next
[n
] = p
->first_tb
;
1552 #ifndef CONFIG_USER_ONLY
1553 page_already_protected
= p
->first_tb
!= (uintptr_t)NULL
;
1555 p
->first_tb
= (uintptr_t)tb
| n
;
1556 invalidate_page_bitmap(p
);
1558 #if defined(CONFIG_USER_ONLY)
1559 if (p
->flags
& PAGE_WRITE
) {
1564 /* force the host page as non writable (writes will have a
1565 page fault + mprotect overhead) */
1566 page_addr
&= qemu_host_page_mask
;
1568 for (addr
= page_addr
; addr
< page_addr
+ qemu_host_page_size
;
1569 addr
+= TARGET_PAGE_SIZE
) {
1571 p2
= page_find(addr
>> TARGET_PAGE_BITS
);
1576 p2
->flags
&= ~PAGE_WRITE
;
1578 mprotect(g2h(page_addr
), qemu_host_page_size
,
1579 (prot
& PAGE_BITS
) & ~PAGE_WRITE
);
1580 if (DEBUG_TB_INVALIDATE_GATE
) {
1581 printf("protecting code page: 0x" TB_PAGE_ADDR_FMT
"\n", page_addr
);
1585 /* if some code is already present, then the pages are already
1586 protected. So we handle the case where only the first TB is
1587 allocated in a physical page */
1588 if (!page_already_protected
) {
1589 tlb_protect_code(page_addr
);
1594 /* add a new TB and link it to the physical page tables. phys_page2 is
1595 * (-1) to indicate that only one page contains the TB.
1597 * Called with mmap_lock held for user-mode emulation.
1599 * Returns a pointer @tb, or a pointer to an existing TB that matches @tb.
1600 * Note that in !user-mode, another thread might have already added a TB
1601 * for the same block of guest code that @tb corresponds to. In that case,
1602 * the caller should discard the original @tb, and use instead the returned TB.
1604 static TranslationBlock
*
1605 tb_link_page(TranslationBlock
*tb
, tb_page_addr_t phys_pc
,
1606 tb_page_addr_t phys_page2
)
1609 PageDesc
*p2
= NULL
;
1611 assert_memory_lock();
 * Add the TB to the page list, acquiring first the pages' locks.
1615 * We keep the locks held until after inserting the TB in the hash table,
1616 * so that if the insertion fails we know for sure that the TBs are still
1617 * in the page descriptors.
1618 * Note that inserting into the hash table first isn't an option, since
1619 * we can only insert TBs that are fully initialized.
1621 page_lock_pair(&p
, phys_pc
, &p2
, phys_page2
, 1);
1622 tb_page_add(p
, tb
, 0, phys_pc
& TARGET_PAGE_MASK
);
1624 tb_page_add(p2
, tb
, 1, phys_page2
);
1626 tb
->page_addr
[1] = -1;
1629 if (!(tb
->cflags
& CF_NOCACHE
)) {
1630 void *existing_tb
= NULL
;
1633 /* add in the hash table */
1634 h
= tb_hash_func(phys_pc
, tb
->pc
, tb
->flags
, tb
->cflags
& CF_HASH_MASK
,
1635 tb
->trace_vcpu_dstate
);
1636 qht_insert(&tb_ctx
.htable
, tb
, h
, &existing_tb
);
1638 /* remove TB from the page(s) if we couldn't insert it */
1639 if (unlikely(existing_tb
)) {
1640 tb_page_remove(p
, tb
);
1641 invalidate_page_bitmap(p
);
1643 tb_page_remove(p2
, tb
);
1644 invalidate_page_bitmap(p2
);
1650 if (p2
&& p2
!= p
) {
1655 #ifdef CONFIG_USER_ONLY
1656 if (DEBUG_TB_CHECK_GATE
) {
1663 /* Called with mmap_lock held for user mode emulation. */
1664 TranslationBlock
*tb_gen_code(CPUState
*cpu
,
1665 target_ulong pc
, target_ulong cs_base
,
1666 uint32_t flags
, int cflags
)
1668 CPUArchState
*env
= cpu
->env_ptr
;
1669 TranslationBlock
*tb
, *existing_tb
;
1670 tb_page_addr_t phys_pc
, phys_page2
;
1671 target_ulong virt_page2
;
1672 tcg_insn_unit
*gen_code_buf
;
1673 int gen_code_size
, search_size
;
1674 #ifdef CONFIG_PROFILER
1675 TCGProfile
*prof
= &tcg_ctx
->prof
;
1678 assert_memory_lock();
1680 phys_pc
= get_page_addr_code(env
, pc
);
1684 if (unlikely(!tb
)) {
1685 /* flush must be done */
1688 /* Make the execution loop process the flush as soon as possible. */
1689 cpu
->exception_index
= EXCP_INTERRUPT
;
1693 gen_code_buf
= tcg_ctx
->code_gen_ptr
;
1694 tb
->tc
.ptr
= gen_code_buf
;
1696 tb
->cs_base
= cs_base
;
1698 tb
->cflags
= cflags
;
1699 tb
->trace_vcpu_dstate
= *cpu
->trace_dstate
;
1700 tcg_ctx
->tb_cflags
= cflags
;
1702 #ifdef CONFIG_PROFILER
1703 /* includes aborted translations because of exceptions */
1704 atomic_set(&prof
->tb_count1
, prof
->tb_count1
+ 1);
1705 ti
= profile_getclock();
1708 tcg_func_start(tcg_ctx
);
1710 tcg_ctx
->cpu
= ENV_GET_CPU(env
);
1711 gen_intermediate_code(cpu
, tb
);
1712 tcg_ctx
->cpu
= NULL
;
1714 trace_translate_block(tb
, tb
->pc
, tb
->tc
.ptr
);
1716 /* generate machine code */
1717 tb
->jmp_reset_offset
[0] = TB_JMP_RESET_OFFSET_INVALID
;
1718 tb
->jmp_reset_offset
[1] = TB_JMP_RESET_OFFSET_INVALID
;
1719 tcg_ctx
->tb_jmp_reset_offset
= tb
->jmp_reset_offset
;
1720 if (TCG_TARGET_HAS_direct_jump
) {
1721 tcg_ctx
->tb_jmp_insn_offset
= tb
->jmp_target_arg
;
1722 tcg_ctx
->tb_jmp_target_addr
= NULL
;
1724 tcg_ctx
->tb_jmp_insn_offset
= NULL
;
1725 tcg_ctx
->tb_jmp_target_addr
= tb
->jmp_target_arg
;
1728 #ifdef CONFIG_PROFILER
1729 atomic_set(&prof
->tb_count
, prof
->tb_count
+ 1);
1730 atomic_set(&prof
->interm_time
, prof
->interm_time
+ profile_getclock() - ti
);
1731 ti
= profile_getclock();
1734 /* ??? Overflow could be handled better here. In particular, we
1735 don't need to re-do gen_intermediate_code, nor should we re-do
1736 the tcg optimization currently hidden inside tcg_gen_code. All
1737 that should be required is to flush the TBs, allocate a new TB,
1738 re-initialize it per above, and re-do the actual code generation. */
1739 gen_code_size
= tcg_gen_code(tcg_ctx
, tb
);
1740 if (unlikely(gen_code_size
< 0)) {
1741 goto buffer_overflow
;
1743 search_size
= encode_search(tb
, (void *)gen_code_buf
+ gen_code_size
);
1744 if (unlikely(search_size
< 0)) {
1745 goto buffer_overflow
;
1747 tb
->tc
.size
= gen_code_size
;
1749 #ifdef CONFIG_PROFILER
1750 atomic_set(&prof
->code_time
, prof
->code_time
+ profile_getclock() - ti
);
1751 atomic_set(&prof
->code_in_len
, prof
->code_in_len
+ tb
->size
);
1752 atomic_set(&prof
->code_out_len
, prof
->code_out_len
+ gen_code_size
);
1753 atomic_set(&prof
->search_out_len
, prof
->search_out_len
+ search_size
);
1757 if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM
) &&
1758 qemu_log_in_addr_range(tb
->pc
)) {
1760 qemu_log("OUT: [size=%d]\n", gen_code_size
);
1761 if (tcg_ctx
->data_gen_ptr
) {
1762 size_t code_size
= tcg_ctx
->data_gen_ptr
- tb
->tc
.ptr
;
1763 size_t data_size
= gen_code_size
- code_size
;
1766 log_disas(tb
->tc
.ptr
, code_size
);
1768 for (i
= 0; i
< data_size
; i
+= sizeof(tcg_target_ulong
)) {
1769 if (sizeof(tcg_target_ulong
) == 8) {
1770 qemu_log("0x%08" PRIxPTR
": .quad 0x%016" PRIx64
"\n",
1771 (uintptr_t)tcg_ctx
->data_gen_ptr
+ i
,
1772 *(uint64_t *)(tcg_ctx
->data_gen_ptr
+ i
));
1774 qemu_log("0x%08" PRIxPTR
": .long 0x%08x\n",
1775 (uintptr_t)tcg_ctx
->data_gen_ptr
+ i
,
1776 *(uint32_t *)(tcg_ctx
->data_gen_ptr
+ i
));
1780 log_disas(tb
->tc
.ptr
, gen_code_size
);
1788 atomic_set(&tcg_ctx
->code_gen_ptr
, (void *)
1789 ROUND_UP((uintptr_t)gen_code_buf
+ gen_code_size
+ search_size
,
1792 #if defined(CONFIG_USER_ONLY) && defined(TARGET_X86_64)
1793 /* if we are doing vsyscall don't link the page as it lies in high memory
1794 and tb_alloc_page will abort due to page_l1_map returning NULL */
1795 if (unlikely(phys_pc
>= TARGET_VSYSCALL_START
1796 && phys_pc
< TARGET_VSYSCALL_END
))
1800 /* init jump list */
1801 qemu_spin_init(&tb
->jmp_lock
);
1802 tb
->jmp_list_head
= (uintptr_t)NULL
;
1803 tb
->jmp_list_next
[0] = (uintptr_t)NULL
;
1804 tb
->jmp_list_next
[1] = (uintptr_t)NULL
;
1805 tb
->jmp_dest
[0] = (uintptr_t)NULL
;
1806 tb
->jmp_dest
[1] = (uintptr_t)NULL
;
1808 /* init original jump addresses which have been set during tcg_gen_code() */
1809 if (tb
->jmp_reset_offset
[0] != TB_JMP_RESET_OFFSET_INVALID
) {
1810 tb_reset_jump(tb
, 0);
1812 if (tb
->jmp_reset_offset
[1] != TB_JMP_RESET_OFFSET_INVALID
) {
1813 tb_reset_jump(tb
, 1);
1816 /* check next page if needed */
1817 virt_page2
= (pc
+ tb
->size
- 1) & TARGET_PAGE_MASK
;
1819 if ((pc
& TARGET_PAGE_MASK
) != virt_page2
) {
1820 phys_page2
= get_page_addr_code(env
, virt_page2
);
1823 * No explicit memory barrier is required -- tb_link_page() makes the
1824 * TB visible in a consistent state.
1826 existing_tb
= tb_link_page(tb
, phys_pc
, phys_page2
);
1827 /* if the TB already exists, discard what we just translated */
1828 if (unlikely(existing_tb
!= tb
)) {
1829 uintptr_t orig_aligned
= (uintptr_t)gen_code_buf
;
1831 orig_aligned
-= ROUND_UP(sizeof(*tb
), qemu_icache_linesize
);
1832 atomic_set(&tcg_ctx
->code_gen_ptr
, (void *)orig_aligned
);
1840 * @p must be non-NULL.
1841 * user-mode: call with mmap_lock held.
1842 * !user-mode: call with all @pages locked.
1845 tb_invalidate_phys_page_range__locked(struct page_collection
*pages
,
1846 PageDesc
*p
, tb_page_addr_t start
,
1848 int is_cpu_write_access
)
1850 TranslationBlock
*tb
;
1851 tb_page_addr_t tb_start
, tb_end
;
1853 #ifdef TARGET_HAS_PRECISE_SMC
1854 CPUState
*cpu
= current_cpu
;
1855 CPUArchState
*env
= NULL
;
1856 int current_tb_not_found
= is_cpu_write_access
;
1857 TranslationBlock
*current_tb
= NULL
;
1858 int current_tb_modified
= 0;
1859 target_ulong current_pc
= 0;
1860 target_ulong current_cs_base
= 0;
1861 uint32_t current_flags
= 0;
1862 #endif /* TARGET_HAS_PRECISE_SMC */
1864 assert_page_locked(p
);
1866 #if defined(TARGET_HAS_PRECISE_SMC)
1872 /* we remove all the TBs in the range [start, end[ */
1873 /* XXX: see if in some cases it could be faster to invalidate all
1875 PAGE_FOR_EACH_TB(p
, tb
, n
) {
1876 assert_page_locked(p
);
1877 /* NOTE: this is subtle as a TB may span two physical pages */
1879 /* NOTE: tb_end may be after the end of the page, but
1880 it is not a problem */
1881 tb_start
= tb
->page_addr
[0] + (tb
->pc
& ~TARGET_PAGE_MASK
);
1882 tb_end
= tb_start
+ tb
->size
;
1884 tb_start
= tb
->page_addr
[1];
1885 tb_end
= tb_start
+ ((tb
->pc
+ tb
->size
) & ~TARGET_PAGE_MASK
);
1887 if (!(tb_end
<= start
|| tb_start
>= end
)) {
1888 #ifdef TARGET_HAS_PRECISE_SMC
1889 if (current_tb_not_found
) {
1890 current_tb_not_found
= 0;
1892 if (cpu
->mem_io_pc
) {
1893 /* now we have a real cpu fault */
1894 current_tb
= tcg_tb_lookup(cpu
->mem_io_pc
);
1897 if (current_tb
== tb
&&
1898 (tb_cflags(current_tb
) & CF_COUNT_MASK
) != 1) {
1899 /* If we are modifying the current TB, we must stop
1900 its execution. We could be more precise by checking
1901 that the modification is after the current PC, but it
1902 would require a specialized function to partially
1903 restore the CPU state */
1905 current_tb_modified
= 1;
1906 cpu_restore_state_from_tb(cpu
, current_tb
,
1907 cpu
->mem_io_pc
, true);
1908 cpu_get_tb_cpu_state(env
, ¤t_pc
, ¤t_cs_base
,
1911 #endif /* TARGET_HAS_PRECISE_SMC */
1912 tb_phys_invalidate__locked(tb
);
1915 #if !defined(CONFIG_USER_ONLY)
1916 /* if no code remaining, no need to continue to use slow writes */
1918 invalidate_page_bitmap(p
);
1919 tlb_unprotect_code(start
);
1922 #ifdef TARGET_HAS_PRECISE_SMC
1923 if (current_tb_modified
) {
1924 page_collection_unlock(pages
);
1925 /* Force execution of one insn next time. */
1926 cpu
->cflags_next_tb
= 1 | curr_cflags();
1928 cpu_loop_exit_noexc(cpu
);
1934 * Invalidate all TBs which intersect with the target physical address range
1935 * [start;end[. NOTE: start and end must refer to the *same* physical page.
1936 * 'is_cpu_write_access' should be true if called from a real cpu write
1937 * access: the virtual CPU will exit the current TB if code is modified inside
1940 * Called with mmap_lock held for user-mode emulation
1942 void tb_invalidate_phys_page_range(tb_page_addr_t start
, tb_page_addr_t end
,
1943 int is_cpu_write_access
)
1945 struct page_collection
*pages
;
1948 assert_memory_lock();
1950 p
= page_find(start
>> TARGET_PAGE_BITS
);
1954 pages
= page_collection_lock(start
, end
);
1955 tb_invalidate_phys_page_range__locked(pages
, p
, start
, end
,
1956 is_cpu_write_access
);
1957 page_collection_unlock(pages
);
1961 * Invalidate all TBs which intersect with the target physical address range
1962 * [start;end[. NOTE: start and end may refer to *different* physical pages.
1963 * 'is_cpu_write_access' should be true if called from a real cpu write
1964 * access: the virtual CPU will exit the current TB if code is modified inside
1967 * Called with mmap_lock held for user-mode emulation.
1969 #ifdef CONFIG_SOFTMMU
1970 void tb_invalidate_phys_range(ram_addr_t start
, ram_addr_t end
)
1972 void tb_invalidate_phys_range(target_ulong start
, target_ulong end
)
1975 struct page_collection
*pages
;
1976 tb_page_addr_t next
;
1978 assert_memory_lock();
1980 pages
= page_collection_lock(start
, end
);
1981 for (next
= (start
& TARGET_PAGE_MASK
) + TARGET_PAGE_SIZE
;
1983 start
= next
, next
+= TARGET_PAGE_SIZE
) {
1984 PageDesc
*pd
= page_find(start
>> TARGET_PAGE_BITS
);
1985 tb_page_addr_t bound
= MIN(next
, end
);
1990 tb_invalidate_phys_page_range__locked(pages
, pd
, start
, bound
, 0);
1992 page_collection_unlock(pages
);
1995 #ifdef CONFIG_SOFTMMU
1996 /* len must be <= 8 and start must be a multiple of len.
1997 * Called via softmmu_template.h when code areas are written to with
1998 * iothread mutex not held.
2000 * Call with all @pages in the range [@start, @start + len[ locked.
2002 void tb_invalidate_phys_page_fast(struct page_collection
*pages
,
2003 tb_page_addr_t start
, int len
)
2009 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
2010 cpu_single_env
->mem_io_vaddr
, len
,
2011 cpu_single_env
->eip
,
2012 cpu_single_env
->eip
+
2013 (intptr_t)cpu_single_env
->segs
[R_CS
].base
);
2016 assert_memory_lock();
2018 p
= page_find(start
>> TARGET_PAGE_BITS
);
2023 assert_page_locked(p
);
2024 if (!p
->code_bitmap
&&
2025 ++p
->code_write_count
>= SMC_BITMAP_USE_THRESHOLD
) {
2026 build_page_bitmap(p
);
2028 if (p
->code_bitmap
) {
2032 nr
= start
& ~TARGET_PAGE_MASK
;
2033 b
= p
->code_bitmap
[BIT_WORD(nr
)] >> (nr
& (BITS_PER_LONG
- 1));
2034 if (b
& ((1 << len
) - 1)) {
2039 tb_invalidate_phys_page_range__locked(pages
, p
, start
, start
+ len
, 1);
2043 /* Called with mmap_lock held. If pc is not 0 then it indicates the
2044 * host PC of the faulting store instruction that caused this invalidate.
2045 * Returns true if the caller needs to abort execution of the current
2046 * TB (because it was modified by this store and the guest CPU has
2047 * precise-SMC semantics).
2049 static bool tb_invalidate_phys_page(tb_page_addr_t addr
, uintptr_t pc
)
2051 TranslationBlock
*tb
;
2054 #ifdef TARGET_HAS_PRECISE_SMC
2055 TranslationBlock
*current_tb
= NULL
;
2056 CPUState
*cpu
= current_cpu
;
2057 CPUArchState
*env
= NULL
;
2058 int current_tb_modified
= 0;
2059 target_ulong current_pc
= 0;
2060 target_ulong current_cs_base
= 0;
2061 uint32_t current_flags
= 0;
2064 assert_memory_lock();
2066 addr
&= TARGET_PAGE_MASK
;
2067 p
= page_find(addr
>> TARGET_PAGE_BITS
);
2072 #ifdef TARGET_HAS_PRECISE_SMC
2073 if (p
->first_tb
&& pc
!= 0) {
2074 current_tb
= tcg_tb_lookup(pc
);
2080 assert_page_locked(p
);
2081 PAGE_FOR_EACH_TB(p
, tb
, n
) {
2082 #ifdef TARGET_HAS_PRECISE_SMC
2083 if (current_tb
== tb
&&
2084 (tb_cflags(current_tb
) & CF_COUNT_MASK
) != 1) {
2085 /* If we are modifying the current TB, we must stop
2086 its execution. We could be more precise by checking
2087 that the modification is after the current PC, but it
2088 would require a specialized function to partially
2089 restore the CPU state */
2091 current_tb_modified
= 1;
2092 cpu_restore_state_from_tb(cpu
, current_tb
, pc
, true);
2093 cpu_get_tb_cpu_state(env
, ¤t_pc
, ¤t_cs_base
,
2096 #endif /* TARGET_HAS_PRECISE_SMC */
2097 tb_phys_invalidate(tb
, addr
);
2099 p
->first_tb
= (uintptr_t)NULL
;
2100 #ifdef TARGET_HAS_PRECISE_SMC
2101 if (current_tb_modified
) {
2102 /* Force execution of one insn next time. */
2103 cpu
->cflags_next_tb
= 1 | curr_cflags();
2112 /* user-mode: call with mmap_lock held */
2113 void tb_check_watchpoint(CPUState
*cpu
)
2115 TranslationBlock
*tb
;
2117 assert_memory_lock();
2119 tb
= tcg_tb_lookup(cpu
->mem_io_pc
);
2121 /* We can use retranslation to find the PC. */
2122 cpu_restore_state_from_tb(cpu
, tb
, cpu
->mem_io_pc
, true);
2123 tb_phys_invalidate(tb
, -1);
2125 /* The exception probably happened in a helper. The CPU state should
2126 have been saved before calling it. Fetch the PC from there. */
2127 CPUArchState
*env
= cpu
->env_ptr
;
2128 target_ulong pc
, cs_base
;
2129 tb_page_addr_t addr
;
2132 cpu_get_tb_cpu_state(env
, &pc
, &cs_base
, &flags
);
2133 addr
= get_page_addr_code(env
, pc
);
2134 tb_invalidate_phys_range(addr
, addr
+ 1);
2138 #ifndef CONFIG_USER_ONLY
2139 /* in deterministic execution mode, instructions doing device I/Os
2140 * must be at the end of the TB.
2142 * Called by softmmu_template.h, with iothread mutex not held.
2144 void cpu_io_recompile(CPUState
*cpu
, uintptr_t retaddr
)
2146 #if defined(TARGET_MIPS) || defined(TARGET_SH4)
2147 CPUArchState
*env
= cpu
->env_ptr
;
2149 TranslationBlock
*tb
;
2152 tb
= tcg_tb_lookup(retaddr
);
2154 cpu_abort(cpu
, "cpu_io_recompile: could not find TB for pc=%p",
2157 cpu_restore_state_from_tb(cpu
, tb
, retaddr
, true);
2159 /* On MIPS and SH, delay slot instructions can only be restarted if
2160 they were already the first instruction in the TB. If this is not
2161 the first instruction in a TB then re-execute the preceding
2164 #if defined(TARGET_MIPS)
2165 if ((env
->hflags
& MIPS_HFLAG_BMASK
) != 0
2166 && env
->active_tc
.PC
!= tb
->pc
) {
2167 env
->active_tc
.PC
-= (env
->hflags
& MIPS_HFLAG_B16
? 2 : 4);
2168 cpu
->icount_decr
.u16
.low
++;
2169 env
->hflags
&= ~MIPS_HFLAG_BMASK
;
2172 #elif defined(TARGET_SH4)
2173 if ((env
->flags
& ((DELAY_SLOT
| DELAY_SLOT_CONDITIONAL
))) != 0
2174 && env
->pc
!= tb
->pc
) {
2176 cpu
->icount_decr
.u16
.low
++;
2177 env
->flags
&= ~(DELAY_SLOT
| DELAY_SLOT_CONDITIONAL
);
2182 /* Generate a new TB executing the I/O insn. */
2183 cpu
->cflags_next_tb
= curr_cflags() | CF_LAST_IO
| n
;
2185 if (tb_cflags(tb
) & CF_NOCACHE
) {
2187 /* Invalidate original TB if this TB was generated in
2188 * cpu_exec_nocache() */
2189 tb_phys_invalidate(tb
->orig_tb
, -1);
2194 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
2195 * the first in the TB) then we end up generating a whole new TB and
2196 * repeating the fault, which is horribly inefficient.
2197 * Better would be to execute just this insn uncached, or generate a
2200 cpu_loop_exit_noexc(cpu
);
2203 static void tb_jmp_cache_clear_page(CPUState
*cpu
, target_ulong page_addr
)
2205 unsigned int i
, i0
= tb_jmp_cache_hash_page(page_addr
);
2207 for (i
= 0; i
< TB_JMP_PAGE_SIZE
; i
++) {
2208 atomic_set(&cpu
->tb_jmp_cache
[i0
+ i
], NULL
);
2212 void tb_flush_jmp_cache(CPUState
*cpu
, target_ulong addr
)
2214 /* Discard jump cache entries for any tb which might potentially
2215 overlap the flushed page. */
2216 tb_jmp_cache_clear_page(cpu
, addr
- TARGET_PAGE_SIZE
);
2217 tb_jmp_cache_clear_page(cpu
, addr
);
2220 static void print_qht_statistics(FILE *f
, fprintf_function cpu_fprintf
,
2221 struct qht_stats hst
)
2223 uint32_t hgram_opts
;
2227 if (!hst
.head_buckets
) {
2230 cpu_fprintf(f
, "TB hash buckets %zu/%zu (%0.2f%% head buckets used)\n",
2231 hst
.used_head_buckets
, hst
.head_buckets
,
2232 (double)hst
.used_head_buckets
/ hst
.head_buckets
* 100);
2234 hgram_opts
= QDIST_PR_BORDER
| QDIST_PR_LABELS
;
2235 hgram_opts
|= QDIST_PR_100X
| QDIST_PR_PERCENT
;
2236 if (qdist_xmax(&hst
.occupancy
) - qdist_xmin(&hst
.occupancy
) == 1) {
2237 hgram_opts
|= QDIST_PR_NODECIMAL
;
2239 hgram
= qdist_pr(&hst
.occupancy
, 10, hgram_opts
);
2240 cpu_fprintf(f
, "TB hash occupancy %0.2f%% avg chain occ. Histogram: %s\n",
2241 qdist_avg(&hst
.occupancy
) * 100, hgram
);
2244 hgram_opts
= QDIST_PR_BORDER
| QDIST_PR_LABELS
;
2245 hgram_bins
= qdist_xmax(&hst
.chain
) - qdist_xmin(&hst
.chain
);
2246 if (hgram_bins
> 10) {
2250 hgram_opts
|= QDIST_PR_NODECIMAL
| QDIST_PR_NOBINRANGE
;
2252 hgram
= qdist_pr(&hst
.chain
, hgram_bins
, hgram_opts
);
2253 cpu_fprintf(f
, "TB hash avg chain %0.3f buckets. Histogram: %s\n",
2254 qdist_avg(&hst
.chain
), hgram
);

struct tb_tree_stats {
    size_t nb_tbs;
    size_t host_size;
    size_t target_size;
    size_t max_target_size;
    size_t direct_jmp_count;
    size_t direct_jmp2_count;
    size_t cross_page;
};

static gboolean tb_tree_stats_iter(gpointer key, gpointer value, gpointer data)
{
    const TranslationBlock *tb = value;
    struct tb_tree_stats *tst = data;

    tst->nb_tbs++;
    tst->host_size += tb->tc.size;
    tst->target_size += tb->size;
    if (tb->size > tst->max_target_size) {
        tst->max_target_size = tb->size;
    }
    if (tb->page_addr[1] != -1) {
        tst->cross_page++;
    }
    if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
        tst->direct_jmp_count++;
        if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
            tst->direct_jmp2_count++;
        }
    }
    return false;
}
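
/*
 * Added commentary: this iterator is assumed to follow GTraverseFunc
 * conventions, i.e. returning FALSE tells tcg_tb_foreach() to keep
 * visiting the remaining TBs.
 */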

void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
    struct tb_tree_stats tst = {};
    struct qht_stats hst;
    size_t nb_tbs;

    tcg_tb_foreach(tb_tree_stats_iter, &tst);
    nb_tbs = tst.nb_tbs;
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    /*
     * Report total code size including the padding and TB structs;
     * otherwise users might think "-tb-size" is not honoured.
     * For avg host size we use the precise numbers from tb_tree_stats though.
     */
    cpu_fprintf(f, "gen code size       %zu/%zu\n",
                tcg_code_size(), tcg_code_capacity());
    cpu_fprintf(f, "TB count            %zu\n", nb_tbs);
    cpu_fprintf(f, "TB avg target size  %zu max=%zu bytes\n",
                nb_tbs ? tst.target_size / nb_tbs : 0,
                tst.max_target_size);
    cpu_fprintf(f, "TB avg host size    %zu bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? tst.host_size / nb_tbs : 0,
                tst.target_size ? (double)tst.host_size / tst.target_size : 0);
    cpu_fprintf(f, "cross page TB count %zu (%zu%%)\n", tst.cross_page,
            nb_tbs ? (tst.cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %zu (%zu%%) (2 jumps=%zu %zu%%)\n",
                tst.direct_jmp_count,
                nb_tbs ? (tst.direct_jmp_count * 100) / nb_tbs : 0,
                tst.direct_jmp2_count,
                nb_tbs ? (tst.direct_jmp2_count * 100) / nb_tbs : 0);

    qht_statistics_init(&tb_ctx.htable, &hst);
    print_qht_statistics(f, cpu_fprintf, hst);
    qht_statistics_destroy(&hst);

    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %u\n",
                atomic_read(&tb_ctx.tb_flush_count));
    cpu_fprintf(f, "TB invalidate count %zu\n", tcg_tb_phys_invalidate_count());
    cpu_fprintf(f, "TLB flush count     %zu\n", tlb_flush_count());
    tcg_dump_info(f, cpu_fprintf);
}

void dump_opcount_info(FILE *f, fprintf_function cpu_fprintf)
{
    tcg_dump_op_count(f, cpu_fprintf);
}
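
/*
 * Added commentary: dump_exec_info() and dump_opcount_info() are assumed to
 * back the "info jit" and "info opcount" monitor commands respectively, which
 * is why they take the monitor's FILE handle and fprintf callback.
 */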

#else /* CONFIG_USER_ONLY */

void cpu_interrupt(CPUState *cpu, int mask)
{
    g_assert(qemu_mutex_iothread_locked());
    cpu->interrupt_request |= mask;
    cpu->icount_decr.u16.high = -1;
}
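
/*
 * Added commentary: writing -1 to the high half of icount_decr makes the
 * 32-bit counter negative, which is assumed to be the cheap "leave the
 * current TB soon" signal checked by generated code and cpu_exec().
 */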

/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */
struct walk_memory_regions_data {
    walk_memory_regions_fn fn;
    void *priv;
    target_ulong start;
    int prot;
};

static int walk_memory_regions_end(struct walk_memory_regions_data *data,
                                   target_ulong end, int new_prot)
{
    if (data->start != -1u) {
        int rc = data->fn(data->priv, data->start, end, data->prot);
        if (rc != 0) {
            return rc;
        }
    }

    data->start = (new_prot ? end : -1u);
    data->prot = new_prot;

    return 0;
}

static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 target_ulong base, int level, void **lp)
{
    target_ulong pa;
    int i, rc;

    if (*lp == NULL) {
        return walk_memory_regions_end(data, base, 0);
    }

    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            if (prot != data->prot) {
                rc = walk_memory_regions_end(data, pa, prot);
                if (rc != 0) {
                    return rc;
                }
            }
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            pa = base | ((target_ulong)i <<
                (TARGET_PAGE_BITS + V_L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
            if (rc != 0) {
                return rc;
            }
        }
    }

    return 0;
}

int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    struct walk_memory_regions_data data;
    uintptr_t i, l1_sz = v_l1_size;

    data.fn = fn;
    data.priv = priv;
    data.start = -1u;
    data.prot = 0;

    for (i = 0; i < l1_sz; i++) {
        target_ulong base = i << (v_l1_shift + TARGET_PAGE_BITS);
        int rc = walk_memory_regions_1(&data, base, v_l2_levels, l1_map + i);
        if (rc != 0) {
            return rc;
        }
    }

    return walk_memory_regions_end(&data, 0, 0);
}

static int dump_region(void *priv, target_ulong start,
    target_ulong end, abi_ulong prot)
{
    FILE *f = (FILE *)priv;

    (void) fprintf(f, TARGET_FMT_lx"-"TARGET_FMT_lx
        " "TARGET_FMT_lx" %c%c%c\n",
        start, end, end - start,
        ((prot & PAGE_READ) ? 'r' : '-'),
        ((prot & PAGE_WRITE) ? 'w' : '-'),
        ((prot & PAGE_EXEC) ? 'x' : '-'));

    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    const int length = sizeof(target_ulong) * 2;
    (void) fprintf(f, "%-*s %-*s %-*s %s\n",
            length, "start", length, "end", length, "size", "prot");
    walk_memory_regions(f, dump_region);
}

int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        return 0;
    }
    return p->flags;
}

/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is positioned automatically depending
   on PAGE_WRITE.  The mmap_lock should already be held. */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid. */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end <= ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
    assert(start < end);
    assert_memory_lock();

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        flags |= PAGE_WRITE_ORG;
    }

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate
           the code inside. */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0);
        }
        p->flags = flags;
    }
}
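
/*
 * Added commentary: PAGE_WRITE_ORG records whether the guest originally
 * mapped the page writable; PAGE_WRITE alone may be cleared later when the
 * page is write-protected because it contains translated code.
 * page_unprotect() below is assumed to rely on this to restore writability
 * after invalidating the affected TBs.
 */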

int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid. */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(start < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif

    if (len == 0) {
        return 0;
    }
    if (start + len - 1 < start) {
        /* We've wrapped around. */
        return -1;
    }

    /* must do before we lose bits in the next step */
    end = TARGET_PAGE_ALIGN(start + len);
    start = start & TARGET_PAGE_MASK;

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p) {
            return -1;
        }
        if (!(p->flags & PAGE_VALID)) {
            return -1;
        }

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) {
            return -1;
        }
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG)) {
                return -1;
            }
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0)) {
                    return -1;
                }
            }
        }
    }
    return 0;
}
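
/*
 * Added commentary: linux-user's access_ok() is one assumed caller of
 * page_check_range(); note the return convention above (0 on success,
 * -1 if any page in the range lacks the requested permissions).
 */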

/* called from signal handler: invalidate the code and unprotect the
 * page. Return 0 if the fault was not handled, 1 if it was handled,
 * and 2 if it was handled but the caller must cause the TB to be
 * immediately exited. (We can only return 2 if the 'pc' argument is
 * non-zero.)
 */
int page_unprotect(target_ulong address, uintptr_t pc)
{
    unsigned int prot;
    bool current_tb_invalidated;
    PageDesc *p;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok. */
    mmap_lock();

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        mmap_unlock();
        return 0;
    }

    /* if the page was really writable, then we change its
       protection back to writable */
    if (p->flags & PAGE_WRITE_ORG) {
        current_tb_invalidated = false;
        if (p->flags & PAGE_WRITE) {
            /* If the page is actually marked WRITE then assume this is because
             * this thread raced with another one which got here first and
             * set the page to PAGE_WRITE and did the TB invalidate for us.
             */
#ifdef TARGET_HAS_PRECISE_SMC
            TranslationBlock *current_tb = tcg_tb_lookup(pc);
            if (current_tb) {
                current_tb_invalidated = tb_cflags(current_tb) & CF_INVALID;
            }
#endif
        } else {
            host_start = address & qemu_host_page_mask;
            host_end = host_start + qemu_host_page_size;

            prot = 0;
            for (addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
                p = page_find(addr >> TARGET_PAGE_BITS);
                p->flags |= PAGE_WRITE;
                prot |= p->flags;

                /* and since the content will be modified, we must invalidate
                   the corresponding translated code. */
                current_tb_invalidated |= tb_invalidate_phys_page(addr, pc);
#ifdef CONFIG_USER_ONLY
                if (DEBUG_TB_CHECK_GATE) {
                    tb_invalidate_check(addr);
                }
#endif
            }
            mprotect((void *)g2h(host_start), qemu_host_page_size,
                     prot & PAGE_BITS);
        }
        mmap_unlock();
        /* If current TB was invalidated return to main loop */
        return current_tb_invalidated ? 2 : 1;
    }
    mmap_unlock();
    return 0;
}
#endif /* CONFIG_USER_ONLY */

/* This is a wrapper for common code that can not use CONFIG_SOFTMMU */
void tcg_flush_softmmu_tlb(CPUState *cs)
{
#ifdef CONFIG_SOFTMMU