 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qemu-common.h"

#define NO_CPU_IO_DEFS
#include "disas/disas.h"
#include "exec/exec-all.h"
#if defined(CONFIG_USER_ONLY)
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <machine/profile.h>
#endif
#endif
#else
#include "exec/ram_addr.h"
#endif

#include "exec/cputlb.h"
#include "exec/tb-hash.h"
#include "translate-all.h"
#include "qemu/bitmap.h"
#include "qemu/error-report.h"
#include "qemu/qemu-print.h"
#include "qemu/timer.h"
#include "qemu/main-loop.h"
#include "sysemu/cpus.h"
#include "sysemu/tcg.h"
/* #define DEBUG_TB_INVALIDATE */
/* #define DEBUG_TB_FLUSH */
/* make various TB consistency checks */
/* #define DEBUG_TB_CHECK */

#ifdef DEBUG_TB_INVALIDATE
#define DEBUG_TB_INVALIDATE_GATE 1
#else
#define DEBUG_TB_INVALIDATE_GATE 0
#endif

#ifdef DEBUG_TB_FLUSH
#define DEBUG_TB_FLUSH_GATE 1
#else
#define DEBUG_TB_FLUSH_GATE 0
#endif

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#ifdef DEBUG_TB_CHECK
#define DEBUG_TB_CHECK_GATE 1
#else
#define DEBUG_TB_CHECK_GATE 0
#endif
/* Access to the various translation structures needs to be serialised
 * via locks for consistency.
 * In user-mode emulation, access to the memory-related structures is
 * protected with mmap_lock.
 * In !user-mode we use per-page locks.
 */
#ifdef CONFIG_SOFTMMU
#define assert_memory_lock()
#else
#define assert_memory_lock() tcg_debug_assert(have_mmap_lock())
#endif
#define SMC_BITMAP_USE_THRESHOLD 10

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    uintptr_t first_tb;
#ifdef CONFIG_SOFTMMU
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned long *code_bitmap;
    unsigned int code_write_count;
#else
    unsigned long flags;
#endif
#ifndef CONFIG_USER_ONLY
    QemuSpin lock;
#endif
} PageDesc;
/**
 * struct page_entry - page descriptor entry
 * @pd:     pointer to the &struct PageDesc of the page this entry represents
 * @index:  page index of the page
 * @locked: whether the page is locked
 *
 * This struct helps us keep track of the locked state of a page, without
 * bloating &struct PageDesc.
 *
 * A page lock protects accesses to all fields of &struct PageDesc.
 *
 * See also: &struct page_collection.
 */
struct page_entry {
    PageDesc *pd;
    tb_page_addr_t index;
    bool locked;
};
/**
 * struct page_collection - tracks a set of pages (i.e. &struct page_entry's)
 * @tree:   Binary search tree (BST) of the pages, with key == page index
 * @max:    Pointer to the page in @tree with the highest page index
 *
 * To avoid deadlock we lock pages in ascending order of page index.
 * When operating on a set of pages, we need to keep track of them so that
 * we can lock them in order and also unlock them later. For this we collect
 * pages (i.e. &struct page_entry's) in a binary search @tree. Given that the
 * @tree implementation we use does not provide an O(1) operation to obtain the
 * highest-ranked element, we use @max to keep track of the inserted page
 * with the highest index. This is valuable because if a page is not in
 * the tree and its index is higher than @max's, then we can lock it
 * without breaking the locking order rule.
 *
 * Note on naming: 'struct page_set' would be shorter, but we already have a few
 * page_set_*() helpers, so page_collection is used instead to avoid confusion.
 *
 * See also: page_collection_lock().
 */
struct page_collection {
    GTree *tree;
    struct page_entry *max;
};
/* list iterators for lists of tagged pointers in TranslationBlock */
#define TB_FOR_EACH_TAGGED(head, tb, n, field)                          \
    for (n = (head) & 1, tb = (TranslationBlock *)((head) & ~1);        \
         tb; tb = (TranslationBlock *)tb->field[n], n = (uintptr_t)tb & 1, \
             tb = (TranslationBlock *)((uintptr_t)tb & ~1))

#define PAGE_FOR_EACH_TB(pagedesc, tb, n)                       \
    TB_FOR_EACH_TAGGED((pagedesc)->first_tb, tb, n, page_next)

#define TB_FOR_EACH_JMP(head_tb, tb, n)                                 \
    TB_FOR_EACH_TAGGED((head_tb)->jmp_list_head, tb, n, jmp_list_next)
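/*
 * Note on the tagged-pointer encoding iterated above: each head/next word
 * stores a TranslationBlock pointer with the list index (0 or 1, i.e. which
 * of the TB's two page/jump slots continues the chain) packed into the
 * pointer's least significant bit.  The iterator extracts the tag with
 * "& 1" and masks it off with "& ~1" before dereferencing; a head value of
 * ((uintptr_t)tb | 1), for example, means "tb, via its second slot".
 */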
/*
 * In system mode we want L1_MAP to be based on ram offsets,
 * while in user mode we want it to be based on virtual addresses.
 *
 * TODO: For user mode, see the caveat re host vs guest virtual
 * address spaces near GUEST_ADDR_MAX.
 */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  MIN(HOST_LONG_BITS, TARGET_ABI_BITS)
#endif
/* Size of the L2 (and L3, etc) page tables.  */
#define V_L2_BITS 10
#define V_L2_SIZE (1 << V_L2_BITS)

/* Make sure all possible CPU event bits fit in tb->trace_vcpu_dstate */
QEMU_BUILD_BUG_ON(CPU_TRACE_DSTATE_MAX_EVENTS >
                  sizeof_field(TranslationBlock, trace_vcpu_dstate)
                  * BITS_PER_BYTE);
/*
 * L1 Mapping properties
 */
static int v_l1_size;
static int v_l1_shift;
static int v_l2_levels;

/* The bottom level has pointers to PageDesc, and is indexed by
 * anything from 4 to (V_L2_BITS + 3) bits, depending on target page size.
 */
#define V_L1_MIN_BITS 4
#define V_L1_MAX_BITS (V_L2_BITS + 3)
#define V_L1_MAX_SIZE (1 << V_L1_MAX_BITS)

static void *l1_map[V_L1_MAX_SIZE];
/* code generation context */
TCGContext tcg_init_ctx;
__thread TCGContext *tcg_ctx;
TBContext tb_ctx;
static void page_table_config_init(void)
{
    uint32_t v_l1_bits;

    assert(TARGET_PAGE_BITS);
    /* The bits remaining after N lower levels of page tables.  */
    v_l1_bits = (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % V_L2_BITS;
    if (v_l1_bits < V_L1_MIN_BITS) {
        v_l1_bits += V_L2_BITS;
    }

    v_l1_size = 1 << v_l1_bits;
    v_l1_shift = L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - v_l1_bits;
    v_l2_levels = v_l1_shift / V_L2_BITS - 1;

    assert(v_l1_bits <= V_L1_MAX_BITS);
    assert(v_l1_shift % V_L2_BITS == 0);
    assert(v_l2_levels >= 0);
}
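/*
 * Worked example (illustrative): with L1_MAP_ADDR_SPACE_BITS == 32,
 * TARGET_PAGE_BITS == 12 and V_L2_BITS == 10, the remainder
 * (32 - 12) % 10 == 0 is below V_L1_MIN_BITS, so v_l1_bits becomes 10.
 * That gives v_l1_size == 1024, v_l1_shift == 10 and v_l2_levels == 0,
 * i.e. a 1024-entry top level whose entries point directly at
 * V_L2_SIZE-entry arrays of PageDesc.
 */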
void cpu_gen_init(void)
{
    tcg_context_init(&tcg_init_ctx);
}
/* Encode VAL as a signed leb128 sequence at P.
   Return P incremented past the encoded value.  */
static uint8_t *encode_sleb128(uint8_t *p, target_long val)
{
    int more, byte;

    do {
        byte = val & 0x7f;
        val >>= 7;
        more = !((val == 0 && (byte & 0x40) == 0)
                 || (val == -1 && (byte & 0x40) != 0));
        if (more) {
            byte |= 0x80;
        }
        *p++ = byte;
    } while (more);

    return p;
}

/* Decode a signed leb128 sequence at *PP; increment *PP past the
   decoded value.  Return the decoded value.  */
static target_long decode_sleb128(uint8_t **pp)
{
    uint8_t *p = *pp;
    target_long val = 0;
    int byte, shift = 0;

    do {
        byte = *p++;
        val |= (target_ulong)(byte & 0x7f) << shift;
        shift += 7;
    } while (byte & 0x80);
    if (shift < TARGET_LONG_BITS && (byte & 0x40)) {
        val |= -(target_ulong)1 << shift;
    }

    *pp = p;
    return val;
}
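/*
 * Example (illustrative only): 300 == 0b1_0010_1100 encodes as the two
 * bytes 0xac 0x02 (low seven bits first, 0x80 marking continuation), and
 * -2 encodes as the single byte 0x7e, whose sign bit (0x40) makes
 * decode_sleb128() extend the value back to -2.
 */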
/* Encode the data collected about the instructions while compiling TB.
   Place the data at BLOCK, and return the number of bytes consumed.

   The logical table consists of TARGET_INSN_START_WORDS target_ulong's,
   which come from the target's insn_start data, followed by a uintptr_t
   which comes from the host pc of the end of the code implementing the insn.

   Each line of the table is encoded as sleb128 deltas from the previous
   line.  The seed for the first line is { tb->pc, 0..., tb->tc.ptr }.
   That is, the first column is seeded with the guest pc, the last column
   with the host pc, and the middle columns with zeros.  */
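/*
 * Example layout (illustrative, assuming TARGET_INSN_START_WORDS == 1):
 * for a TB at guest pc 0x1000 with two insns at 0x1000 and 0x1004 whose
 * generated code ends at host offsets 40 and 72, the encoded stream is
 * sleb128(0), sleb128(40), sleb128(4), sleb128(32) -- each row storing
 * only the delta from the previous row (or from the seed for row 0).
 */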
static int encode_search(TranslationBlock *tb, uint8_t *block)
{
    uint8_t *highwater = tcg_ctx->code_gen_highwater;
    uint8_t *p = block;
    int i, j, n;

    for (i = 0, n = tb->icount; i < n; ++i) {
        target_ulong prev;

        for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
            if (i == 0) {
                prev = (j == 0 ? tb->pc : 0);
            } else {
                prev = tcg_ctx->gen_insn_data[i - 1][j];
            }
            p = encode_sleb128(p, tcg_ctx->gen_insn_data[i][j] - prev);
        }
        prev = (i == 0 ? 0 : tcg_ctx->gen_insn_end_off[i - 1]);
        p = encode_sleb128(p, tcg_ctx->gen_insn_end_off[i] - prev);

        /* Test for (pending) buffer overflow.  The assumption is that any
           one row beginning below the high water mark cannot overrun
           the buffer completely.  Thus we can test for overflow after
           encoding a row without having to check during encoding.  */
        if (unlikely(p > highwater)) {
            return -1;
        }
    }

    return p - block;
}
/* The cpu state corresponding to 'searched_pc' is restored.
 * When reset_icount is true, the current TB will be interrupted and
 * icount should be recalculated.
 */
static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
                                     uintptr_t searched_pc, bool reset_icount)
{
    target_ulong data[TARGET_INSN_START_WORDS] = { tb->pc };
    uintptr_t host_pc = (uintptr_t)tb->tc.ptr;
    CPUArchState *env = cpu->env_ptr;
    uint8_t *p = tb->tc.ptr + tb->tc.size;
    int i, j, num_insns = tb->icount;
#ifdef CONFIG_PROFILER
    TCGProfile *prof = &tcg_ctx->prof;
    int64_t ti = profile_getclock();
#endif

    searched_pc -= GETPC_ADJ;

    if (searched_pc < host_pc) {
        return -1;
    }

    /* Reconstruct the stored insn data while looking for the point at
       which the end of the insn exceeds the searched_pc.  */
    for (i = 0; i < num_insns; ++i) {
        for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
            data[j] += decode_sleb128(&p);
        }
        host_pc += decode_sleb128(&p);
        if (host_pc > searched_pc) {
            goto found;
        }
    }
    return -1;

 found:
    if (reset_icount && (tb_cflags(tb) & CF_USE_ICOUNT)) {
        /* Reset the cycle counter to the start of the block
           and shift it to the number of actually executed instructions */
        cpu_neg(cpu)->icount_decr.u16.low += num_insns - i;
    }
    restore_state_to_opc(env, tb, data);

#ifdef CONFIG_PROFILER
    atomic_set(&prof->restore_time,
               prof->restore_time + profile_getclock() - ti);
    atomic_set(&prof->restore_count, prof->restore_count + 1);
#endif
    return 0;
}
void tb_destroy(TranslationBlock *tb)
{
    qemu_spin_destroy(&tb->jmp_lock);
}
bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc, bool will_exit)
{
    TranslationBlock *tb;
    bool r = false;
    uintptr_t check_offset;

    /* The host_pc has to be in the region of current code buffer. If
     * it is not we will not be able to resolve it here. The two cases
     * where host_pc will not be correct are:
     *
     *  - fault during translation (instruction fetch)
     *  - fault from helper (not using GETPC() macro)
     *
     * Either way we need return early as we can't resolve it here.
     *
     * We are using unsigned arithmetic so if host_pc <
     * tcg_init_ctx.code_gen_buffer check_offset will wrap to way
     * above the code_gen_buffer_size
     */
    check_offset = host_pc - (uintptr_t)tcg_init_ctx.code_gen_buffer;

    if (check_offset < tcg_init_ctx.code_gen_buffer_size) {
        tb = tcg_tb_lookup(host_pc);
        if (tb) {
            cpu_restore_state_from_tb(cpu, tb, host_pc, will_exit);
            if (tb_cflags(tb) & CF_NOCACHE) {
                /* one-shot translation, invalidate it immediately */
                tb_phys_invalidate(tb, -1);
            }
            r = true;
        }
    }

    return r;
}
static void page_init(void)
{
    page_table_config_init();

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> v_l1_shift) & (v_l1_size - 1));

    /* Level 2..N-1.  */
    for (i = v_l2_levels; i > 0; i--) {
        void **p = atomic_rcu_read(lp);

        if (p == NULL) {
            void *existing;

            if (!alloc) {
                return NULL;
            }
            p = g_new0(void *, V_L2_SIZE);
            existing = atomic_cmpxchg(lp, NULL, p);
            if (unlikely(existing)) {
                g_free(p);
                p = existing;
            }
        }

        lp = p + ((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1));
    }

    pd = atomic_rcu_read(lp);
    if (pd == NULL) {
        void *existing;

        if (!alloc) {
            return NULL;
        }
        pd = g_new0(PageDesc, V_L2_SIZE);
#ifndef CONFIG_USER_ONLY
        {
            int i;

            for (i = 0; i < V_L2_SIZE; i++) {
                qemu_spin_init(&pd[i].lock);
            }
        }
#endif
        existing = atomic_cmpxchg(lp, NULL, pd);
        if (unlikely(existing)) {
#ifndef CONFIG_USER_ONLY
            {
                int i;

                for (i = 0; i < V_L2_SIZE; i++) {
                    qemu_spin_destroy(&pd[i].lock);
                }
            }
#endif
            g_free(pd);
            pd = existing;
        }
    }

    return pd + (index & (V_L2_SIZE - 1));
}
static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}
static void page_lock_pair(PageDesc **ret_p1, tb_page_addr_t phys1,
                           PageDesc **ret_p2, tb_page_addr_t phys2, int alloc);
/* In user-mode page locks aren't used; mmap_lock is enough */
#ifdef CONFIG_USER_ONLY

#define assert_page_locked(pd) tcg_debug_assert(have_mmap_lock())

static inline void page_lock(PageDesc *pd)
{ }

static inline void page_unlock(PageDesc *pd)
{ }

static inline void page_lock_tb(const TranslationBlock *tb)
{ }

static inline void page_unlock_tb(const TranslationBlock *tb)
{ }

struct page_collection *
page_collection_lock(tb_page_addr_t start, tb_page_addr_t end)
{
    return NULL;
}

void page_collection_unlock(struct page_collection *set)
{ }
#else /* !CONFIG_USER_ONLY */

#ifdef CONFIG_DEBUG_TCG

static __thread GHashTable *ht_pages_locked_debug;

static void ht_pages_locked_debug_init(void)
{
    if (ht_pages_locked_debug) {
        return;
    }
    ht_pages_locked_debug = g_hash_table_new(NULL, NULL);
}

static bool page_is_locked(const PageDesc *pd)
{
    PageDesc *found;

    ht_pages_locked_debug_init();
    found = g_hash_table_lookup(ht_pages_locked_debug, pd);
    return !!found;
}

static void page_lock__debug(PageDesc *pd)
{
    ht_pages_locked_debug_init();
    g_assert(!page_is_locked(pd));
    g_hash_table_insert(ht_pages_locked_debug, pd, pd);
}

static void page_unlock__debug(const PageDesc *pd)
{
    bool removed;

    ht_pages_locked_debug_init();
    g_assert(page_is_locked(pd));
    removed = g_hash_table_remove(ht_pages_locked_debug, pd);
    g_assert(removed);
}

static void
do_assert_page_locked(const PageDesc *pd, const char *file, int line)
{
    if (unlikely(!page_is_locked(pd))) {
        error_report("assert_page_lock: PageDesc %p not locked @ %s:%d",
                     pd, file, line);
        abort();
    }
}

#define assert_page_locked(pd) do_assert_page_locked(pd, __FILE__, __LINE__)

void assert_no_pages_locked(void)
{
    ht_pages_locked_debug_init();
    g_assert(g_hash_table_size(ht_pages_locked_debug) == 0);
}

#else /* !CONFIG_DEBUG_TCG */

#define assert_page_locked(pd)

static inline void page_lock__debug(const PageDesc *pd)
{
}

static inline void page_unlock__debug(const PageDesc *pd)
{
}

#endif /* CONFIG_DEBUG_TCG */
static inline void page_lock(PageDesc *pd)
{
    page_lock__debug(pd);
    qemu_spin_lock(&pd->lock);
}

static inline void page_unlock(PageDesc *pd)
{
    qemu_spin_unlock(&pd->lock);
    page_unlock__debug(pd);
}
/* lock the page(s) of a TB in the correct acquisition order */
static inline void page_lock_tb(const TranslationBlock *tb)
{
    page_lock_pair(NULL, tb->page_addr[0], NULL, tb->page_addr[1], 0);
}

static inline void page_unlock_tb(const TranslationBlock *tb)
{
    PageDesc *p1 = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);

    page_unlock(p1);
    if (unlikely(tb->page_addr[1] != -1)) {
        PageDesc *p2 = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);

        if (p2 != p1) {
            page_unlock(p2);
        }
    }
}
static inline struct page_entry *
page_entry_new(PageDesc *pd, tb_page_addr_t index)
{
    struct page_entry *pe = g_malloc(sizeof(*pe));

    pe->index = index;
    pe->pd = pd;
    pe->locked = false;
    return pe;
}

static void page_entry_destroy(gpointer p)
{
    struct page_entry *pe = p;

    g_assert(pe->locked);
    page_unlock(pe->pd);
    g_free(pe);
}
/* returns false on success */
static bool page_entry_trylock(struct page_entry *pe)
{
    bool busy;

    busy = qemu_spin_trylock(&pe->pd->lock);
    if (!busy) {
        g_assert(!pe->locked);
        pe->locked = true;
        page_lock__debug(pe->pd);
    }
    return busy;
}

static void do_page_entry_lock(struct page_entry *pe)
{
    page_lock(pe->pd);
    g_assert(!pe->locked);
    pe->locked = true;
}

static gboolean page_entry_lock(gpointer key, gpointer value, gpointer data)
{
    struct page_entry *pe = value;

    do_page_entry_lock(pe);
    return FALSE;
}

static gboolean page_entry_unlock(gpointer key, gpointer value, gpointer data)
{
    struct page_entry *pe = value;

    if (pe->locked) {
        pe->locked = false;
        page_unlock(pe->pd);
    }
    return FALSE;
}
/*
 * Trylock a page, and if successful, add the page to a collection.
 * Returns true ("busy") if the page could not be locked; false otherwise.
 */
static bool page_trylock_add(struct page_collection *set, tb_page_addr_t addr)
{
    tb_page_addr_t index = addr >> TARGET_PAGE_BITS;
    struct page_entry *pe;
    PageDesc *pd;

    pe = g_tree_lookup(set->tree, &index);
    if (pe) {
        return false;
    }

    pd = page_find(index);
    if (pd == NULL) {
        return false;
    }

    pe = page_entry_new(pd, index);
    g_tree_insert(set->tree, &pe->index, pe);

    /*
     * If this is either (1) the first insertion or (2) a page whose index
     * is higher than any other so far, just lock the page and move on.
     */
    if (set->max == NULL || pe->index > set->max->index) {
        set->max = pe;
        do_page_entry_lock(pe);
        return false;
    }
    /*
     * Try to acquire out-of-order lock; if busy, return busy so that we acquire
     * locks in order.
     */
    return page_entry_trylock(pe);
}
static gint tb_page_addr_cmp(gconstpointer ap, gconstpointer bp, gpointer udata)
{
    tb_page_addr_t a = *(const tb_page_addr_t *)ap;
    tb_page_addr_t b = *(const tb_page_addr_t *)bp;

    if (a == b) {
        return 0;
    } else if (a < b) {
        return -1;
    }
    return 1;
}
/*
 * Lock a range of pages ([@start,@end[) as well as the pages of all
 * intersecting TBs.
 * Locking order: acquire locks in ascending order of page index.
 */
struct page_collection *
page_collection_lock(tb_page_addr_t start, tb_page_addr_t end)
{
    struct page_collection *set = g_malloc(sizeof(*set));
    tb_page_addr_t index;
    PageDesc *pd;

    start >>= TARGET_PAGE_BITS;
    end >>= TARGET_PAGE_BITS;
    g_assert(start <= end);

    set->tree = g_tree_new_full(tb_page_addr_cmp, NULL, NULL,
                                page_entry_destroy);
    set->max = NULL;
    assert_no_pages_locked();

 retry:
    g_tree_foreach(set->tree, page_entry_lock, NULL);

    for (index = start; index <= end; index++) {
        TranslationBlock *tb;
        int n;

        pd = page_find(index);
        if (pd == NULL) {
            continue;
        }
        if (page_trylock_add(set, index << TARGET_PAGE_BITS)) {
            g_tree_foreach(set->tree, page_entry_unlock, NULL);
            goto retry;
        }
        assert_page_locked(pd);
        PAGE_FOR_EACH_TB(pd, tb, n) {
            if (page_trylock_add(set, tb->page_addr[0]) ||
                (tb->page_addr[1] != -1 &&
                 page_trylock_add(set, tb->page_addr[1]))) {
                /* drop all locks, and reacquire in order */
                g_tree_foreach(set->tree, page_entry_unlock, NULL);
                goto retry;
            }
        }
    }
    return set;
}

void page_collection_unlock(struct page_collection *set)
{
    /* entries are unlocked and freed via page_entry_destroy */
    g_tree_destroy(set->tree);
    g_free(set);
}

#endif /* !CONFIG_USER_ONLY */
static void page_lock_pair(PageDesc **ret_p1, tb_page_addr_t phys1,
                           PageDesc **ret_p2, tb_page_addr_t phys2, int alloc)
{
    PageDesc *p1, *p2;
    tb_page_addr_t page1;
    tb_page_addr_t page2;

    assert_memory_lock();
    g_assert(phys1 != -1);

    page1 = phys1 >> TARGET_PAGE_BITS;
    page2 = phys2 >> TARGET_PAGE_BITS;

    p1 = page_find_alloc(page1, alloc);
    if (ret_p1) {
        *ret_p1 = p1;
    }
    if (likely(phys2 == -1)) {
        page_lock(p1);
        return;
    } else if (page1 == page2) {
        page_lock(p1);
        if (ret_p2) {
            *ret_p2 = p1;
        }
        return;
    }

    if (page1 < page2) {
        page_lock(p1);
    }
    p2 = page_find_alloc(page2, alloc);
    if (ret_p2) {
        *ret_p2 = p2;
    }
    page_lock(p2);
    if (page1 > page2) {
        page_lock(p1);
    }
}
/* Minimum size of the code gen buffer.  This number is randomly chosen,
   but not so small that we can't have a fair number of TB's live.  */
#define MIN_CODE_GEN_BUFFER_SIZE     (1 * MiB)

/* Maximum size of the code gen buffer we'd like to use.  Unless otherwise
   indicated, this is constrained by the range of direct branches on the
   host cpu, as used by the TCG implementation of goto_tb.  */
#if defined(__x86_64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2 * GiB)
#elif defined(__sparc__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2 * GiB)
#elif defined(__powerpc64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2 * GiB)
#elif defined(__powerpc__)
# define MAX_CODE_GEN_BUFFER_SIZE  (32 * MiB)
#elif defined(__aarch64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2 * GiB)
#elif defined(__s390x__)
  /* We have a +- 4GB range on the branches; leave some slop.  */
# define MAX_CODE_GEN_BUFFER_SIZE  (3 * GiB)
#elif defined(__mips__)
  /* We have a 256MB branch region, but leave room to make sure the
     main executable is also within that region.  */
# define MAX_CODE_GEN_BUFFER_SIZE  (128 * MiB)
#else
# define MAX_CODE_GEN_BUFFER_SIZE  ((size_t)-1)
#endif
#if TCG_TARGET_REG_BITS == 32
#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32 * MiB)
#ifdef CONFIG_USER_ONLY
/*
 * For user mode on smaller 32 bit systems we may run into trouble
 * allocating big chunks of data in the right place. On these systems
 * we utilise a static code generation buffer directly in the binary.
 */
#define USE_STATIC_CODE_GEN_BUFFER
#endif
#else /* TCG_TARGET_REG_BITS == 64 */
#ifdef CONFIG_USER_ONLY
/*
 * As user-mode emulation typically means running multiple instances
 * of the translator don't go too nuts with our default code gen
 * buffer lest we make things too hard for the OS.
 */
#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (128 * MiB)
#else
/*
 * We expect most system emulation to run one or two guests per host.
 * Users running large scale system emulation may want to tweak their
 * runtime setup via the tb-size control on the command line.
 */
#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (1 * GiB)
#endif
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE \
  (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
   ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)
static inline size_t size_code_gen_buffer(size_t tb_size)
{
    /* Size the buffer.  */
    if (tb_size == 0) {
        size_t phys_mem = qemu_get_host_physmem();
        if (phys_mem == 0) {
            tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
        } else {
            tb_size = MIN(DEFAULT_CODE_GEN_BUFFER_SIZE, phys_mem / 8);
        }
    }
    if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
        tb_size = MIN_CODE_GEN_BUFFER_SIZE;
    }
    if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
        tb_size = MAX_CODE_GEN_BUFFER_SIZE;
    }
    return tb_size;
}
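/*
 * Worked example (illustrative): with tb_size == 0 on a 64-bit softmmu
 * build, a host reporting 2 GiB of RAM gets MIN(1 GiB, 2 GiB / 8) ==
 * 256 MiB, which then passes both the MIN and MAX clamps unchanged.
 */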
#ifdef __mips__
/* In order to use J and JAL within the code_gen_buffer, we require
   that the buffer not cross a 256MB boundary.  */
static inline bool cross_256mb(void *addr, size_t size)
{
    return ((uintptr_t)addr ^ ((uintptr_t)addr + size)) & ~0x0ffffffful;
}
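/*
 * Example (illustrative): addr == 0x0ff00000 with size == 0x200000 gives
 * addr ^ (addr + size) == 0x0ff00000 ^ 0x10100000 == 0x1fe00000; masking
 * with ~0x0ffffffful leaves 0x10000000, i.e. the buffer would straddle a
 * 256MB-aligned boundary and must be split.
 */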
/* We weren't able to allocate a buffer without crossing that boundary,
   so make do with the larger portion of the buffer that doesn't cross.
   Returns the new base of the buffer, and adjusts code_gen_buffer_size.  */
static inline void *split_cross_256mb(void *buf1, size_t size1)
{
    void *buf2 = (void *)(((uintptr_t)buf1 + size1) & ~0x0ffffffful);
    size_t size2 = buf1 + size1 - buf2;

    size1 = buf2 - buf1;
    if (size1 < size2) {
        size1 = size2;
        buf1 = buf2;
    }

    tcg_ctx->code_gen_buffer_size = size1;
    return buf1;
}
#endif
#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
    __attribute__((aligned(CODE_GEN_ALIGN)));

static inline void *alloc_code_gen_buffer(void)
{
    void *buf = static_code_gen_buffer;
    void *end = static_code_gen_buffer + sizeof(static_code_gen_buffer);
    size_t size;

    /* page-align the beginning and end of the buffer */
    buf = QEMU_ALIGN_PTR_UP(buf, qemu_real_host_page_size);
    end = QEMU_ALIGN_PTR_DOWN(end, qemu_real_host_page_size);

    size = end - buf;

    /* Honor a command-line option limiting the size of the buffer.  */
    if (size > tcg_ctx->code_gen_buffer_size) {
        size = QEMU_ALIGN_DOWN(tcg_ctx->code_gen_buffer_size,
                               qemu_real_host_page_size);
    }
    tcg_ctx->code_gen_buffer_size = size;

#ifdef __mips__
    if (cross_256mb(buf, size)) {
        buf = split_cross_256mb(buf, size);
        size = tcg_ctx->code_gen_buffer_size;
    }
#endif

    if (qemu_mprotect_rwx(buf, size)) {
        abort();
    }
    qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);

    return buf;
}
#elif defined(_WIN32)
static inline void *alloc_code_gen_buffer(void)
{
    size_t size = tcg_ctx->code_gen_buffer_size;
    return VirtualAlloc(NULL, size, MEM_RESERVE | MEM_COMMIT,
                        PAGE_EXECUTE_READWRITE);
}
#else
static inline void *alloc_code_gen_buffer(void)
{
    int prot = PROT_WRITE | PROT_READ | PROT_EXEC;
    int flags = MAP_PRIVATE | MAP_ANONYMOUS;
    size_t size = tcg_ctx->code_gen_buffer_size;
    void *buf;

    buf = mmap(NULL, size, prot, flags, -1, 0);
    if (buf == MAP_FAILED) {
        return NULL;
    }

#ifdef __mips__
    if (cross_256mb(buf, size)) {
        /*
         * Try again, with the original still mapped, to avoid re-acquiring
         * the same 256mb crossing.
         */
        size_t size2;
        void *buf2 = mmap(NULL, size, prot, flags, -1, 0);
        switch ((int)(buf2 != MAP_FAILED)) {
        case 1:
            if (!cross_256mb(buf2, size)) {
                /* Success!  Use the new buffer.  */
                munmap(buf, size);
                break;
            }
            /* Failure.  Work with what we had.  */
            munmap(buf2, size);
            /* fallthru */
        default:
            /* Split the original buffer.  Free the smaller half.  */
            buf2 = split_cross_256mb(buf, size);
            size2 = tcg_ctx->code_gen_buffer_size;
            if (buf == buf2) {
                munmap(buf + size2, size - size2);
            } else {
                munmap(buf, size - size2);
            }
            size = size2;
            break;
        }
        buf = buf2;
    }
#endif

    /* Request large pages for the buffer.  */
    qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);

    return buf;
}
#endif /* USE_STATIC_CODE_GEN_BUFFER, WIN32, POSIX */
static inline void code_gen_alloc(size_t tb_size)
{
    tcg_ctx->code_gen_buffer_size = size_code_gen_buffer(tb_size);
    tcg_ctx->code_gen_buffer = alloc_code_gen_buffer();
    if (tcg_ctx->code_gen_buffer == NULL) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
        exit(1);
    }
}
static bool tb_cmp(const void *ap, const void *bp)
{
    const TranslationBlock *a = ap;
    const TranslationBlock *b = bp;

    return a->pc == b->pc &&
        a->cs_base == b->cs_base &&
        a->flags == b->flags &&
        (tb_cflags(a) & CF_HASH_MASK) == (tb_cflags(b) & CF_HASH_MASK) &&
        a->trace_vcpu_dstate == b->trace_vcpu_dstate &&
        a->page_addr[0] == b->page_addr[0] &&
        a->page_addr[1] == b->page_addr[1];
}
static void tb_htable_init(void)
{
    unsigned int mode = QHT_MODE_AUTO_RESIZE;

    qht_init(&tb_ctx.htable, tb_cmp, CODE_GEN_HTABLE_SIZE, mode);
}
/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size)
{
    tcg_allowed = true;
    cpu_gen_init();
    page_init();
    tb_htable_init();
    code_gen_alloc(tb_size);
#if defined(CONFIG_SOFTMMU)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now. */
    tcg_prologue_init(tcg_ctx);
#endif
}
/* call with @p->lock held */
static inline void invalidate_page_bitmap(PageDesc *p)
{
    assert_page_locked(p);
#ifdef CONFIG_SOFTMMU
    g_free(p->code_bitmap);
    p->code_bitmap = NULL;
    p->code_write_count = 0;
#endif
}
/* Set to NULL all the 'first_tb' fields in all PageDescs. */
static void page_flush_tb_1(int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            page_lock(&pd[i]);
            pd[i].first_tb = (uintptr_t)NULL;
            invalidate_page_bitmap(pd + i);
            page_unlock(&pd[i]);
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            page_flush_tb_1(level - 1, pp + i);
        }
    }
}
static void page_flush_tb(void)
{
    int i, l1_sz = v_l1_size;

    for (i = 0; i < l1_sz; i++) {
        page_flush_tb_1(v_l2_levels, l1_map + i);
    }
}
static gboolean tb_host_size_iter(gpointer key, gpointer value, gpointer data)
{
    const TranslationBlock *tb = value;
    size_t *size = data;

    *size += tb->tc.size;
    return false;
}
/* flush all the translation blocks */
static void do_tb_flush(CPUState *cpu, run_on_cpu_data tb_flush_count)
{
    bool did_flush = false;

    mmap_lock();
    /* If it has already been done on request of another CPU,
     * just retry.
     */
    if (tb_ctx.tb_flush_count != tb_flush_count.host_int) {
        goto done;
    }
    did_flush = true;

    if (DEBUG_TB_FLUSH_GATE) {
        size_t nb_tbs = tcg_nb_tbs();
        size_t host_size = 0;

        tcg_tb_foreach(tb_host_size_iter, &host_size);
        printf("qemu: flush code_size=%zu nb_tbs=%zu avg_tb_size=%zu\n",
               tcg_code_size(), nb_tbs, nb_tbs > 0 ? host_size / nb_tbs : 0);
    }

    CPU_FOREACH(cpu) {
        cpu_tb_jmp_cache_clear(cpu);
    }

    qht_reset_size(&tb_ctx.htable, CODE_GEN_HTABLE_SIZE);
    page_flush_tb();

    tcg_region_reset_all();
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    atomic_mb_set(&tb_ctx.tb_flush_count, tb_ctx.tb_flush_count + 1);

done:
    mmap_unlock();
    if (did_flush) {
        qemu_plugin_flush_cb();
    }
}
void tb_flush(CPUState *cpu)
{
    if (tcg_enabled()) {
        unsigned tb_flush_count = atomic_mb_read(&tb_ctx.tb_flush_count);

        if (cpu_in_exclusive_context(cpu)) {
            do_tb_flush(cpu, RUN_ON_CPU_HOST_INT(tb_flush_count));
        } else {
            async_safe_run_on_cpu(cpu, do_tb_flush,
                                  RUN_ON_CPU_HOST_INT(tb_flush_count));
        }
    }
}
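/*
 * Note: the tb_flush_count snapshot read above is handed to do_tb_flush(),
 * which compares it against the live counter and backs out if another vCPU
 * already performed the flush, so concurrent flush requests collapse into a
 * single actual flush.
 */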
/*
 * Formerly ifdef DEBUG_TB_CHECK. These debug functions are user-mode-only,
 * so in order to prevent bit rot we compile them unconditionally in user-mode,
 * and let the optimizer get rid of them by wrapping their user-only callers
 * with if (DEBUG_TB_CHECK_GATE).
 */
#ifdef CONFIG_USER_ONLY

static void do_tb_invalidate_check(void *p, uint32_t hash, void *userp)
{
    TranslationBlock *tb = p;
    target_ulong addr = *(target_ulong *)userp;

    if (!(addr + TARGET_PAGE_SIZE <= tb->pc || addr >= tb->pc + tb->size)) {
        printf("ERROR invalidate: address=" TARGET_FMT_lx
               " PC=%08lx size=%04x\n", addr, (long)tb->pc, tb->size);
    }
}

/* verify that all the pages have correct rights for code
 *
 * Called with mmap_lock held.
 */
static void tb_invalidate_check(target_ulong address)
{
    address &= TARGET_PAGE_MASK;
    qht_iter(&tb_ctx.htable, do_tb_invalidate_check, &address);
}

static void do_tb_page_check(void *p, uint32_t hash, void *userp)
{
    TranslationBlock *tb = p;
    int flags1, flags2;

    flags1 = page_get_flags(tb->pc);
    flags2 = page_get_flags(tb->pc + tb->size - 1);
    if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
        printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
               (long)tb->pc, tb->size, flags1, flags2);
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    qht_iter(&tb_ctx.htable, do_tb_page_check, NULL);
}

#endif /* CONFIG_USER_ONLY */
/*
 * user-mode: call with mmap_lock held
 * !user-mode: call with @pd->lock held
 */
static inline void tb_page_remove(PageDesc *pd, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    uintptr_t *pprev;
    unsigned int n1;

    assert_page_locked(pd);
    pprev = &pd->first_tb;
    PAGE_FOR_EACH_TB(pd, tb1, n1) {
        if (tb1 == tb) {
            *pprev = tb1->page_next[n1];
            return;
        }
        pprev = &tb1->page_next[n1];
    }
    g_assert_not_reached();
}
/* remove @orig from its @n_orig-th jump list */
static inline void tb_remove_from_jmp_list(TranslationBlock *orig, int n_orig)
{
    uintptr_t ptr, ptr_locked;
    TranslationBlock *dest;
    TranslationBlock *tb;
    uintptr_t *pprev;
    int n;

    /* mark the LSB of jmp_dest[] so that no further jumps can be inserted */
    ptr = atomic_or_fetch(&orig->jmp_dest[n_orig], 1);
    dest = (TranslationBlock *)(ptr & ~1);
    if (dest == NULL) {
        return;
    }

    qemu_spin_lock(&dest->jmp_lock);
    /*
     * While acquiring the lock, the jump might have been removed if the
     * destination TB was invalidated; check again.
     */
    ptr_locked = atomic_read(&orig->jmp_dest[n_orig]);
    if (ptr_locked != ptr) {
        qemu_spin_unlock(&dest->jmp_lock);
        /*
         * The only possibility is that the jump was unlinked via
         * tb_jump_unlink(dest). Seeing here another destination would be a bug,
         * because we set the LSB above.
         */
        g_assert(ptr_locked == 1 && dest->cflags & CF_INVALID);
        return;
    }
    /*
     * We first acquired the lock, and since the destination pointer matches,
     * we know for sure that @orig is in the jmp list.
     */
    pprev = &dest->jmp_list_head;
    TB_FOR_EACH_JMP(dest, tb, n) {
        if (tb == orig && n == n_orig) {
            *pprev = tb->jmp_list_next[n];
            /* no need to set orig->jmp_dest[n]; setting the LSB was enough */
            qemu_spin_unlock(&dest->jmp_lock);
            return;
        }
        pprev = &tb->jmp_list_next[n];
    }
    g_assert_not_reached();
}
/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    uintptr_t addr = (uintptr_t)(tb->tc.ptr + tb->jmp_reset_offset[n]);
    tb_set_jmp_target(tb, n, addr);
}
/* remove any jumps to the TB */
static inline void tb_jmp_unlink(TranslationBlock *dest)
{
    TranslationBlock *tb;
    int n;

    qemu_spin_lock(&dest->jmp_lock);

    TB_FOR_EACH_JMP(dest, tb, n) {
        tb_reset_jump(tb, n);
        atomic_and(&tb->jmp_dest[n], (uintptr_t)NULL | 1);
        /* No need to clear the list entry; setting the dest ptr is enough */
    }
    dest->jmp_list_head = (uintptr_t)NULL;

    qemu_spin_unlock(&dest->jmp_lock);
}
/*
 * In user-mode, call with mmap_lock held.
 * In !user-mode, if @rm_from_page_list is set, call with the TB's pages'
 * locks held.
 */
static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list)
{
    CPUState *cpu;
    PageDesc *p;
    uint32_t h;
    tb_page_addr_t phys_pc;

    assert_memory_lock();

    /* make sure no further incoming jumps will be chained to this TB */
    qemu_spin_lock(&tb->jmp_lock);
    atomic_set(&tb->cflags, tb->cflags | CF_INVALID);
    qemu_spin_unlock(&tb->jmp_lock);

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb_cflags(tb) & CF_HASH_MASK,
                     tb->trace_vcpu_dstate);
    if (!(tb->cflags & CF_NOCACHE) &&
        !qht_remove(&tb_ctx.htable, tb, h)) {
        return;
    }

    /* remove the TB from the page list */
    if (rm_from_page_list) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(p, tb);
        invalidate_page_bitmap(p);
        if (tb->page_addr[1] != -1) {
            p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
            tb_page_remove(p, tb);
            invalidate_page_bitmap(p);
        }
    }

    /* remove the TB from the per-CPU jump caches */
    h = tb_jmp_cache_hash_func(tb->pc);
    CPU_FOREACH(cpu) {
        if (atomic_read(&cpu->tb_jmp_cache[h]) == tb) {
            atomic_set(&cpu->tb_jmp_cache[h], NULL);
        }
    }

    /* suppress this TB from the two jump lists */
    tb_remove_from_jmp_list(tb, 0);
    tb_remove_from_jmp_list(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb_jmp_unlink(tb);

    atomic_set(&tcg_ctx->tb_phys_invalidate_count,
               tcg_ctx->tb_phys_invalidate_count + 1);
}
static void tb_phys_invalidate__locked(TranslationBlock *tb)
{
    do_tb_phys_invalidate(tb, true);
}
/* invalidate one TB
 *
 * Called with mmap_lock held in user-mode.
 */
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    if (page_addr == -1 && tb->page_addr[0] != -1) {
        page_lock_tb(tb);
        do_tb_phys_invalidate(tb, true);
        page_unlock_tb(tb);
    } else {
        do_tb_phys_invalidate(tb, false);
    }
}
#ifdef CONFIG_SOFTMMU
/* call with @p->lock held */
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    assert_page_locked(p);
    p->code_bitmap = bitmap_new(TARGET_PAGE_SIZE);

    PAGE_FOR_EACH_TB(p, tb, n) {
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE) {
                tb_end = TARGET_PAGE_SIZE;
            }
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        bitmap_set(p->code_bitmap, tb_start, tb_end - tb_start);
    }
}
#endif
/* add the tb in the target page and protect it if necessary
 *
 * Called with mmap_lock held for user-mode emulation.
 * Called with @p->lock held in !user-mode.
 */
static inline void tb_page_add(PageDesc *p, TranslationBlock *tb,
                               unsigned int n, tb_page_addr_t page_addr)
{
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;
#endif

    assert_page_locked(p);

    tb->page_addr[n] = page_addr;
    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    page_already_protected = p->first_tb != (uintptr_t)NULL;
#endif
    p->first_tb = (uintptr_t)tb | n;
    invalidate_page_bitmap(p);

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for (addr = page_addr; addr < page_addr + qemu_host_page_size;
             addr += TARGET_PAGE_SIZE) {

            p2 = page_find(addr >> TARGET_PAGE_BITS);
            if (!p2) {
                continue;
            }
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
        if (DEBUG_TB_INVALIDATE_GATE) {
            printf("protecting code page: 0x" TB_PAGE_ADDR_FMT "\n", page_addr);
        }
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);
    }
#endif
}
/* add a new TB and link it to the physical page tables. phys_page2 is
 * (-1) to indicate that only one page contains the TB.
 *
 * Called with mmap_lock held for user-mode emulation.
 *
 * Returns @tb, or a pointer to an existing TB that matches @tb.
 * Note that in !user-mode, another thread might have already added a TB
 * for the same block of guest code that @tb corresponds to. In that case,
 * the caller should discard the original @tb, and use instead the returned TB.
 */
static TranslationBlock *
tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
             tb_page_addr_t phys_page2)
{
    PageDesc *p;
    PageDesc *p2 = NULL;

    assert_memory_lock();

    if (phys_pc == -1) {
        /*
         * If the TB is not associated with a physical RAM page then
         * it must be a temporary one-insn TB, and we have nothing to do
         * except fill in the page_addr[] fields.
         */
        assert(tb->cflags & CF_NOCACHE);
        tb->page_addr[0] = tb->page_addr[1] = -1;
        return tb;
    }

    /*
     * Add the TB to the page list, acquiring first the pages' locks.
     * We keep the locks held until after inserting the TB in the hash table,
     * so that if the insertion fails we know for sure that the TBs are still
     * in the page descriptors.
     * Note that inserting into the hash table first isn't an option, since
     * we can only insert TBs that are fully initialized.
     */
    page_lock_pair(&p, phys_pc, &p2, phys_page2, 1);
    tb_page_add(p, tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (p2) {
        tb_page_add(p2, tb, 1, phys_page2);
    } else {
        tb->page_addr[1] = -1;
    }

    if (!(tb->cflags & CF_NOCACHE)) {
        void *existing_tb = NULL;
        uint32_t h;

        /* add in the hash table */
        h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb->cflags & CF_HASH_MASK,
                         tb->trace_vcpu_dstate);
        qht_insert(&tb_ctx.htable, tb, h, &existing_tb);

        /* remove TB from the page(s) if we couldn't insert it */
        if (unlikely(existing_tb)) {
            tb_page_remove(p, tb);
            invalidate_page_bitmap(p);
            if (p2) {
                tb_page_remove(p2, tb);
                invalidate_page_bitmap(p2);
            }
            tb = existing_tb;
        }
    }

    if (p2 && p2 != p) {
        page_unlock(p2);
    }
    page_unlock(p);

#ifdef CONFIG_USER_ONLY
    if (DEBUG_TB_CHECK_GATE) {
        tb_page_check();
    }
#endif
    return tb;
}
/* Called with mmap_lock held for user mode emulation.  */
TranslationBlock *tb_gen_code(CPUState *cpu,
                              target_ulong pc, target_ulong cs_base,
                              uint32_t flags, int cflags)
{
    CPUArchState *env = cpu->env_ptr;
    TranslationBlock *tb, *existing_tb;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    tcg_insn_unit *gen_code_buf;
    int gen_code_size, search_size, max_insns;
#ifdef CONFIG_PROFILER
    TCGProfile *prof = &tcg_ctx->prof;
    int64_t ti;
#endif
    assert_memory_lock();

    phys_pc = get_page_addr_code(env, pc);

    if (phys_pc == -1) {
        /* Generate a temporary TB with 1 insn in it */
        cflags &= ~CF_COUNT_MASK;
        cflags |= CF_NOCACHE | 1;
    }

    cflags &= ~CF_CLUSTER_MASK;
    cflags |= cpu->cluster_index << CF_CLUSTER_SHIFT;

    max_insns = cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }
    if (max_insns > TCG_MAX_INSNS) {
        max_insns = TCG_MAX_INSNS;
    }
    if (cpu->singlestep_enabled || singlestep) {
        max_insns = 1;
    }

 buffer_overflow:
    tb = tcg_tb_alloc(tcg_ctx);
    if (unlikely(!tb)) {
        /* flush must be done */
        tb_flush(cpu);
        mmap_unlock();
        /* Make the execution loop process the flush as soon as possible. */
        cpu->exception_index = EXCP_INTERRUPT;
        cpu_loop_exit(cpu);
    }

    gen_code_buf = tcg_ctx->code_gen_ptr;
    tb->tc.ptr = gen_code_buf;
    tb->pc = pc;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    tb->trace_vcpu_dstate = *cpu->trace_dstate;
    tcg_ctx->tb_cflags = cflags;
 tb_overflow:

#ifdef CONFIG_PROFILER
    /* includes aborted translations because of exceptions */
    atomic_set(&prof->tb_count1, prof->tb_count1 + 1);
    ti = profile_getclock();
#endif

    tcg_func_start(tcg_ctx);

    tcg_ctx->cpu = env_cpu(env);
    gen_intermediate_code(cpu, tb, max_insns);
    tcg_ctx->cpu = NULL;

    trace_translate_block(tb, tb->pc, tb->tc.ptr);
    /* generate machine code */
    tb->jmp_reset_offset[0] = TB_JMP_RESET_OFFSET_INVALID;
    tb->jmp_reset_offset[1] = TB_JMP_RESET_OFFSET_INVALID;
    tcg_ctx->tb_jmp_reset_offset = tb->jmp_reset_offset;
    if (TCG_TARGET_HAS_direct_jump) {
        tcg_ctx->tb_jmp_insn_offset = tb->jmp_target_arg;
        tcg_ctx->tb_jmp_target_addr = NULL;
    } else {
        tcg_ctx->tb_jmp_insn_offset = NULL;
        tcg_ctx->tb_jmp_target_addr = tb->jmp_target_arg;
    }
#ifdef CONFIG_PROFILER
    atomic_set(&prof->tb_count, prof->tb_count + 1);
    atomic_set(&prof->interm_time, prof->interm_time + profile_getclock() - ti);
    ti = profile_getclock();
#endif

    gen_code_size = tcg_gen_code(tcg_ctx, tb);
    if (unlikely(gen_code_size < 0)) {
        switch (gen_code_size) {
        case -1:
            /*
             * Overflow of code_gen_buffer, or the current slice of it.
             *
             * TODO: We don't need to re-do gen_intermediate_code, nor
             * should we re-do the tcg optimization currently hidden
             * inside tcg_gen_code. All that should be required is to
             * flush the TBs, allocate a new TB, re-initialize it per
             * above, and re-do the actual code generation.
             */
            goto buffer_overflow;

        case -2:
            /*
             * The code generated for the TranslationBlock is too large.
             * The maximum size allowed by the unwind info is 64k.
             * There may be stricter constraints from relocations
             * in the tcg backend.
             *
             * Try again with half as many insns as we attempted this time.
             * If a single insn overflows, there's a bug somewhere...
             */
            max_insns = tb->icount;
            assert(max_insns > 1);
            max_insns /= 2;
            goto tb_overflow;

        default:
            g_assert_not_reached();
        }
    }
    search_size = encode_search(tb, (void *)gen_code_buf + gen_code_size);
    if (unlikely(search_size < 0)) {
        goto buffer_overflow;
    }
    tb->tc.size = gen_code_size;

#ifdef CONFIG_PROFILER
    atomic_set(&prof->code_time, prof->code_time + profile_getclock() - ti);
    atomic_set(&prof->code_in_len, prof->code_in_len + tb->size);
    atomic_set(&prof->code_out_len, prof->code_out_len + gen_code_size);
    atomic_set(&prof->search_out_len, prof->search_out_len + search_size);
#endif
    if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM) &&
        qemu_log_in_addr_range(tb->pc)) {
        FILE *logfile = qemu_log_lock();
        int code_size, data_size = 0;
        g_autoptr(GString) note = g_string_new("[tb header & initial instruction]");
        size_t chunk_start = 0;
        int insn = 0;

        qemu_log("OUT: [size=%d]\n", gen_code_size);
        if (tcg_ctx->data_gen_ptr) {
            code_size = tcg_ctx->data_gen_ptr - tb->tc.ptr;
            data_size = gen_code_size - code_size;
        } else {
            code_size = gen_code_size;
        }

        /* Dump header and the first instruction */
        chunk_start = tcg_ctx->gen_insn_end_off[insn];
        log_disas(tb->tc.ptr, chunk_start, note->str);

        /*
         * Dump each instruction chunk, wrapping up empty chunks into
         * the next instruction. The whole array is offset so the
         * first entry is the beginning of the 2nd instruction.
         */
        while (insn <= tb->icount && chunk_start < code_size) {
            size_t chunk_end = tcg_ctx->gen_insn_end_off[insn];
            if (chunk_end > chunk_start) {
                g_string_printf(note, "[guest addr: " TARGET_FMT_lx "]",
                                tcg_ctx->gen_insn_data[insn][0]);
                log_disas(tb->tc.ptr + chunk_start, chunk_end - chunk_start,
                          note->str);
                chunk_start = chunk_end;
            }
            insn++;
        }

        /* Finally dump any data we may have after the block */
        if (data_size) {
            int i;

            qemu_log("  data: [size=%d]\n", data_size);
            for (i = 0; i < data_size; i += sizeof(tcg_target_ulong)) {
                if (sizeof(tcg_target_ulong) == 8) {
                    qemu_log("0x%08" PRIxPTR ":  .quad  0x%016" PRIx64 "\n",
                             (uintptr_t)tcg_ctx->data_gen_ptr + i,
                             *(uint64_t *)(tcg_ctx->data_gen_ptr + i));
                } else {
                    qemu_log("0x%08" PRIxPTR ":  .long  0x%08x\n",
                             (uintptr_t)tcg_ctx->data_gen_ptr + i,
                             *(uint32_t *)(tcg_ctx->data_gen_ptr + i));
                }
            }
        }
        qemu_log("\n");
        qemu_log_flush();
        qemu_log_unlock(logfile);
    }
    atomic_set(&tcg_ctx->code_gen_ptr, (void *)
        ROUND_UP((uintptr_t)gen_code_buf + gen_code_size + search_size,
                 CODE_GEN_ALIGN));

    /* init jump list */
    qemu_spin_init(&tb->jmp_lock);
    tb->jmp_list_head = (uintptr_t)NULL;
    tb->jmp_list_next[0] = (uintptr_t)NULL;
    tb->jmp_list_next[1] = (uintptr_t)NULL;
    tb->jmp_dest[0] = (uintptr_t)NULL;
    tb->jmp_dest[1] = (uintptr_t)NULL;
    /* init original jump addresses which have been set during tcg_gen_code() */
    if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
        tb_reset_jump(tb, 0);
    }
    if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
        tb_reset_jump(tb, 1);
    }

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    /*
     * No explicit memory barrier is required -- tb_link_page() makes the
     * TB visible in a consistent state.
     */
    existing_tb = tb_link_page(tb, phys_pc, phys_page2);
    /* if the TB already exists, discard what we just translated */
    if (unlikely(existing_tb != tb)) {
        uintptr_t orig_aligned = (uintptr_t)gen_code_buf;

        orig_aligned -= ROUND_UP(sizeof(*tb), qemu_icache_linesize);
        atomic_set(&tcg_ctx->code_gen_ptr, (void *)orig_aligned);
        return existing_tb;
    }
    tcg_tb_insert(tb);
    return tb;
}
/*
 * @p must be non-NULL.
 * user-mode: call with mmap_lock held.
 * !user-mode: call with all @pages locked.
 */
static void
tb_invalidate_phys_page_range__locked(struct page_collection *pages,
                                      PageDesc *p, tb_page_addr_t start,
                                      tb_page_addr_t end,
                                      uintptr_t retaddr)
{
    TranslationBlock *tb;
    tb_page_addr_t tb_start, tb_end;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *cpu = current_cpu;
    CPUArchState *env = NULL;
    bool current_tb_not_found = retaddr != 0;
    bool current_tb_modified = false;
    TranslationBlock *current_tb = NULL;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    uint32_t current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    assert_page_locked(p);

#if defined(TARGET_HAS_PRECISE_SMC)
    if (cpu != NULL) {
        env = cpu->env_ptr;
    }
#endif
    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all
       the code */
    PAGE_FOR_EACH_TB(p, tb, n) {
        assert_page_locked(p);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = false;
                /* now we have a real cpu fault */
                current_tb = tcg_tb_lookup(retaddr);
            }
            if (current_tb == tb &&
                (tb_cflags(current_tb) & CF_COUNT_MASK) != 1) {
                /*
                 * If we are modifying the current TB, we must stop
                 * its execution. We could be more precise by checking
                 * that the modification is after the current PC, but it
                 * would require a specialized function to partially
                 * restore the CPU state.
                 */
                current_tb_modified = true;
                cpu_restore_state_from_tb(cpu, current_tb, retaddr, true);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            tb_phys_invalidate__locked(tb);
        }
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        tlb_unprotect_code(start);
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        page_collection_unlock(pages);
        /* Force execution of one insn next time. */
        cpu->cflags_next_tb = 1 | curr_cflags();
        mmap_unlock();
        cpu_loop_exit_noexc(cpu);
    }
#endif
}
/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[. NOTE: start and end must refer to the *same* physical page.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 *
 * Called with mmap_lock held for user-mode emulation
 */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end)
{
    struct page_collection *pages;
    PageDesc *p;

    assert_memory_lock();

    p = page_find(start >> TARGET_PAGE_BITS);
    if (p == NULL) {
        return;
    }
    pages = page_collection_lock(start, end);
    tb_invalidate_phys_page_range__locked(pages, p, start, end, 0);
    page_collection_unlock(pages);
}
/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[. NOTE: start and end may refer to *different* physical pages.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 *
 * Called with mmap_lock held for user-mode emulation.
 */
#ifdef CONFIG_SOFTMMU
void tb_invalidate_phys_range(ram_addr_t start, ram_addr_t end)
#else
void tb_invalidate_phys_range(target_ulong start, target_ulong end)
#endif
{
    struct page_collection *pages;
    tb_page_addr_t next;

    assert_memory_lock();

    pages = page_collection_lock(start, end);
    for (next = (start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
         start < end;
         start = next, next += TARGET_PAGE_SIZE) {
        PageDesc *pd = page_find(start >> TARGET_PAGE_BITS);
        tb_page_addr_t bound = MIN(next, end);

        if (pd == NULL) {
            continue;
        }
        tb_invalidate_phys_page_range__locked(pages, pd, start, bound, 0);
    }
    page_collection_unlock(pages);
}
#ifdef CONFIG_SOFTMMU
/* len must be <= 8 and start must be a multiple of len.
 * Called via softmmu_template.h when code areas are written to with
 * iothread mutex not held.
 *
 * Call with all @pages in the range [@start, @start + len[ locked.
 */
void tb_invalidate_phys_page_fast(struct page_collection *pages,
                                  tb_page_addr_t start, int len,
                                  uintptr_t retaddr)
{
    PageDesc *p;

    assert_memory_lock();

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }

    assert_page_locked(p);
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD) {
        build_page_bitmap(p);
    }
    if (p->code_bitmap) {
        unsigned int nr;
        unsigned long b;

        nr = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG - 1));
        if (b & ((1 << len) - 1)) {
            goto do_invalidate;
        }
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range__locked(pages, p, start, start + len,
                                              retaddr);
    }
}
#else
/* Called with mmap_lock held. If pc is not 0 then it indicates the
 * host PC of the faulting store instruction that caused this invalidate.
 * Returns true if the caller needs to abort execution of the current
 * TB (because it was modified by this store and the guest CPU has
 * precise-SMC semantics).
 */
static bool tb_invalidate_phys_page(tb_page_addr_t addr, uintptr_t pc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *cpu = current_cpu;
    CPUArchState *env = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    uint32_t current_flags = 0;
#endif

    assert_memory_lock();

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        return false;
    }

#ifdef TARGET_HAS_PRECISE_SMC
    if (p->first_tb && pc != 0) {
        current_tb = tcg_tb_lookup(pc);
    }
    if (cpu != NULL) {
        env = cpu->env_ptr;
    }
#endif
    assert_page_locked(p);
    PAGE_FOR_EACH_TB(p, tb, n) {
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (tb_cflags(current_tb) & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state_from_tb(cpu, current_tb, pc, true);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
    }
    p->first_tb = (uintptr_t)NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* Force execution of one insn next time. */
        cpu->cflags_next_tb = 1 | curr_cflags();
        return true;
    }
#endif

    return false;
}
#endif
/* user-mode: call with mmap_lock held */
void tb_check_watchpoint(CPUState *cpu, uintptr_t retaddr)
{
    TranslationBlock *tb;

    assert_memory_lock();

    tb = tcg_tb_lookup(retaddr);
    if (tb) {
        /* We can use retranslation to find the PC.  */
        cpu_restore_state_from_tb(cpu, tb, retaddr, true);
        tb_phys_invalidate(tb, -1);
    } else {
        /* The exception probably happened in a helper.  The CPU state should
           have been saved before calling it.  Fetch the PC from there.  */
        CPUArchState *env = cpu->env_ptr;
        target_ulong pc, cs_base;
        tb_page_addr_t addr;
        uint32_t flags;

        cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
        addr = get_page_addr_code(env, pc);
        if (addr != -1) {
            tb_invalidate_phys_range(addr, addr + 1);
        }
    }
}
#ifndef CONFIG_USER_ONLY
/* in deterministic execution mode, instructions doing device I/Os
 * must be at the end of the TB.
 *
 * Called by softmmu_template.h, with iothread mutex not held.
 */
void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
{
#if defined(TARGET_MIPS) || defined(TARGET_SH4)
    CPUArchState *env = cpu->env_ptr;
#endif
    TranslationBlock *tb;
    uint32_t n;

    tb = tcg_tb_lookup(retaddr);
    if (!tb) {
        cpu_abort(cpu, "cpu_io_recompile: could not find TB for pc=%p",
                  (void *)retaddr);
    }
    cpu_restore_state_from_tb(cpu, tb, retaddr, true);

    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
    n = 1;
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0
        && env->active_tc.PC != tb->pc) {
        env->active_tc.PC -= (env->hflags & MIPS_HFLAG_B16 ? 2 : 4);
        cpu_neg(cpu)->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
        n = 2;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
        && env->pc != tb->pc) {
        env->pc -= 2;
        cpu_neg(cpu)->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
        n = 2;
    }
#endif

    /* Generate a new TB executing the I/O insn.  */
    cpu->cflags_next_tb = curr_cflags() | CF_LAST_IO | n;

    if (tb_cflags(tb) & CF_NOCACHE) {
        if (tb->orig_tb) {
            /* Invalidate original TB if this TB was generated in
             * cpu_exec_nocache() */
            tb_phys_invalidate(tb->orig_tb, -1);
        }
        tcg_tb_remove(tb);
        tb_destroy(tb);
    }

    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
     * the first in the TB) then we end up generating a whole new TB and
     * repeating the fault, which is horribly inefficient.
     * Better would be to execute just this insn uncached, or generate a
     * second new TB.
     */
    cpu_loop_exit_noexc(cpu);
}
static void tb_jmp_cache_clear_page(CPUState *cpu, target_ulong page_addr)
{
    unsigned int i, i0 = tb_jmp_cache_hash_page(page_addr);

    for (i = 0; i < TB_JMP_PAGE_SIZE; i++) {
        atomic_set(&cpu->tb_jmp_cache[i0 + i], NULL);
    }
}

void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr)
{
    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    tb_jmp_cache_clear_page(cpu, addr - TARGET_PAGE_SIZE);
    tb_jmp_cache_clear_page(cpu, addr);
}
static void print_qht_statistics(struct qht_stats hst)
{
    uint32_t hgram_opts;
    size_t hgram_bins;
    char *hgram;

    if (!hst.head_buckets) {
        return;
    }
    qemu_printf("TB hash buckets %zu/%zu (%0.2f%% head buckets used)\n",
                hst.used_head_buckets, hst.head_buckets,
                (double)hst.used_head_buckets / hst.head_buckets * 100);

    hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS;
    hgram_opts |= QDIST_PR_100X | QDIST_PR_PERCENT;
    if (qdist_xmax(&hst.occupancy) - qdist_xmin(&hst.occupancy) == 1) {
        hgram_opts |= QDIST_PR_NODECIMAL;
    }
    hgram = qdist_pr(&hst.occupancy, 10, hgram_opts);
    qemu_printf("TB hash occupancy %0.2f%% avg chain occ. Histogram: %s\n",
                qdist_avg(&hst.occupancy) * 100, hgram);
    g_free(hgram);

    hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS;
    hgram_bins = qdist_xmax(&hst.chain) - qdist_xmin(&hst.chain);
    if (hgram_bins > 10) {
        hgram_bins = 10;
    } else {
        hgram_bins = 0;
        hgram_opts |= QDIST_PR_NODECIMAL | QDIST_PR_NOBINRANGE;
    }
    hgram = qdist_pr(&hst.chain, hgram_bins, hgram_opts);
    qemu_printf("TB hash avg chain %0.3f buckets. Histogram: %s\n",
                qdist_avg(&hst.chain), hgram);
    g_free(hgram);
}
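
/*
 * Per-TB statistics accumulated while iterating over the TCG TB tree;
 * consumed by dump_exec_info() below.
 */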
struct tb_tree_stats {
    size_t nb_tbs;
    size_t host_size;
    size_t target_size;
    size_t max_target_size;
    size_t direct_jmp_count;
    size_t direct_jmp2_count;
    size_t cross_page;
};

static gboolean tb_tree_stats_iter(gpointer key, gpointer value, gpointer data)
{
    const TranslationBlock *tb = value;
    struct tb_tree_stats *tst = data;

    tst->nb_tbs++;
    tst->host_size += tb->tc.size;
    tst->target_size += tb->size;
    if (tb->size > tst->max_target_size) {
        tst->max_target_size = tb->size;
    }
    if (tb->page_addr[1] != -1) {
        tst->cross_page++;
    }
    if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
        tst->direct_jmp_count++;
        if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
            tst->direct_jmp2_count++;
        }
    }
    return false;
}
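
/*
 * Print a summary of the translation buffer: code-cache usage, TB counts
 * and sizes, hash-table health and TLB flush counters. This is the backend
 * for the "info jit" monitor command.
 */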
void dump_exec_info(void)
{
    struct tb_tree_stats tst = {};
    struct qht_stats hst;
    size_t nb_tbs, flush_full, flush_part, flush_elide;

    tcg_tb_foreach(tb_tree_stats_iter, &tst);
    nb_tbs = tst.nb_tbs;
    /* XXX: avoid using doubles ? */
    qemu_printf("Translation buffer state:\n");
    /*
     * Report total code size including the padding and TB structs;
     * otherwise users might think "-tb-size" is not honoured.
     * For avg host size we use the precise numbers from tb_tree_stats though.
     */
    qemu_printf("gen code size %zu/%zu\n",
                tcg_code_size(), tcg_code_capacity());
    qemu_printf("TB count %zu\n", nb_tbs);
    qemu_printf("TB avg target size %zu max=%zu bytes\n",
                nb_tbs ? tst.target_size / nb_tbs : 0,
                tst.max_target_size);
    qemu_printf("TB avg host size %zu bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? tst.host_size / nb_tbs : 0,
                tst.target_size ? (double)tst.host_size / tst.target_size : 0);
    qemu_printf("cross page TB count %zu (%zu%%)\n", tst.cross_page,
                nb_tbs ? (tst.cross_page * 100) / nb_tbs : 0);
    qemu_printf("direct jump count %zu (%zu%%) (2 jumps=%zu %zu%%)\n",
                tst.direct_jmp_count,
                nb_tbs ? (tst.direct_jmp_count * 100) / nb_tbs : 0,
                tst.direct_jmp2_count,
                nb_tbs ? (tst.direct_jmp2_count * 100) / nb_tbs : 0);

    qht_statistics_init(&tb_ctx.htable, &hst);
    print_qht_statistics(hst);
    qht_statistics_destroy(&hst);

    qemu_printf("\nStatistics:\n");
    qemu_printf("TB flush count %u\n",
                atomic_read(&tb_ctx.tb_flush_count));
    qemu_printf("TB invalidate count %zu\n",
                tcg_tb_phys_invalidate_count());

    tlb_flush_counts(&flush_full, &flush_part, &flush_elide);
    qemu_printf("TLB full flushes %zu\n", flush_full);
    qemu_printf("TLB partial flushes %zu\n", flush_part);
    qemu_printf("TLB elided flushes %zu\n", flush_elide);
}
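
/*
 * Print per-opcode execution counts collected by TCG; this only produces
 * data when QEMU was built with TCG profiling (CONFIG_PROFILER).
 */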
void dump_opcount_info(void)
{
    tcg_dump_op_count();
}

#else /* CONFIG_USER_ONLY */

void cpu_interrupt(CPUState *cpu, int mask)
{
    g_assert(qemu_mutex_iothread_locked());
    cpu->interrupt_request |= mask;
    atomic_set(&cpu_neg(cpu)->icount_decr.u16.high, -1);
}

/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */
struct walk_memory_regions_data {
    walk_memory_regions_fn fn;
    void *priv;
    target_ulong start;
    int prot;
};

static int walk_memory_regions_end(struct walk_memory_regions_data *data,
                                   target_ulong end, int new_prot)
{
    if (data->start != -1u) {
        int rc = data->fn(data->priv, data->start, end, data->prot);
        if (rc != 0) {
            return rc;
        }
    }

    data->start = (new_prot ? end : -1u);
    data->prot = new_prot;

    return 0;
}
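
/*
 * Recursively walk one level of the l1_map radix tree, closing the current
 * region and starting a new one whenever the protection bits change.
 */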
static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 target_ulong base, int level, void **lp)
{
    target_ulong pa;
    int i, rc;

    if (*lp == NULL) {
        return walk_memory_regions_end(data, base, 0);
    }

    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            if (prot != data->prot) {
                rc = walk_memory_regions_end(data, pa, prot);
                if (rc != 0) {
                    return rc;
                }
            }
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            pa = base | ((target_ulong)i <<
                         (TARGET_PAGE_BITS + V_L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
            if (rc != 0) {
                return rc;
            }
        }
    }

    return 0;
}

int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    struct walk_memory_regions_data data;
    uintptr_t i, l1_sz = v_l1_size;

    data.fn = fn;
    data.priv = priv;
    data.start = -1u;
    data.prot = 0;

    for (i = 0; i < l1_sz; i++) {
        target_ulong base = i << (v_l1_shift + TARGET_PAGE_BITS);
        int rc = walk_memory_regions_1(&data, base, v_l2_levels, l1_map + i);
        if (rc != 0) {
            return rc;
        }
    }

    return walk_memory_regions_end(&data, 0, 0);
}
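
/*
 * walk_memory_regions() callback: print one region's start, end, size and
 * protection bits to the FILE passed in @priv.
 */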
static int dump_region(void *priv, target_ulong start,
    target_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    (void) fprintf(f, TARGET_FMT_lx"-"TARGET_FMT_lx
        " "TARGET_FMT_lx" %c%c%c\n",
        start, end, end - start,
        ((prot & PAGE_READ) ? 'r' : '-'),
        ((prot & PAGE_WRITE) ? 'w' : '-'),
        ((prot & PAGE_EXEC) ? 'x' : '-'));

    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    const int length = sizeof(target_ulong) * 2;

    (void) fprintf(f, "%-*s %-*s %-*s %s\n",
            length, "start", length, "end", length, "size", "prot");
    walk_memory_regions(f, dump_region);
}
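
/*
 * Return the PAGE_* flags recorded for a guest address, or 0 if the page
 * has no descriptor (i.e. is unmapped).
 */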
int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        return 0;
    }
    return p->flags;
}

/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is positioned automatically depending
   on PAGE_WRITE.  The mmap_lock should already be held. */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid. */
    assert(end - 1 <= GUEST_ADDR_MAX);
    assert(start < end);
    assert_memory_lock();

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        flags |= PAGE_WRITE_ORG;
    }

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate
           the code inside. */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0);
        }
        p->flags = flags;
    }
}
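
/*
 * Check that a range of guest addresses carries the requested PAGE_*
 * permissions. Returns 0 on success and -1 on failure; pages that were
 * made read-only to protect translated code are unprotected on demand.
 */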
int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid. */
    if (TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS) {
        assert(start < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
    }

    if (len == 0) {
        return 0;
    }
    if (start + len - 1 < start) {
        /* We've wrapped around. */
        return -1;
    }

    /* must do before we lose bits in the next step */
    end = TARGET_PAGE_ALIGN(start + len);
    start = start & TARGET_PAGE_MASK;

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p) {
            return -1;
        }
        if (!(p->flags & PAGE_VALID)) {
            return -1;
        }

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) {
            return -1;
        }
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG)) {
                return -1;
            }
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0)) {
                    return -1;
                }
            }
        }
    }
    return 0;
}

/* called from signal handler: invalidate the code and unprotect the
 * page. Return 0 if the fault was not handled, 1 if it was handled,
 * and 2 if it was handled but the caller must cause the TB to be
 * immediately exited. (We can only return 2 if the 'pc' argument is
 * non-zero.)
 */
int page_unprotect(target_ulong address, uintptr_t pc)
{
    unsigned int prot;
    bool current_tb_invalidated;
    PageDesc *p;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok. */
    mmap_lock();

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        mmap_unlock();
        return 0;
    }

    /* if the page was really writable, then we change its
       protection back to writable */
    if (p->flags & PAGE_WRITE_ORG) {
        current_tb_invalidated = false;
        if (p->flags & PAGE_WRITE) {
            /* If the page is actually marked WRITE then assume this is because
             * this thread raced with another one which got here first and
             * set the page to PAGE_WRITE and did the TB invalidate for us.
             */
#ifdef TARGET_HAS_PRECISE_SMC
            TranslationBlock *current_tb = tcg_tb_lookup(pc);
            if (current_tb) {
                current_tb_invalidated = tb_cflags(current_tb) & CF_INVALID;
            }
#endif
        } else {
            host_start = address & qemu_host_page_mask;
            host_end = host_start + qemu_host_page_size;

            prot = 0;
            for (addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
                p = page_find(addr >> TARGET_PAGE_BITS);
                p->flags |= PAGE_WRITE;
                prot |= p->flags;

                /* and since the content will be modified, we must invalidate
                   the corresponding translated code. */
                current_tb_invalidated |= tb_invalidate_phys_page(addr, pc);
#ifdef CONFIG_USER_ONLY
                if (DEBUG_TB_CHECK_GATE) {
                    tb_invalidate_check(addr);
                }
#endif
            }
            mprotect((void *)g2h(host_start), qemu_host_page_size,
                     prot & PAGE_BITS);
        }
        mmap_unlock();
        /* If current TB was invalidated return to main loop */
        return current_tb_invalidated ? 2 : 1;
    }
    mmap_unlock();
    return 0;
}

#endif /* CONFIG_USER_ONLY */

/* This is a wrapper for common code that can not use CONFIG_SOFTMMU */
void tcg_flush_softmmu_tlb(CPUState *cs)
{
#ifdef CONFIG_SOFTMMU