/*
 * Translation Block Maintenance
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/interval-tree.h"
#include "exec/cputlb.h"
#include "exec/exec-all.h"
#include "exec/translate-all.h"
#include "sysemu/tcg.h"
#include "tb-context.h"
/* List iterators for lists of tagged pointers in TranslationBlock. */
#define TB_FOR_EACH_TAGGED(head, tb, n, field)                             \
    for (n = (head) & 1, tb = (TranslationBlock *)((head) & ~1);           \
         tb; tb = (TranslationBlock *)tb->field[n], n = (uintptr_t)tb & 1, \
             tb = (TranslationBlock *)((uintptr_t)tb & ~1))

#define TB_FOR_EACH_JMP(head_tb, tb, n) \
    TB_FOR_EACH_TAGGED((head_tb)->jmp_list_head, tb, n, jmp_list_next)
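/*
 * Editorial note on the encoding (summary, not from the original source):
 * a TranslationBlock can sit on two such lists at once (one per guest page
 * it covers, or one per outgoing jump slot), so each link stores a pointer
 * together with a 1-bit slot number in its low bit, e.g. via
 * "p->first_tb = (uintptr_t)tb | n" as done in tb_page_add() below.
 * The iterator above unpacks that pair: "& ~1" recovers the pointer and
 * "& 1" recovers which of the next TB's two link slots continues the chain.
 */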
static bool tb_cmp(const void *ap, const void *bp)
    const TranslationBlock *a = ap;
    const TranslationBlock *b = bp;

    return ((TARGET_TB_PCREL || tb_pc(a) == tb_pc(b)) &&
            a->cs_base == b->cs_base &&
            a->flags == b->flags &&
            (tb_cflags(a) & ~CF_INVALID) == (tb_cflags(b) & ~CF_INVALID) &&
            a->trace_vcpu_dstate == b->trace_vcpu_dstate &&
            tb_page_addr0(a) == tb_page_addr0(b) &&
            tb_page_addr1(a) == tb_page_addr1(b));
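/*
 * Editorial note: tb_cmp() above is the comparison callback for the global
 * TB hash table; it only runs on hash collisions, so two TBs are considered
 * identical only when the guest pc (unless TARGET_TB_PCREL), cs_base, flags,
 * cflags (ignoring CF_INVALID), trace state and both physical page addresses
 * all match. tb_htable_init() below registers it with the qht.
 */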
void tb_htable_init(void)
    unsigned int mode = QHT_MODE_AUTO_RESIZE;

    qht_init(&tb_ctx.htable, tb_cmp, CODE_GEN_HTABLE_SIZE, mode);

typedef struct PageDesc PageDesc;

#ifdef CONFIG_USER_ONLY

/*
 * In user-mode, page locks aren't used; mmap_lock is enough.
 */
#define assert_page_locked(pd) tcg_debug_assert(have_mmap_lock())

static inline void page_lock_pair(PageDesc **ret_p1, tb_page_addr_t phys1,
                                  PageDesc **ret_p2, tb_page_addr_t phys2,
                                  bool alloc) { }

static inline void page_unlock(PageDesc *pd) { }
static inline void page_lock_tb(const TranslationBlock *tb) { }
static inline void page_unlock_tb(const TranslationBlock *tb) { }

/*
 * For user-only, since we are protecting all of memory with a single lock,
 * and because the two pages of a TranslationBlock are always contiguous,
 * use a single data structure to record all TranslationBlocks.
 */
static IntervalTreeRoot tb_root;
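/*
 * Editorial note: in the user-only configuration each TB is entered into
 * this interval tree keyed by [itree.start, itree.last], i.e. the guest
 * address range covered by the translated code (tb_record() below sets
 * itree.last = itree.start + tb->size - 1). Invalidating a range of guest
 * memory then becomes an interval query rather than a walk of per-page
 * TB lists.
 */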
static void tb_remove_all(void)
    memset(&tb_root, 0, sizeof(tb_root));

/* Call with mmap_lock held. */
static void tb_record(TranslationBlock *tb, PageDesc *p1, PageDesc *p2)
    assert_memory_lock();

    tb->itree.last = tb->itree.start + tb->size - 1;

    /* translator_loop() must have made all TB pages non-writable */
    addr = tb_page_addr0(tb);
    flags = page_get_flags(addr);
    assert(!(flags & PAGE_WRITE));

    addr = tb_page_addr1(tb);
    flags = page_get_flags(addr);
    assert(!(flags & PAGE_WRITE));

    interval_tree_insert(&tb->itree, &tb_root);

/* Call with mmap_lock held. */
static void tb_remove(TranslationBlock *tb)
    assert_memory_lock();
    interval_tree_remove(&tb->itree, &tb_root);
/* TODO: For now, still shared with translate-all.c for system mode. */
#define PAGE_FOR_EACH_TB(start, end, pagedesc, T, N)    \
    for (T = foreach_tb_first(start, end),              \
         N = foreach_tb_next(T, start, end);            \
         T != NULL;                                     \
         T = N, N = foreach_tb_next(N, start, end))
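/*
 * Illustrative use (editorial sketch, mirroring the user-only
 * tb_invalidate_phys_range() further down): walk every TB intersecting a
 * guest address range.
 *
 *     TranslationBlock *tb;
 *     PageForEachNext n;
 *
 *     PAGE_FOR_EACH_TB(start, end, unused, tb, n) {
 *         tb_phys_invalidate__locked(tb);
 *     }
 */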
typedef TranslationBlock *PageForEachNext;

static PageForEachNext foreach_tb_first(tb_page_addr_t start,
                                        tb_page_addr_t end)
    IntervalTreeNode *n = interval_tree_iter_first(&tb_root, start, end - 1);

    return n ? container_of(n, TranslationBlock, itree) : NULL;

static PageForEachNext foreach_tb_next(PageForEachNext tb,
                                       tb_page_addr_t start,
                                       tb_page_addr_t end)
        n = interval_tree_iter_next(&tb->itree, start, end - 1);
            return container_of(n, TranslationBlock, itree);
#else /* !CONFIG_USER_ONLY */

/*
 * In system mode we want L1_MAP to be based on ram offsets.
 */
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables. */
#define V_L2_SIZE (1 << V_L2_BITS)
/*
 * L1 Mapping properties
 */
static int v_l1_size;
static int v_l1_shift;
static int v_l2_levels;

/*
 * The bottom level has pointers to PageDesc, and is indexed by
 * anything from 4 to (V_L2_BITS + 3) bits, depending on target page size.
 */
#define V_L1_MIN_BITS 4
#define V_L1_MAX_BITS (V_L2_BITS + 3)
#define V_L1_MAX_SIZE (1 << V_L1_MAX_BITS)

static void *l1_map[V_L1_MAX_SIZE];
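/*
 * Editorial note: l1_map is the fixed-size top level of a radix tree over
 * page indexes. Below it sit v_l2_levels intermediate levels of V_L2_SIZE
 * pointers each, and the leaves are arrays of PageDesc; intermediate nodes
 * are allocated lazily by page_find_alloc(). The level geometry is computed
 * at startup by page_table_config_init() below.
 */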
    /* list of TBs intersecting this ram page */

void page_table_config_init(void)
    assert(TARGET_PAGE_BITS);
    /* The bits remaining after N lower levels of page tables. */
    v_l1_bits = (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % V_L2_BITS;
    if (v_l1_bits < V_L1_MIN_BITS) {
        v_l1_bits += V_L2_BITS;

    v_l1_size = 1 << v_l1_bits;
    v_l1_shift = L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - v_l1_bits;
    v_l2_levels = v_l1_shift / V_L2_BITS - 1;

    assert(v_l1_bits <= V_L1_MAX_BITS);
    assert(v_l1_shift % V_L2_BITS == 0);
    assert(v_l2_levels >= 0);
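/*
 * Worked example (editorial; assumes V_L2_BITS == 10, 4 KiB target pages,
 * and a 52-bit L1_MAP_ADDR_SPACE_BITS -- none of these values appear in
 * this excerpt): (52 - 12) % 10 == 0, which is below V_L1_MIN_BITS, so
 * v_l1_bits becomes 10 and v_l1_size 1024; v_l1_shift = 52 - 12 - 10 = 30,
 * giving v_l2_levels = 30 / 10 - 1 = 2. A lookup therefore consumes
 * 10 (L1) + 2 * 10 (intermediate) + 10 (leaf) = 40 bits, exactly the
 * 52 - 12 page-index bits.
 */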
static PageDesc *page_find_alloc(tb_page_addr_t index, bool alloc)
    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> v_l1_shift) & (v_l1_size - 1));

    for (i = v_l2_levels; i > 0; i--) {
        void **p = qatomic_rcu_read(lp);

            p = g_new0(void *, V_L2_SIZE);
            existing = qatomic_cmpxchg(lp, NULL, p);
            if (unlikely(existing)) {

        lp = p + ((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1));

    pd = qatomic_rcu_read(lp);

        pd = g_new0(PageDesc, V_L2_SIZE);
        for (int i = 0; i < V_L2_SIZE; i++) {
            qemu_spin_init(&pd[i].lock);

        existing = qatomic_cmpxchg(lp, NULL, pd);
        if (unlikely(existing)) {
            for (int i = 0; i < V_L2_SIZE; i++) {
                qemu_spin_destroy(&pd[i].lock);

    return pd + (index & (V_L2_SIZE - 1));
static inline PageDesc *page_find(tb_page_addr_t index)
    return page_find_alloc(index, false);

/**
 * struct page_entry - page descriptor entry
 * @pd:     pointer to the &struct PageDesc of the page this entry represents
 * @index:  page index of the page
 * @locked: whether the page is locked
 *
 * This struct helps us keep track of the locked state of a page, without
 * bloating &struct PageDesc.
 *
 * A page lock protects accesses to all fields of &struct PageDesc.
 *
 * See also: &struct page_collection.
 */
    tb_page_addr_t index;

/**
 * struct page_collection - tracks a set of pages (i.e. &struct page_entry's)
 * @tree:   Binary search tree (BST) of the pages, with key == page index
 * @max:    Pointer to the page in @tree with the highest page index
 *
 * To avoid deadlock we lock pages in ascending order of page index.
 * When operating on a set of pages, we need to keep track of them so that
 * we can lock them in order and also unlock them later. For this we collect
 * pages (i.e. &struct page_entry's) in a binary search @tree. Given that the
 * @tree implementation we use does not provide an O(1) operation to obtain the
 * highest-ranked element, we use @max to keep track of the inserted page
 * with the highest index. This is valuable because if a page is not in
 * the tree and its index is higher than @max's, then we can lock it
 * without breaking the locking order rule.
 *
 * Note on naming: 'struct page_set' would be shorter, but we already have a few
 * page_set_*() helpers, so page_collection is used instead to avoid confusion.
 *
 * See also: page_collection_lock().
 */
struct page_collection {
    struct page_entry *max;
typedef int PageForEachNext;
#define PAGE_FOR_EACH_TB(start, end, pagedesc, tb, n) \
    TB_FOR_EACH_TAGGED((pagedesc)->first_tb, tb, n, page_next)
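/*
 * Editorial note: unlike the user-only variant above, the system-mode
 * PAGE_FOR_EACH_TB walks the tagged-pointer list hanging off a single
 * PageDesc (its first_tb field), so the caller must already hold that
 * page's lock; @start and @end are not used by the macro itself and range
 * filtering is done by the caller, as in
 * tb_invalidate_phys_page_range__locked() below.
 */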
#ifdef CONFIG_DEBUG_TCG

static __thread GHashTable *ht_pages_locked_debug;

static void ht_pages_locked_debug_init(void)
    if (ht_pages_locked_debug) {

    ht_pages_locked_debug = g_hash_table_new(NULL, NULL);

static bool page_is_locked(const PageDesc *pd)
    ht_pages_locked_debug_init();
    found = g_hash_table_lookup(ht_pages_locked_debug, pd);

static void page_lock__debug(PageDesc *pd)
    ht_pages_locked_debug_init();
    g_assert(!page_is_locked(pd));
    g_hash_table_insert(ht_pages_locked_debug, pd, pd);

static void page_unlock__debug(const PageDesc *pd)
    ht_pages_locked_debug_init();
    g_assert(page_is_locked(pd));
    removed = g_hash_table_remove(ht_pages_locked_debug, pd);

static void do_assert_page_locked(const PageDesc *pd,
                                  const char *file, int line)
    if (unlikely(!page_is_locked(pd))) {
        error_report("assert_page_lock: PageDesc %p not locked @ %s:%d",

#define assert_page_locked(pd) do_assert_page_locked(pd, __FILE__, __LINE__)

void assert_no_pages_locked(void)
    ht_pages_locked_debug_init();
    g_assert(g_hash_table_size(ht_pages_locked_debug) == 0);

#else /* !CONFIG_DEBUG_TCG */

static inline void page_lock__debug(const PageDesc *pd) { }
static inline void page_unlock__debug(const PageDesc *pd) { }
static inline void assert_page_locked(const PageDesc *pd) { }

#endif /* CONFIG_DEBUG_TCG */
static void page_lock(PageDesc *pd)
    page_lock__debug(pd);
    qemu_spin_lock(&pd->lock);

static void page_unlock(PageDesc *pd)
    qemu_spin_unlock(&pd->lock);
    page_unlock__debug(pd);

static inline struct page_entry *
page_entry_new(PageDesc *pd, tb_page_addr_t index)
    struct page_entry *pe = g_malloc(sizeof(*pe));

static void page_entry_destroy(gpointer p)
    struct page_entry *pe = p;

    g_assert(pe->locked);

/* returns false on success */
static bool page_entry_trylock(struct page_entry *pe)
    busy = qemu_spin_trylock(&pe->pd->lock);
        g_assert(!pe->locked);
        page_lock__debug(pe->pd);

static void do_page_entry_lock(struct page_entry *pe)
    g_assert(!pe->locked);

static gboolean page_entry_lock(gpointer key, gpointer value, gpointer data)
    struct page_entry *pe = value;

    do_page_entry_lock(pe);

static gboolean page_entry_unlock(gpointer key, gpointer value, gpointer data)
    struct page_entry *pe = value;
/*
 * Trylock a page, and if successful, add the page to a collection.
 * Returns true ("busy") if the page could not be locked; false otherwise.
 */
static bool page_trylock_add(struct page_collection *set, tb_page_addr_t addr)
    tb_page_addr_t index = addr >> TARGET_PAGE_BITS;
    struct page_entry *pe;

    pe = g_tree_lookup(set->tree, &index);

    pd = page_find(index);

    pe = page_entry_new(pd, index);
    g_tree_insert(set->tree, &pe->index, pe);

    /*
     * If this is either (1) the first insertion or (2) a page whose index
     * is higher than any other so far, just lock the page and move on.
     */
    if (set->max == NULL || pe->index > set->max->index) {
        do_page_entry_lock(pe);

    /*
     * Try to acquire out-of-order lock; if busy, return busy so that we acquire
     * the locks in order.
     */
    return page_entry_trylock(pe);
static gint tb_page_addr_cmp(gconstpointer ap, gconstpointer bp, gpointer udata)
    tb_page_addr_t a = *(const tb_page_addr_t *)ap;
    tb_page_addr_t b = *(const tb_page_addr_t *)bp;
/*
 * Lock a range of pages ([@start,@end[) as well as the pages of all
 * intersecting TBs.
 * Locking order: acquire locks in ascending order of page index.
 */
static struct page_collection *page_collection_lock(tb_page_addr_t start,
                                                    tb_page_addr_t end)
    struct page_collection *set = g_malloc(sizeof(*set));
    tb_page_addr_t index;

    start >>= TARGET_PAGE_BITS;
    end >>= TARGET_PAGE_BITS;
    g_assert(start <= end);

    set->tree = g_tree_new_full(tb_page_addr_cmp, NULL, NULL,
                                page_entry_destroy);

    assert_no_pages_locked();

    g_tree_foreach(set->tree, page_entry_lock, NULL);

    for (index = start; index <= end; index++) {
        TranslationBlock *tb;

        pd = page_find(index);

        if (page_trylock_add(set, index << TARGET_PAGE_BITS)) {
            g_tree_foreach(set->tree, page_entry_unlock, NULL);

        assert_page_locked(pd);
        PAGE_FOR_EACH_TB(unused, unused, pd, tb, n) {
            if (page_trylock_add(set, tb_page_addr0(tb)) ||
                (tb_page_addr1(tb) != -1 &&
                 page_trylock_add(set, tb_page_addr1(tb)))) {
                /* drop all locks, and reacquire in order */
                g_tree_foreach(set->tree, page_entry_unlock, NULL);
static void page_collection_unlock(struct page_collection *set)
    /* entries are unlocked and freed via page_entry_destroy */
    g_tree_destroy(set->tree);

/* Set to NULL all the 'first_tb' fields in all PageDescs. */
static void tb_remove_all_1(int level, void **lp)
        for (i = 0; i < V_L2_SIZE; ++i) {
            pd[i].first_tb = (uintptr_t)NULL;

        for (i = 0; i < V_L2_SIZE; ++i) {
            tb_remove_all_1(level - 1, pp + i);

static void tb_remove_all(void)
    int i, l1_sz = v_l1_size;

    for (i = 0; i < l1_sz; i++) {
        tb_remove_all_1(v_l2_levels, l1_map + i);
/*
 * Add the tb to the target page and protect it if necessary.
 * Called with @p->lock held.
 */
static inline void tb_page_add(PageDesc *p, TranslationBlock *tb,
    bool page_already_protected;

    assert_page_locked(p);

    tb->page_next[n] = p->first_tb;
    page_already_protected = p->first_tb != 0;
    p->first_tb = (uintptr_t)tb | n;

    /*
     * If some code is already present, then the pages are already
     * protected, so we only need to handle the case where the first TB
     * is allocated in a physical page.
     */
    if (!page_already_protected) {
        tlb_protect_code(tb->page_addr[n] & TARGET_PAGE_MASK);
static void tb_record(TranslationBlock *tb, PageDesc *p1, PageDesc *p2)
    tb_page_add(p1, tb, 0);
        tb_page_add(p2, tb, 1);
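/*
 * Editorial note: a TB that spans two guest pages is linked into both
 * PageDesc lists, using link slot 0 for the first page and slot 1 for the
 * second; the slot number is the same tag bit that PAGE_FOR_EACH_TB and
 * tb_page_remove() below recover from the list pointers.
 */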
static inline void tb_page_remove(PageDesc *pd, TranslationBlock *tb)
    TranslationBlock *tb1;

    assert_page_locked(pd);
    pprev = &pd->first_tb;
    PAGE_FOR_EACH_TB(unused, unused, pd, tb1, n1) {
            *pprev = tb1->page_next[n1];
        pprev = &tb1->page_next[n1];
    g_assert_not_reached();

static void tb_remove(TranslationBlock *tb)
    pd = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
    tb_page_remove(pd, tb);
    if (unlikely(tb->page_addr[1] != -1)) {
        pd = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(pd, tb);
static void page_lock_pair(PageDesc **ret_p1, tb_page_addr_t phys1,
                           PageDesc **ret_p2, tb_page_addr_t phys2, bool alloc)
    tb_page_addr_t page1;
    tb_page_addr_t page2;

    assert_memory_lock();
    g_assert(phys1 != -1);

    page1 = phys1 >> TARGET_PAGE_BITS;
    page2 = phys2 >> TARGET_PAGE_BITS;

    p1 = page_find_alloc(page1, alloc);

    if (likely(phys2 == -1)) {
    } else if (page1 == page2) {
        p2 = page_find_alloc(page2, alloc);

/* lock the page(s) of a TB in the correct acquisition order */
static void page_lock_tb(const TranslationBlock *tb)
    page_lock_pair(NULL, tb_page_addr0(tb), NULL, tb_page_addr1(tb), false);

static void page_unlock_tb(const TranslationBlock *tb)
    PageDesc *p1 = page_find(tb_page_addr0(tb) >> TARGET_PAGE_BITS);

    if (unlikely(tb_page_addr1(tb) != -1)) {
        PageDesc *p2 = page_find(tb_page_addr1(tb) >> TARGET_PAGE_BITS);

#endif /* CONFIG_USER_ONLY */
/* flush all the translation blocks */
static void do_tb_flush(CPUState *cpu, run_on_cpu_data tb_flush_count)
    bool did_flush = false;

    /* If it has already been done on request of another CPU, just retry. */
    if (tb_ctx.tb_flush_count != tb_flush_count.host_int) {

        tcg_flush_jmp_cache(cpu);

    qht_reset_size(&tb_ctx.htable, CODE_GEN_HTABLE_SIZE);

    tcg_region_reset_all();
    /* XXX: flush processor icache at this point if cache flush is expensive */
    qatomic_mb_set(&tb_ctx.tb_flush_count, tb_ctx.tb_flush_count + 1);

    qemu_plugin_flush_cb();
void tb_flush(CPUState *cpu)
    unsigned tb_flush_count = qatomic_mb_read(&tb_ctx.tb_flush_count);

    if (cpu_in_exclusive_context(cpu)) {
        do_tb_flush(cpu, RUN_ON_CPU_HOST_INT(tb_flush_count));
        async_safe_run_on_cpu(cpu, do_tb_flush,
                              RUN_ON_CPU_HOST_INT(tb_flush_count));
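/*
 * Editorial note on the flush handshake: tb_flush() samples
 * tb_ctx.tb_flush_count and passes it along, and do_tb_flush() bails out if
 * the count has moved on in the meantime, so concurrent flush requests
 * collapse into a single flush. Outside of an exclusive context the work is
 * deferred via async_safe_run_on_cpu(), i.e. it runs once all vCPUs have
 * stopped executing TBs.
 */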
/* remove @orig from its @n_orig-th jump list */
static inline void tb_remove_from_jmp_list(TranslationBlock *orig, int n_orig)
    uintptr_t ptr, ptr_locked;
    TranslationBlock *dest;
    TranslationBlock *tb;

    /* mark the LSB of jmp_dest[] so that no further jumps can be inserted */
    ptr = qatomic_or_fetch(&orig->jmp_dest[n_orig], 1);
    dest = (TranslationBlock *)(ptr & ~1);

    qemu_spin_lock(&dest->jmp_lock);
    /*
     * While acquiring the lock, the jump might have been removed if the
     * destination TB was invalidated; check again.
     */
    ptr_locked = qatomic_read(&orig->jmp_dest[n_orig]);
    if (ptr_locked != ptr) {
        qemu_spin_unlock(&dest->jmp_lock);
        /*
         * The only possibility is that the jump was unlinked via
         * tb_jump_unlink(dest). Seeing here another destination would be a bug,
         * because we set the LSB above.
         */
        g_assert(ptr_locked == 1 && dest->cflags & CF_INVALID);

    /*
     * We first acquired the lock, and since the destination pointer matches,
     * we know for sure that @orig is in the jmp list.
     */
    pprev = &dest->jmp_list_head;
    TB_FOR_EACH_JMP(dest, tb, n) {
        if (tb == orig && n == n_orig) {
            *pprev = tb->jmp_list_next[n];
            /* no need to set orig->jmp_dest[n]; setting the LSB was enough */
            qemu_spin_unlock(&dest->jmp_lock);
        pprev = &tb->jmp_list_next[n];
    g_assert_not_reached();
/*
 * Reset the jump entry 'n' of a TB so that it is not chained to another TB.
 */
void tb_reset_jump(TranslationBlock *tb, int n)
    uintptr_t addr = (uintptr_t)(tb->tc.ptr + tb->jmp_reset_offset[n]);
    tb_set_jmp_target(tb, n, addr);
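/*
 * Editorial note: "resetting" patches the direct jump at slot @n so that it
 * targets tb->tc.ptr + jmp_reset_offset[n], i.e. a location inside the TB's
 * own generated code rather than another TB; execution then leaves through
 * the TB's normal exit path instead of chaining. tb_jmp_unlink() below uses
 * this to cut every incoming jump when a TB is invalidated.
 */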
/* remove any jumps to the TB */
static inline void tb_jmp_unlink(TranslationBlock *dest)
    TranslationBlock *tb;

    qemu_spin_lock(&dest->jmp_lock);

    TB_FOR_EACH_JMP(dest, tb, n) {
        tb_reset_jump(tb, n);
        qatomic_and(&tb->jmp_dest[n], (uintptr_t)NULL | 1);
        /* No need to clear the list entry; setting the dest ptr is enough */
    dest->jmp_list_head = (uintptr_t)NULL;

    qemu_spin_unlock(&dest->jmp_lock);
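/*
 * Editorial note: the qatomic_and() above leaves jmp_dest[n] == 1 (a NULL
 * pointer with the LSB set). Keeping the LSB set is what lets a concurrent
 * tb_remove_from_jmp_list() recognise this situation and pass its
 * "ptr_locked == 1 && dest->cflags & CF_INVALID" assertion instead of
 * walking a list it is no longer on.
 */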
static void tb_jmp_cache_inval_tb(TranslationBlock *tb)
    if (TARGET_TB_PCREL) {
        /* A TB may be at any virtual address */
            tcg_flush_jmp_cache(cpu);
        uint32_t h = tb_jmp_cache_hash_func(tb_pc(tb));

            CPUJumpCache *jc = cpu->tb_jmp_cache;

            if (qatomic_read(&jc->array[h].tb) == tb) {
                qatomic_set(&jc->array[h].tb, NULL);
/*
 * In user-mode, call with mmap_lock held.
 * In !user-mode, if @rm_from_page_list is set, call with the TB's pages'
 * locks held.
 */
static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list)
    tb_page_addr_t phys_pc;
    uint32_t orig_cflags = tb_cflags(tb);

    assert_memory_lock();

    /* make sure no further incoming jumps will be chained to this TB */
    qemu_spin_lock(&tb->jmp_lock);
    qatomic_set(&tb->cflags, tb->cflags | CF_INVALID);
    qemu_spin_unlock(&tb->jmp_lock);

    /* remove the TB from the hash list */
    phys_pc = tb_page_addr0(tb);
    h = tb_hash_func(phys_pc, (TARGET_TB_PCREL ? 0 : tb_pc(tb)),
                     tb->flags, orig_cflags, tb->trace_vcpu_dstate);
    if (!qht_remove(&tb_ctx.htable, tb, h)) {

    /* remove the TB from the page list */
    if (rm_from_page_list) {

    /* remove the TB from the per-CPU jump caches */
    tb_jmp_cache_inval_tb(tb);

    /* suppress this TB from the two jump lists */
    tb_remove_from_jmp_list(tb, 0);
    tb_remove_from_jmp_list(tb, 1);

    /* suppress any remaining jumps to this TB */

    qatomic_set(&tb_ctx.tb_phys_invalidate_count,
                tb_ctx.tb_phys_invalidate_count + 1);
static void tb_phys_invalidate__locked(TranslationBlock *tb)
    qemu_thread_jit_write();
    do_tb_phys_invalidate(tb, true);
    qemu_thread_jit_execute();

/*
 * Called with mmap_lock held in user-mode.
 */
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
    if (page_addr == -1 && tb_page_addr0(tb) != -1) {
        do_tb_phys_invalidate(tb, true);
        do_tb_phys_invalidate(tb, false);
/*
 * Add a new TB and link it to the physical page tables. phys_page2 is
 * (-1) to indicate that only one page contains the TB.
 *
 * Called with mmap_lock held for user-mode emulation.
 *
 * Returns a pointer to @tb, or a pointer to an existing TB that matches @tb.
 * Note that in !user-mode, another thread might have already added a TB
 * for the same block of guest code that @tb corresponds to. In that case,
 * the caller should discard the original @tb, and use instead the returned TB.
 */
TranslationBlock *tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
                               tb_page_addr_t phys_page2)
    void *existing_tb = NULL;

    assert_memory_lock();
    tcg_debug_assert(!(tb->cflags & CF_INVALID));

    /*
     * Add the TB to the page list, acquiring first the pages' locks.
     * We keep the locks held until after inserting the TB in the hash table,
     * so that if the insertion fails we know for sure that the TBs are still
     * in the page descriptors.
     * Note that inserting into the hash table first isn't an option, since
     * we can only insert TBs that are fully initialized.
     */
    page_lock_pair(&p, phys_pc, &p2, phys_page2, true);
    tb_record(tb, p, p2);

    /* add in the hash table */
    h = tb_hash_func(phys_pc, (TARGET_TB_PCREL ? 0 : tb_pc(tb)),
                     tb->flags, tb->cflags, tb->trace_vcpu_dstate);
    qht_insert(&tb_ctx.htable, tb, h, &existing_tb);

    /* remove TB from the page(s) if we couldn't insert it */
    if (unlikely(existing_tb)) {
#ifdef CONFIG_USER_ONLY
/*
 * Invalidate all TBs which intersect with the target address range.
 * Called with mmap_lock held for user-mode emulation.
 * NOTE: this function must not be called while a TB is running.
 */
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
    TranslationBlock *tb;

    assert_memory_lock();

    PAGE_FOR_EACH_TB(start, end, unused, tb, n) {
        tb_phys_invalidate__locked(tb);

/*
 * Invalidate all TBs which intersect with the target address page @addr.
 * Called with mmap_lock held for user-mode emulation.
 * NOTE: this function must not be called while a TB is running.
 */
void tb_invalidate_phys_page(tb_page_addr_t addr)
    tb_page_addr_t start, end;

    start = addr & TARGET_PAGE_MASK;
    end = start + TARGET_PAGE_SIZE;
    tb_invalidate_phys_range(start, end);
/*
 * Called with mmap_lock held. If pc is not 0 then it indicates the
 * host PC of the faulting store instruction that caused this invalidate.
 * Returns true if the caller needs to abort execution of the current
 * TB (because it was modified by this store and the guest CPU has
 * precise-SMC semantics).
 */
bool tb_invalidate_phys_page_unwind(tb_page_addr_t addr, uintptr_t pc)
    TranslationBlock *current_tb;
    bool current_tb_modified;
    TranslationBlock *tb;

    /*
     * Without precise SMC semantics, or when outside of a TB,
     * we can simply invalidate and return.
     */
#ifndef TARGET_HAS_PRECISE_SMC
        tb_invalidate_phys_page(addr);

    assert_memory_lock();
    current_tb = tcg_tb_lookup(pc);

    addr &= TARGET_PAGE_MASK;
    current_tb_modified = false;

    PAGE_FOR_EACH_TB(addr, addr + TARGET_PAGE_SIZE, unused, tb, n) {
        if (current_tb == tb &&
            (tb_cflags(current_tb) & CF_COUNT_MASK) != 1) {
            /*
             * If we are modifying the current TB, we must stop its
             * execution. We could be more precise by checking that
             * the modification is after the current PC, but it would
             * require a specialized function to partially restore
             * the CPU state.
             */
            current_tb_modified = true;
            cpu_restore_state_from_tb(current_cpu, current_tb, pc);

        tb_phys_invalidate__locked(tb);

    if (current_tb_modified) {
        /* Force execution of one insn next time. */
        CPUState *cpu = current_cpu;
        cpu->cflags_next_tb = 1 | CF_NOIRQ | curr_cflags(current_cpu);
#else /* !CONFIG_USER_ONLY */

/*
 * @p must be non-NULL.
 * Call with all @pages locked.
 */
static void
tb_invalidate_phys_page_range__locked(struct page_collection *pages,
                                      PageDesc *p, tb_page_addr_t start,
    TranslationBlock *tb;
    tb_page_addr_t tb_start, tb_end;
#ifdef TARGET_HAS_PRECISE_SMC
    bool current_tb_modified = false;
    TranslationBlock *current_tb = retaddr ? tcg_tb_lookup(retaddr) : NULL;
#endif /* TARGET_HAS_PRECISE_SMC */

    /*
     * We remove all the TBs in the range [start, end[.
     * XXX: see if in some cases it could be faster to invalidate all the code
     */
    PAGE_FOR_EACH_TB(start, end, p, tb, n) {
        /* NOTE: this is subtle as a TB may span two physical pages */
            /*
             * NOTE: tb_end may be after the end of the page, but
             * it is not a problem.
             */
            tb_start = tb_page_addr0(tb);
            tb_end = tb_start + tb->size;
            tb_start = tb_page_addr1(tb);
            tb_end = tb_start + ((tb_page_addr0(tb) + tb->size)
                                 & ~TARGET_PAGE_MASK);
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb == tb &&
                (tb_cflags(current_tb) & CF_COUNT_MASK) != 1) {
                /*
                 * If we are modifying the current TB, we must stop
                 * its execution. We could be more precise by checking
                 * that the modification is after the current PC, but it
                 * would require a specialized function to partially
                 * restore the CPU state.
                 */
                current_tb_modified = true;
                cpu_restore_state_from_tb(current_cpu, current_tb, retaddr);
#endif /* TARGET_HAS_PRECISE_SMC */
            tb_phys_invalidate__locked(tb);

    /* if no code remaining, no need to continue to use slow writes */
        tlb_unprotect_code(start);

#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        page_collection_unlock(pages);
        /* Force execution of one insn next time. */
        current_cpu->cflags_next_tb = 1 | CF_NOIRQ | curr_cflags(current_cpu);

        cpu_loop_exit_noexc(current_cpu);
/*
 * Invalidate all TBs which intersect with the target physical
 * address page @addr.
 */
void tb_invalidate_phys_page(tb_page_addr_t addr)
    struct page_collection *pages;
    tb_page_addr_t start, end;

    p = page_find(addr >> TARGET_PAGE_BITS);

    start = addr & TARGET_PAGE_MASK;
    end = start + TARGET_PAGE_SIZE;
    pages = page_collection_lock(start, end);
    tb_invalidate_phys_page_range__locked(pages, p, start, end, 0);
    page_collection_unlock(pages);
/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[. NOTE: start and end may refer to *different* physical pages.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 */
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
    struct page_collection *pages;
    tb_page_addr_t next;

    pages = page_collection_lock(start, end);
    for (next = (start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
         start < end;
         start = next, next += TARGET_PAGE_SIZE) {
        PageDesc *pd = page_find(start >> TARGET_PAGE_BITS);
        tb_page_addr_t bound = MIN(next, end);

        assert_page_locked(pd);
        tb_invalidate_phys_page_range__locked(pages, pd, start, bound, 0);

    page_collection_unlock(pages);
/*
 * Call with all @pages in the range [@start, @start + len[ locked.
 */
static void tb_invalidate_phys_page_fast__locked(struct page_collection *pages,
                                                 tb_page_addr_t start,
                                                 unsigned len, uintptr_t ra)
    p = page_find(start >> TARGET_PAGE_BITS);

    assert_page_locked(p);
    tb_invalidate_phys_page_range__locked(pages, p, start, start + len, ra);
1213 * len must be <= 8 and start must be a multiple of len.
1214 * Called via softmmu_template.h when code areas are written to with
1215 * iothread mutex not held.
1217 void tb_invalidate_phys_range_fast(ram_addr_t ram_addr
,
1221 struct page_collection
*pages
;
1223 pages
= page_collection_lock(ram_addr
, ram_addr
+ size
);
1224 tb_invalidate_phys_page_fast__locked(pages
, ram_addr
, size
, retaddr
);
1225 page_collection_unlock(pages
);
#endif /* CONFIG_USER_ONLY */