/*
 * Translation Block Maintenance
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/interval-tree.h"
#include "qemu/qtree.h"
#include "exec/cputlb.h"
#include "exec/exec-all.h"
#include "exec/tb-flush.h"
#include "exec/translate-all.h"
#include "sysemu/tcg.h"
#include "tcg/tcg.h"
#include "tb-hash.h"
#include "tb-context.h"
#include "internal-common.h"
#include "internal-target.h"

/* List iterators for lists of tagged pointers in TranslationBlock. */
#define TB_FOR_EACH_TAGGED(head, tb, n, field)                          \
    for (n = (head) & 1, tb = (TranslationBlock *)((head) & ~1);        \
         tb; tb = (TranslationBlock *)tb->field[n], n = (uintptr_t)tb & 1, \
             tb = (TranslationBlock *)((uintptr_t)tb & ~1))

#define TB_FOR_EACH_JMP(head_tb, tb, n)                                 \
    TB_FOR_EACH_TAGGED((head_tb)->jmp_list_head, tb, n, jmp_list_next)
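
/*
 * Illustrative sketch (not from the original source): walking the incoming
 * jumps of a TB.  Each list element is a tagged pointer, so the iterator
 * yields both the jumping TB and the index (0 or 1) of the jump slot that
 * targets @dest.  The variable names below are hypothetical.
 *
 *     TranslationBlock *jmp_tb;
 *     int slot;
 *
 *     qemu_spin_lock(&dest->jmp_lock);
 *     TB_FOR_EACH_JMP(dest, jmp_tb, slot) {
 *         ...here jmp_tb->jmp_dest[slot] points (tagged) at dest...
 *     }
 *     qemu_spin_unlock(&dest->jmp_lock);
 */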

static bool tb_cmp(const void *ap, const void *bp)
{
    const TranslationBlock *a = ap;
    const TranslationBlock *b = bp;

    return ((tb_cflags(a) & CF_PCREL || a->pc == b->pc) &&
            a->cs_base == b->cs_base &&
            a->flags == b->flags &&
            (tb_cflags(a) & ~CF_INVALID) == (tb_cflags(b) & ~CF_INVALID) &&
            tb_page_addr0(a) == tb_page_addr0(b) &&
            tb_page_addr1(a) == tb_page_addr1(b));
}

void tb_htable_init(void)
{
    unsigned int mode = QHT_MODE_AUTO_RESIZE;

    qht_init(&tb_ctx.htable, tb_cmp, CODE_GEN_HTABLE_SIZE, mode);
}
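
/*
 * Illustrative sketch (an assumption, not part of this file): a lookup in
 * tb_ctx.htable hashes the same fields that tb_cmp() compares and passes a
 * candidate TB for the comparator to match against.  Roughly:
 *
 *     TranslationBlock desc;   hypothetical candidate with fields filled in
 *     uint32_t h = tb_hash_func(tb_page_addr0(&desc),
 *                               desc.cflags & CF_PCREL ? 0 : desc.pc,
 *                               desc.flags, desc.cs_base, desc.cflags);
 *     TranslationBlock *found = qht_lookup(&tb_ctx.htable, &desc, h);
 *
 * The real lookup path lives in cpu-exec.c with its own comparator.
 */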

typedef struct PageDesc PageDesc;

#ifdef CONFIG_USER_ONLY

/*
 * In user-mode page locks aren't used; mmap_lock is enough.
 */
#define assert_page_locked(pd) tcg_debug_assert(have_mmap_lock())

static inline void tb_lock_pages(const TranslationBlock *tb) { }

/*
 * For user-only, since we are protecting all of memory with a single lock,
 * and because the two pages of a TranslationBlock are always contiguous,
 * use a single data structure to record all TranslationBlocks.
 */
static IntervalTreeRoot tb_root;

static void tb_remove_all(void)
{
    assert_memory_lock();
    memset(&tb_root, 0, sizeof(tb_root));
}

/* Call with mmap_lock held. */
static void tb_record(TranslationBlock *tb)
{
    tb_page_addr_t addr;
    int flags;

    assert_memory_lock();
    tb->itree.last = tb->itree.start + tb->size - 1;

    /* translator_loop() must have made all TB pages non-writable */
    addr = tb_page_addr0(tb);
    flags = page_get_flags(addr);
    assert(!(flags & PAGE_WRITE));

    addr = tb_page_addr1(tb);
    if (addr != -1) {
        flags = page_get_flags(addr);
        assert(!(flags & PAGE_WRITE));
    }

    interval_tree_insert(&tb->itree, &tb_root);
}

/* Call with mmap_lock held. */
static void tb_remove(TranslationBlock *tb)
{
    assert_memory_lock();
    interval_tree_remove(&tb->itree, &tb_root);
}

/* TODO: For now, still shared with translate-all.c for system mode. */
#define PAGE_FOR_EACH_TB(start, last, pagedesc, T, N)   \
    for (T = foreach_tb_first(start, last),             \
         N = foreach_tb_next(T, start, last);           \
         T != NULL;                                     \
         T = N, N = foreach_tb_next(N, start, last))

typedef TranslationBlock *PageForEachNext;

static PageForEachNext foreach_tb_first(tb_page_addr_t start,
                                        tb_page_addr_t last)
{
    IntervalTreeNode *n = interval_tree_iter_first(&tb_root, start, last);
    return n ? container_of(n, TranslationBlock, itree) : NULL;
}

static PageForEachNext foreach_tb_next(PageForEachNext tb,
                                       tb_page_addr_t start,
                                       tb_page_addr_t last)
{
    IntervalTreeNode *n;

    if (tb) {
        n = interval_tree_iter_next(&tb->itree, start, last);
        if (n) {
            return container_of(n, TranslationBlock, itree);
        }
    }
    return NULL;
}
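
/*
 * Usage sketch (assumption, mirroring how the macro is used later in this
 * file): enumerate every TB whose [itree.start, itree.last] range intersects
 * [start, last].  In the user-only build the PageDesc argument is unused.
 *
 *     TranslationBlock *tb;
 *     PageForEachNext n;
 *
 *     PAGE_FOR_EACH_TB(start, last, unused, tb, n) {
 *         ...tb intersects the range...
 *     }
 */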

#else
/*
 * In system mode we want L1_MAP to be based on ram offsets.
 */
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables. */
#define V_L2_BITS 10
#define V_L2_SIZE (1 << V_L2_BITS)

/*
 * L1 Mapping properties
 */
static int v_l1_size;
static int v_l1_shift;
static int v_l2_levels;

/*
 * The bottom level has pointers to PageDesc, and is indexed by
 * anything from 4 to (V_L2_BITS + 3) bits, depending on target page size.
 */
#define V_L1_MIN_BITS 4
#define V_L1_MAX_BITS (V_L2_BITS + 3)
#define V_L1_MAX_SIZE (1 << V_L1_MAX_BITS)

static void *l1_map[V_L1_MAX_SIZE];

struct PageDesc {
    QemuSpin lock;
    /* list of TBs intersecting this ram page */
    uintptr_t first_tb;
};

void page_table_config_init(void)
{
    uint32_t v_l1_bits;

    assert(TARGET_PAGE_BITS);
    /* The bits remaining after N lower levels of page tables. */
    v_l1_bits = (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % V_L2_BITS;
    if (v_l1_bits < V_L1_MIN_BITS) {
        v_l1_bits += V_L2_BITS;
    }

    v_l1_size = 1 << v_l1_bits;
    v_l1_shift = L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - v_l1_bits;
    v_l2_levels = v_l1_shift / V_L2_BITS - 1;

    assert(v_l1_bits <= V_L1_MAX_BITS);
    assert(v_l1_shift % V_L2_BITS == 0);
    assert(v_l2_levels >= 0);
}
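
/*
 * Worked example (illustrative values, not from the original source): on a
 * 64-bit host with TARGET_PHYS_ADDR_SPACE_BITS = 40 and TARGET_PAGE_BITS = 12,
 * L1_MAP_ADDR_SPACE_BITS is 40, leaving 28 index bits.  Then
 * v_l1_bits = 28 % 10 = 8, v_l1_size = 256, v_l1_shift = 20 and
 * v_l2_levels = 1: a page index is consumed as 8 L1 bits, one 10-bit
 * intermediate level, and a final 10-bit level of PageDesc entries.
 */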

static PageDesc *page_find_alloc(tb_page_addr_t index, bool alloc)
{
    PageDesc *pd;
    void **lp;

    /* Level 1. Always allocated. */
    lp = l1_map + ((index >> v_l1_shift) & (v_l1_size - 1));

    /* Level 2..N-1. */
    for (int i = v_l2_levels; i > 0; i--) {
        void **p = qatomic_rcu_read(lp);

        if (p == NULL) {
            void *existing;

            if (!alloc) {
                return NULL;
            }
            p = g_new0(void *, V_L2_SIZE);
            existing = qatomic_cmpxchg(lp, NULL, p);
            if (unlikely(existing)) {
                g_free(p);
                p = existing;
            }
        }

        lp = p + ((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1));
    }

    pd = qatomic_rcu_read(lp);
    if (pd == NULL) {
        void *existing;

        if (!alloc) {
            return NULL;
        }

        pd = g_new0(PageDesc, V_L2_SIZE);
        for (int i = 0; i < V_L2_SIZE; i++) {
            qemu_spin_init(&pd[i].lock);
        }

        existing = qatomic_cmpxchg(lp, NULL, pd);
        if (unlikely(existing)) {
            for (int i = 0; i < V_L2_SIZE; i++) {
                qemu_spin_destroy(&pd[i].lock);
            }
            g_free(pd);
            pd = existing;
        }
    }

    return pd + (index & (V_L2_SIZE - 1));
}
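
/*
 * Usage sketch (assumption, matching the callers later in this file):
 * translate a page address into its PageDesc, allocating intermediate
 * levels on demand:
 *
 *     PageDesc *pd = page_find_alloc(paddr >> TARGET_PAGE_BITS, true);
 *
 * page_find() below is the non-allocating variant and may return NULL.
 */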

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, false);
}

/**
 * struct page_entry - page descriptor entry
 * @pd:     pointer to the &struct PageDesc of the page this entry represents
 * @index:  page index of the page
 * @locked: whether the page is locked
 *
 * This struct helps us keep track of the locked state of a page, without
 * bloating &struct PageDesc.
 *
 * A page lock protects accesses to all fields of &struct PageDesc.
 *
 * See also: &struct page_collection.
 */
struct page_entry {
    PageDesc *pd;
    tb_page_addr_t index;
    bool locked;
};

/**
 * struct page_collection - tracks a set of pages (i.e. &struct page_entry's)
 * @tree:   Binary search tree (BST) of the pages, with key == page index
 * @max:    Pointer to the page in @tree with the highest page index
 *
 * To avoid deadlock we lock pages in ascending order of page index.
 * When operating on a set of pages, we need to keep track of them so that
 * we can lock them in order and also unlock them later. For this we collect
 * pages (i.e. &struct page_entry's) in a binary search @tree. Given that the
 * @tree implementation we use does not provide an O(1) operation to obtain the
 * highest-ranked element, we use @max to keep track of the inserted page
 * with the highest index. This is valuable because if a page is not in
 * the tree and its index is higher than @max's, then we can lock it
 * without breaking the locking order rule.
 *
 * Note on naming: 'struct page_set' would be shorter, but we already have a few
 * page_set_*() helpers, so page_collection is used instead to avoid confusion.
 *
 * See also: page_collection_lock().
 */
struct page_collection {
    QTree *tree;
    struct page_entry *max;
};
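
/*
 * Illustrative sketch (assumption, modelled on the callers of
 * page_collection_lock() below): pages are always locked in ascending index
 * order; a page above @max can be locked immediately, anything else is only
 * trylock'ed and, if busy, all locks are dropped and reacquired in order.
 *
 *     struct page_collection *set = page_collection_lock(start, last);
 *     ...modify the PageDescs covered by [start, last]...
 *     page_collection_unlock(set);
 */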

typedef int PageForEachNext;
#define PAGE_FOR_EACH_TB(start, last, pagedesc, tb, n) \
    TB_FOR_EACH_TAGGED((pagedesc)->first_tb, tb, n, page_next)

#ifdef CONFIG_DEBUG_TCG

static __thread GHashTable *ht_pages_locked_debug;

static void ht_pages_locked_debug_init(void)
{
    if (ht_pages_locked_debug) {
        return;
    }
    ht_pages_locked_debug = g_hash_table_new(NULL, NULL);
}

static bool page_is_locked(const PageDesc *pd)
{
    PageDesc *found;

    ht_pages_locked_debug_init();
    found = g_hash_table_lookup(ht_pages_locked_debug, pd);
    return !!found;
}

static void page_lock__debug(PageDesc *pd)
{
    ht_pages_locked_debug_init();
    g_assert(!page_is_locked(pd));
    g_hash_table_insert(ht_pages_locked_debug, pd, pd);
}

static void page_unlock__debug(const PageDesc *pd)
{
    bool removed;

    ht_pages_locked_debug_init();
    g_assert(page_is_locked(pd));
    removed = g_hash_table_remove(ht_pages_locked_debug, pd);
    g_assert(removed);
}

static void do_assert_page_locked(const PageDesc *pd,
                                  const char *file, int line)
{
    if (unlikely(!page_is_locked(pd))) {
        error_report("assert_page_lock: PageDesc %p not locked @ %s:%d",
                     pd, file, line);
        abort();
    }
}
#define assert_page_locked(pd) do_assert_page_locked(pd, __FILE__, __LINE__)

void assert_no_pages_locked(void)
{
    ht_pages_locked_debug_init();
    g_assert(g_hash_table_size(ht_pages_locked_debug) == 0);
}

#else /* !CONFIG_DEBUG_TCG */

static inline void page_lock__debug(const PageDesc *pd) { }
static inline void page_unlock__debug(const PageDesc *pd) { }
static inline void assert_page_locked(const PageDesc *pd) { }

#endif /* CONFIG_DEBUG_TCG */

static void page_lock(PageDesc *pd)
{
    page_lock__debug(pd);
    qemu_spin_lock(&pd->lock);
}

/* Like qemu_spin_trylock, returns false on success */
static bool page_trylock(PageDesc *pd)
{
    bool busy = qemu_spin_trylock(&pd->lock);

    if (!busy) {
        page_lock__debug(pd);
    }
    return busy;
}

static void page_unlock(PageDesc *pd)
{
    qemu_spin_unlock(&pd->lock);
    page_unlock__debug(pd);
}

void tb_lock_page0(tb_page_addr_t paddr)
{
    page_lock(page_find_alloc(paddr >> TARGET_PAGE_BITS, true));
}

void tb_lock_page1(tb_page_addr_t paddr0, tb_page_addr_t paddr1)
{
    tb_page_addr_t pindex0 = paddr0 >> TARGET_PAGE_BITS;
    tb_page_addr_t pindex1 = paddr1 >> TARGET_PAGE_BITS;
    PageDesc *pd0, *pd1;

    if (pindex0 == pindex1) {
        /* Identical pages, and the first page is already locked. */
        return;
    }

    pd1 = page_find_alloc(pindex1, true);
    if (pindex0 < pindex1) {
        /* Correct locking order, we may block. */
        page_lock(pd1);
        return;
    }

    /* Incorrect locking order, we cannot block lest we deadlock. */
    if (!page_trylock(pd1)) {
        return;
    }

    /*
     * Drop the lock on page0 and get both page locks in the right order.
     * Restart translation via longjmp.
     */
    pd0 = page_find_alloc(pindex0, false);
    page_unlock(pd0);
    page_lock(pd1);
    page_lock(pd0);
    siglongjmp(tcg_ctx->jmp_trans, -3);
}

void tb_unlock_page1(tb_page_addr_t paddr0, tb_page_addr_t paddr1)
{
    tb_page_addr_t pindex0 = paddr0 >> TARGET_PAGE_BITS;
    tb_page_addr_t pindex1 = paddr1 >> TARGET_PAGE_BITS;

    if (pindex0 != pindex1) {
        page_unlock(page_find_alloc(pindex1, false));
    }
}

static void tb_lock_pages(TranslationBlock *tb)
{
    tb_page_addr_t paddr0 = tb_page_addr0(tb);
    tb_page_addr_t paddr1 = tb_page_addr1(tb);
    tb_page_addr_t pindex0 = paddr0 >> TARGET_PAGE_BITS;
    tb_page_addr_t pindex1 = paddr1 >> TARGET_PAGE_BITS;

    if (unlikely(paddr0 == -1)) {
        return;
    }
    if (unlikely(paddr1 != -1) && pindex0 != pindex1) {
        if (pindex0 < pindex1) {
            page_lock(page_find_alloc(pindex0, true));
            page_lock(page_find_alloc(pindex1, true));
            return;
        }
        page_lock(page_find_alloc(pindex1, true));
    }
    page_lock(page_find_alloc(pindex0, true));
}

void tb_unlock_pages(TranslationBlock *tb)
{
    tb_page_addr_t paddr0 = tb_page_addr0(tb);
    tb_page_addr_t paddr1 = tb_page_addr1(tb);
    tb_page_addr_t pindex0 = paddr0 >> TARGET_PAGE_BITS;
    tb_page_addr_t pindex1 = paddr1 >> TARGET_PAGE_BITS;

    if (unlikely(paddr0 == -1)) {
        return;
    }
    if (unlikely(paddr1 != -1) && pindex0 != pindex1) {
        page_unlock(page_find_alloc(pindex1, false));
    }
    page_unlock(page_find_alloc(pindex0, false));
}
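
/*
 * Usage sketch (assumption, mirroring tb_phys_invalidate() below): both page
 * locks of a TB are taken in index order before its page lists are mutated
 * and released afterwards.
 *
 *     tb_lock_pages(tb);
 *     ...manipulate the TB's page lists...
 *     tb_unlock_pages(tb);
 */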

static inline struct page_entry *
page_entry_new(PageDesc *pd, tb_page_addr_t index)
{
    struct page_entry *pe = g_malloc(sizeof(*pe));

    pe->index = index;
    pe->pd = pd;
    pe->locked = false;
    return pe;
}

static void page_entry_destroy(gpointer p)
{
    struct page_entry *pe = p;

    g_assert(pe->locked);
    page_unlock(pe->pd);
    g_free(pe);
}

/* returns false on success */
static bool page_entry_trylock(struct page_entry *pe)
{
    bool busy = page_trylock(pe->pd);

    if (!busy) {
        g_assert(!pe->locked);
        pe->locked = true;
    }
    return busy;
}

static void do_page_entry_lock(struct page_entry *pe)
{
    page_lock(pe->pd);
    g_assert(!pe->locked);
    pe->locked = true;
}

static gboolean page_entry_lock(gpointer key, gpointer value, gpointer data)
{
    struct page_entry *pe = value;

    do_page_entry_lock(pe);
    return FALSE;
}

static gboolean page_entry_unlock(gpointer key, gpointer value, gpointer data)
{
    struct page_entry *pe = value;

    if (pe->locked) {
        pe->locked = false;
        page_unlock(pe->pd);
    }
    return FALSE;
}

/*
 * Trylock a page, and if successful, add the page to a collection.
 * Returns true ("busy") if the page could not be locked; false otherwise.
 */
static bool page_trylock_add(struct page_collection *set, tb_page_addr_t addr)
{
    tb_page_addr_t index = addr >> TARGET_PAGE_BITS;
    struct page_entry *pe;
    PageDesc *pd;

    pe = q_tree_lookup(set->tree, &index);
    if (pe) {
        return false;
    }

    pd = page_find(index);
    if (pd == NULL) {
        return false;
    }

    pe = page_entry_new(pd, index);
    q_tree_insert(set->tree, &pe->index, pe);

    /*
     * If this is either (1) the first insertion or (2) a page whose index
     * is higher than any other so far, just lock the page and move on.
     */
    if (set->max == NULL || pe->index > set->max->index) {
        set->max = pe;
        do_page_entry_lock(pe);
        return false;
    }
    /*
     * Try to acquire out-of-order lock; if busy, return busy so that we acquire
     * locks in order.
     */
    return page_entry_trylock(pe);
}

static gint tb_page_addr_cmp(gconstpointer ap, gconstpointer bp, gpointer udata)
{
    tb_page_addr_t a = *(const tb_page_addr_t *)ap;
    tb_page_addr_t b = *(const tb_page_addr_t *)bp;

    return a < b ? -1 : (a == b ? 0 : 1);
}

/*
 * Lock a range of pages ([@start,@last]) as well as the pages of all
 * intersecting TBs.
 * Locking order: acquire locks in ascending order of page index.
 */
static struct page_collection *page_collection_lock(tb_page_addr_t start,
                                                    tb_page_addr_t last)
{
    struct page_collection *set = g_malloc(sizeof(*set));
    tb_page_addr_t index;
    PageDesc *pd;

    start >>= TARGET_PAGE_BITS;
    last >>= TARGET_PAGE_BITS;
    g_assert(start <= last);

    set->tree = q_tree_new_full(tb_page_addr_cmp, NULL, NULL,
                                page_entry_destroy);
    set->max = NULL;
    assert_no_pages_locked();

 retry:
    q_tree_foreach(set->tree, page_entry_lock, NULL);

    for (index = start; index <= last; index++) {
        TranslationBlock *tb;
        PageForEachNext n;

        pd = page_find(index);
        if (pd == NULL) {
            continue;
        }
        if (page_trylock_add(set, index << TARGET_PAGE_BITS)) {
            q_tree_foreach(set->tree, page_entry_unlock, NULL);
            goto retry;
        }
        assert_page_locked(pd);
        PAGE_FOR_EACH_TB(unused, unused, pd, tb, n) {
            if (page_trylock_add(set, tb_page_addr0(tb)) ||
                (tb_page_addr1(tb) != -1 &&
                 page_trylock_add(set, tb_page_addr1(tb)))) {
                /* drop all locks, and reacquire in order */
                q_tree_foreach(set->tree, page_entry_unlock, NULL);
                goto retry;
            }
        }
    }
    return set;
}

static void page_collection_unlock(struct page_collection *set)
{
    /* entries are unlocked and freed via page_entry_destroy */
    q_tree_destroy(set->tree);
    g_free(set);
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */
static void tb_remove_all_1(int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            page_lock(&pd[i]);
            pd[i].first_tb = (uintptr_t)NULL;
            page_unlock(&pd[i]);
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            tb_remove_all_1(level - 1, pp + i);
        }
    }
}

static void tb_remove_all(void)
{
    int i, l1_sz = v_l1_size;

    for (i = 0; i < l1_sz; i++) {
        tb_remove_all_1(v_l2_levels, l1_map + i);
    }
}

/*
 * Add the tb in the target page and protect it if necessary.
 * Called with @p->lock held.
 */
static void tb_page_add(PageDesc *p, TranslationBlock *tb, unsigned int n)
{
    bool page_already_protected;

    assert_page_locked(p);

    tb->page_next[n] = p->first_tb;
    page_already_protected = p->first_tb != 0;
    p->first_tb = (uintptr_t)tb | n;

    /*
     * If some code is already present, then the pages are already
     * protected. So we handle the case where only the first TB is
     * allocated in a physical page.
     */
    if (!page_already_protected) {
        tlb_protect_code(tb->page_addr[n] & TARGET_PAGE_MASK);
    }
}

static void tb_record(TranslationBlock *tb)
{
    tb_page_addr_t paddr0 = tb_page_addr0(tb);
    tb_page_addr_t paddr1 = tb_page_addr1(tb);
    tb_page_addr_t pindex0 = paddr0 >> TARGET_PAGE_BITS;
    tb_page_addr_t pindex1 = paddr1 >> TARGET_PAGE_BITS;

    assert(paddr0 != -1);
    if (unlikely(paddr1 != -1) && pindex0 != pindex1) {
        tb_page_add(page_find_alloc(pindex1, false), tb, 1);
    }
    tb_page_add(page_find_alloc(pindex0, false), tb, 0);
}

static void tb_page_remove(PageDesc *pd, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    uintptr_t *pprev;
    PageForEachNext n1;

    assert_page_locked(pd);
    pprev = &pd->first_tb;
    PAGE_FOR_EACH_TB(unused, unused, pd, tb1, n1) {
        if (tb1 == tb) {
            *pprev = tb1->page_next[n1];
            return;
        }
        pprev = &tb1->page_next[n1];
    }
    g_assert_not_reached();
}

static void tb_remove(TranslationBlock *tb)
{
    tb_page_addr_t paddr0 = tb_page_addr0(tb);
    tb_page_addr_t paddr1 = tb_page_addr1(tb);
    tb_page_addr_t pindex0 = paddr0 >> TARGET_PAGE_BITS;
    tb_page_addr_t pindex1 = paddr1 >> TARGET_PAGE_BITS;

    assert(paddr0 != -1);
    if (unlikely(paddr1 != -1) && pindex0 != pindex1) {
        tb_page_remove(page_find_alloc(pindex1, false), tb);
    }
    tb_page_remove(page_find_alloc(pindex0, false), tb);
}
#endif /* CONFIG_USER_ONLY */

/* flush all the translation blocks */
static void do_tb_flush(CPUState *cpu, run_on_cpu_data tb_flush_count)
{
    bool did_flush = false;

    mmap_lock();
    /* If it has already been done on request of another CPU, just retry. */
    if (tb_ctx.tb_flush_count != tb_flush_count.host_int) {
        goto done;
    }
    did_flush = true;

    CPU_FOREACH(cpu) {
        tcg_flush_jmp_cache(cpu);
    }

    qht_reset_size(&tb_ctx.htable, CODE_GEN_HTABLE_SIZE);
    tb_remove_all();

    tcg_region_reset_all();
    /* XXX: flush processor icache at this point if cache flush is expensive */
    qatomic_inc(&tb_ctx.tb_flush_count);

done:
    mmap_unlock();
    if (did_flush) {
        qemu_plugin_flush_cb();
    }
}

void tb_flush(CPUState *cpu)
{
    if (tcg_enabled()) {
        unsigned tb_flush_count = qatomic_read(&tb_ctx.tb_flush_count);

        if (cpu_in_serial_context(cpu)) {
            do_tb_flush(cpu, RUN_ON_CPU_HOST_INT(tb_flush_count));
        } else {
            async_safe_run_on_cpu(cpu, do_tb_flush,
                                  RUN_ON_CPU_HOST_INT(tb_flush_count));
        }
    }
}

/* remove @orig from its @n_orig-th jump list */
static inline void tb_remove_from_jmp_list(TranslationBlock *orig, int n_orig)
{
    uintptr_t ptr, ptr_locked;
    TranslationBlock *dest;
    TranslationBlock *tb;
    uintptr_t *pprev;
    int n;

    /* mark the LSB of jmp_dest[] so that no further jumps can be inserted */
    ptr = qatomic_or_fetch(&orig->jmp_dest[n_orig], 1);
    dest = (TranslationBlock *)(ptr & ~1);
    if (dest == NULL) {
        return;
    }

    qemu_spin_lock(&dest->jmp_lock);
    /*
     * While acquiring the lock, the jump might have been removed if the
     * destination TB was invalidated; check again.
     */
    ptr_locked = qatomic_read(&orig->jmp_dest[n_orig]);
    if (ptr_locked != ptr) {
        qemu_spin_unlock(&dest->jmp_lock);
        /*
         * The only possibility is that the jump was unlinked via
         * tb_jump_unlink(dest). Seeing here another destination would be a bug,
         * because we set the LSB above.
         */
        g_assert(ptr_locked == 1 && dest->cflags & CF_INVALID);
        return;
    }
    /*
     * We first acquired the lock, and since the destination pointer matches,
     * we know for sure that @orig is in the jmp list.
     */
    pprev = &dest->jmp_list_head;
    TB_FOR_EACH_JMP(dest, tb, n) {
        if (tb == orig && n == n_orig) {
            *pprev = tb->jmp_list_next[n];
            /* no need to set orig->jmp_dest[n]; setting the LSB was enough */
            qemu_spin_unlock(&dest->jmp_lock);
            return;
        }
        pprev = &tb->jmp_list_next[n];
    }
    g_assert_not_reached();
}

/*
 * Reset the jump entry 'n' of a TB so that it is not chained to another TB.
 */
void tb_reset_jump(TranslationBlock *tb, int n)
{
    uintptr_t addr = (uintptr_t)(tb->tc.ptr + tb->jmp_reset_offset[n]);
    tb_set_jmp_target(tb, n, addr);
}

/* remove any jumps to the TB */
static inline void tb_jmp_unlink(TranslationBlock *dest)
{
    TranslationBlock *tb;
    int n;

    qemu_spin_lock(&dest->jmp_lock);

    TB_FOR_EACH_JMP(dest, tb, n) {
        tb_reset_jump(tb, n);
        qatomic_and(&tb->jmp_dest[n], (uintptr_t)NULL | 1);
        /* No need to clear the list entry; setting the dest ptr is enough */
    }
    dest->jmp_list_head = (uintptr_t)NULL;

    qemu_spin_unlock(&dest->jmp_lock);
}

static void tb_jmp_cache_inval_tb(TranslationBlock *tb)
{
    CPUState *cpu;

    if (tb_cflags(tb) & CF_PCREL) {
        /* A TB may be at any virtual address */
        CPU_FOREACH(cpu) {
            tcg_flush_jmp_cache(cpu);
        }
    } else {
        uint32_t h = tb_jmp_cache_hash_func(tb->pc);

        CPU_FOREACH(cpu) {
            CPUJumpCache *jc = cpu->tb_jmp_cache;

            if (qatomic_read(&jc->array[h].tb) == tb) {
                qatomic_set(&jc->array[h].tb, NULL);
            }
        }
    }
}

/*
 * In user-mode, call with mmap_lock held.
 * In !user-mode, if @rm_from_page_list is set, call with the TB's pages'
 * locks held.
 */
static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list)
{
    uint32_t h;
    tb_page_addr_t phys_pc;
    uint32_t orig_cflags = tb_cflags(tb);

    assert_memory_lock();

    /* make sure no further incoming jumps will be chained to this TB */
    qemu_spin_lock(&tb->jmp_lock);
    qatomic_set(&tb->cflags, tb->cflags | CF_INVALID);
    qemu_spin_unlock(&tb->jmp_lock);

    /* remove the TB from the hash list */
    phys_pc = tb_page_addr0(tb);
    h = tb_hash_func(phys_pc, (orig_cflags & CF_PCREL ? 0 : tb->pc),
                     tb->flags, tb->cs_base, orig_cflags);
    if (!qht_remove(&tb_ctx.htable, tb, h)) {
        return;
    }

    /* remove the TB from the page list */
    if (rm_from_page_list) {
        tb_remove(tb);
    }

    /* remove the TB from the per-CPU jump caches */
    tb_jmp_cache_inval_tb(tb);

    /* suppress this TB from the two jump lists */
    tb_remove_from_jmp_list(tb, 0);
    tb_remove_from_jmp_list(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb_jmp_unlink(tb);

    qatomic_set(&tb_ctx.tb_phys_invalidate_count,
                tb_ctx.tb_phys_invalidate_count + 1);
}

static void tb_phys_invalidate__locked(TranslationBlock *tb)
{
    qemu_thread_jit_write();
    do_tb_phys_invalidate(tb, true);
    qemu_thread_jit_execute();
}

/*
 * Invalidate one TB.
 * Called with mmap_lock held in user-mode.
 */
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    if (page_addr == -1 && tb_page_addr0(tb) != -1) {
        tb_lock_pages(tb);
        do_tb_phys_invalidate(tb, true);
        tb_unlock_pages(tb);
    } else {
        do_tb_phys_invalidate(tb, false);
    }
}

/*
 * Add a new TB and link it to the physical page tables.
 * Called with mmap_lock held for user-mode emulation.
 *
 * Returns a pointer @tb, or a pointer to an existing TB that matches @tb.
 * Note that in !user-mode, another thread might have already added a TB
 * for the same block of guest code that @tb corresponds to. In that case,
 * the caller should discard the original @tb, and use instead the returned TB.
 */
TranslationBlock *tb_link_page(TranslationBlock *tb)
{
    void *existing_tb = NULL;
    uint32_t h;

    assert_memory_lock();
    tcg_debug_assert(!(tb->cflags & CF_INVALID));

    tb_record(tb);

    /* add in the hash table */
    h = tb_hash_func(tb_page_addr0(tb), (tb->cflags & CF_PCREL ? 0 : tb->pc),
                     tb->flags, tb->cs_base, tb->cflags);
    qht_insert(&tb_ctx.htable, tb, h, &existing_tb);

    /* remove TB from the page(s) if we couldn't insert it */
    if (unlikely(existing_tb)) {
        tb_remove(tb);
        tb_unlock_pages(tb);
        return existing_tb;
    }

    tb_unlock_pages(tb);
    return tb;
}

#ifdef CONFIG_USER_ONLY
/*
 * Invalidate all TBs which intersect with the target address range.
 * Called with mmap_lock held for user-mode emulation.
 * NOTE: this function must not be called while a TB is running.
 */
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t last)
{
    TranslationBlock *tb;
    PageForEachNext n;

    assert_memory_lock();

    PAGE_FOR_EACH_TB(start, last, unused, tb, n) {
        tb_phys_invalidate__locked(tb);
    }
}

/*
 * Invalidate all TBs which intersect with the target address page @addr.
 * Called with mmap_lock held for user-mode emulation.
 * NOTE: this function must not be called while a TB is running.
 */
static void tb_invalidate_phys_page(tb_page_addr_t addr)
{
    tb_page_addr_t start, last;

    start = addr & TARGET_PAGE_MASK;
    last = addr | ~TARGET_PAGE_MASK;
    tb_invalidate_phys_range(start, last);
}

/*
 * Called with mmap_lock held. If pc is not 0 then it indicates the
 * host PC of the faulting store instruction that caused this invalidate.
 * Returns true if the caller needs to abort execution of the current
 * TB (because it was modified by this store and the guest CPU has
 * precise-SMC semantics).
 */
bool tb_invalidate_phys_page_unwind(tb_page_addr_t addr, uintptr_t pc)
{
    TranslationBlock *current_tb;
    bool current_tb_modified;
    TranslationBlock *tb;
    PageForEachNext n;
    tb_page_addr_t last;

    /*
     * Without precise smc semantics, or when outside of a TB,
     * we can skip to invalidate.
     */
#ifndef TARGET_HAS_PRECISE_SMC
    pc = 0;
#endif
    if (!pc) {
        tb_invalidate_phys_page(addr);
        return false;
    }

    assert_memory_lock();
    current_tb = tcg_tb_lookup(pc);

    last = addr | ~TARGET_PAGE_MASK;
    addr &= TARGET_PAGE_MASK;
    current_tb_modified = false;

    PAGE_FOR_EACH_TB(addr, last, unused, tb, n) {
        if (current_tb == tb &&
            (tb_cflags(current_tb) & CF_COUNT_MASK) != 1) {
            /*
             * If we are modifying the current TB, we must stop its
             * execution. We could be more precise by checking that
             * the modification is after the current PC, but it would
             * require a specialized function to partially restore
             * the CPU state.
             */
            current_tb_modified = true;
            cpu_restore_state_from_tb(current_cpu, current_tb, pc);
        }
        tb_phys_invalidate__locked(tb);
    }

    if (current_tb_modified) {
        /* Force execution of one insn next time. */
        CPUState *cpu = current_cpu;
        cpu->cflags_next_tb = 1 | CF_NOIRQ | curr_cflags(current_cpu);
        return true;
    }
    return false;
}
#else
/*
 * @p must be non-NULL.
 * Call with all @pages locked.
 */
static void
tb_invalidate_phys_page_range__locked(struct page_collection *pages,
                                      PageDesc *p, tb_page_addr_t start,
                                      tb_page_addr_t last,
                                      uintptr_t retaddr)
{
    TranslationBlock *tb;
    PageForEachNext n;
#ifdef TARGET_HAS_PRECISE_SMC
    bool current_tb_modified = false;
    TranslationBlock *current_tb = retaddr ? tcg_tb_lookup(retaddr) : NULL;
#endif /* TARGET_HAS_PRECISE_SMC */

    /* Range may not cross a page. */
    tcg_debug_assert(((start ^ last) & TARGET_PAGE_MASK) == 0);

    /*
     * We remove all the TBs in the range [start, last].
     * XXX: see if in some cases it could be faster to invalidate all the code
     */
    PAGE_FOR_EACH_TB(start, last, p, tb, n) {
        tb_page_addr_t tb_start, tb_last;

        /* NOTE: this is subtle as a TB may span two physical pages */
        tb_start = tb_page_addr0(tb);
        tb_last = tb_start + tb->size - 1;
        if (n == 0) {
            tb_last = MIN(tb_last, tb_start | ~TARGET_PAGE_MASK);
        } else {
            tb_start = tb_page_addr1(tb);
            tb_last = tb_start + (tb_last & ~TARGET_PAGE_MASK);
        }
        if (!(tb_last < start || tb_start > last)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb == tb &&
                (tb_cflags(current_tb) & CF_COUNT_MASK) != 1) {
                /*
                 * If we are modifying the current TB, we must stop
                 * its execution. We could be more precise by checking
                 * that the modification is after the current PC, but it
                 * would require a specialized function to partially
                 * restore the CPU state.
                 */
                current_tb_modified = true;
                cpu_restore_state_from_tb(current_cpu, current_tb, retaddr);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            tb_phys_invalidate__locked(tb);
        }
    }

    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        tlb_unprotect_code(start);
    }

#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        page_collection_unlock(pages);
        /* Force execution of one insn next time. */
        current_cpu->cflags_next_tb = 1 | CF_NOIRQ | curr_cflags(current_cpu);
        mmap_unlock();
        cpu_loop_exit_noexc(current_cpu);
    }
#endif
}

/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start, last]. Note that start and last may refer to *different* physical
 * pages. The required page locks are taken internally via
 * page_collection_lock().
 */
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t last)
{
    struct page_collection *pages;
    tb_page_addr_t index, index_last;

    pages = page_collection_lock(start, last);

    index_last = last >> TARGET_PAGE_BITS;
    for (index = start >> TARGET_PAGE_BITS; index <= index_last; index++) {
        PageDesc *pd = page_find(index);
        tb_page_addr_t page_start, page_last;

        if (pd == NULL) {
            continue;
        }
        assert_page_locked(pd);
        page_start = index << TARGET_PAGE_BITS;
        page_last = page_start | ~TARGET_PAGE_MASK;
        page_last = MIN(page_last, last);
        tb_invalidate_phys_page_range__locked(pages, pd,
                                              page_start, page_last, 0);
    }
    page_collection_unlock(pages);
}

/*
 * Call with all @pages in the range [@start, @start + len[ locked.
 */
static void tb_invalidate_phys_page_fast__locked(struct page_collection *pages,
                                                 tb_page_addr_t start,
                                                 unsigned len, uintptr_t ra)
{
    PageDesc *p;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }

    assert_page_locked(p);
    tb_invalidate_phys_page_range__locked(pages, p, start, start + len - 1, ra);
}

/*
 * len must be <= 8 and start must be a multiple of len.
 * Called from the softmmu slow path when code areas are written to,
 * with the iothread mutex not held.
 */
void tb_invalidate_phys_range_fast(ram_addr_t ram_addr,
                                   unsigned size,
                                   uintptr_t retaddr)
{
    struct page_collection *pages;

    pages = page_collection_lock(ram_addr, ram_addr + size - 1);
    tb_invalidate_phys_page_fast__locked(pages, ram_addr, size, retaddr);
    page_collection_unlock(pages);
}

#endif /* CONFIG_USER_ONLY */