accel/tcg/translate-all.c (qemu/ar7.git)
1 /*
2 * Host code generation
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
20 #include "qemu/osdep.h"
21 #include "qemu/units.h"
22 #include "qemu-common.h"
24 #define NO_CPU_IO_DEFS
25 #include "cpu.h"
26 #include "trace.h"
27 #include "disas/disas.h"
28 #include "exec/exec-all.h"
29 #include "tcg/tcg.h"
30 #if defined(CONFIG_USER_ONLY)
31 #include "qemu.h"
32 #if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
33 #include <sys/param.h>
34 #if __FreeBSD_version >= 700104
35 #define HAVE_KINFO_GETVMMAP
36 #define sigqueue sigqueue_freebsd /* avoid redefinition */
37 #include <sys/proc.h>
38 #include <machine/profile.h>
39 #define _KERNEL
40 #include <sys/user.h>
41 #undef _KERNEL
42 #undef sigqueue
43 #include <libutil.h>
44 #endif
45 #endif
46 #else
47 #include "exec/ram_addr.h"
48 #endif
50 #include "exec/cputlb.h"
51 #include "exec/tb-hash.h"
52 #include "exec/translate-all.h"
53 #include "qemu/bitmap.h"
54 #include "qemu/error-report.h"
55 #include "qemu/qemu-print.h"
56 #include "qemu/timer.h"
57 #include "qemu/main-loop.h"
58 #include "exec/log.h"
59 #include "sysemu/cpus.h"
60 #include "sysemu/cpu-timers.h"
61 #include "sysemu/tcg.h"
62 #include "qapi/error.h"
63 #include "internal.h"
65 /* #define DEBUG_TB_INVALIDATE */
66 /* #define DEBUG_TB_FLUSH */
67 /* make various TB consistency checks */
68 /* #define DEBUG_TB_CHECK */
70 #ifdef DEBUG_TB_INVALIDATE
71 #define DEBUG_TB_INVALIDATE_GATE 1
72 #else
73 #define DEBUG_TB_INVALIDATE_GATE 0
74 #endif
76 #ifdef DEBUG_TB_FLUSH
77 #define DEBUG_TB_FLUSH_GATE 1
78 #else
79 #define DEBUG_TB_FLUSH_GATE 0
80 #endif
82 #if !defined(CONFIG_USER_ONLY)
83 /* TB consistency checks only implemented for usermode emulation. */
84 #undef DEBUG_TB_CHECK
85 #endif
87 #ifdef DEBUG_TB_CHECK
88 #define DEBUG_TB_CHECK_GATE 1
89 #else
90 #define DEBUG_TB_CHECK_GATE 0
91 #endif
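/*
 * Added commentary: the *_GATE constants above exist so the debug-only code
 * can be wrapped in plain `if (DEBUG_TB_FLUSH_GATE) { ... }` statements
 * instead of #ifdef blocks.  The code is then always parsed and type-checked,
 * and the compiler removes it entirely when the gate evaluates to 0.
 */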
93 /* Access to the various translation structures needs to be serialised via locks
94 * for consistency.
95 * In user-mode emulation access to the memory related structures are protected
96 * with mmap_lock.
97 * In !user-mode we use per-page locks.
99 #ifdef CONFIG_SOFTMMU
100 #define assert_memory_lock()
101 #else
102 #define assert_memory_lock() tcg_debug_assert(have_mmap_lock())
103 #endif
105 #define SMC_BITMAP_USE_THRESHOLD 10
107 typedef struct PageDesc {
108 /* list of TBs intersecting this ram page */
109 uintptr_t first_tb;
110 #ifdef CONFIG_SOFTMMU
111 /* in order to optimize self-modifying code, we count the number
112 of lookups we do to a given page to use a bitmap */
113 unsigned long *code_bitmap;
114 unsigned int code_write_count;
115 #else
116 unsigned long flags;
117 #endif
118 #ifndef CONFIG_USER_ONLY
119 QemuSpin lock;
120 #endif
121 } PageDesc;
124 * struct page_entry - page descriptor entry
125 * @pd: pointer to the &struct PageDesc of the page this entry represents
126 * @index: page index of the page
127 * @locked: whether the page is locked
129 * This struct helps us keep track of the locked state of a page, without
130 * bloating &struct PageDesc.
132 * A page lock protects accesses to all fields of &struct PageDesc.
134 * See also: &struct page_collection.
136 struct page_entry {
137 PageDesc *pd;
138 tb_page_addr_t index;
139 bool locked;
143 * struct page_collection - tracks a set of pages (i.e. &struct page_entry's)
144 * @tree: Binary search tree (BST) of the pages, with key == page index
145 * @max: Pointer to the page in @tree with the highest page index
147 * To avoid deadlock we lock pages in ascending order of page index.
148 * When operating on a set of pages, we need to keep track of them so that
149 * we can lock them in order and also unlock them later. For this we collect
150 * pages (i.e. &struct page_entry's) in a binary search @tree. Given that the
151 * @tree implementation we use does not provide an O(1) operation to obtain the
152 * highest-ranked element, we use @max to keep track of the inserted page
153 * with the highest index. This is valuable because if a page is not in
154 * the tree and its index is higher than @max's, then we can lock it
155 * without breaking the locking order rule.
157 * Note on naming: 'struct page_set' would be shorter, but we already have a few
158 * page_set_*() helpers, so page_collection is used instead to avoid confusion.
160 * See also: page_collection_lock().
162 struct page_collection {
163 GTree *tree;
164 struct page_entry *max;
167 /* list iterators for lists of tagged pointers in TranslationBlock */
168 #define TB_FOR_EACH_TAGGED(head, tb, n, field) \
169 for (n = (head) & 1, tb = (TranslationBlock *)((head) & ~1); \
170 tb; tb = (TranslationBlock *)tb->field[n], n = (uintptr_t)tb & 1, \
171 tb = (TranslationBlock *)((uintptr_t)tb & ~1))
173 #define PAGE_FOR_EACH_TB(pagedesc, tb, n) \
174 TB_FOR_EACH_TAGGED((pagedesc)->first_tb, tb, n, page_next)
176 #define TB_FOR_EACH_JMP(head_tb, tb, n) \
177 TB_FOR_EACH_TAGGED((head_tb)->jmp_list_head, tb, n, jmp_list_next)
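/*
 * Added commentary: these lists use tagged pointers.  A TranslationBlock can
 * sit on up to two per-page lists (one per physical page it spans), so the
 * low bit of each stored pointer records which of the TB's two link slots
 * (page_next[0]/[1], or jmp_list_next[0]/[1]) continues the chain, while the
 * remaining bits are the TB pointer itself.
 */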
180 * In system mode we want L1_MAP to be based on ram offsets,
181 * while in user mode we want it to be based on virtual addresses.
183 * TODO: For user mode, see the caveat re host vs guest virtual
184 * address spaces near GUEST_ADDR_MAX.
186 #if !defined(CONFIG_USER_ONLY)
187 #if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
188 # define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS
189 #else
190 # define L1_MAP_ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS
191 #endif
192 #else
193 # define L1_MAP_ADDR_SPACE_BITS MIN(HOST_LONG_BITS, TARGET_ABI_BITS)
194 #endif
196 /* Size of the L2 (and L3, etc) page tables. */
197 #define V_L2_BITS 10
198 #define V_L2_SIZE (1 << V_L2_BITS)
200 /* Make sure all possible CPU event bits fit in tb->trace_vcpu_dstate */
201 QEMU_BUILD_BUG_ON(CPU_TRACE_DSTATE_MAX_EVENTS >
202 sizeof_field(TranslationBlock, trace_vcpu_dstate)
203 * BITS_PER_BYTE);
206 * L1 Mapping properties
208 static int v_l1_size;
209 static int v_l1_shift;
210 static int v_l2_levels;
212 /* The bottom level has pointers to PageDesc, and is indexed by
213 * anything from 4 to (V_L2_BITS + 3) bits, depending on target page size.
215 #define V_L1_MIN_BITS 4
216 #define V_L1_MAX_BITS (V_L2_BITS + 3)
217 #define V_L1_MAX_SIZE (1 << V_L1_MAX_BITS)
219 static void *l1_map[V_L1_MAX_SIZE];
221 /* code generation context */
222 TCGContext tcg_init_ctx;
223 __thread TCGContext *tcg_ctx;
224 TBContext tb_ctx;
225 bool parallel_cpus;
227 static void page_table_config_init(void)
229 uint32_t v_l1_bits;
231 assert(TARGET_PAGE_BITS);
232 /* The bits remaining after N lower levels of page tables. */
233 v_l1_bits = (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % V_L2_BITS;
234 if (v_l1_bits < V_L1_MIN_BITS) {
235 v_l1_bits += V_L2_BITS;
238 v_l1_size = 1 << v_l1_bits;
239 v_l1_shift = L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - v_l1_bits;
240 v_l2_levels = v_l1_shift / V_L2_BITS - 1;
242 assert(v_l1_bits <= V_L1_MAX_BITS);
243 assert(v_l1_shift % V_L2_BITS == 0);
244 assert(v_l2_levels >= 0);
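/*
 * Added illustration (example values, not from the source): with
 * L1_MAP_ADDR_SPACE_BITS = 32 and TARGET_PAGE_BITS = 12 there are 20 index
 * bits left.  20 % 10 == 0, which is below V_L1_MIN_BITS, so v_l1_bits
 * becomes 10: v_l1_size = 1024, v_l1_shift = 10 and v_l2_levels = 0, i.e.
 * the L1 table points directly at PageDesc arrays.
 */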
247 static void cpu_gen_init(void)
249 tcg_context_init(&tcg_init_ctx);
252 /* Encode VAL as a signed leb128 sequence at P.
253 Return P incremented past the encoded value. */
254 static uint8_t *encode_sleb128(uint8_t *p, target_long val)
256 int more, byte;
258 do {
259 byte = val & 0x7f;
260 val >>= 7;
261 more = !((val == 0 && (byte & 0x40) == 0)
262 || (val == -1 && (byte & 0x40) != 0));
263 if (more) {
264 byte |= 0x80;
266 *p++ = byte;
267 } while (more);
269 return p;
272 /* Decode a signed leb128 sequence at *PP; increment *PP past the
273 decoded value. Return the decoded value. */
274 static target_long decode_sleb128(const uint8_t **pp)
276 const uint8_t *p = *pp;
277 target_long val = 0;
278 int byte, shift = 0;
280 do {
281 byte = *p++;
282 val |= (target_ulong)(byte & 0x7f) << shift;
283 shift += 7;
284 } while (byte & 0x80);
285 if (shift < TARGET_LONG_BITS && (byte & 0x40)) {
286 val |= -(target_ulong)1 << shift;
289 *pp = p;
290 return val;
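/*
 * Added illustration of the sleb128 format used above (example values only):
 * 300 (binary 1_0010_1100) encodes as 0xAC 0x02 -- low seven bits first,
 * with the 0x80 continuation bit set on all but the last byte -- while -5
 * encodes as the single byte 0x7B, since bit 6 of the last byte carries the
 * sign for the decoder's sign extension.
 */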
293 /* Encode the data collected about the instructions while compiling TB.
294 Place the data at BLOCK, and return the number of bytes consumed.
296 The logical table consists of TARGET_INSN_START_WORDS target_ulong's,
297 which come from the target's insn_start data, followed by a uintptr_t
298 which comes from the host pc of the end of the code implementing the insn.
300 Each line of the table is encoded as sleb128 deltas from the previous
301 line. The seed for the first line is { tb->pc, 0..., tb->tc.ptr }.
302 That is, the first column is seeded with the guest pc, the last column
303 with the host pc, and the middle columns with zeros. */
305 static int encode_search(TranslationBlock *tb, uint8_t *block)
307 uint8_t *highwater = tcg_ctx->code_gen_highwater;
308 uint8_t *p = block;
309 int i, j, n;
311 for (i = 0, n = tb->icount; i < n; ++i) {
312 target_ulong prev;
314 for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
315 if (i == 0) {
316 prev = (j == 0 ? tb->pc : 0);
317 } else {
318 prev = tcg_ctx->gen_insn_data[i - 1][j];
320 p = encode_sleb128(p, tcg_ctx->gen_insn_data[i][j] - prev);
322 prev = (i == 0 ? 0 : tcg_ctx->gen_insn_end_off[i - 1]);
323 p = encode_sleb128(p, tcg_ctx->gen_insn_end_off[i] - prev);
325 /* Test for (pending) buffer overflow. The assumption is that any
326 one row beginning below the high water mark cannot overrun
327 the buffer completely. Thus we can test for overflow after
328 encoding a row without having to check during encoding. */
329 if (unlikely(p > highwater)) {
330 return -1;
334 return p - block;
337 /* The cpu state corresponding to 'searched_pc' is restored.
338 * When reset_icount is true, current TB will be interrupted and
339 * icount should be recalculated.
341 static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
342 uintptr_t searched_pc, bool reset_icount)
344 target_ulong data[TARGET_INSN_START_WORDS] = { tb->pc };
345 uintptr_t host_pc = (uintptr_t)tb->tc.ptr;
346 CPUArchState *env = cpu->env_ptr;
347 const uint8_t *p = tb->tc.ptr + tb->tc.size;
348 int i, j, num_insns = tb->icount;
349 #ifdef CONFIG_PROFILER
350 TCGProfile *prof = &tcg_ctx->prof;
351 int64_t ti = profile_getclock();
352 #endif
354 searched_pc -= GETPC_ADJ;
356 if (searched_pc < host_pc) {
357 return -1;
360 /* Reconstruct the stored insn data while looking for the point at
361 which the end of the insn exceeds the searched_pc. */
362 for (i = 0; i < num_insns; ++i) {
363 for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
364 data[j] += decode_sleb128(&p);
366 host_pc += decode_sleb128(&p);
367 if (host_pc > searched_pc) {
368 goto found;
371 return -1;
373 found:
374 if (reset_icount && (tb_cflags(tb) & CF_USE_ICOUNT)) {
375 assert(icount_enabled());
376 /* Reset the cycle counter to the start of the block
377 and shift it to the number of actually executed instructions */
378 cpu_neg(cpu)->icount_decr.u16.low += num_insns - i;
380 restore_state_to_opc(env, tb, data);
382 #ifdef CONFIG_PROFILER
383 qatomic_set(&prof->restore_time,
384 prof->restore_time + profile_getclock() - ti);
385 qatomic_set(&prof->restore_count, prof->restore_count + 1);
386 #endif
387 return 0;
390 void tb_destroy(TranslationBlock *tb)
392 qemu_spin_destroy(&tb->jmp_lock);
395 bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc, bool will_exit)
398 * The host_pc has to be in the rx region of the code buffer.
399 * If it is not we will not be able to resolve it here.
400 * The two cases where host_pc will not be correct are:
402 * - fault during translation (instruction fetch)
403 * - fault from helper (not using GETPC() macro)
405 * Either way we need to return early as we can't resolve it here.
407 if (in_code_gen_buffer((const void *)(host_pc - tcg_splitwx_diff))) {
408 TranslationBlock *tb = tcg_tb_lookup(host_pc);
409 if (tb) {
410 cpu_restore_state_from_tb(cpu, tb, host_pc, will_exit);
411 if (tb_cflags(tb) & CF_NOCACHE) {
412 /* one-shot translation, invalidate it immediately */
413 tb_phys_invalidate(tb, -1);
414 tcg_tb_remove(tb);
415 tb_destroy(tb);
417 return true;
420 return false;
423 static void page_init(void)
425 page_size_init();
426 page_table_config_init();
428 #if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
430 #ifdef HAVE_KINFO_GETVMMAP
431 struct kinfo_vmentry *freep;
432 int i, cnt;
434 freep = kinfo_getvmmap(getpid(), &cnt);
435 if (freep) {
436 mmap_lock();
437 for (i = 0; i < cnt; i++) {
438 unsigned long startaddr, endaddr;
440 startaddr = freep[i].kve_start;
441 endaddr = freep[i].kve_end;
442 if (h2g_valid(startaddr)) {
443 startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
445 if (h2g_valid(endaddr)) {
446 endaddr = h2g(endaddr);
447 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
448 } else {
449 #if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
450 endaddr = ~0ul;
451 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
452 #endif
456 free(freep);
457 mmap_unlock();
459 #else
460 FILE *f;
462 last_brk = (unsigned long)sbrk(0);
464 f = fopen("/compat/linux/proc/self/maps", "r");
465 if (f) {
466 mmap_lock();
468 do {
469 unsigned long startaddr, endaddr;
470 int n;
472 n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);
474 if (n == 2 && h2g_valid(startaddr)) {
475 startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
477 if (h2g_valid(endaddr)) {
478 endaddr = h2g(endaddr);
479 } else {
480 endaddr = ~0ul;
482 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
484 } while (!feof(f));
486 fclose(f);
487 mmap_unlock();
489 #endif
491 #endif
494 static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
496 PageDesc *pd;
497 void **lp;
498 int i;
500 /* Level 1. Always allocated. */
501 lp = l1_map + ((index >> v_l1_shift) & (v_l1_size - 1));
503 /* Level 2..N-1. */
504 for (i = v_l2_levels; i > 0; i--) {
505 void **p = qatomic_rcu_read(lp);
507 if (p == NULL) {
508 void *existing;
510 if (!alloc) {
511 return NULL;
513 p = g_new0(void *, V_L2_SIZE);
514 existing = qatomic_cmpxchg(lp, NULL, p);
515 if (unlikely(existing)) {
516 g_free(p);
517 p = existing;
521 lp = p + ((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1));
524 pd = qatomic_rcu_read(lp);
525 if (pd == NULL) {
526 void *existing;
528 if (!alloc) {
529 return NULL;
531 pd = g_new0(PageDesc, V_L2_SIZE);
532 #ifndef CONFIG_USER_ONLY
534 int i;
536 for (i = 0; i < V_L2_SIZE; i++) {
537 qemu_spin_init(&pd[i].lock);
540 #endif
541 existing = qatomic_cmpxchg(lp, NULL, pd);
542 if (unlikely(existing)) {
543 #ifndef CONFIG_USER_ONLY
545 int i;
547 for (i = 0; i < V_L2_SIZE; i++) {
548 qemu_spin_destroy(&pd[i].lock);
551 #endif
552 g_free(pd);
553 pd = existing;
557 return pd + (index & (V_L2_SIZE - 1));
560 static inline PageDesc *page_find(tb_page_addr_t index)
562 return page_find_alloc(index, 0);
565 static void page_lock_pair(PageDesc **ret_p1, tb_page_addr_t phys1,
566 PageDesc **ret_p2, tb_page_addr_t phys2, int alloc);
568 /* In user-mode page locks aren't used; mmap_lock is enough */
569 #ifdef CONFIG_USER_ONLY
571 #define assert_page_locked(pd) tcg_debug_assert(have_mmap_lock())
573 static inline void page_lock(PageDesc *pd)
576 static inline void page_unlock(PageDesc *pd)
579 static inline void page_lock_tb(const TranslationBlock *tb)
582 static inline void page_unlock_tb(const TranslationBlock *tb)
585 struct page_collection *
586 page_collection_lock(tb_page_addr_t start, tb_page_addr_t end)
588 return NULL;
591 void page_collection_unlock(struct page_collection *set)
593 #else /* !CONFIG_USER_ONLY */
595 #ifdef CONFIG_DEBUG_TCG
597 static __thread GHashTable *ht_pages_locked_debug;
599 static void ht_pages_locked_debug_init(void)
601 if (ht_pages_locked_debug) {
602 return;
604 ht_pages_locked_debug = g_hash_table_new(NULL, NULL);
607 static bool page_is_locked(const PageDesc *pd)
609 PageDesc *found;
611 ht_pages_locked_debug_init();
612 found = g_hash_table_lookup(ht_pages_locked_debug, pd);
613 return !!found;
616 static void page_lock__debug(PageDesc *pd)
618 ht_pages_locked_debug_init();
619 g_assert(!page_is_locked(pd));
620 g_hash_table_insert(ht_pages_locked_debug, pd, pd);
623 static void page_unlock__debug(const PageDesc *pd)
625 bool removed;
627 ht_pages_locked_debug_init();
628 g_assert(page_is_locked(pd));
629 removed = g_hash_table_remove(ht_pages_locked_debug, pd);
630 g_assert(removed);
633 static void
634 do_assert_page_locked(const PageDesc *pd, const char *file, int line)
636 if (unlikely(!page_is_locked(pd))) {
637 error_report("assert_page_lock: PageDesc %p not locked @ %s:%d",
638 pd, file, line);
639 abort();
643 #define assert_page_locked(pd) do_assert_page_locked(pd, __FILE__, __LINE__)
645 void assert_no_pages_locked(void)
647 ht_pages_locked_debug_init();
648 g_assert(g_hash_table_size(ht_pages_locked_debug) == 0);
651 #else /* !CONFIG_DEBUG_TCG */
653 #define assert_page_locked(pd)
655 static inline void page_lock__debug(const PageDesc *pd)
659 static inline void page_unlock__debug(const PageDesc *pd)
663 #endif /* CONFIG_DEBUG_TCG */
665 static inline void page_lock(PageDesc *pd)
667 page_lock__debug(pd);
668 qemu_spin_lock(&pd->lock);
671 static inline void page_unlock(PageDesc *pd)
673 qemu_spin_unlock(&pd->lock);
674 page_unlock__debug(pd);
677 /* lock the page(s) of a TB in the correct acquisition order */
678 static inline void page_lock_tb(const TranslationBlock *tb)
680 page_lock_pair(NULL, tb->page_addr[0], NULL, tb->page_addr[1], 0);
683 static inline void page_unlock_tb(const TranslationBlock *tb)
685 PageDesc *p1 = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
687 page_unlock(p1);
688 if (unlikely(tb->page_addr[1] != -1)) {
689 PageDesc *p2 = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
691 if (p2 != p1) {
692 page_unlock(p2);
697 static inline struct page_entry *
698 page_entry_new(PageDesc *pd, tb_page_addr_t index)
700 struct page_entry *pe = g_malloc(sizeof(*pe));
702 pe->index = index;
703 pe->pd = pd;
704 pe->locked = false;
705 return pe;
708 static void page_entry_destroy(gpointer p)
710 struct page_entry *pe = p;
712 g_assert(pe->locked);
713 page_unlock(pe->pd);
714 g_free(pe);
717 /* returns false on success */
718 static bool page_entry_trylock(struct page_entry *pe)
720 bool busy;
722 busy = qemu_spin_trylock(&pe->pd->lock);
723 if (!busy) {
724 g_assert(!pe->locked);
725 pe->locked = true;
726 page_lock__debug(pe->pd);
728 return busy;
731 static void do_page_entry_lock(struct page_entry *pe)
733 page_lock(pe->pd);
734 g_assert(!pe->locked);
735 pe->locked = true;
738 static gboolean page_entry_lock(gpointer key, gpointer value, gpointer data)
740 struct page_entry *pe = value;
742 do_page_entry_lock(pe);
743 return FALSE;
746 static gboolean page_entry_unlock(gpointer key, gpointer value, gpointer data)
748 struct page_entry *pe = value;
750 if (pe->locked) {
751 pe->locked = false;
752 page_unlock(pe->pd);
754 return FALSE;
758 * Trylock a page, and if successful, add the page to a collection.
759 * Returns true ("busy") if the page could not be locked; false otherwise.
761 static bool page_trylock_add(struct page_collection *set, tb_page_addr_t addr)
763 tb_page_addr_t index = addr >> TARGET_PAGE_BITS;
764 struct page_entry *pe;
765 PageDesc *pd;
767 pe = g_tree_lookup(set->tree, &index);
768 if (pe) {
769 return false;
772 pd = page_find(index);
773 if (pd == NULL) {
774 return false;
777 pe = page_entry_new(pd, index);
778 g_tree_insert(set->tree, &pe->index, pe);
781 * If this is either (1) the first insertion or (2) a page whose index
782 * is higher than any other so far, just lock the page and move on.
784 if (set->max == NULL || pe->index > set->max->index) {
785 set->max = pe;
786 do_page_entry_lock(pe);
787 return false;
790 * Try to acquire out-of-order lock; if busy, return busy so that we acquire
791 * locks in order.
793 return page_entry_trylock(pe);
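/*
 * Added commentary: the trylock above is what preserves the "ascending page
 * index" lock order.  If the wanted page has a lower index than set->max we
 * cannot simply block on its lock without risking a deadlock against another
 * thread locking in order, so we only try to take it; on failure the caller
 * (page_collection_lock) drops every lock it holds and retries from scratch.
 */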
796 static gint tb_page_addr_cmp(gconstpointer ap, gconstpointer bp, gpointer udata)
798 tb_page_addr_t a = *(const tb_page_addr_t *)ap;
799 tb_page_addr_t b = *(const tb_page_addr_t *)bp;
801 if (a == b) {
802 return 0;
803 } else if (a < b) {
804 return -1;
806 return 1;
810 * Lock a range of pages ([@start,@end[) as well as the pages of all
811 * intersecting TBs.
812 * Locking order: acquire locks in ascending order of page index.
814 struct page_collection *
815 page_collection_lock(tb_page_addr_t start, tb_page_addr_t end)
817 struct page_collection *set = g_malloc(sizeof(*set));
818 tb_page_addr_t index;
819 PageDesc *pd;
821 start >>= TARGET_PAGE_BITS;
822 end >>= TARGET_PAGE_BITS;
823 g_assert(start <= end);
825 set->tree = g_tree_new_full(tb_page_addr_cmp, NULL, NULL,
826 page_entry_destroy);
827 set->max = NULL;
828 assert_no_pages_locked();
830 retry:
831 g_tree_foreach(set->tree, page_entry_lock, NULL);
833 for (index = start; index <= end; index++) {
834 TranslationBlock *tb;
835 int n;
837 pd = page_find(index);
838 if (pd == NULL) {
839 continue;
841 if (page_trylock_add(set, index << TARGET_PAGE_BITS)) {
842 g_tree_foreach(set->tree, page_entry_unlock, NULL);
843 goto retry;
845 assert_page_locked(pd);
846 PAGE_FOR_EACH_TB(pd, tb, n) {
847 if (page_trylock_add(set, tb->page_addr[0]) ||
848 (tb->page_addr[1] != -1 &&
849 page_trylock_add(set, tb->page_addr[1]))) {
850 /* drop all locks, and reacquire in order */
851 g_tree_foreach(set->tree, page_entry_unlock, NULL);
852 goto retry;
856 return set;
859 void page_collection_unlock(struct page_collection *set)
861 /* entries are unlocked and freed via page_entry_destroy */
862 g_tree_destroy(set->tree);
863 g_free(set);
866 #endif /* !CONFIG_USER_ONLY */
868 static void page_lock_pair(PageDesc **ret_p1, tb_page_addr_t phys1,
869 PageDesc **ret_p2, tb_page_addr_t phys2, int alloc)
871 PageDesc *p1, *p2;
872 tb_page_addr_t page1;
873 tb_page_addr_t page2;
875 assert_memory_lock();
876 g_assert(phys1 != -1);
878 page1 = phys1 >> TARGET_PAGE_BITS;
879 page2 = phys2 >> TARGET_PAGE_BITS;
881 p1 = page_find_alloc(page1, alloc);
882 if (ret_p1) {
883 *ret_p1 = p1;
885 if (likely(phys2 == -1)) {
886 page_lock(p1);
887 return;
888 } else if (page1 == page2) {
889 page_lock(p1);
890 if (ret_p2) {
891 *ret_p2 = p1;
893 return;
895 p2 = page_find_alloc(page2, alloc);
896 if (ret_p2) {
897 *ret_p2 = p2;
899 if (page1 < page2) {
900 page_lock(p1);
901 page_lock(p2);
902 } else {
903 page_lock(p2);
904 page_lock(p1);
908 /* Minimum size of the code gen buffer. This number is arbitrarily chosen,
909 but not so small that we can't have a fair number of TBs live. */
910 #define MIN_CODE_GEN_BUFFER_SIZE (1 * MiB)
912 /* Maximum size of the code gen buffer we'd like to use. Unless otherwise
913 indicated, this is constrained by the range of direct branches on the
914 host cpu, as used by the TCG implementation of goto_tb. */
915 #if defined(__x86_64__)
916 # define MAX_CODE_GEN_BUFFER_SIZE (2 * GiB)
917 #elif defined(__sparc__)
918 # define MAX_CODE_GEN_BUFFER_SIZE (2 * GiB)
919 #elif defined(__powerpc64__)
920 # define MAX_CODE_GEN_BUFFER_SIZE (2 * GiB)
921 #elif defined(__powerpc__)
922 # define MAX_CODE_GEN_BUFFER_SIZE (32 * MiB)
923 #elif defined(__aarch64__)
924 # define MAX_CODE_GEN_BUFFER_SIZE (2 * GiB)
925 #elif defined(__s390x__)
926 /* We have a +- 4GB range on the branches; leave some slop. */
927 # define MAX_CODE_GEN_BUFFER_SIZE (3 * GiB)
928 #elif defined(__mips__)
929 /* We have a 256MB branch region, but leave room to make sure the
930 main executable is also within that region. */
931 # define MAX_CODE_GEN_BUFFER_SIZE (128 * MiB)
932 #else
933 # define MAX_CODE_GEN_BUFFER_SIZE ((size_t)-1)
934 #endif
936 #if TCG_TARGET_REG_BITS == 32
937 #define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32 * MiB)
938 #ifdef CONFIG_USER_ONLY
940 * For user mode on smaller 32 bit systems we may run into trouble
941 * allocating big chunks of data in the right place. On these systems
942 * we utilise a static code generation buffer directly in the binary.
944 #define USE_STATIC_CODE_GEN_BUFFER
945 #endif
946 #else /* TCG_TARGET_REG_BITS == 64 */
947 #ifdef CONFIG_USER_ONLY
949 * As user-mode emulation typically means running multiple instances
950 * of the translator, don't go too nuts with our default code gen
951 * buffer lest we make things too hard for the OS.
953 #define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (128 * MiB)
954 #else
956 * We expect most system emulation to run one or two guests per host.
957 * Users running large scale system emulation may want to tweak their
958 * runtime setup via the tb-size control on the command line.
960 #define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (1 * GiB)
961 #endif
962 #endif
964 #define DEFAULT_CODE_GEN_BUFFER_SIZE \
965 (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
966 ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)
968 static size_t size_code_gen_buffer(size_t tb_size)
970 /* Size the buffer. */
971 if (tb_size == 0) {
972 size_t phys_mem = qemu_get_host_physmem();
973 if (phys_mem == 0) {
974 tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
975 } else {
976 tb_size = MIN(DEFAULT_CODE_GEN_BUFFER_SIZE, phys_mem / 8);
979 if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
980 tb_size = MIN_CODE_GEN_BUFFER_SIZE;
982 if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
983 tb_size = MAX_CODE_GEN_BUFFER_SIZE;
985 return tb_size;
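/*
 * Added illustration (example numbers): on a host reporting 4 GiB of RAM,
 * phys_mem / 8 is 512 MiB, so a 64-bit system-mode build gets a 512 MiB
 * buffer rather than the 1 GiB DEFAULT_CODE_GEN_BUFFER_SIZE; an explicit
 * tb-size setting is still clamped to the MIN/MAX bounds above.
 */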
988 #ifdef __mips__
989 /* In order to use J and JAL within the code_gen_buffer, we require
990 that the buffer not cross a 256MB boundary. */
991 static inline bool cross_256mb(void *addr, size_t size)
993 return ((uintptr_t)addr ^ ((uintptr_t)addr + size)) & ~0x0ffffffful;
996 /* We weren't able to allocate a buffer without crossing that boundary,
997 so make do with the larger portion of the buffer that doesn't cross.
998 Returns the new base of the buffer, and adjusts code_gen_buffer_size. */
999 static inline void *split_cross_256mb(void *buf1, size_t size1)
1001 void *buf2 = (void *)(((uintptr_t)buf1 + size1) & ~0x0ffffffful);
1002 size_t size2 = buf1 + size1 - buf2;
1004 size1 = buf2 - buf1;
1005 if (size1 < size2) {
1006 size1 = size2;
1007 buf1 = buf2;
1010 tcg_ctx->code_gen_buffer_size = size1;
1011 return buf1;
1013 #endif
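/*
 * Added illustration (example addresses): cross_256mb() flags a buffer
 * starting at 0x2ff00000 with a 3 MiB size, because the start and end
 * addresses differ above bit 27.  split_cross_256mb() then keeps the larger
 * portion -- here the 2 MiB above the 0x30000000 boundary -- and shrinks
 * code_gen_buffer_size to match.
 */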
1015 #ifdef USE_STATIC_CODE_GEN_BUFFER
1016 static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
1017 __attribute__((aligned(CODE_GEN_ALIGN)));
1019 static bool alloc_code_gen_buffer(size_t tb_size, int splitwx, Error **errp)
1021 void *buf, *end;
1022 size_t size;
1024 if (splitwx > 0) {
1025 error_setg(errp, "jit split-wx not supported");
1026 return false;
1029 /* page-align the beginning and end of the buffer */
1030 buf = static_code_gen_buffer;
1031 end = static_code_gen_buffer + sizeof(static_code_gen_buffer);
1032 buf = QEMU_ALIGN_PTR_UP(buf, qemu_real_host_page_size);
1033 end = QEMU_ALIGN_PTR_DOWN(end, qemu_real_host_page_size);
1035 size = end - buf;
1037 /* Honor a command-line option limiting the size of the buffer. */
1038 if (size > tb_size) {
1039 size = QEMU_ALIGN_DOWN(tb_size, qemu_real_host_page_size);
1041 tcg_ctx->code_gen_buffer_size = size;
1043 #ifdef __mips__
1044 if (cross_256mb(buf, size)) {
1045 buf = split_cross_256mb(buf, size);
1046 size = tcg_ctx->code_gen_buffer_size;
1048 #endif
1050 if (qemu_mprotect_rwx(buf, size)) {
1051 error_setg_errno(errp, errno, "mprotect of jit buffer");
1052 return false;
1054 qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);
1056 tcg_ctx->code_gen_buffer = buf;
1057 return true;
1059 #elif defined(_WIN32)
1060 static bool alloc_code_gen_buffer(size_t size, int splitwx, Error **errp)
1062 void *buf;
1064 if (splitwx > 0) {
1065 error_setg(errp, "jit split-wx not supported");
1066 return false;
1069 buf = VirtualAlloc(NULL, size, MEM_RESERVE | MEM_COMMIT,
1070 PAGE_EXECUTE_READWRITE);
1071 if (buf == NULL) {
1072 error_setg_win32(errp, GetLastError(),
1073 "allocate %zu bytes for jit buffer", size);
1074 return false;
1077 tcg_ctx->code_gen_buffer = buf;
1078 tcg_ctx->code_gen_buffer_size = size;
1079 return true;
1081 #else
1082 static bool alloc_code_gen_buffer_anon(size_t size, int prot,
1083 int flags, Error **errp)
1085 void *buf;
1087 buf = mmap(NULL, size, prot, flags, -1, 0);
1088 if (buf == MAP_FAILED) {
1089 error_setg_errno(errp, errno,
1090 "allocate %zu bytes for jit buffer", size);
1091 return false;
1093 tcg_ctx->code_gen_buffer_size = size;
1095 #ifdef __mips__
1096 if (cross_256mb(buf, size)) {
1098 * Try again, with the original still mapped, to avoid re-acquiring
1099 * the same 256mb crossing.
1101 size_t size2;
1102 void *buf2 = mmap(NULL, size, prot, flags, -1, 0);
1103 switch ((int)(buf2 != MAP_FAILED)) {
1104 case 1:
1105 if (!cross_256mb(buf2, size)) {
1106 /* Success! Use the new buffer. */
1107 munmap(buf, size);
1108 break;
1110 /* Failure. Work with what we had. */
1111 munmap(buf2, size);
1112 /* fallthru */
1113 default:
1114 /* Split the original buffer. Free the smaller half. */
1115 buf2 = split_cross_256mb(buf, size);
1116 size2 = tcg_ctx->code_gen_buffer_size;
1117 if (buf == buf2) {
1118 munmap(buf + size2, size - size2);
1119 } else {
1120 munmap(buf, size - size2);
1122 size = size2;
1123 break;
1125 buf = buf2;
1127 #endif
1129 /* Request large pages for the buffer. */
1130 qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);
1132 tcg_ctx->code_gen_buffer = buf;
1133 return true;
1136 #ifndef CONFIG_TCG_INTERPRETER
1137 #ifdef CONFIG_POSIX
1138 #include "qemu/memfd.h"
1140 static bool alloc_code_gen_buffer_splitwx_memfd(size_t size, Error **errp)
1142 void *buf_rw = NULL, *buf_rx = MAP_FAILED;
1143 int fd = -1;
1145 #ifdef __mips__
1146 /* Find space for the RX mapping, vs the 256MiB regions. */
1147 if (!alloc_code_gen_buffer_anon(size, PROT_NONE,
1148 MAP_PRIVATE | MAP_ANONYMOUS |
1149 MAP_NORESERVE, errp)) {
1150 return false;
1152 /* The size of the mapping may have been adjusted. */
1153 size = tcg_ctx->code_gen_buffer_size;
1154 buf_rx = tcg_ctx->code_gen_buffer;
1155 #endif
1157 buf_rw = qemu_memfd_alloc("tcg-jit", size, 0, &fd, errp);
1158 if (buf_rw == NULL) {
1159 goto fail;
1162 #ifdef __mips__
1163 void *tmp = mmap(buf_rx, size, PROT_READ | PROT_EXEC,
1164 MAP_SHARED | MAP_FIXED, fd, 0);
1165 if (tmp != buf_rx) {
1166 goto fail_rx;
1168 #else
1169 buf_rx = mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_SHARED, fd, 0);
1170 if (buf_rx == MAP_FAILED) {
1171 goto fail_rx;
1173 #endif
1175 close(fd);
1176 tcg_ctx->code_gen_buffer = buf_rw;
1177 tcg_ctx->code_gen_buffer_size = size;
1178 tcg_splitwx_diff = buf_rx - buf_rw;
1180 /* Request large pages for the buffer and the splitwx. */
1181 qemu_madvise(buf_rw, size, QEMU_MADV_HUGEPAGE);
1182 qemu_madvise(buf_rx, size, QEMU_MADV_HUGEPAGE);
1183 return true;
1185 fail_rx:
1186 error_setg_errno(errp, errno, "failed to map shared memory for execute");
1187 fail:
1188 if (buf_rx != MAP_FAILED) {
1189 munmap(buf_rx, size);
1191 if (buf_rw) {
1192 munmap(buf_rw, size);
1194 if (fd >= 0) {
1195 close(fd);
1197 return false;
1199 #endif /* CONFIG_POSIX */
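/*
 * Added commentary: the memfd scheme above gives two views of the same
 * physical pages -- a writable mapping used by the translator and a separate
 * read+exec mapping used when running the generated code.  tcg_splitwx_diff
 * records the constant offset from the RW view to the RX view, so no page
 * ever needs to be writable and executable at the same time (W^X).
 */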
1201 #ifdef CONFIG_DARWIN
1202 #include <mach/mach.h>
1204 extern kern_return_t mach_vm_remap(vm_map_t target_task,
1205 mach_vm_address_t *target_address,
1206 mach_vm_size_t size,
1207 mach_vm_offset_t mask,
1208 int flags,
1209 vm_map_t src_task,
1210 mach_vm_address_t src_address,
1211 boolean_t copy,
1212 vm_prot_t *cur_protection,
1213 vm_prot_t *max_protection,
1214 vm_inherit_t inheritance);
1216 static bool alloc_code_gen_buffer_splitwx_vmremap(size_t size, Error **errp)
1218 kern_return_t ret;
1219 mach_vm_address_t buf_rw, buf_rx;
1220 vm_prot_t cur_prot, max_prot;
1222 /* Map the read-write portion via normal anon memory. */
1223 if (!alloc_code_gen_buffer_anon(size, PROT_READ | PROT_WRITE,
1224 MAP_PRIVATE | MAP_ANONYMOUS, errp)) {
1225 return false;
1228 buf_rw = (mach_vm_address_t)tcg_ctx->code_gen_buffer;
1229 buf_rx = 0;
1230 ret = mach_vm_remap(mach_task_self(),
1231 &buf_rx,
1232 size,
1234 VM_FLAGS_ANYWHERE,
1235 mach_task_self(),
1236 buf_rw,
1237 false,
1238 &cur_prot,
1239 &max_prot,
1240 VM_INHERIT_NONE);
1241 if (ret != KERN_SUCCESS) {
1242 /* TODO: Convert "ret" to a human readable error message. */
1243 error_setg(errp, "vm_remap for jit splitwx failed");
1244 munmap((void *)buf_rw, size);
1245 return false;
1248 if (mprotect((void *)buf_rx, size, PROT_READ | PROT_EXEC) != 0) {
1249 error_setg_errno(errp, errno, "mprotect for jit splitwx");
1250 munmap((void *)buf_rx, size);
1251 munmap((void *)buf_rw, size);
1252 return false;
1255 tcg_splitwx_diff = buf_rx - buf_rw;
1256 return true;
1258 #endif /* CONFIG_DARWIN */
1259 #endif /* CONFIG_TCG_INTERPRETER */
1261 static bool alloc_code_gen_buffer_splitwx(size_t size, Error **errp)
1263 #ifndef CONFIG_TCG_INTERPRETER
1264 # ifdef CONFIG_DARWIN
1265 return alloc_code_gen_buffer_splitwx_vmremap(size, errp);
1266 # endif
1267 # ifdef CONFIG_POSIX
1268 return alloc_code_gen_buffer_splitwx_memfd(size, errp);
1269 # endif
1270 #endif
1271 error_setg(errp, "jit split-wx not supported");
1272 return false;
1275 static bool alloc_code_gen_buffer(size_t size, int splitwx, Error **errp)
1277 ERRP_GUARD();
1278 int prot, flags;
1280 if (splitwx) {
1281 if (alloc_code_gen_buffer_splitwx(size, errp)) {
1282 return true;
1285 * If splitwx force-on (1), fail;
1286 * if splitwx default-on (-1), fall through to splitwx off.
1288 if (splitwx > 0) {
1289 return false;
1291 error_free_or_abort(errp);
1294 prot = PROT_READ | PROT_WRITE | PROT_EXEC;
1295 flags = MAP_PRIVATE | MAP_ANONYMOUS;
1296 #ifdef CONFIG_TCG_INTERPRETER
1297 /* The tcg interpreter does not need execute permission. */
1298 prot = PROT_READ | PROT_WRITE;
1299 #elif defined(CONFIG_DARWIN)
1300 /* Applicable to both iOS and macOS (Apple Silicon). */
1301 if (!splitwx) {
1302 flags |= MAP_JIT;
1304 #endif
1306 return alloc_code_gen_buffer_anon(size, prot, flags, errp);
1308 #endif /* USE_STATIC_CODE_GEN_BUFFER, WIN32, POSIX */
1310 static bool tb_cmp(const void *ap, const void *bp)
1312 const TranslationBlock *a = ap;
1313 const TranslationBlock *b = bp;
1315 return a->pc == b->pc &&
1316 a->cs_base == b->cs_base &&
1317 a->flags == b->flags &&
1318 (tb_cflags(a) & CF_HASH_MASK) == (tb_cflags(b) & CF_HASH_MASK) &&
1319 a->trace_vcpu_dstate == b->trace_vcpu_dstate &&
1320 a->page_addr[0] == b->page_addr[0] &&
1321 a->page_addr[1] == b->page_addr[1];
1324 static void tb_htable_init(void)
1326 unsigned int mode = QHT_MODE_AUTO_RESIZE;
1328 qht_init(&tb_ctx.htable, tb_cmp, CODE_GEN_HTABLE_SIZE, mode);
1331 /* Must be called before using the QEMU cpus. 'tb_size' is the size
1332 (in bytes) allocated to the translation buffer. Zero means default
1333 size. */
1334 void tcg_exec_init(unsigned long tb_size, int splitwx)
1336 bool ok;
1338 tcg_allowed = true;
1339 cpu_gen_init();
1340 page_init();
1341 tb_htable_init();
1343 ok = alloc_code_gen_buffer(size_code_gen_buffer(tb_size),
1344 splitwx, &error_fatal);
1345 assert(ok);
1347 #if defined(CONFIG_SOFTMMU)
1348 /* There's no guest base to take into account, so go ahead and
1349 initialize the prologue now. */
1350 tcg_prologue_init(tcg_ctx);
1351 #endif
1354 /* call with @p->lock held */
1355 static inline void invalidate_page_bitmap(PageDesc *p)
1357 assert_page_locked(p);
1358 #ifdef CONFIG_SOFTMMU
1359 g_free(p->code_bitmap);
1360 p->code_bitmap = NULL;
1361 p->code_write_count = 0;
1362 #endif
1365 /* Set to NULL all the 'first_tb' fields in all PageDescs. */
1366 static void page_flush_tb_1(int level, void **lp)
1368 int i;
1370 if (*lp == NULL) {
1371 return;
1373 if (level == 0) {
1374 PageDesc *pd = *lp;
1376 for (i = 0; i < V_L2_SIZE; ++i) {
1377 page_lock(&pd[i]);
1378 pd[i].first_tb = (uintptr_t)NULL;
1379 invalidate_page_bitmap(pd + i);
1380 page_unlock(&pd[i]);
1382 } else {
1383 void **pp = *lp;
1385 for (i = 0; i < V_L2_SIZE; ++i) {
1386 page_flush_tb_1(level - 1, pp + i);
1391 static void page_flush_tb(void)
1393 int i, l1_sz = v_l1_size;
1395 for (i = 0; i < l1_sz; i++) {
1396 page_flush_tb_1(v_l2_levels, l1_map + i);
1400 static gboolean tb_host_size_iter(gpointer key, gpointer value, gpointer data)
1402 const TranslationBlock *tb = value;
1403 size_t *size = data;
1405 *size += tb->tc.size;
1406 return false;
1409 /* flush all the translation blocks */
1410 static void do_tb_flush(CPUState *cpu, run_on_cpu_data tb_flush_count)
1412 bool did_flush = false;
1414 mmap_lock();
1415 /* If it has already been done on request of another CPU,
1416 * just retry.
1418 if (tb_ctx.tb_flush_count != tb_flush_count.host_int) {
1419 goto done;
1421 did_flush = true;
1423 if (DEBUG_TB_FLUSH_GATE) {
1424 size_t nb_tbs = tcg_nb_tbs();
1425 size_t host_size = 0;
1427 tcg_tb_foreach(tb_host_size_iter, &host_size);
1428 printf("qemu: flush code_size=%zu nb_tbs=%zu avg_tb_size=%zu\n",
1429 tcg_code_size(), nb_tbs, nb_tbs > 0 ? host_size / nb_tbs : 0);
1432 CPU_FOREACH(cpu) {
1433 cpu_tb_jmp_cache_clear(cpu);
1436 qht_reset_size(&tb_ctx.htable, CODE_GEN_HTABLE_SIZE);
1437 page_flush_tb();
1439 tcg_region_reset_all();
1440 /* XXX: flush processor icache at this point if cache flush is
1441 expensive */
1442 qatomic_mb_set(&tb_ctx.tb_flush_count, tb_ctx.tb_flush_count + 1);
1444 done:
1445 mmap_unlock();
1446 if (did_flush) {
1447 qemu_plugin_flush_cb();
1451 void tb_flush(CPUState *cpu)
1453 if (tcg_enabled()) {
1454 unsigned tb_flush_count = qatomic_mb_read(&tb_ctx.tb_flush_count);
1456 if (cpu_in_exclusive_context(cpu)) {
1457 do_tb_flush(cpu, RUN_ON_CPU_HOST_INT(tb_flush_count));
1458 } else {
1459 async_safe_run_on_cpu(cpu, do_tb_flush,
1460 RUN_ON_CPU_HOST_INT(tb_flush_count));
1466 * Formerly ifdef DEBUG_TB_CHECK. These debug functions are user-mode-only,
1467 * so in order to prevent bit rot we compile them unconditionally in user-mode,
1468 * and let the optimizer get rid of them by wrapping their user-only callers
1469 * with if (DEBUG_TB_CHECK_GATE).
1471 #ifdef CONFIG_USER_ONLY
1473 static void do_tb_invalidate_check(void *p, uint32_t hash, void *userp)
1475 TranslationBlock *tb = p;
1476 target_ulong addr = *(target_ulong *)userp;
1478 if (!(addr + TARGET_PAGE_SIZE <= tb->pc || addr >= tb->pc + tb->size)) {
1479 printf("ERROR invalidate: address=" TARGET_FMT_lx
1480 " PC=%08lx size=%04x\n", addr, (long)tb->pc, tb->size);
1484 /* verify that all the pages have correct rights for code
1486 * Called with mmap_lock held.
1488 static void tb_invalidate_check(target_ulong address)
1490 address &= TARGET_PAGE_MASK;
1491 qht_iter(&tb_ctx.htable, do_tb_invalidate_check, &address);
1494 static void do_tb_page_check(void *p, uint32_t hash, void *userp)
1496 TranslationBlock *tb = p;
1497 int flags1, flags2;
1499 flags1 = page_get_flags(tb->pc);
1500 flags2 = page_get_flags(tb->pc + tb->size - 1);
1501 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
1502 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
1503 (long)tb->pc, tb->size, flags1, flags2);
1507 /* verify that all the pages have correct rights for code */
1508 static void tb_page_check(void)
1510 qht_iter(&tb_ctx.htable, do_tb_page_check, NULL);
1513 #endif /* CONFIG_USER_ONLY */
1516 * user-mode: call with mmap_lock held
1517 * !user-mode: call with @pd->lock held
1519 static inline void tb_page_remove(PageDesc *pd, TranslationBlock *tb)
1521 TranslationBlock *tb1;
1522 uintptr_t *pprev;
1523 unsigned int n1;
1525 assert_page_locked(pd);
1526 pprev = &pd->first_tb;
1527 PAGE_FOR_EACH_TB(pd, tb1, n1) {
1528 if (tb1 == tb) {
1529 *pprev = tb1->page_next[n1];
1530 return;
1532 pprev = &tb1->page_next[n1];
1534 g_assert_not_reached();
1537 /* remove @orig from its @n_orig-th jump list */
1538 static inline void tb_remove_from_jmp_list(TranslationBlock *orig, int n_orig)
1540 uintptr_t ptr, ptr_locked;
1541 TranslationBlock *dest;
1542 TranslationBlock *tb;
1543 uintptr_t *pprev;
1544 int n;
1546 /* mark the LSB of jmp_dest[] so that no further jumps can be inserted */
1547 ptr = qatomic_or_fetch(&orig->jmp_dest[n_orig], 1);
1548 dest = (TranslationBlock *)(ptr & ~1);
1549 if (dest == NULL) {
1550 return;
1553 qemu_spin_lock(&dest->jmp_lock);
1555 * While acquiring the lock, the jump might have been removed if the
1556 * destination TB was invalidated; check again.
1558 ptr_locked = qatomic_read(&orig->jmp_dest[n_orig]);
1559 if (ptr_locked != ptr) {
1560 qemu_spin_unlock(&dest->jmp_lock);
1562 * The only possibility is that the jump was unlinked via
1563 * tb_jmp_unlink(dest). Seeing here another destination would be a bug,
1564 * because we set the LSB above.
1566 g_assert(ptr_locked == 1 && dest->cflags & CF_INVALID);
1567 return;
1570 * We first acquired the lock, and since the destination pointer matches,
1571 * we know for sure that @orig is in the jmp list.
1573 pprev = &dest->jmp_list_head;
1574 TB_FOR_EACH_JMP(dest, tb, n) {
1575 if (tb == orig && n == n_orig) {
1576 *pprev = tb->jmp_list_next[n];
1577 /* no need to set orig->jmp_dest[n]; setting the LSB was enough */
1578 qemu_spin_unlock(&dest->jmp_lock);
1579 return;
1581 pprev = &tb->jmp_list_next[n];
1583 g_assert_not_reached();
1586 /* reset the jump entry 'n' of a TB so that it is not chained to
1587 another TB */
1588 static inline void tb_reset_jump(TranslationBlock *tb, int n)
1590 uintptr_t addr = (uintptr_t)(tb->tc.ptr + tb->jmp_reset_offset[n]);
1591 tb_set_jmp_target(tb, n, addr);
1594 /* remove any jumps to the TB */
1595 static inline void tb_jmp_unlink(TranslationBlock *dest)
1597 TranslationBlock *tb;
1598 int n;
1600 qemu_spin_lock(&dest->jmp_lock);
1602 TB_FOR_EACH_JMP(dest, tb, n) {
1603 tb_reset_jump(tb, n);
1604 qatomic_and(&tb->jmp_dest[n], (uintptr_t)NULL | 1);
1605 /* No need to clear the list entry; setting the dest ptr is enough */
1607 dest->jmp_list_head = (uintptr_t)NULL;
1609 qemu_spin_unlock(&dest->jmp_lock);
1613 * In user-mode, call with mmap_lock held.
1614 * In !user-mode, if @rm_from_page_list is set, call with the TB's pages'
1615 * locks held.
1617 static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list)
1619 CPUState *cpu;
1620 PageDesc *p;
1621 uint32_t h;
1622 tb_page_addr_t phys_pc;
1624 assert_memory_lock();
1626 /* make sure no further incoming jumps will be chained to this TB */
1627 qemu_spin_lock(&tb->jmp_lock);
1628 qatomic_set(&tb->cflags, tb->cflags | CF_INVALID);
1629 qemu_spin_unlock(&tb->jmp_lock);
1631 /* remove the TB from the hash list */
1632 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1633 h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb_cflags(tb) & CF_HASH_MASK,
1634 tb->trace_vcpu_dstate);
1635 if (!(tb->cflags & CF_NOCACHE) &&
1636 !qht_remove(&tb_ctx.htable, tb, h)) {
1637 return;
1640 /* remove the TB from the page list */
1641 if (rm_from_page_list) {
1642 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
1643 tb_page_remove(p, tb);
1644 invalidate_page_bitmap(p);
1645 if (tb->page_addr[1] != -1) {
1646 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
1647 tb_page_remove(p, tb);
1648 invalidate_page_bitmap(p);
1652 /* remove the TB from the per-CPU jump cache */
1653 h = tb_jmp_cache_hash_func(tb->pc);
1654 CPU_FOREACH(cpu) {
1655 if (qatomic_read(&cpu->tb_jmp_cache[h]) == tb) {
1656 qatomic_set(&cpu->tb_jmp_cache[h], NULL);
1660 /* suppress this TB from the two jump lists */
1661 tb_remove_from_jmp_list(tb, 0);
1662 tb_remove_from_jmp_list(tb, 1);
1664 /* suppress any remaining jumps to this TB */
1665 tb_jmp_unlink(tb);
1667 qatomic_set(&tcg_ctx->tb_phys_invalidate_count,
1668 tcg_ctx->tb_phys_invalidate_count + 1);
1671 static void tb_phys_invalidate__locked(TranslationBlock *tb)
1673 qemu_thread_jit_write();
1674 do_tb_phys_invalidate(tb, true);
1675 qemu_thread_jit_execute();
1678 /* invalidate one TB
1680 * Called with mmap_lock held in user-mode.
1682 void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
1684 if (page_addr == -1 && tb->page_addr[0] != -1) {
1685 page_lock_tb(tb);
1686 do_tb_phys_invalidate(tb, true);
1687 page_unlock_tb(tb);
1688 } else {
1689 do_tb_phys_invalidate(tb, false);
1693 #ifdef CONFIG_SOFTMMU
1694 /* call with @p->lock held */
1695 static void build_page_bitmap(PageDesc *p)
1697 int n, tb_start, tb_end;
1698 TranslationBlock *tb;
1700 assert_page_locked(p);
1701 p->code_bitmap = bitmap_new(TARGET_PAGE_SIZE);
1703 PAGE_FOR_EACH_TB(p, tb, n) {
1704 /* NOTE: this is subtle as a TB may span two physical pages */
1705 if (n == 0) {
1706 /* NOTE: tb_end may be after the end of the page, but
1707 it is not a problem */
1708 tb_start = tb->pc & ~TARGET_PAGE_MASK;
1709 tb_end = tb_start + tb->size;
1710 if (tb_end > TARGET_PAGE_SIZE) {
1711 tb_end = TARGET_PAGE_SIZE;
1713 } else {
1714 tb_start = 0;
1715 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1717 bitmap_set(p->code_bitmap, tb_start, tb_end - tb_start);
1720 #endif
1722 /* add the tb to the target page and protect it if necessary
1724 * Called with mmap_lock held for user-mode emulation.
1725 * Called with @p->lock held in !user-mode.
1727 static inline void tb_page_add(PageDesc *p, TranslationBlock *tb,
1728 unsigned int n, tb_page_addr_t page_addr)
1730 #ifndef CONFIG_USER_ONLY
1731 bool page_already_protected;
1732 #endif
1734 assert_page_locked(p);
1736 tb->page_addr[n] = page_addr;
1737 tb->page_next[n] = p->first_tb;
1738 #ifndef CONFIG_USER_ONLY
1739 page_already_protected = p->first_tb != (uintptr_t)NULL;
1740 #endif
1741 p->first_tb = (uintptr_t)tb | n;
1742 invalidate_page_bitmap(p);
1744 #if defined(CONFIG_USER_ONLY)
1745 if (p->flags & PAGE_WRITE) {
1746 target_ulong addr;
1747 PageDesc *p2;
1748 int prot;
1750 /* force the host page as non writable (writes will have a
1751 page fault + mprotect overhead) */
1752 page_addr &= qemu_host_page_mask;
1753 prot = 0;
1754 for (addr = page_addr; addr < page_addr + qemu_host_page_size;
1755 addr += TARGET_PAGE_SIZE) {
1757 p2 = page_find(addr >> TARGET_PAGE_BITS);
1758 if (!p2) {
1759 continue;
1761 prot |= p2->flags;
1762 p2->flags &= ~PAGE_WRITE;
1764 mprotect(g2h(page_addr), qemu_host_page_size,
1765 (prot & PAGE_BITS) & ~PAGE_WRITE);
1766 if (DEBUG_TB_INVALIDATE_GATE) {
1767 printf("protecting code page: 0x" TB_PAGE_ADDR_FMT "\n", page_addr);
1770 #else
1771 /* if some code is already present, then the pages are already
1772 protected. So we handle the case where only the first TB is
1773 allocated in a physical page */
1774 if (!page_already_protected) {
1775 tlb_protect_code(page_addr);
1777 #endif
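/*
 * Added commentary: the two protection strategies above serve the same goal.
 * In user mode the host page is made read-only with mprotect(), so a guest
 * write to its own code faults and the fault handler can invalidate the
 * affected TBs.  In softmmu mode tlb_protect_code() instead marks the page so
 * that writes take the slow path, where intersecting TBs get invalidated.
 */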
1780 /* add a new TB and link it to the physical page tables. phys_page2 is
1781 * (-1) to indicate that only one page contains the TB.
1783 * Called with mmap_lock held for user-mode emulation.
1785 * Returns a pointer @tb, or a pointer to an existing TB that matches @tb.
1786 * Note that in !user-mode, another thread might have already added a TB
1787 * for the same block of guest code that @tb corresponds to. In that case,
1788 * the caller should discard the original @tb, and use instead the returned TB.
1790 static TranslationBlock *
1791 tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
1792 tb_page_addr_t phys_page2)
1794 PageDesc *p;
1795 PageDesc *p2 = NULL;
1797 assert_memory_lock();
1799 if (phys_pc == -1) {
1801 * If the TB is not associated with a physical RAM page then
1802 * it must be a temporary one-insn TB, and we have nothing to do
1803 * except fill in the page_addr[] fields.
1805 assert(tb->cflags & CF_NOCACHE);
1806 tb->page_addr[0] = tb->page_addr[1] = -1;
1807 return tb;
1811 * Add the TB to the page list, first acquiring the pages' locks.
1812 * We keep the locks held until after inserting the TB in the hash table,
1813 * so that if the insertion fails we know for sure that the TBs are still
1814 * in the page descriptors.
1815 * Note that inserting into the hash table first isn't an option, since
1816 * we can only insert TBs that are fully initialized.
1818 page_lock_pair(&p, phys_pc, &p2, phys_page2, 1);
1819 tb_page_add(p, tb, 0, phys_pc & TARGET_PAGE_MASK);
1820 if (p2) {
1821 tb_page_add(p2, tb, 1, phys_page2);
1822 } else {
1823 tb->page_addr[1] = -1;
1826 if (!(tb->cflags & CF_NOCACHE)) {
1827 void *existing_tb = NULL;
1828 uint32_t h;
1830 /* add in the hash table */
1831 h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb->cflags & CF_HASH_MASK,
1832 tb->trace_vcpu_dstate);
1833 qht_insert(&tb_ctx.htable, tb, h, &existing_tb);
1835 /* remove TB from the page(s) if we couldn't insert it */
1836 if (unlikely(existing_tb)) {
1837 tb_page_remove(p, tb);
1838 invalidate_page_bitmap(p);
1839 if (p2) {
1840 tb_page_remove(p2, tb);
1841 invalidate_page_bitmap(p2);
1843 tb = existing_tb;
1847 if (p2 && p2 != p) {
1848 page_unlock(p2);
1850 page_unlock(p);
1852 #ifdef CONFIG_USER_ONLY
1853 if (DEBUG_TB_CHECK_GATE) {
1854 tb_page_check();
1856 #endif
1857 return tb;
1860 /* Called with mmap_lock held for user mode emulation. */
1861 TranslationBlock *tb_gen_code(CPUState *cpu,
1862 target_ulong pc, target_ulong cs_base,
1863 uint32_t flags, int cflags)
1865 CPUArchState *env = cpu->env_ptr;
1866 TranslationBlock *tb, *existing_tb;
1867 tb_page_addr_t phys_pc, phys_page2;
1868 target_ulong virt_page2;
1869 tcg_insn_unit *gen_code_buf;
1870 int gen_code_size, search_size, max_insns;
1871 #ifdef CONFIG_PROFILER
1872 TCGProfile *prof = &tcg_ctx->prof;
1873 int64_t ti;
1874 #endif
1876 assert_memory_lock();
1877 qemu_thread_jit_write();
1879 phys_pc = get_page_addr_code(env, pc);
1881 if (phys_pc == -1) {
1882 /* Generate a temporary TB with 1 insn in it */
1883 cflags &= ~CF_COUNT_MASK;
1884 cflags |= CF_NOCACHE | 1;
1887 cflags &= ~CF_CLUSTER_MASK;
1888 cflags |= cpu->cluster_index << CF_CLUSTER_SHIFT;
1890 max_insns = cflags & CF_COUNT_MASK;
1891 if (max_insns == 0) {
1892 max_insns = CF_COUNT_MASK;
1894 if (max_insns > TCG_MAX_INSNS) {
1895 max_insns = TCG_MAX_INSNS;
1897 if (cpu->singlestep_enabled || singlestep) {
1898 max_insns = 1;
1901 buffer_overflow:
1902 tb = tcg_tb_alloc(tcg_ctx);
1903 if (unlikely(!tb)) {
1904 /* flush must be done */
1905 tb_flush(cpu);
1906 mmap_unlock();
1907 /* Make the execution loop process the flush as soon as possible. */
1908 cpu->exception_index = EXCP_INTERRUPT;
1909 cpu_loop_exit(cpu);
1912 gen_code_buf = tcg_ctx->code_gen_ptr;
1913 tb->tc.ptr = tcg_splitwx_to_rx(gen_code_buf);
1914 tb->pc = pc;
1915 tb->cs_base = cs_base;
1916 tb->flags = flags;
1917 tb->cflags = cflags;
1918 tb->orig_tb = NULL;
1919 tb->trace_vcpu_dstate = *cpu->trace_dstate;
1920 tcg_ctx->tb_cflags = cflags;
1921 tb_overflow:
1923 #ifdef CONFIG_PROFILER
1924 /* includes aborted translations because of exceptions */
1925 qatomic_set(&prof->tb_count1, prof->tb_count1 + 1);
1926 ti = profile_getclock();
1927 #endif
1929 gen_code_size = sigsetjmp(tcg_ctx->jmp_trans, 0);
1930 if (unlikely(gen_code_size != 0)) {
1931 goto error_return;
1934 tcg_func_start(tcg_ctx);
1936 tcg_ctx->cpu = env_cpu(env);
1937 gen_intermediate_code(cpu, tb, max_insns);
1938 tcg_ctx->cpu = NULL;
1939 max_insns = tb->icount;
1941 trace_translate_block(tb, tb->pc, tb->tc.ptr);
1943 /* generate machine code */
1944 tb->jmp_reset_offset[0] = TB_JMP_RESET_OFFSET_INVALID;
1945 tb->jmp_reset_offset[1] = TB_JMP_RESET_OFFSET_INVALID;
1946 tcg_ctx->tb_jmp_reset_offset = tb->jmp_reset_offset;
1947 if (TCG_TARGET_HAS_direct_jump) {
1948 tcg_ctx->tb_jmp_insn_offset = tb->jmp_target_arg;
1949 tcg_ctx->tb_jmp_target_addr = NULL;
1950 } else {
1951 tcg_ctx->tb_jmp_insn_offset = NULL;
1952 tcg_ctx->tb_jmp_target_addr = tb->jmp_target_arg;
1955 #ifdef CONFIG_PROFILER
1956 qatomic_set(&prof->tb_count, prof->tb_count + 1);
1957 qatomic_set(&prof->interm_time,
1958 prof->interm_time + profile_getclock() - ti);
1959 ti = profile_getclock();
1960 #endif
1962 gen_code_size = tcg_gen_code(tcg_ctx, tb);
1963 if (unlikely(gen_code_size < 0)) {
1964 error_return:
1965 switch (gen_code_size) {
1966 case -1:
1968 * Overflow of code_gen_buffer, or the current slice of it.
1970 * TODO: We don't need to re-do gen_intermediate_code, nor
1971 * should we re-do the tcg optimization currently hidden
1972 * inside tcg_gen_code. All that should be required is to
1973 * flush the TBs, allocate a new TB, re-initialize it per
1974 * above, and re-do the actual code generation.
1976 qemu_log_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT,
1977 "Restarting code generation for "
1978 "code_gen_buffer overflow\n");
1979 goto buffer_overflow;
1981 case -2:
1983 * The code generated for the TranslationBlock is too large.
1984 * The maximum size allowed by the unwind info is 64k.
1985 * There may be stricter constraints from relocations
1986 * in the tcg backend.
1988 * Try again with half as many insns as we attempted this time.
1989 * If a single insn overflows, there's a bug somewhere...
1991 assert(max_insns > 1);
1992 max_insns /= 2;
1993 qemu_log_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT,
1994 "Restarting code generation with "
1995 "smaller translation block (max %d insns)\n",
1996 max_insns);
1997 goto tb_overflow;
1999 default:
2000 g_assert_not_reached();
2003 search_size = encode_search(tb, (void *)gen_code_buf + gen_code_size);
2004 if (unlikely(search_size < 0)) {
2005 goto buffer_overflow;
2007 tb->tc.size = gen_code_size;
2009 #ifdef CONFIG_PROFILER
2010 qatomic_set(&prof->code_time, prof->code_time + profile_getclock() - ti);
2011 qatomic_set(&prof->code_in_len, prof->code_in_len + tb->size);
2012 qatomic_set(&prof->code_out_len, prof->code_out_len + gen_code_size);
2013 qatomic_set(&prof->search_out_len, prof->search_out_len + search_size);
2014 #endif
2016 #ifdef DEBUG_DISAS
2017 if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM) &&
2018 qemu_log_in_addr_range(tb->pc)) {
2019 FILE *logfile = qemu_log_lock();
2020 int code_size, data_size;
2021 const tcg_target_ulong *rx_data_gen_ptr;
2022 size_t chunk_start;
2023 int insn = 0;
2025 if (tcg_ctx->data_gen_ptr) {
2026 rx_data_gen_ptr = tcg_splitwx_to_rx(tcg_ctx->data_gen_ptr);
2027 code_size = (const void *)rx_data_gen_ptr - tb->tc.ptr;
2028 data_size = gen_code_size - code_size;
2029 } else {
2030 rx_data_gen_ptr = 0;
2031 code_size = gen_code_size;
2032 data_size = 0;
2035 /* Dump header and the first instruction */
2036 qemu_log("OUT: [size=%d]\n", gen_code_size);
2037 qemu_log(" -- guest addr 0x" TARGET_FMT_lx " + tb prologue\n",
2038 tcg_ctx->gen_insn_data[insn][0]);
2039 chunk_start = tcg_ctx->gen_insn_end_off[insn];
2040 log_disas(tb->tc.ptr, chunk_start);
2043 * Dump each instruction chunk, wrapping up empty chunks into
2044 * the next instruction. The whole array is offset so the
2045 * first entry is the beginning of the 2nd instruction.
2047 while (insn < tb->icount) {
2048 size_t chunk_end = tcg_ctx->gen_insn_end_off[insn];
2049 if (chunk_end > chunk_start) {
2050 qemu_log(" -- guest addr 0x" TARGET_FMT_lx "\n",
2051 tcg_ctx->gen_insn_data[insn][0]);
2052 log_disas(tb->tc.ptr + chunk_start, chunk_end - chunk_start);
2053 chunk_start = chunk_end;
2055 insn++;
2058 if (chunk_start < code_size) {
2059 qemu_log(" -- tb slow paths + alignment\n");
2060 log_disas(tb->tc.ptr + chunk_start, code_size - chunk_start);
2063 /* Finally dump any data we may have after the block */
2064 if (data_size) {
2065 int i;
2066 qemu_log(" data: [size=%d]\n", data_size);
2067 for (i = 0; i < data_size / sizeof(tcg_target_ulong); i++) {
2068 qemu_log("0x%08" PRIxPTR ": .quad 0x%" TCG_PRIlx "\n",
2069 (uintptr_t)&rx_data_gen_ptr[i], rx_data_gen_ptr[i]);
2072 qemu_log("\n");
2073 qemu_log_flush();
2074 qemu_log_unlock(logfile);
2076 #endif
2078 qatomic_set(&tcg_ctx->code_gen_ptr, (void *)
2079 ROUND_UP((uintptr_t)gen_code_buf + gen_code_size + search_size,
2080 CODE_GEN_ALIGN));
2082 /* init jump list */
2083 qemu_spin_init(&tb->jmp_lock);
2084 tb->jmp_list_head = (uintptr_t)NULL;
2085 tb->jmp_list_next[0] = (uintptr_t)NULL;
2086 tb->jmp_list_next[1] = (uintptr_t)NULL;
2087 tb->jmp_dest[0] = (uintptr_t)NULL;
2088 tb->jmp_dest[1] = (uintptr_t)NULL;
2090 /* init original jump addresses which have been set during tcg_gen_code() */
2091 if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
2092 tb_reset_jump(tb, 0);
2094 if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
2095 tb_reset_jump(tb, 1);
2098 /* check next page if needed */
2099 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
2100 phys_page2 = -1;
2101 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
2102 phys_page2 = get_page_addr_code(env, virt_page2);
2105 * No explicit memory barrier is required -- tb_link_page() makes the
2106 * TB visible in a consistent state.
2108 existing_tb = tb_link_page(tb, phys_pc, phys_page2);
2109 /* if the TB already exists, discard what we just translated */
2110 if (unlikely(existing_tb != tb)) {
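/*
 * Another thread linked an identical TB first: roll code_gen_ptr back
 * over the generated code and the TB struct that was placed (rounded up
 * to the icache line size) just before it, then drop our copy.
 */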
2111 uintptr_t orig_aligned = (uintptr_t)gen_code_buf;
2113 orig_aligned -= ROUND_UP(sizeof(*tb), qemu_icache_linesize);
2114 qatomic_set(&tcg_ctx->code_gen_ptr, (void *)orig_aligned);
2115 tb_destroy(tb);
2116 return existing_tb;
2117 }
2118 tcg_tb_insert(tb);
2119 return tb;
2120 }
2122 /*
2123 * @p must be non-NULL.
2124 * user-mode: call with mmap_lock held.
2125 * !user-mode: call with all @pages locked.
2126 */
2127 static void
2128 tb_invalidate_phys_page_range__locked(struct page_collection *pages,
2129 PageDesc *p, tb_page_addr_t start,
2130 tb_page_addr_t end,
2131 uintptr_t retaddr)
2132 {
2133 TranslationBlock *tb;
2134 tb_page_addr_t tb_start, tb_end;
2135 int n;
2136 #ifdef TARGET_HAS_PRECISE_SMC
2137 CPUState *cpu = current_cpu;
2138 CPUArchState *env = NULL;
2139 bool current_tb_not_found = retaddr != 0;
2140 bool current_tb_modified = false;
2141 TranslationBlock *current_tb = NULL;
2142 target_ulong current_pc = 0;
2143 target_ulong current_cs_base = 0;
2144 uint32_t current_flags = 0;
2145 #endif /* TARGET_HAS_PRECISE_SMC */
2147 assert_page_locked(p);
2149 #if defined(TARGET_HAS_PRECISE_SMC)
2150 if (cpu != NULL) {
2151 env = cpu->env_ptr;
2152 }
2153 #endif
2155 /* we remove all the TBs in the range [start, end[ */
2156 /* XXX: see if in some cases it could be faster to invalidate all
2157 the code */
2158 PAGE_FOR_EACH_TB(p, tb, n) {
2159 assert_page_locked(p);
2160 /* NOTE: this is subtle as a TB may span two physical pages */
2161 if (n == 0) {
2162 /* NOTE: tb_end may be after the end of the page, but
2163 it is not a problem */
2164 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
2165 tb_end = tb_start + tb->size;
2166 } else {
2167 tb_start = tb->page_addr[1];
2168 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
2169 }
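/* Invalidate this TB only if [tb_start, tb_end[ overlaps [start, end[. */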
2170 if (!(tb_end <= start || tb_start >= end)) {
2171 #ifdef TARGET_HAS_PRECISE_SMC
2172 if (current_tb_not_found) {
2173 current_tb_not_found = false;
2174 /* now we have a real cpu fault */
2175 current_tb = tcg_tb_lookup(retaddr);
2177 if (current_tb == tb &&
2178 (tb_cflags(current_tb) & CF_COUNT_MASK) != 1) {
2179 /*
2180 * If we are modifying the current TB, we must stop
2181 * its execution. We could be more precise by checking
2182 * that the modification is after the current PC, but it
2183 * would require a specialized function to partially
2184 * restore the CPU state.
2185 */
2186 current_tb_modified = true;
2187 cpu_restore_state_from_tb(cpu, current_tb, retaddr, true);
2188 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
2189 &current_flags);
2190 }
2191 #endif /* TARGET_HAS_PRECISE_SMC */
2192 tb_phys_invalidate__locked(tb);
2193 }
2194 }
2195 #if !defined(CONFIG_USER_ONLY)
2196 /* if no code remaining, no need to continue to use slow writes */
2197 if (!p->first_tb) {
2198 invalidate_page_bitmap(p);
2199 tlb_unprotect_code(start);
2200 }
2201 #endif
2202 #ifdef TARGET_HAS_PRECISE_SMC
2203 if (current_tb_modified) {
2204 page_collection_unlock(pages);
2205 /* Force execution of one insn next time. */
2206 cpu->cflags_next_tb = 1 | curr_cflags();
2207 mmap_unlock();
2208 cpu_loop_exit_noexc(cpu);
2209 }
2210 #endif
2211 }
2213 /*
2214 * Invalidate all TBs which intersect with the target physical address range
2215 * [start;end[. NOTE: start and end must refer to the *same* physical page.
2216 * If called because of a real cpu write access, the virtual CPU will exit
2217 * the current TB if code is modified inside this TB.
2219 *
2220 * Called with mmap_lock held for user-mode emulation.
2221 */
2222 void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end)
2223 {
2224 struct page_collection *pages;
2225 PageDesc *p;
2227 assert_memory_lock();
2229 p = page_find(start >> TARGET_PAGE_BITS);
2230 if (p == NULL) {
2231 return;
2232 }
2233 pages = page_collection_lock(start, end);
2234 tb_invalidate_phys_page_range__locked(pages, p, start, end, 0);
2235 page_collection_unlock(pages);
2236 }
2238 /*
2239 * Invalidate all TBs which intersect with the target physical address range
2240 * [start;end[. NOTE: start and end may refer to *different* physical pages.
2241 * If called because of a real cpu write access, the virtual CPU will exit
2242 * the current TB if code is modified inside this TB.
2244 *
2245 * Called with mmap_lock held for user-mode emulation.
2246 */
2247 #ifdef CONFIG_SOFTMMU
2248 void tb_invalidate_phys_range(ram_addr_t start, ram_addr_t end)
2249 #else
2250 void tb_invalidate_phys_range(target_ulong start, target_ulong end)
2251 #endif
2253 struct page_collection *pages;
2254 tb_page_addr_t next;
2256 assert_memory_lock();
2258 pages = page_collection_lock(start, end);
2259 for (next = (start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
2260 start < end;
2261 start = next, next += TARGET_PAGE_SIZE) {
2262 PageDesc *pd = page_find(start >> TARGET_PAGE_BITS);
2263 tb_page_addr_t bound = MIN(next, end);
2265 if (pd == NULL) {
2266 continue;
2268 tb_invalidate_phys_page_range__locked(pages, pd, start, bound, 0);
2270 page_collection_unlock(pages);
2273 #ifdef CONFIG_SOFTMMU
2274 /* len must be <= 8 and start must be a multiple of len.
2275 * Called via softmmu_template.h when code areas are written to with
2276 * iothread mutex not held.
2277 *
2278 * Call with all @pages in the range [@start, @start + len[ locked.
2279 */
2280 void tb_invalidate_phys_page_fast(struct page_collection *pages,
2281 tb_page_addr_t start, int len,
2282 uintptr_t retaddr)
2284 PageDesc *p;
2286 assert_memory_lock();
2288 p = page_find(start >> TARGET_PAGE_BITS);
2289 if (!p) {
2290 return;
2293 assert_page_locked(p);
2294 if (!p->code_bitmap &&
2295 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD) {
2296 build_page_bitmap(p);
2298 if (p->code_bitmap) {
2299 unsigned int nr;
2300 unsigned long b;
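/*
 * Shift the bitmap word so that bit 0 corresponds to 'start'; if any of
 * the low 'len' bits is set, translated code overlaps the written bytes.
 */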
2302 nr = start & ~TARGET_PAGE_MASK;
2303 b = p->code_bitmap[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG - 1));
2304 if (b & ((1 << len) - 1)) {
2305 goto do_invalidate;
2306 }
2307 } else {
2308 do_invalidate:
2309 tb_invalidate_phys_page_range__locked(pages, p, start, start + len,
2310 retaddr);
2311 }
2312 }
2313 #else
2314 /* Called with mmap_lock held. If pc is not 0 then it indicates the
2315 * host PC of the faulting store instruction that caused this invalidate.
2316 * Returns true if the caller needs to abort execution of the current
2317 * TB (because it was modified by this store and the guest CPU has
2318 * precise-SMC semantics).
2319 */
2320 static bool tb_invalidate_phys_page(tb_page_addr_t addr, uintptr_t pc)
2322 TranslationBlock *tb;
2323 PageDesc *p;
2324 int n;
2325 #ifdef TARGET_HAS_PRECISE_SMC
2326 TranslationBlock *current_tb = NULL;
2327 CPUState *cpu = current_cpu;
2328 CPUArchState *env = NULL;
2329 int current_tb_modified = 0;
2330 target_ulong current_pc = 0;
2331 target_ulong current_cs_base = 0;
2332 uint32_t current_flags = 0;
2333 #endif
2335 assert_memory_lock();
2337 addr &= TARGET_PAGE_MASK;
2338 p = page_find(addr >> TARGET_PAGE_BITS);
2339 if (!p) {
2340 return false;
2343 #ifdef TARGET_HAS_PRECISE_SMC
2344 if (p->first_tb && pc != 0) {
2345 current_tb = tcg_tb_lookup(pc);
2347 if (cpu != NULL) {
2348 env = cpu->env_ptr;
2350 #endif
2351 assert_page_locked(p);
2352 PAGE_FOR_EACH_TB(p, tb, n) {
2353 #ifdef TARGET_HAS_PRECISE_SMC
2354 if (current_tb == tb &&
2355 (tb_cflags(current_tb) & CF_COUNT_MASK) != 1) {
2356 /* If we are modifying the current TB, we must stop
2357 its execution. We could be more precise by checking
2358 that the modification is after the current PC, but it
2359 would require a specialized function to partially
2360 restore the CPU state */
2362 current_tb_modified = 1;
2363 cpu_restore_state_from_tb(cpu, current_tb, pc, true);
2364 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
2365 &current_flags);
2367 #endif /* TARGET_HAS_PRECISE_SMC */
2368 tb_phys_invalidate(tb, addr);
2370 p->first_tb = (uintptr_t)NULL;
2371 #ifdef TARGET_HAS_PRECISE_SMC
2372 if (current_tb_modified) {
2373 /* Force execution of one insn next time. */
2374 cpu->cflags_next_tb = 1 | curr_cflags();
2375 return true;
2377 #endif
2379 return false;
2381 #endif
2383 /* user-mode: call with mmap_lock held */
2384 void tb_check_watchpoint(CPUState *cpu, uintptr_t retaddr)
2386 TranslationBlock *tb;
2388 assert_memory_lock();
2390 tb = tcg_tb_lookup(retaddr);
2391 if (tb) {
2392 /* We can use retranslation to find the PC. */
2393 cpu_restore_state_from_tb(cpu, tb, retaddr, true);
2394 tb_phys_invalidate(tb, -1);
2395 } else {
2396 /* The exception probably happened in a helper. The CPU state should
2397 have been saved before calling it. Fetch the PC from there. */
2398 CPUArchState *env = cpu->env_ptr;
2399 target_ulong pc, cs_base;
2400 tb_page_addr_t addr;
2401 uint32_t flags;
2403 cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
2404 addr = get_page_addr_code(env, pc);
2405 if (addr != -1) {
2406 tb_invalidate_phys_range(addr, addr + 1);
2411 #ifndef CONFIG_USER_ONLY
2412 /* in deterministic execution mode, instructions doing device I/Os
2413 * must be at the end of the TB.
2414 *
2415 * Called by softmmu_template.h, with iothread mutex not held.
2416 */
2417 void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
2419 #if defined(TARGET_MIPS) || defined(TARGET_SH4)
2420 CPUArchState *env = cpu->env_ptr;
2421 #endif
2422 TranslationBlock *tb;
2423 uint32_t n;
2425 tb = tcg_tb_lookup(retaddr);
2426 if (!tb) {
2427 cpu_abort(cpu, "cpu_io_recompile: could not find TB for pc=%p",
2428 (void *)retaddr);
2430 cpu_restore_state_from_tb(cpu, tb, retaddr, true);
2432 /* On MIPS and SH, delay slot instructions can only be restarted if
2433 they were already the first instruction in the TB. If this is not
2434 the first instruction in a TB then re-execute the preceding
2435 branch. */
2436 n = 1;
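/*
 * n is the number of guest insns the replacement TB must contain: 1 for
 * the I/O insn alone, or 2 when a branch and its delay slot have to be
 * re-executed together (MIPS/SH4 cases below).
 */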
2437 #if defined(TARGET_MIPS)
2438 if ((env->hflags & MIPS_HFLAG_BMASK) != 0
2439 && env->active_tc.PC != tb->pc) {
2440 env->active_tc.PC -= (env->hflags & MIPS_HFLAG_B16 ? 2 : 4);
2441 cpu_neg(cpu)->icount_decr.u16.low++;
2442 env->hflags &= ~MIPS_HFLAG_BMASK;
2443 n = 2;
2445 #elif defined(TARGET_SH4)
2446 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
2447 && env->pc != tb->pc) {
2448 env->pc -= 2;
2449 cpu_neg(cpu)->icount_decr.u16.low++;
2450 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
2451 n = 2;
2453 #endif
2455 /* Generate a new TB executing the I/O insn. */
2456 cpu->cflags_next_tb = curr_cflags() | CF_LAST_IO | n;
2458 if (tb_cflags(tb) & CF_NOCACHE) {
2459 if (tb->orig_tb) {
2460 /* Invalidate original TB if this TB was generated in
2461 * cpu_exec_nocache() */
2462 tb_phys_invalidate(tb->orig_tb, -1);
2464 tcg_tb_remove(tb);
2465 tb_destroy(tb);
2468 qemu_log_mask_and_addr(CPU_LOG_EXEC, tb->pc,
2469 "cpu_io_recompile: rewound execution of TB to "
2470 TARGET_FMT_lx "\n", tb->pc);
2472 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
2473 * the first in the TB) then we end up generating a whole new TB and
2474 * repeating the fault, which is horribly inefficient.
2475 * Better would be to execute just this insn uncached, or generate a
2476 * second new TB.
2477 */
2478 cpu_loop_exit_noexc(cpu);
2481 static void print_qht_statistics(struct qht_stats hst)
2483 uint32_t hgram_opts;
2484 size_t hgram_bins;
2485 char *hgram;
2487 if (!hst.head_buckets) {
2488 return;
2490 qemu_printf("TB hash buckets %zu/%zu (%0.2f%% head buckets used)\n",
2491 hst.used_head_buckets, hst.head_buckets,
2492 (double)hst.used_head_buckets / hst.head_buckets * 100);
2494 hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS;
2495 hgram_opts |= QDIST_PR_100X | QDIST_PR_PERCENT;
2496 if (qdist_xmax(&hst.occupancy) - qdist_xmin(&hst.occupancy) == 1) {
2497 hgram_opts |= QDIST_PR_NODECIMAL;
2499 hgram = qdist_pr(&hst.occupancy, 10, hgram_opts);
2500 qemu_printf("TB hash occupancy %0.2f%% avg chain occ. Histogram: %s\n",
2501 qdist_avg(&hst.occupancy) * 100, hgram);
2502 g_free(hgram);
2504 hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS;
2505 hgram_bins = qdist_xmax(&hst.chain) - qdist_xmin(&hst.chain);
2506 if (hgram_bins > 10) {
2507 hgram_bins = 10;
2508 } else {
2509 hgram_bins = 0;
2510 hgram_opts |= QDIST_PR_NODECIMAL | QDIST_PR_NOBINRANGE;
2512 hgram = qdist_pr(&hst.chain, hgram_bins, hgram_opts);
2513 qemu_printf("TB hash avg chain %0.3f buckets. Histogram: %s\n",
2514 qdist_avg(&hst.chain), hgram);
2515 g_free(hgram);
2518 struct tb_tree_stats {
2519 size_t nb_tbs;
2520 size_t host_size;
2521 size_t target_size;
2522 size_t max_target_size;
2523 size_t direct_jmp_count;
2524 size_t direct_jmp2_count;
2525 size_t cross_page;
2528 static gboolean tb_tree_stats_iter(gpointer key, gpointer value, gpointer data)
2530 const TranslationBlock *tb = value;
2531 struct tb_tree_stats *tst = data;
2533 tst->nb_tbs++;
2534 tst->host_size += tb->tc.size;
2535 tst->target_size += tb->size;
2536 if (tb->size > tst->max_target_size) {
2537 tst->max_target_size = tb->size;
2539 if (tb->page_addr[1] != -1) {
2540 tst->cross_page++;
2542 if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
2543 tst->direct_jmp_count++;
2544 if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
2545 tst->direct_jmp2_count++;
2548 return false;
2551 void dump_exec_info(void)
2553 struct tb_tree_stats tst = {};
2554 struct qht_stats hst;
2555 size_t nb_tbs, flush_full, flush_part, flush_elide;
2557 tcg_tb_foreach(tb_tree_stats_iter, &tst);
2558 nb_tbs = tst.nb_tbs;
2559 /* XXX: avoid using doubles ? */
2560 qemu_printf("Translation buffer state:\n");
2561 /*
2562 * Report total code size including the padding and TB structs;
2563 * otherwise users might think "-accel tcg,tb-size" is not honoured.
2564 * For avg host size we use the precise numbers from tb_tree_stats though.
2565 */
2566 qemu_printf("gen code size %zu/%zu\n",
2567 tcg_code_size(), tcg_code_capacity());
2568 qemu_printf("TB count %zu\n", nb_tbs);
2569 qemu_printf("TB avg target size %zu max=%zu bytes\n",
2570 nb_tbs ? tst.target_size / nb_tbs : 0,
2571 tst.max_target_size);
2572 qemu_printf("TB avg host size %zu bytes (expansion ratio: %0.1f)\n",
2573 nb_tbs ? tst.host_size / nb_tbs : 0,
2574 tst.target_size ? (double)tst.host_size / tst.target_size : 0);
2575 qemu_printf("cross page TB count %zu (%zu%%)\n", tst.cross_page,
2576 nb_tbs ? (tst.cross_page * 100) / nb_tbs : 0);
2577 qemu_printf("direct jump count %zu (%zu%%) (2 jumps=%zu %zu%%)\n",
2578 tst.direct_jmp_count,
2579 nb_tbs ? (tst.direct_jmp_count * 100) / nb_tbs : 0,
2580 tst.direct_jmp2_count,
2581 nb_tbs ? (tst.direct_jmp2_count * 100) / nb_tbs : 0);
2583 qht_statistics_init(&tb_ctx.htable, &hst);
2584 print_qht_statistics(hst);
2585 qht_statistics_destroy(&hst);
2587 qemu_printf("\nStatistics:\n");
2588 qemu_printf("TB flush count %u\n",
2589 qatomic_read(&tb_ctx.tb_flush_count));
2590 qemu_printf("TB invalidate count %zu\n",
2591 tcg_tb_phys_invalidate_count());
2593 tlb_flush_counts(&flush_full, &flush_part, &flush_elide);
2594 qemu_printf("TLB full flushes %zu\n", flush_full);
2595 qemu_printf("TLB partial flushes %zu\n", flush_part);
2596 qemu_printf("TLB elided flushes %zu\n", flush_elide);
2597 tcg_dump_info();
2600 void dump_opcount_info(void)
2602 tcg_dump_op_count();
2605 #else /* CONFIG_USER_ONLY */
2607 void cpu_interrupt(CPUState *cpu, int mask)
2609 g_assert(qemu_mutex_iothread_locked());
2610 cpu->interrupt_request |= mask;
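/*
 * Setting the high half of icount_decr makes the generated code exit to
 * the main loop, where the pending interrupt_request is serviced.
 */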
2611 qatomic_set(&cpu_neg(cpu)->icount_decr.u16.high, -1);
2612 }
2614 /*
2615 * Walks guest process memory "regions" one by one
2616 * and calls callback function 'fn' for each region.
2617 */
2618 struct walk_memory_regions_data {
2619 walk_memory_regions_fn fn;
2620 void *priv;
2621 target_ulong start;
2622 int prot;
2623 };
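/*
 * Flush the region accumulated so far to the callback whenever the
 * protection bits change; then start a new region, or none if new_prot
 * is 0.
 */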
2625 static int walk_memory_regions_end(struct walk_memory_regions_data *data,
2626 target_ulong end, int new_prot)
2628 if (data->start != -1u) {
2629 int rc = data->fn(data->priv, data->start, end, data->prot);
2630 if (rc != 0) {
2631 return rc;
2635 data->start = (new_prot ? end : -1u);
2636 data->prot = new_prot;
2638 return 0;
2641 static int walk_memory_regions_1(struct walk_memory_regions_data *data,
2642 target_ulong base, int level, void **lp)
2644 target_ulong pa;
2645 int i, rc;
2647 if (*lp == NULL) {
2648 return walk_memory_regions_end(data, base, 0);
2649 }
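/*
 * Level 0 entries are PageDesc arrays; higher levels are tables of
 * pointers that are walked recursively.
 */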
2651 if (level == 0) {
2652 PageDesc *pd = *lp;
2654 for (i = 0; i < V_L2_SIZE; ++i) {
2655 int prot = pd[i].flags;
2657 pa = base | (i << TARGET_PAGE_BITS);
2658 if (prot != data->prot) {
2659 rc = walk_memory_regions_end(data, pa, prot);
2660 if (rc != 0) {
2661 return rc;
2665 } else {
2666 void **pp = *lp;
2668 for (i = 0; i < V_L2_SIZE; ++i) {
2669 pa = base | ((target_ulong)i <<
2670 (TARGET_PAGE_BITS + V_L2_BITS * level));
2671 rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
2672 if (rc != 0) {
2673 return rc;
2678 return 0;
2681 int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
2683 struct walk_memory_regions_data data;
2684 uintptr_t i, l1_sz = v_l1_size;
2686 data.fn = fn;
2687 data.priv = priv;
2688 data.start = -1u;
2689 data.prot = 0;
2691 for (i = 0; i < l1_sz; i++) {
2692 target_ulong base = i << (v_l1_shift + TARGET_PAGE_BITS);
2693 int rc = walk_memory_regions_1(&data, base, v_l2_levels, l1_map + i);
2694 if (rc != 0) {
2695 return rc;
2699 return walk_memory_regions_end(&data, 0, 0);
2702 static int dump_region(void *priv, target_ulong start,
2703 target_ulong end, unsigned long prot)
2705 FILE *f = (FILE *)priv;
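/* One line per region: start, end, size, then r/w/x protection flags. */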
2707 (void) fprintf(f, TARGET_FMT_lx"-"TARGET_FMT_lx
2708 " "TARGET_FMT_lx" %c%c%c\n",
2709 start, end, end - start,
2710 ((prot & PAGE_READ) ? 'r' : '-'),
2711 ((prot & PAGE_WRITE) ? 'w' : '-'),
2712 ((prot & PAGE_EXEC) ? 'x' : '-'));
2714 return 0;
2717 /* dump memory mappings */
2718 void page_dump(FILE *f)
2720 const int length = sizeof(target_ulong) * 2;
2721 (void) fprintf(f, "%-*s %-*s %-*s %s\n",
2722 length, "start", length, "end", length, "size", "prot");
2723 walk_memory_regions(f, dump_region);
2726 int page_get_flags(target_ulong address)
2728 PageDesc *p;
2730 p = page_find(address >> TARGET_PAGE_BITS);
2731 if (!p) {
2732 return 0;
2734 return p->flags;
2737 /* Modify the flags of a page and invalidate the code if necessary.
2738 The flag PAGE_WRITE_ORG is positioned automatically depending
2739 on PAGE_WRITE. The mmap_lock should already be held. */
2740 void page_set_flags(target_ulong start, target_ulong end, int flags)
2742 target_ulong addr, len;
2744 /* This function should never be called with addresses outside the
2745 guest address space. If this assert fires, it probably indicates
2746 a missing call to h2g_valid. */
2747 assert(end - 1 <= GUEST_ADDR_MAX);
2748 assert(start < end);
2749 assert_memory_lock();
2751 start = start & TARGET_PAGE_MASK;
2752 end = TARGET_PAGE_ALIGN(end);
2754 if (flags & PAGE_WRITE) {
2755 flags |= PAGE_WRITE_ORG;
2758 for (addr = start, len = end - start;
2759 len != 0;
2760 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2761 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2763 /* If the write protection bit is set, then we invalidate
2764 the code inside. */
2765 if (!(p->flags & PAGE_WRITE) &&
2766 (flags & PAGE_WRITE) &&
2767 p->first_tb) {
2768 tb_invalidate_phys_page(addr, 0);
2770 p->flags = flags;
2774 int page_check_range(target_ulong start, target_ulong len, int flags)
2776 PageDesc *p;
2777 target_ulong end;
2778 target_ulong addr;
2780 /* This function should never be called with addresses outside the
2781 guest address space. If this assert fires, it probably indicates
2782 a missing call to h2g_valid. */
2783 if (TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS) {
2784 assert(start < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
2787 if (len == 0) {
2788 return 0;
2790 if (start + len - 1 < start) {
2791 /* We've wrapped around. */
2792 return -1;
2793 }
2795 /* must do this before we lose bits in the next step */
2796 end = TARGET_PAGE_ALIGN(start + len);
2797 start = start & TARGET_PAGE_MASK;
2799 for (addr = start, len = end - start;
2800 len != 0;
2801 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2802 p = page_find(addr >> TARGET_PAGE_BITS);
2803 if (!p) {
2804 return -1;
2806 if (!(p->flags & PAGE_VALID)) {
2807 return -1;
2810 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) {
2811 return -1;
2813 if (flags & PAGE_WRITE) {
2814 if (!(p->flags & PAGE_WRITE_ORG)) {
2815 return -1;
2817 /* unprotect the page if it was put read-only because it
2818 contains translated code */
2819 if (!(p->flags & PAGE_WRITE)) {
2820 if (!page_unprotect(addr, 0)) {
2821 return -1;
2826 return 0;
2829 /* called from signal handler: invalidate the code and unprotect the
2830 * page. Return 0 if the fault was not handled, 1 if it was handled,
2831 * and 2 if it was handled but the caller must cause the TB to be
2832 * immediately exited. (We can only return 2 if the 'pc' argument is
2833 * non-zero.)
2834 */
2835 int page_unprotect(target_ulong address, uintptr_t pc)
2837 unsigned int prot;
2838 bool current_tb_invalidated;
2839 PageDesc *p;
2840 target_ulong host_start, host_end, addr;
2842 /* Technically this isn't safe inside a signal handler. However we
2843 know this only ever happens in a synchronous SEGV handler, so in
2844 practice it seems to be ok. */
2845 mmap_lock();
2847 p = page_find(address >> TARGET_PAGE_BITS);
2848 if (!p) {
2849 mmap_unlock();
2850 return 0;
2853 /* if the page was really writable, then we change its
2854 protection back to writable */
2855 if (p->flags & PAGE_WRITE_ORG) {
2856 current_tb_invalidated = false;
2857 if (p->flags & PAGE_WRITE) {
2858 /* If the page is actually marked WRITE then assume this is because
2859 * this thread raced with another one which got here first and
2860 * set the page to PAGE_WRITE and did the TB invalidate for us.
2861 */
2862 #ifdef TARGET_HAS_PRECISE_SMC
2863 TranslationBlock *current_tb = tcg_tb_lookup(pc);
2864 if (current_tb) {
2865 current_tb_invalidated = tb_cflags(current_tb) & CF_INVALID;
2867 #endif
2868 } else {
2869 host_start = address & qemu_host_page_mask;
2870 host_end = host_start + qemu_host_page_size;
2872 prot = 0;
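/*
 * Restore PAGE_WRITE on every guest page that shares this host page,
 * collect the union of their flags for mprotect(), and invalidate any
 * translated code they contain.
 */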
2873 for (addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
2874 p = page_find(addr >> TARGET_PAGE_BITS);
2875 p->flags |= PAGE_WRITE;
2876 prot |= p->flags;
2878 /* and since the content will be modified, we must invalidate
2879 the corresponding translated code. */
2880 current_tb_invalidated |= tb_invalidate_phys_page(addr, pc);
2881 #ifdef CONFIG_USER_ONLY
2882 if (DEBUG_TB_CHECK_GATE) {
2883 tb_invalidate_check(addr);
2885 #endif
2887 mprotect((void *)g2h(host_start), qemu_host_page_size,
2888 prot & PAGE_BITS);
2890 mmap_unlock();
2891 /* If current TB was invalidated return to main loop */
2892 return current_tb_invalidated ? 2 : 1;
2894 mmap_unlock();
2895 return 0;
2897 #endif /* CONFIG_USER_ONLY */
2899 /* This is a wrapper for common code that cannot use CONFIG_SOFTMMU */
2900 void tcg_flush_softmmu_tlb(CPUState *cs)
2902 #ifdef CONFIG_SOFTMMU
2903 tlb_flush(cs);
2904 #endif