/*
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"

#include "qemu-common.h"
#define NO_CPU_IO_DEFS
#include "trace-root.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#if defined(CONFIG_USER_ONLY)
#include "exec/exec-all.h"
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <machine/profile.h>
#endif
#endif
#else
#include "exec/address-spaces.h"
#endif

#include "exec/cputlb.h"
#include "exec/tb-hash.h"
#include "translate-all.h"
#include "qemu/bitmap.h"
#include "qemu/timer.h"
#include "qemu/main-loop.h"
/* #define DEBUG_TB_INVALIDATE */
/* #define DEBUG_TB_FLUSH */
/* make various TB consistency checks */
/* #define DEBUG_TB_CHECK */

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

/* Access to the various translation structures needs to be serialised
 * via locks for consistency.  This is automatic for SoftMMU based
 * system emulation due to its single threaded nature.  In user-mode
 * emulation access to the memory related structures is protected with
 * the mmap_lock.
 */
#ifdef CONFIG_SOFTMMU
#define assert_memory_lock() tcg_debug_assert(have_tb_lock)
#else
#define assert_memory_lock() tcg_debug_assert(have_mmap_lock())
#endif
#define SMC_BITMAP_USE_THRESHOLD 10

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self-modifying code, we count the number
       of lookups we do to a given page before switching to a bitmap */
    unsigned int code_write_count;
    unsigned long *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;
/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define V_L2_BITS 10
#define V_L2_SIZE (1 << V_L2_BITS)

uintptr_t qemu_host_page_size;
intptr_t qemu_host_page_mask;

/*
 * L1 Mapping properties
 */
static int v_l1_size;
static int v_l1_shift;
static int v_l2_levels;

/* The bottom level has pointers to PageDesc, and is indexed by
 * anything from 4 to (V_L2_BITS + 3) bits, depending on target page size.
 */
#define V_L1_MIN_BITS 4
#define V_L1_MAX_BITS (V_L2_BITS + 3)
#define V_L1_MAX_SIZE (1 << V_L1_MAX_BITS)

static void *l1_map[V_L1_MAX_SIZE];

/* code generation context */
TCGContext tcg_ctx;

/* translation block context */
__thread int have_tb_lock;
static void page_table_config_init(void)
{
    uint32_t v_l1_bits;

    assert(TARGET_PAGE_BITS);
    /* The bits remaining after N lower levels of page tables.  */
    v_l1_bits = (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % V_L2_BITS;
    if (v_l1_bits < V_L1_MIN_BITS) {
        v_l1_bits += V_L2_BITS;
    }

    v_l1_size = 1 << v_l1_bits;
    v_l1_shift = L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - v_l1_bits;
    v_l2_levels = v_l1_shift / V_L2_BITS - 1;

    assert(v_l1_bits <= V_L1_MAX_BITS);
    assert(v_l1_shift % V_L2_BITS == 0);
    assert(v_l2_levels >= 0);
}
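
/*
 * Worked example added for illustration (hypothetical parameters, not
 * part of the original file): with L1_MAP_ADDR_SPACE_BITS == 48,
 * TARGET_PAGE_BITS == 12 and V_L2_BITS == 10, the function above yields
 *
 *   v_l1_bits   = (48 - 12) % 10 = 6   (already >= V_L1_MIN_BITS)
 *   v_l1_size   = 1 << 6 = 64
 *   v_l1_shift  = 48 - 12 - 6 = 30
 *   v_l2_levels = 30 / 10 - 1 = 2
 *
 * i.e. a 64-entry L1 table, two 1024-entry intermediate pointer levels,
 * and a final 1024-entry level of PageDesc entries (6 + 10 + 10 + 10
 * bits cover the 36 page-number bits).
 */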
#define assert_tb_locked() tcg_debug_assert(have_tb_lock)
#define assert_tb_unlocked() tcg_debug_assert(!have_tb_lock)

void tb_lock(void)
{
    assert_tb_unlocked();
    qemu_mutex_lock(&tcg_ctx.tb_ctx.tb_lock);
    have_tb_lock++;
}

void tb_unlock(void)
{
    assert_tb_locked();
    have_tb_lock--;
    qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
}

void tb_lock_reset(void)
{
    if (have_tb_lock) {
        qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
        have_tb_lock = 0;
    }
}

static TranslationBlock *tb_find_pc(uintptr_t tc_ptr);

void cpu_gen_init(void)
{
    tcg_context_init(&tcg_ctx);
}
/* Encode VAL as a signed leb128 sequence at P.
   Return P incremented past the encoded value.  */
static uint8_t *encode_sleb128(uint8_t *p, target_long val)
{
    int more, byte;

    do {
        byte = val & 0x7f;
        val >>= 7;
        more = !((val == 0 && (byte & 0x40) == 0)
                 || (val == -1 && (byte & 0x40) != 0));
        if (more) {
            byte |= 0x80;
        }
        *p++ = byte;
    } while (more);

    return p;
}

/* Decode a signed leb128 sequence at *PP; increment *PP past the
   decoded value.  Return the decoded value.  */
static target_long decode_sleb128(uint8_t **pp)
{
    uint8_t *p = *pp;
    target_long val = 0;
    int byte, shift = 0;

    do {
        byte = *p++;
        val |= (target_ulong)(byte & 0x7f) << shift;
        shift += 7;
    } while (byte & 0x80);
    if (shift < TARGET_LONG_BITS && (byte & 0x40)) {
        val |= -(target_ulong)1 << shift;
    }

    *pp = p;
    return val;
}
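
/*
 * Byte-level example added for illustration (not part of the original
 * file): encode_sleb128(p, -3) stores the single byte 0x7d (bit 6 set,
 * so decode_sleb128() sign-extends and returns -3), while
 * encode_sleb128(p, 200) stores 0xc8 0x01 (0x48 with the 0x80
 * continuation bit, then 0x01), which decode_sleb128() reassembles as
 * 0x48 | (1 << 7) = 200.
 */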
/* Encode the data collected about the instructions while compiling TB.
   Place the data at BLOCK, and return the number of bytes consumed.

   The logical table consists of TARGET_INSN_START_WORDS target_ulong's,
   which come from the target's insn_start data, followed by a uintptr_t
   which comes from the host pc of the end of the code implementing the insn.

   Each line of the table is encoded as sleb128 deltas from the previous
   line.  The seed for the first line is { tb->pc, 0..., tb->tc_ptr }.
   That is, the first column is seeded with the guest pc, the last column
   with the host pc, and the middle columns with zeros.  */
static int encode_search(TranslationBlock *tb, uint8_t *block)
{
    uint8_t *highwater = tcg_ctx.code_gen_highwater;
    uint8_t *p = block;
    int i, j, n;

    tb->tc_search = block;

    for (i = 0, n = tb->icount; i < n; ++i) {
        target_ulong prev;

        for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
            if (i == 0) {
                prev = (j == 0 ? tb->pc : 0);
            } else {
                prev = tcg_ctx.gen_insn_data[i - 1][j];
            }
            p = encode_sleb128(p, tcg_ctx.gen_insn_data[i][j] - prev);
        }
        prev = (i == 0 ? 0 : tcg_ctx.gen_insn_end_off[i - 1]);
        p = encode_sleb128(p, tcg_ctx.gen_insn_end_off[i] - prev);

        /* Test for (pending) buffer overflow.  The assumption is that any
           one row beginning below the high water mark cannot overrun
           the buffer completely.  Thus we can test for overflow after
           encoding a row without having to check during encoding.  */
        if (unlikely(p > highwater)) {
            return -1;
        }
    }

    return p - block;
}
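
/*
 * Illustrative sketch of the resulting layout (hypothetical numbers, not
 * from the original source): for a two-insn TB at guest pc 0x1000 whose
 * insns start at 0x1000 and 0x1004 and whose generated code ends at host
 * offsets 0x20 and 0x48, the rows written above are the sleb128 deltas
 * {0, ..., 0x20} and {4, ..., 0x28} against the seed { tb->pc, 0..., 0 }.
 * cpu_restore_state_from_tb() below replays exactly these deltas to map
 * a host pc back to the guest instruction boundaries.
 */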
/* The cpu state corresponding to 'searched_pc' is restored.
 * Called with tb_lock held.
 */
static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
                                     uintptr_t searched_pc)
{
    target_ulong data[TARGET_INSN_START_WORDS] = { tb->pc };
    uintptr_t host_pc = (uintptr_t)tb->tc_ptr;
    CPUArchState *env = cpu->env_ptr;
    uint8_t *p = tb->tc_search;
    int i, j, num_insns = tb->icount;
#ifdef CONFIG_PROFILER
    int64_t ti = profile_getclock();
#endif

    searched_pc -= GETPC_ADJ;

    if (searched_pc < host_pc) {
        return -1;
    }

    /* Reconstruct the stored insn data while looking for the point at
       which the end of the insn exceeds the searched_pc.  */
    for (i = 0; i < num_insns; ++i) {
        for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
            data[j] += decode_sleb128(&p);
        }
        host_pc += decode_sleb128(&p);
        if (host_pc > searched_pc) {
            goto found;
        }
    }
    return -1;

 found:
    if (tb->cflags & CF_USE_ICOUNT) {
        /* Reset the cycle counter to the start of the block.  */
        cpu->icount_decr.u16.low += num_insns;
        /* Clear the IO flag.  */
        cpu->can_do_io = 0;
    }
    cpu->icount_decr.u16.low -= i;
    restore_state_to_opc(env, tb, data);

#ifdef CONFIG_PROFILER
    tcg_ctx.restore_time += profile_getclock() - ti;
    tcg_ctx.restore_count++;
#endif
    return 0;
}
bool cpu_restore_state(CPUState *cpu, uintptr_t retaddr)
{
    TranslationBlock *tb;
    bool r = false;

    /* A retaddr of zero is invalid so we really shouldn't have ended
     * up here. The target code has likely forgotten to check retaddr
     * != 0 before attempting to restore state. We return early to
     * avoid blowing up on a recursive tb_lock(). The target must have
     * previously survived a failed cpu_restore_state because
     * tb_find_pc(0) would have failed anyway. It still should be
     * fixed though.
     */
    if (!retaddr) {
        return r;
    }

    tb_lock();
    tb = tb_find_pc(retaddr);
    if (tb) {
        cpu_restore_state_from_tb(cpu, tb, retaddr);
        if (tb->cflags & CF_NOCACHE) {
            /* one-shot translation, invalidate it immediately */
            tb_phys_invalidate(tb, -1);
            tb_free(tb);
        }
        r = true;
    }
    tb_unlock();
    return r;
}
void page_size_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
    qemu_real_host_page_size = getpagesize();
    qemu_real_host_page_mask = -(intptr_t)qemu_real_host_page_size;
    if (qemu_host_page_size == 0) {
        qemu_host_page_size = qemu_real_host_page_size;
    }
    if (qemu_host_page_size < TARGET_PAGE_SIZE) {
        qemu_host_page_size = TARGET_PAGE_SIZE;
    }
    qemu_host_page_mask = -(intptr_t)qemu_host_page_size;
}
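
/*
 * Example added for illustration (hypothetical configuration, not part
 * of the original file): on a host with 4 KiB pages emulating a target
 * with 8 KiB pages, getpagesize() returns 4096, so
 * qemu_real_host_page_mask becomes -4096, while qemu_host_page_size is
 * bumped to 8192 and qemu_host_page_mask to -8192, preserving the
 * invariant qemu_host_page_size >= TARGET_PAGE_SIZE noted above.
 */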
static void page_init(void)
{
    page_size_init();
    page_table_config_init();

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}
/* Called with tb_lock held for system emulation.
 * Called with mmap_lock held for user-mode emulation.
 */
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

    if (alloc) {
        assert_memory_lock();
    }

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> v_l1_shift) & (v_l1_size - 1));

    /* Level 2..N-1.  */
    for (i = v_l2_levels; i > 0; i--) {
        void **p = atomic_rcu_read(lp);

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            p = g_new0(void *, V_L2_SIZE);
            atomic_rcu_set(lp, p);
        }

        lp = p + ((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1));
    }

    pd = atomic_rcu_read(lp);
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        pd = g_new0(PageDesc, V_L2_SIZE);
        atomic_rcu_set(lp, pd);
    }

    return pd + (index & (V_L2_SIZE - 1));
}

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}
#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc will be used.  */
/* ??? 64-bit hosts ought to have no problem mmaping data outside the
   region in which the guest needs to run.  Revisit this.  */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

/* Minimum size of the code gen buffer.  This number is randomly chosen,
   but not so small that we can't have a fair number of TB's live.  */
#define MIN_CODE_GEN_BUFFER_SIZE     (1024u * 1024)

/* Maximum size of the code gen buffer we'd like to use.  Unless otherwise
   indicated, this is constrained by the range of direct branches on the
   host cpu, as used by the TCG implementation of goto_tb.  */
#if defined(__x86_64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__sparc__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__powerpc64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__powerpc__)
# define MAX_CODE_GEN_BUFFER_SIZE  (32u * 1024 * 1024)
#elif defined(__aarch64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (128ul * 1024 * 1024)
#elif defined(__arm__)
# define MAX_CODE_GEN_BUFFER_SIZE  (16u * 1024 * 1024)
#elif defined(__s390x__)
  /* We have a +- 4GB range on the branches; leave some slop.  */
# define MAX_CODE_GEN_BUFFER_SIZE  (3ul * 1024 * 1024 * 1024)
#elif defined(__mips__)
  /* We have a 256MB branch region, but leave room to make sure the
     main executable is also within that region.  */
# define MAX_CODE_GEN_BUFFER_SIZE  (128ul * 1024 * 1024)
#else
# define MAX_CODE_GEN_BUFFER_SIZE  ((size_t)-1)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32u * 1024 * 1024)

#define DEFAULT_CODE_GEN_BUFFER_SIZE \
  (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
   ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)
static inline size_t size_code_gen_buffer(size_t tb_size)
{
    /* Size the buffer.  */
    if (tb_size == 0) {
#ifdef USE_STATIC_CODE_GEN_BUFFER
        tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* ??? Needs adjustments.  */
        /* ??? If we relax the requirement that CONFIG_USER_ONLY use the
           static buffer, we could size this on RESERVED_VA, on the text
           segment size of the executable, or continue to use the default.  */
        tb_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
        tb_size = MIN_CODE_GEN_BUFFER_SIZE;
    }
    if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
        tb_size = MAX_CODE_GEN_BUFFER_SIZE;
    }
    return tb_size;
}
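
/*
 * Example added for illustration (hypothetical numbers, not part of the
 * original file): on a system-mode build with a ram_size of 1 GiB the
 * default request becomes ram_size / 4 = 256 MiB, which lies between
 * MIN_CODE_GEN_BUFFER_SIZE (1 MiB) and, say, the x86-64
 * MAX_CODE_GEN_BUFFER_SIZE (2 GiB) and is therefore used unchanged; an
 * explicit request of 64 KiB would instead be rounded up to the 1 MiB
 * minimum.
 */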
#ifdef __mips__
/* In order to use J and JAL within the code_gen_buffer, we require
   that the buffer not cross a 256MB boundary.  */
static inline bool cross_256mb(void *addr, size_t size)
{
    return ((uintptr_t)addr ^ ((uintptr_t)addr + size)) & ~0x0ffffffful;
}

/* We weren't able to allocate a buffer without crossing that boundary,
   so make do with the larger portion of the buffer that doesn't cross.
   Returns the new base of the buffer, and adjusts code_gen_buffer_size.  */
static inline void *split_cross_256mb(void *buf1, size_t size1)
{
    void *buf2 = (void *)(((uintptr_t)buf1 + size1) & ~0x0ffffffful);
    size_t size2 = buf1 + size1 - buf2;

    size1 = buf2 - buf1;
    if (size1 < size2) {
        size1 = size2;
        buf1 = buf2;
    }

    tcg_ctx.code_gen_buffer_size = size1;
    return buf1;
}
#endif
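
/*
 * Example added for illustration (hypothetical addresses, not part of
 * the original file): a 16 MiB buffer starting at 0x0ff00000 ends at
 * 0x10f00000, so cross_256mb() sees (0x0ff00000 ^ 0x10f00000) &
 * ~0x0fffffff != 0 and reports a crossing; split_cross_256mb() then
 * keeps the larger piece, the 15 MiB from 0x10000000 upwards, and
 * shrinks code_gen_buffer_size accordingly.
 */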
#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
    __attribute__((aligned(CODE_GEN_ALIGN)));

# ifdef _WIN32
static inline void do_protect(void *addr, long size, int prot)
{
    DWORD old_protect;
    VirtualProtect(addr, size, prot, &old_protect);
}

static inline void map_exec(void *addr, long size)
{
    do_protect(addr, size, PAGE_EXECUTE_READWRITE);
}

static inline void map_none(void *addr, long size)
{
    do_protect(addr, size, PAGE_NOACCESS);
}
# else
static inline void do_protect(void *addr, long size, int prot)
{
    uintptr_t start, end;

    start = (uintptr_t)addr;
    start &= qemu_real_host_page_mask;

    end = (uintptr_t)addr + size;
    end = ROUND_UP(end, qemu_real_host_page_size);

    mprotect((void *)start, end - start, prot);
}

static inline void map_exec(void *addr, long size)
{
    do_protect(addr, size, PROT_READ | PROT_WRITE | PROT_EXEC);
}

static inline void map_none(void *addr, long size)
{
    do_protect(addr, size, PROT_NONE);
}
# endif
static inline void *alloc_code_gen_buffer(void)
{
    void *buf = static_code_gen_buffer;
    size_t full_size, size;

    /* The size of the buffer, rounded down to end on a page boundary.  */
    full_size = (((uintptr_t)buf + sizeof(static_code_gen_buffer))
                 & qemu_real_host_page_mask) - (uintptr_t)buf;

    /* Reserve a guard page.  */
    size = full_size - qemu_real_host_page_size;

    /* Honor a command-line option limiting the size of the buffer.  */
    if (size > tcg_ctx.code_gen_buffer_size) {
        size = (((uintptr_t)buf + tcg_ctx.code_gen_buffer_size)
                & qemu_real_host_page_mask) - (uintptr_t)buf;
    }
    tcg_ctx.code_gen_buffer_size = size;

#ifdef __mips__
    if (cross_256mb(buf, size)) {
        buf = split_cross_256mb(buf, size);
        size = tcg_ctx.code_gen_buffer_size;
    }
#endif

    map_exec(buf, size);
    map_none(buf + size, qemu_real_host_page_size);
    qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);

    return buf;
}
#elif defined(_WIN32)
static inline void *alloc_code_gen_buffer(void)
{
    size_t size = tcg_ctx.code_gen_buffer_size;
    void *buf1, *buf2;

    /* Perform the allocation in two steps, so that the guard page
       is reserved but uncommitted.  */
    buf1 = VirtualAlloc(NULL, size + qemu_real_host_page_size,
                        MEM_RESERVE, PAGE_NOACCESS);
    buf2 = VirtualAlloc(buf1, size, MEM_COMMIT, PAGE_EXECUTE_READWRITE);
    assert(buf1 == buf2);

    return buf1;
}
#else
static inline void *alloc_code_gen_buffer(void)
{
    int flags = MAP_PRIVATE | MAP_ANONYMOUS;
    uintptr_t start = 0;
    size_t size = tcg_ctx.code_gen_buffer_size;
    void *buf;

    /* Constrain the position of the buffer based on the host cpu.
       Note that these addresses are chosen in concert with the
       addresses assigned in the relevant linker script file.  */
# if defined(__PIE__) || defined(__PIC__)
    /* Don't bother setting a preferred location if we're building
       a position-independent executable.  We're more likely to get
       an address near the main executable if we let the kernel
       choose the address.  */
# elif defined(__x86_64__) && defined(MAP_32BIT)
    /* Force the memory down into low memory with the executable.
       Leave the choice of exact location with the kernel.  */
    flags |= MAP_32BIT;
    /* Cannot expect to map more than 800MB in low memory.  */
    if (size > 800u * 1024 * 1024) {
        tcg_ctx.code_gen_buffer_size = size = 800u * 1024 * 1024;
    }
# elif defined(__sparc__)
    start = 0x40000000ul;
# elif defined(__s390x__)
    start = 0x90000000ul;
# elif defined(__mips__)
#  if _MIPS_SIM == _ABI64
    start = 0x128000000ul;
#  else
    start = 0x08000000ul;
#  endif
# endif

    buf = mmap((void *)start, size + qemu_real_host_page_size,
               PROT_NONE, flags, -1, 0);
    if (buf == MAP_FAILED) {
        return NULL;
    }

#ifdef __mips__
    if (cross_256mb(buf, size)) {
        /* Try again, with the original still mapped, to avoid re-acquiring
           that 256mb crossing.  This time don't specify an address.  */
        size_t size2;
        void *buf2 = mmap(NULL, size + qemu_real_host_page_size,
                          PROT_NONE, flags, -1, 0);
        switch ((int)(buf2 != MAP_FAILED)) {
        case 1:
            if (!cross_256mb(buf2, size)) {
                /* Success!  Use the new buffer.  */
                munmap(buf, size + qemu_real_host_page_size);
                break;
            }
            /* Failure.  Work with what we had.  */
            munmap(buf2, size + qemu_real_host_page_size);
            /* fallthru */
        default:
            /* Split the original buffer.  Free the smaller half.  */
            buf2 = split_cross_256mb(buf, size);
            size2 = tcg_ctx.code_gen_buffer_size;
            if (buf == buf2) {
                munmap(buf + size2 + qemu_real_host_page_size, size - size2);
            } else {
                munmap(buf, size - size2);
            }
            size = size2;
            break;
        }
        buf = buf2;
    }
#endif

    /* Make the final buffer accessible.  The guard page at the end
       will remain inaccessible with PROT_NONE.  */
    mprotect(buf, size, PROT_WRITE | PROT_READ | PROT_EXEC);

    /* Request large pages for the buffer.  */
    qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);

    return buf;
}
#endif /* USE_STATIC_CODE_GEN_BUFFER, WIN32, POSIX */
static inline void code_gen_alloc(size_t tb_size)
{
    tcg_ctx.code_gen_buffer_size = size_code_gen_buffer(tb_size);
    tcg_ctx.code_gen_buffer = alloc_code_gen_buffer();
    if (tcg_ctx.code_gen_buffer == NULL) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
        exit(1);
    }

    /* Estimate a good size for the number of TBs we can support.  We
       still haven't deducted the prologue from the buffer size here,
       but that's minimal and won't affect the estimate much.  */
    tcg_ctx.code_gen_max_blocks
        = tcg_ctx.code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tcg_ctx.tb_ctx.tbs = g_new(TranslationBlock, tcg_ctx.code_gen_max_blocks);

    qemu_mutex_init(&tcg_ctx.tb_ctx.tb_lock);
}
static void tb_htable_init(void)
{
    unsigned int mode = QHT_MODE_AUTO_RESIZE;

    qht_init(&tcg_ctx.tb_ctx.htable, CODE_GEN_HTABLE_SIZE, mode);
}

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size)
{
    code_gen_alloc(tb_size);
#if defined(CONFIG_SOFTMMU)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(&tcg_ctx);
#endif
}

bool tcg_enabled(void)
{
    return tcg_ctx.code_gen_buffer != NULL;
}
/*
 * Allocate a new translation block. Flush the translation buffer if
 * too many translation blocks or too much generated code.
 *
 * Called with tb_lock held.
 */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (tcg_ctx.tb_ctx.nb_tbs >= tcg_ctx.code_gen_max_blocks) {
        return NULL;
    }
    tb = &tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    tb->invalid = false;
    return tb;
}

/* Called with tb_lock held.  */
void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single use temporary TBs.
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (tcg_ctx.tb_ctx.nb_tbs > 0 &&
            tb == &tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs - 1]) {
        tcg_ctx.code_gen_ptr = tb->tc_ptr;
        tcg_ctx.tb_ctx.nb_tbs--;
    }
}
static inline void invalidate_page_bitmap(PageDesc *p)
{
#ifdef CONFIG_SOFTMMU
    g_free(p->code_bitmap);
    p->code_bitmap = NULL;
    p->code_write_count = 0;
#endif
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */
static void page_flush_tb_1(int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            page_flush_tb_1(level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i, l1_sz = v_l1_size;

    for (i = 0; i < l1_sz; i++) {
        page_flush_tb_1(v_l2_levels, l1_map + i);
    }
}
/* flush all the translation blocks */
static void do_tb_flush(CPUState *cpu, run_on_cpu_data tb_flush_count)
{
    tb_lock();

    /* If it has already been done on request of another CPU,
     * just retry.
     */
    if (tcg_ctx.tb_ctx.tb_flush_count != tb_flush_count.host_int) {
        goto done;
    }

#if defined(DEBUG_TB_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer),
           tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.tb_ctx.nb_tbs > 0 ?
           ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)) /
           tcg_ctx.tb_ctx.nb_tbs : 0);
#endif
    if ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)
        > tcg_ctx.code_gen_buffer_size) {
        cpu_abort(cpu, "Internal error: code buffer overflow\n");
    }

    CPU_FOREACH(cpu) {
        int i;

        for (i = 0; i < TB_JMP_CACHE_SIZE; ++i) {
            atomic_set(&cpu->tb_jmp_cache[i], NULL);
        }
    }

    tcg_ctx.tb_ctx.nb_tbs = 0;
    qht_reset_size(&tcg_ctx.tb_ctx.htable, CODE_GEN_HTABLE_SIZE);
    page_flush_tb();

    tcg_ctx.code_gen_ptr = tcg_ctx.code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    atomic_mb_set(&tcg_ctx.tb_ctx.tb_flush_count,
                  tcg_ctx.tb_ctx.tb_flush_count + 1);

done:
    tb_unlock();
}

void tb_flush(CPUState *cpu)
{
    if (tcg_enabled()) {
        unsigned tb_flush_count = atomic_mb_read(&tcg_ctx.tb_ctx.tb_flush_count);
        async_safe_run_on_cpu(cpu, do_tb_flush,
                              RUN_ON_CPU_HOST_INT(tb_flush_count));
    }
}
#ifdef DEBUG_TB_CHECK

static void
do_tb_invalidate_check(struct qht *ht, void *p, uint32_t hash, void *userp)
{
    TranslationBlock *tb = p;
    target_ulong addr = *(target_ulong *)userp;

    if (!(addr + TARGET_PAGE_SIZE <= tb->pc || addr >= tb->pc + tb->size)) {
        printf("ERROR invalidate: address=" TARGET_FMT_lx
               " PC=%08lx size=%04x\n", addr, (long)tb->pc, tb->size);
    }
}

/* verify that all the pages have correct rights for code
 *
 * Called with tb_lock held.
 */
static void tb_invalidate_check(target_ulong address)
{
    address &= TARGET_PAGE_MASK;
    qht_iter(&tcg_ctx.tb_ctx.htable, do_tb_invalidate_check, &address);
}

static void
do_tb_page_check(struct qht *ht, void *p, uint32_t hash, void *userp)
{
    TranslationBlock *tb = p;
    int flags1, flags2;

    flags1 = page_get_flags(tb->pc);
    flags2 = page_get_flags(tb->pc + tb->size - 1);
    if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
        printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
               (long)tb->pc, tb->size, flags1, flags2);
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    qht_iter(&tcg_ctx.tb_ctx.htable, do_tb_page_check, NULL);
}

#endif
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for (;;) {
        tb1 = *ptb;
        n1 = (uintptr_t)tb1 & 3;
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}
/* remove the TB from a list of TBs jumping to the n-th jump target of the TB */
static inline void tb_remove_from_jmp_list(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1;
    uintptr_t *ptb, ntb;
    unsigned int n1;

    ptb = &tb->jmp_list_next[n];
    if (*ptb) {
        /* find tb(n) in circular list */
        for (;;) {
            ntb = *ptb;
            n1 = ntb & 3;
            tb1 = (TranslationBlock *)(ntb & ~3);
            if (n1 == n && tb1 == tb) {
                break;
            }
            if (n1 == 2) {
                ptb = &tb1->jmp_list_first;
            } else {
                ptb = &tb1->jmp_list_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_list_next[n];

        tb->jmp_list_next[n] = (uintptr_t)NULL;
    }
}
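
/*
 * Illustrative sketch added for clarity (hypothetical helpers, not part
 * of the original file): the jump lists above store tagged pointers in
 * which the low two bits select a slot inside the pointed-to TB --
 * 0 or 1 for jmp_list_next[0]/[1] and 2 for the jmp_list_first anchor
 * (tb_gen_code() below seeds it with "(uintptr_t)tb | 2").  Masking with
 * ~3 recovers the TranslationBlock pointer, which is safe because TBs
 * are at least 4-byte aligned.
 */
#if 0
static inline uintptr_t jmp_list_tag(TranslationBlock *tb, unsigned int slot)
{
    /* slot: 0 or 1 for jmp_list_next[], 2 for the jmp_list_first anchor */
    return (uintptr_t)tb | slot;
}

static inline TranslationBlock *jmp_list_untag(uintptr_t ntb)
{
    return (TranslationBlock *)(ntb & ~3);
}
#endif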
/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    uintptr_t addr = (uintptr_t)(tb->tc_ptr + tb->jmp_reset_offset[n]);
    tb_set_jmp_target(tb, n, addr);
}

/* remove any jumps to the TB */
static inline void tb_jmp_unlink(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    uintptr_t *ptb, ntb;
    unsigned int n1;

    ptb = &tb->jmp_list_first;
    for (;;) {
        ntb = *ptb;
        n1 = ntb & 3;
        tb1 = (TranslationBlock *)(ntb & ~3);
        if (n1 == 2) {
            break;
        }
        tb_reset_jump(tb1, n1);
        *ptb = tb1->jmp_list_next[n1];
        tb1->jmp_list_next[n1] = (uintptr_t)NULL;
    }
}
/* invalidate one TB
 *
 * Called with tb_lock held.
 */
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *cpu;
    PageDesc *p;
    uint32_t h;
    tb_page_addr_t phys_pc;

    atomic_set(&tb->invalid, true);

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_hash_func(phys_pc, tb->pc, tb->flags);
    qht_remove(&tcg_ctx.tb_ctx.htable, tb, h);

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    /* remove the TB from the per-CPU jump caches */
    h = tb_jmp_cache_hash_func(tb->pc);
    CPU_FOREACH(cpu) {
        if (atomic_read(&cpu->tb_jmp_cache[h]) == tb) {
            atomic_set(&cpu->tb_jmp_cache[h], NULL);
        }
    }

    /* suppress this TB from the two jump lists */
    tb_remove_from_jmp_list(tb, 0);
    tb_remove_from_jmp_list(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb_jmp_unlink(tb);

    tcg_ctx.tb_ctx.tb_phys_invalidate_count++;
}
#ifdef CONFIG_SOFTMMU
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = bitmap_new(TARGET_PAGE_SIZE);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE) {
                tb_end = TARGET_PAGE_SIZE;
            }
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        bitmap_set(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
#endif
/* add the tb in the target page and protect it if necessary
 *
 * Called with mmap_lock held for user-mode emulation.
 */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;
#endif

    assert_memory_lock();

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    page_already_protected = p->first_tb != NULL;
#endif
    p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);
    invalidate_page_bitmap(p);

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for (addr = page_addr; addr < page_addr + qemu_host_page_size;
             addr += TARGET_PAGE_SIZE) {

            p2 = page_find(addr >> TARGET_PAGE_BITS);
            if (!p2) {
                continue;
            }
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);
    }
#endif
}
/* add a new TB and link it to the physical page tables. phys_page2 is
 * (-1) to indicate that only one page contains the TB.
 *
 * Called with mmap_lock held for user-mode emulation.
 */
static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
                         tb_page_addr_t phys_page2)
{
    uint32_t h;

    assert_memory_lock();

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1) {
        tb_alloc_page(tb, 1, phys_page2);
    } else {
        tb->page_addr[1] = -1;
    }

    /* add in the hash table */
    h = tb_hash_func(phys_pc, tb->pc, tb->flags);
    qht_insert(&tcg_ctx.tb_ctx.htable, tb, h);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
}
/* Called with mmap_lock held for user mode emulation.  */
TranslationBlock *tb_gen_code(CPUState *cpu,
                              target_ulong pc, target_ulong cs_base,
                              uint32_t flags, int cflags)
{
    CPUArchState *env = cpu->env_ptr;
    TranslationBlock *tb;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    tcg_insn_unit *gen_code_buf;
    int gen_code_size, search_size;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif
    assert_memory_lock();

    phys_pc = get_page_addr_code(env, pc);
    if (use_icount && !(cflags & CF_IGNORE_ICOUNT)) {
        cflags |= CF_USE_ICOUNT;
    }

    tb = tb_alloc(pc);
    if (unlikely(!tb)) {
 buffer_overflow:
        /* flush must be done */
        tb_flush(cpu);
        /* Make the execution loop process the flush as soon as possible. */
        cpu->exception_index = EXCP_INTERRUPT;
        cpu_loop_exit(cpu);
    }

    gen_code_buf = tcg_ctx.code_gen_ptr;
    tb->tc_ptr = gen_code_buf;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;

#ifdef CONFIG_PROFILER
    tcg_ctx.tb_count1++; /* includes aborted translations because of
                            exceptions */
    ti = profile_getclock();
#endif

    tcg_func_start(&tcg_ctx);

    tcg_ctx.cpu = ENV_GET_CPU(env);
    gen_intermediate_code(env, tb);

    trace_translate_block(tb, tb->pc, tb->tc_ptr);

    /* generate machine code */
    tb->jmp_reset_offset[0] = TB_JMP_RESET_OFFSET_INVALID;
    tb->jmp_reset_offset[1] = TB_JMP_RESET_OFFSET_INVALID;
    tcg_ctx.tb_jmp_reset_offset = tb->jmp_reset_offset;
#ifdef USE_DIRECT_JUMP
    tcg_ctx.tb_jmp_insn_offset = tb->jmp_insn_offset;
    tcg_ctx.tb_jmp_target_addr = NULL;
#else
    tcg_ctx.tb_jmp_insn_offset = NULL;
    tcg_ctx.tb_jmp_target_addr = tb->jmp_target_addr;
#endif

#ifdef CONFIG_PROFILER
    tcg_ctx.interm_time += profile_getclock() - ti;
    tcg_ctx.code_time -= profile_getclock();
#endif

    /* ??? Overflow could be handled better here.  In particular, we
       don't need to re-do gen_intermediate_code, nor should we re-do
       the tcg optimization currently hidden inside tcg_gen_code.  All
       that should be required is to flush the TBs, allocate a new TB,
       re-initialize it per above, and re-do the actual code generation.  */
    gen_code_size = tcg_gen_code(&tcg_ctx, tb);
    if (unlikely(gen_code_size < 0)) {
        goto buffer_overflow;
    }
    search_size = encode_search(tb, (void *)gen_code_buf + gen_code_size);
    if (unlikely(search_size < 0)) {
        goto buffer_overflow;
    }

#ifdef CONFIG_PROFILER
    tcg_ctx.code_time += profile_getclock();
    tcg_ctx.code_in_len += tb->size;
    tcg_ctx.code_out_len += gen_code_size;
    tcg_ctx.search_out_len += search_size;
#endif

    if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM) &&
        qemu_log_in_addr_range(tb->pc)) {
        qemu_log("OUT: [size=%d]\n", gen_code_size);
        log_disas(tb->tc_ptr, gen_code_size);
    }

    tcg_ctx.code_gen_ptr = (void *)
        ROUND_UP((uintptr_t)gen_code_buf + gen_code_size + search_size,
                 CODE_GEN_ALIGN);

    /* init jump list */
    assert(((uintptr_t)tb & 3) == 0);
    tb->jmp_list_first = (uintptr_t)tb | 2;
    tb->jmp_list_next[0] = (uintptr_t)NULL;
    tb->jmp_list_next[1] = (uintptr_t)NULL;

    /* init original jump addresses which have been set during tcg_gen_code() */
    if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
        tb_reset_jump(tb, 0);
    }
    if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
        tb_reset_jump(tb, 1);
    }

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    /* As long as consistency of the TB stuff is provided by tb_lock in user
     * mode and is implicit in single-threaded softmmu emulation, no explicit
     * memory barrier is required before tb_link_page() makes the TB visible
     * through the physical hash table and physical page list.
     */
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}
/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[. NOTE: start and end may refer to *different* physical pages.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 *
 * Called with mmap_lock held for user-mode emulation, grabs tb_lock
 * Called with tb_lock held for system-mode emulation
 */
static void tb_invalidate_phys_range_1(tb_page_addr_t start, tb_page_addr_t end)
{
    while (start < end) {
        tb_invalidate_phys_page_range(start, end, 0);
        start &= TARGET_PAGE_MASK;
        start += TARGET_PAGE_SIZE;
    }
}

#ifdef CONFIG_SOFTMMU
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
{
    tb_invalidate_phys_range_1(start, end);
}
#else
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
{
    assert_memory_lock();
    tb_lock();
    tb_invalidate_phys_range_1(start, end);
    tb_unlock();
}
#endif
/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[. NOTE: start and end must refer to the *same* physical page.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 *
 * Called with tb_lock/mmap_lock held for user-mode emulation
 * Called with tb_lock held for system-mode emulation
 */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next;
#if defined(TARGET_HAS_PRECISE_SMC)
    CPUState *cpu = current_cpu;
    CPUArchState *env = NULL;
#endif
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    uint32_t current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    assert_memory_lock();

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }
#if defined(TARGET_HAS_PRECISE_SMC)
    if (cpu != NULL) {
        env = cpu->env_ptr;
    }
#endif

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all
       the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (cpu->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(cpu->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state_from_tb(cpu, current_tb, cpu->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            tb_phys_invalidate(tb, -1);
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        tlb_unprotect_code(start);
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1);
        cpu_loop_exit_noexc(cpu);
    }
#endif
}
#ifdef CONFIG_SOFTMMU
/* len must be <= 8 and start must be a multiple of len.
 * Called via softmmu_template.h when code areas are written to with
 * iothread mutex not held.
 */
void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;

#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                 cpu_single_env->mem_io_vaddr, len,
                 cpu_single_env->eip,
                 cpu_single_env->eip +
                 (intptr_t)cpu_single_env->segs[R_CS].base);
    }
#endif
    assert_memory_lock();

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD) {
        /* build code bitmap.  FIXME: writes should be protected by
         * tb_lock, reads by tb_lock or RCU.
         */
        build_page_bitmap(p);
    }
    if (p->code_bitmap) {
        unsigned int nr;
        unsigned long b;

        nr = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG - 1));
        if (b & ((1 << len) - 1)) {
            goto do_invalidate;
        }
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
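
/*
 * Worked example added for illustration (hypothetical values, assuming a
 * 64-bit host and 4 KiB target pages; not part of the original file): a
 * 4-byte write at page offset 0x123 gives nr = 0x123, so the test above
 * inspects code_bitmap[0x123 / 64] shifted right by 0x123 % 64 = 35 and
 * masks the low 4 bits, i.e. bitmap bits 0x123..0x126.  If any of those
 * bytes belong to translated code, the whole range is invalidated.
 */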
#else
/* Called with mmap_lock held. If pc is not 0 then it indicates the
 * host PC of the faulting store instruction that caused this invalidate.
 * Returns true if the caller needs to abort execution of the current
 * TB (because it was modified by this store and the guest CPU has
 * precise-SMC semantics).
 */
static bool tb_invalidate_phys_page(tb_page_addr_t addr, uintptr_t pc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *cpu = current_cpu;
    CPUArchState *env = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    uint32_t current_flags = 0;
#endif

    assert_memory_lock();

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        return false;
    }

    tb_lock();
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
    if (cpu != NULL) {
        env = cpu->env_ptr;
    }
#endif
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state_from_tb(cpu, current_tb, pc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1);
        /* tb_lock will be reset after cpu_loop_exit_noexc longjmps
         * back into the cpu_exec loop. */
        return true;
    }
#endif
    tb_unlock();

    return false;
}
#endif
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
static TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
{
    int m_min, m_max, m;
    uintptr_t v;
    TranslationBlock *tb;

    if (tcg_ctx.tb_ctx.nb_tbs <= 0) {
        return NULL;
    }
    if (tc_ptr < (uintptr_t)tcg_ctx.code_gen_buffer ||
        tc_ptr >= (uintptr_t)tcg_ctx.code_gen_ptr) {
        return NULL;
    }
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = tcg_ctx.tb_ctx.nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tcg_ctx.tb_ctx.tbs[m];
        v = (uintptr_t)tb->tc_ptr;
        if (v == tc_ptr) {
            return tb;
        } else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tcg_ctx.tb_ctx.tbs[m_max];
}
#if !defined(CONFIG_USER_ONLY)
void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr)
{
    ram_addr_t ram_addr;
    MemoryRegion *mr;
    hwaddr l = 1;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr, &l, false);
    if (!(memory_region_is_ram(mr)
          || memory_region_is_romd(mr))) {
        rcu_read_unlock();
        return;
    }
    ram_addr = memory_region_get_ram_addr(mr) + addr;
    tb_lock();
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
    tb_unlock();
    rcu_read_unlock();
}
#endif /* !defined(CONFIG_USER_ONLY) */
/* Called with tb_lock held.  */
void tb_check_watchpoint(CPUState *cpu)
{
    TranslationBlock *tb;

    tb = tb_find_pc(cpu->mem_io_pc);
    if (tb) {
        /* We can use retranslation to find the PC.  */
        cpu_restore_state_from_tb(cpu, tb, cpu->mem_io_pc);
        tb_phys_invalidate(tb, -1);
    } else {
        /* The exception probably happened in a helper.  The CPU state should
           have been saved before calling it. Fetch the PC from there.  */
        CPUArchState *env = cpu->env_ptr;
        target_ulong pc, cs_base;
        tb_page_addr_t addr;
        uint32_t flags;

        cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
        addr = get_page_addr_code(env, pc);
        tb_invalidate_phys_range(addr, addr + 1);
    }
}
#ifndef CONFIG_USER_ONLY
/* in deterministic execution mode, instructions doing device I/Os
 * must be at the end of the TB.
 *
 * Called by softmmu_template.h, with iothread mutex not held.
 */
void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
{
#if defined(TARGET_MIPS) || defined(TARGET_SH4)
    CPUArchState *env = cpu->env_ptr;
#endif
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint32_t flags;

    tb_lock();
    tb = tb_find_pc(retaddr);
    if (!tb) {
        cpu_abort(cpu, "cpu_io_recompile: could not find TB for pc=%p",
                  (void *)retaddr);
    }
    n = cpu->icount_decr.u16.low + tb->icount;
    cpu_restore_state_from_tb(cpu, tb, retaddr);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - cpu->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= (env->hflags & MIPS_HFLAG_B16 ? 2 : 4);
        cpu->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
        && n > 1) {
        env->pc -= 2;
        cpu->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK) {
        cpu_abort(cpu, "TB too big during recompile");
    }

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    if (tb->cflags & CF_NOCACHE) {
        if (tb->orig_tb) {
            /* Invalidate original TB if this TB was generated in
             * cpu_exec_nocache() */
            tb_phys_invalidate(tb->orig_tb, -1);
        }
        tb_free(tb);
    }
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(cpu, pc, cs_base, flags, cflags);

    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
     * the first in the TB) then we end up generating a whole new TB and
     * repeating the fault, which is horribly inefficient.
     * Better would be to execute just this insn uncached, or generate a
     * second new TB.
     *
     * cpu_loop_exit_noexc will longjmp back to cpu_exec where the
     * tb_lock gets reset.
     */
    cpu_loop_exit_noexc(cpu);
}
void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset(&cpu->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset(&cpu->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}
static void print_qht_statistics(FILE *f, fprintf_function cpu_fprintf,
                                 struct qht_stats hst)
{
    uint32_t hgram_opts;
    size_t hgram_bins;
    char *hgram;

    if (!hst.head_buckets) {
        return;
    }
    cpu_fprintf(f, "TB hash buckets %zu/%zu (%0.2f%% head buckets used)\n",
                hst.used_head_buckets, hst.head_buckets,
                (double)hst.used_head_buckets / hst.head_buckets * 100);

    hgram_opts =  QDIST_PR_BORDER | QDIST_PR_LABELS;
    hgram_opts |= QDIST_PR_100X   | QDIST_PR_PERCENT;
    if (qdist_xmax(&hst.occupancy) - qdist_xmin(&hst.occupancy) == 1) {
        hgram_opts |= QDIST_PR_NODECIMAL;
    }
    hgram = qdist_pr(&hst.occupancy, 10, hgram_opts);
    cpu_fprintf(f, "TB hash occupancy %0.2f%% avg chain occ. Histogram: %s\n",
                qdist_avg(&hst.occupancy) * 100, hgram);
    g_free(hgram);

    hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS;
    hgram_bins = qdist_xmax(&hst.chain) - qdist_xmin(&hst.chain);
    if (hgram_bins > 10) {
        hgram_bins = 10;
    } else {
        hgram_bins = 0;
        hgram_opts |= QDIST_PR_NODECIMAL | QDIST_PR_NOBINRANGE;
    }
    hgram = qdist_pr(&hst.chain, hgram_bins, hgram_opts);
    cpu_fprintf(f, "TB hash avg chain %0.3f buckets. Histogram: %s\n",
                qdist_avg(&hst.chain), hgram);
    g_free(hgram);
}
void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;
    struct qht_stats hst;

    tb_lock();

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for (i = 0; i < tcg_ctx.tb_ctx.nb_tbs; i++) {
        tb = &tcg_ctx.tb_ctx.tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size) {
            max_target_code_size = tb->size;
        }
        if (tb->page_addr[1] != -1) {
            cross_page++;
        }
        if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
            direct_jmp_count++;
            if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size %td/%zd\n",
                tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer,
                tcg_ctx.code_gen_highwater - tcg_ctx.code_gen_buffer);
    cpu_fprintf(f, "TB count %d/%d\n",
                tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
                tcg_ctx.tb_ctx.nb_tbs ? target_code_size /
                        tcg_ctx.tb_ctx.nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
                tcg_ctx.tb_ctx.nb_tbs ? (tcg_ctx.code_gen_ptr -
                                         tcg_ctx.code_gen_buffer) /
                        tcg_ctx.tb_ctx.nb_tbs : 0,
                target_code_size ? (double) (tcg_ctx.code_gen_ptr -
                                             tcg_ctx.code_gen_buffer) /
                        target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n", cross_page,
                tcg_ctx.tb_ctx.nb_tbs ? (cross_page * 100) /
                        tcg_ctx.tb_ctx.nb_tbs : 0);
    cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp_count * 100) /
                        tcg_ctx.tb_ctx.nb_tbs : 0,
                direct_jmp2_count,
                tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp2_count * 100) /
                        tcg_ctx.tb_ctx.nb_tbs : 0);

    qht_statistics_init(&tcg_ctx.tb_ctx.htable, &hst);
    print_qht_statistics(f, cpu_fprintf, hst);
    qht_statistics_destroy(&hst);

    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count %u\n",
                atomic_read(&tcg_ctx.tb_ctx.tb_flush_count));
    cpu_fprintf(f, "TB invalidate count %d\n",
                tcg_ctx.tb_ctx.tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);

    tb_unlock();
}

void dump_opcount_info(FILE *f, fprintf_function cpu_fprintf)
{
    tcg_dump_op_count(f, cpu_fprintf);
}
#else /* CONFIG_USER_ONLY */

void cpu_interrupt(CPUState *cpu, int mask)
{
    g_assert(qemu_mutex_iothread_locked());
    cpu->interrupt_request |= mask;
    cpu->icount_decr.u16.high = -1;
}
/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */
struct walk_memory_regions_data {
    walk_memory_regions_fn fn;
    void *priv;
    target_ulong start;
    int prot;
};

static int walk_memory_regions_end(struct walk_memory_regions_data *data,
                                   target_ulong end, int new_prot)
{
    if (data->start != -1u) {
        int rc = data->fn(data->priv, data->start, end, data->prot);
        if (rc != 0) {
            return rc;
        }
    }

    data->start = (new_prot ? end : -1u);
    data->prot = new_prot;

    return 0;
}

static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 target_ulong base, int level, void **lp)
{
    target_ulong pa;
    int i, rc;

    if (*lp == NULL) {
        return walk_memory_regions_end(data, base, 0);
    }

    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            if (prot != data->prot) {
                rc = walk_memory_regions_end(data, pa, prot);
                if (rc != 0) {
                    return rc;
                }
            }
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            pa = base | ((target_ulong)i <<
                (TARGET_PAGE_BITS + V_L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
            if (rc != 0) {
                return rc;
            }
        }
    }

    return 0;
}

int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    struct walk_memory_regions_data data;
    uintptr_t i, l1_sz = v_l1_size;

    data.fn = fn;
    data.priv = priv;
    data.start = -1u;
    data.prot = 0;

    for (i = 0; i < l1_sz; i++) {
        target_ulong base = i << (v_l1_shift + TARGET_PAGE_BITS);
        int rc = walk_memory_regions_1(&data, base, v_l2_levels, l1_map + i);
        if (rc != 0) {
            return rc;
        }
    }

    return walk_memory_regions_end(&data, 0, 0);
}
static int dump_region(void *priv, target_ulong start,
    target_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    (void) fprintf(f, TARGET_FMT_lx"-"TARGET_FMT_lx
        " "TARGET_FMT_lx" %c%c%c\n",
        start, end, end - start,
        ((prot & PAGE_READ) ? 'r' : '-'),
        ((prot & PAGE_WRITE) ? 'w' : '-'),
        ((prot & PAGE_EXEC) ? 'x' : '-'));

    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    const int length = sizeof(target_ulong) * 2;
    (void) fprintf(f, "%-*s %-*s %-*s %s\n",
            length, "start", length, "end", length, "size", "prot");
    walk_memory_regions(f, dump_region);
}
int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        return 0;
    }
    return p->flags;
}
/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is positioned automatically depending
   on PAGE_WRITE.  The mmap_lock should already be held.  */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
    assert(start < end);
    assert_memory_lock();

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        flags |= PAGE_WRITE_ORG;
    }

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate
           the code inside.  */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0);
        }
        p->flags = flags;
    }
}
int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(start < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif

    if (len == 0) {
        return 0;
    }
    if (start + len - 1 < start) {
        /* We've wrapped around.  */
        return -1;
    }

    /* must do before we lose bits in the next step */
    end = TARGET_PAGE_ALIGN(start + len);
    start = start & TARGET_PAGE_MASK;

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p) {
            return -1;
        }
        if (!(p->flags & PAGE_VALID)) {
            return -1;
        }

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) {
            return -1;
        }
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG)) {
                return -1;
            }
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0)) {
                    return -1;
                }
            }
        }
    }
    return 0;
}
/* called from signal handler: invalidate the code and unprotect the
 * page. Return 0 if the fault was not handled, 1 if it was handled,
 * and 2 if it was handled but the caller must cause the TB to be
 * immediately exited. (We can only return 2 if the 'pc' argument is
 * nonzero.)
 */
int page_unprotect(target_ulong address, uintptr_t pc)
{
    PageDesc *p;
    bool current_tb_invalidated;
    int prot;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
    mmap_lock();

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        mmap_unlock();
        return 0;
    }

    /* if the page was really writable, then we change its
       protection back to writable */
    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
        host_start = address & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;

        prot = 0;
        current_tb_invalidated = false;
        for (addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
            p = page_find(addr >> TARGET_PAGE_BITS);
            p->flags |= PAGE_WRITE;
            prot |= p->flags;

            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            current_tb_invalidated |= tb_invalidate_phys_page(addr, pc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(addr);
#endif
        }
        mprotect((void *)g2h(host_start), qemu_host_page_size,
                 prot & PAGE_BITS);

        mmap_unlock();
        /* If current TB was invalidated return to main loop */
        return current_tb_invalidated ? 2 : 1;
    }
    mmap_unlock();
    return 0;
}
#endif /* CONFIG_USER_ONLY */