/*
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"

#include "qemu-common.h"
#define NO_CPU_IO_DEFS
#include "cpu.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg.h"
#if defined(CONFIG_USER_ONLY)
#include "qemu.h"
#include "exec/exec-all.h"
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else
#include "exec/address-spaces.h"
#endif

#include "exec/cputlb.h"
#include "exec/tb-hash.h"
#include "translate-all.h"
#include "qemu/bitmap.h"
#include "qemu/timer.h"
#include "exec/log.h"

/* #define DEBUG_TB_INVALIDATE */
/* #define DEBUG_TB_FLUSH */
/* #define DEBUG_LOCKING */
/* make various TB consistency checks */
/* #define DEBUG_TB_CHECK */

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

/* Access to the various translations structures need to be serialised via locks
 * for consistency. This is automatic for SoftMMU based system
 * emulation due to its single threaded nature. In user-mode emulation
 * access to the memory related structures are protected with the
 * mmap_lock.
 */
#ifdef DEBUG_LOCKING
#define DEBUG_MEM_LOCKS 1
#else
#define DEBUG_MEM_LOCKS 0
#endif

#ifdef CONFIG_SOFTMMU
#define assert_memory_lock() do { /* nothing */ } while (0)
#else
#define assert_memory_lock() do {               \
        if (DEBUG_MEM_LOCKS) {                  \
            g_assert(have_mmap_lock());         \
        }                                       \
    } while (0)
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
#ifdef CONFIG_SOFTMMU
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    unsigned long *code_bitmap;
#else
    unsigned long flags;
#endif
} PageDesc;

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define V_L2_BITS 10
#define V_L2_SIZE (1 << V_L2_BITS)

uintptr_t qemu_host_page_size;
intptr_t qemu_host_page_mask;

/*
 * L1 Mapping properties
 */
static int v_l1_size;
static int v_l1_shift;
static int v_l2_levels;

/* The bottom level has pointers to PageDesc, and is indexed by
 * anything from 4 to (V_L2_BITS + 3) bits, depending on target page size.
 */
#define V_L1_MIN_BITS 4
#define V_L1_MAX_BITS (V_L2_BITS + 3)
#define V_L1_MAX_SIZE (1 << V_L1_MAX_BITS)

static void *l1_map[V_L1_MAX_SIZE];

/* code generation context */
TCGContext tcg_ctx;

/* translation block context */
#ifdef CONFIG_USER_ONLY
__thread int have_tb_lock;
#endif

static void page_table_config_init(void)
{
    uint32_t v_l1_bits;

    assert(TARGET_PAGE_BITS);
    /* The bits remaining after N lower levels of page tables.  */
    v_l1_bits = (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % V_L2_BITS;
    if (v_l1_bits < V_L1_MIN_BITS) {
        v_l1_bits += V_L2_BITS;
    }

    v_l1_size = 1 << v_l1_bits;
    v_l1_shift = L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - v_l1_bits;
    v_l2_levels = v_l1_shift / V_L2_BITS - 1;

    assert(v_l1_bits <= V_L1_MAX_BITS);
    assert(v_l1_shift % V_L2_BITS == 0);
    assert(v_l2_levels >= 0);
}
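
/*
 * For example, with L1_MAP_ADDR_SPACE_BITS = 32, TARGET_PAGE_BITS = 12 and
 * V_L2_BITS = 10, the 20 bits of page index split as (32 - 12) % 10 = 0,
 * which is below V_L1_MIN_BITS and so is bumped to v_l1_bits = 10.  That
 * gives v_l1_size = 1024, v_l1_shift = 10 and v_l2_levels = 0: a single
 * 1024-entry L1 table whose slots point directly at 1024-entry PageDesc
 * arrays, with no intermediate levels.
 */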

void tb_lock(void)
{
#ifdef CONFIG_USER_ONLY
    assert(!have_tb_lock);
    qemu_mutex_lock(&tcg_ctx.tb_ctx.tb_lock);
    have_tb_lock++;
#endif
}

void tb_unlock(void)
{
#ifdef CONFIG_USER_ONLY
    assert(have_tb_lock);
    have_tb_lock--;
    qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
#endif
}

void tb_lock_reset(void)
{
#ifdef CONFIG_USER_ONLY
    if (have_tb_lock) {
        qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
        have_tb_lock = 0;
    }
#endif
}

#ifdef DEBUG_LOCKING
#define DEBUG_TB_LOCKS 1
#else
#define DEBUG_TB_LOCKS 0
#endif

#ifdef CONFIG_SOFTMMU
#define assert_tb_lock() do { /* nothing */ } while (0)
#else
#define assert_tb_lock() do {               \
        if (DEBUG_TB_LOCKS) {               \
            g_assert(have_tb_lock);         \
        }                                   \
    } while (0)
#endif

static TranslationBlock *tb_find_pc(uintptr_t tc_ptr);

void cpu_gen_init(void)
{
    tcg_context_init(&tcg_ctx);
}

/* Encode VAL as a signed leb128 sequence at P.
   Return P incremented past the encoded value.  */
static uint8_t *encode_sleb128(uint8_t *p, target_long val)
{
    int more, byte;

    do {
        byte = val & 0x7f;
        val >>= 7;
        more = !((val == 0 && (byte & 0x40) == 0)
                 || (val == -1 && (byte & 0x40) != 0));
        if (more) {
            byte |= 0x80;
        }
        *p++ = byte;
    } while (more);

    return p;
}

/* Decode a signed leb128 sequence at *PP; increment *PP past the
   decoded value.  Return the decoded value.  */
static target_long decode_sleb128(uint8_t **pp)
{
    uint8_t *p = *pp;
    target_long val = 0;
    int byte, shift = 0;

    do {
        byte = *p++;
        val |= (target_ulong)(byte & 0x7f) << shift;
        shift += 7;
    } while (byte & 0x80);
    if (shift < TARGET_LONG_BITS && (byte & 0x40)) {
        val |= -(target_ulong)1 << shift;
    }

    *pp = p;
    return val;
}

/* Encode the data collected about the instructions while compiling TB.
   Place the data at BLOCK, and return the number of bytes consumed.

   The logical table consists of TARGET_INSN_START_WORDS target_ulong's,
   which come from the target's insn_start data, followed by a uintptr_t
   which comes from the host pc of the end of the code implementing the insn.

   Each line of the table is encoded as sleb128 deltas from the previous
   line.  The seed for the first line is { tb->pc, 0..., tb->tc_ptr }.
   That is, the first column is seeded with the guest pc, the last column
   with the host pc, and the middle columns with zeros.  */
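/*
 * For example, a two-insn TB at guest pc 0x1000 whose insns start at
 * 0x1000 and 0x1004, and whose generated host code for each insn ends at
 * offsets 0x20 and 0x38, has the rows
 *     { 0x1000, ..., +0x20 } and { 0x1004, ..., +0x38 }.
 * Against the seed and the previous row these become the deltas
 * sleb128(0), ..., sleb128(0x20) for the first row and sleb128(4), ...,
 * sleb128(0x18) for the second.
 */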

static int encode_search(TranslationBlock *tb, uint8_t *block)
{
    uint8_t *highwater = tcg_ctx.code_gen_highwater;
    uint8_t *p = block;
    int i, j, n;

    tb->tc_search = block;

    for (i = 0, n = tb->icount; i < n; ++i) {
        target_ulong prev;

        for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
            if (i == 0) {
                prev = (j == 0 ? tb->pc : 0);
            } else {
                prev = tcg_ctx.gen_insn_data[i - 1][j];
            }
            p = encode_sleb128(p, tcg_ctx.gen_insn_data[i][j] - prev);
        }
        prev = (i == 0 ? 0 : tcg_ctx.gen_insn_end_off[i - 1]);
        p = encode_sleb128(p, tcg_ctx.gen_insn_end_off[i] - prev);

        /* Test for (pending) buffer overflow.  The assumption is that any
           one row beginning below the high water mark cannot overrun
           the buffer completely.  Thus we can test for overflow after
           encoding a row without having to check during encoding.  */
        if (unlikely(p > highwater)) {
            return -1;
        }
    }

    return p - block;
}

/* The cpu state corresponding to 'searched_pc' is restored.
 * Called with tb_lock held.
 */
static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
                                     uintptr_t searched_pc)
{
    target_ulong data[TARGET_INSN_START_WORDS] = { tb->pc };
    uintptr_t host_pc = (uintptr_t)tb->tc_ptr;
    CPUArchState *env = cpu->env_ptr;
    uint8_t *p = tb->tc_search;
    int i, j, num_insns = tb->icount;
#ifdef CONFIG_PROFILER
    int64_t ti = profile_getclock();
#endif

    searched_pc -= GETPC_ADJ;

    if (searched_pc < host_pc) {
        return -1;
    }

    /* Reconstruct the stored insn data while looking for the point at
       which the end of the insn exceeds the searched_pc.  */
    for (i = 0; i < num_insns; ++i) {
        for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
            data[j] += decode_sleb128(&p);
        }
        host_pc += decode_sleb128(&p);
        if (host_pc > searched_pc) {
            goto found;
        }
    }
    return -1;

 found:
    if (tb->cflags & CF_USE_ICOUNT) {
        assert(use_icount);
        /* Reset the cycle counter to the start of the block.  */
        cpu->icount_decr.u16.low += num_insns;
        /* Clear the IO flag.  */
        cpu->can_do_io = 0;
    }
    cpu->icount_decr.u16.low -= i;
    restore_state_to_opc(env, tb, data);

#ifdef CONFIG_PROFILER
    tcg_ctx.restore_time += profile_getclock() - ti;
    tcg_ctx.restore_count++;
#endif
    return 0;
}
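
/*
 * Note on the icount adjustment above: the block was charged for all of its
 * instructions up front, so on a mid-block exit the counter is first
 * credited with the whole block (+= num_insns) and then charged only for
 * the i instructions that did complete (-= i).  For example, stopping at
 * the third insn (i == 2) of a five-insn TB credits back three instructions.
 */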

bool cpu_restore_state(CPUState *cpu, uintptr_t retaddr)
{
    TranslationBlock *tb;
    bool r = false;

    tb_lock();
    tb = tb_find_pc(retaddr);
    if (tb) {
        cpu_restore_state_from_tb(cpu, tb, retaddr);
        if (tb->cflags & CF_NOCACHE) {
            /* one-shot translation, invalidate it immediately */
            tb_phys_invalidate(tb, -1);
            tb_free(tb);
        }
        r = true;
    }
    tb_unlock();

    return r;
}

void page_size_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
    qemu_real_host_page_size = getpagesize();
    qemu_real_host_page_mask = -(intptr_t)qemu_real_host_page_size;
    if (qemu_host_page_size == 0) {
        qemu_host_page_size = qemu_real_host_page_size;
    }
    if (qemu_host_page_size < TARGET_PAGE_SIZE) {
        qemu_host_page_size = TARGET_PAGE_SIZE;
    }
    qemu_host_page_mask = -(intptr_t)qemu_host_page_size;
}

static void page_init(void)
{
    page_size_init();
    page_table_config_init();

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}

/* If alloc=1:
 * Called with tb_lock held for system emulation.
 * Called with mmap_lock held for user-mode emulation.
 */
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

    if (alloc) {
        assert_memory_lock();
    }

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> v_l1_shift) & (v_l1_size - 1));

    /* Level 2..N-1.  */
    for (i = v_l2_levels; i > 0; i--) {
        void **p = atomic_rcu_read(lp);

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            p = g_new0(void *, V_L2_SIZE);
            atomic_rcu_set(lp, p);
        }

        lp = p + ((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1));
    }

    pd = atomic_rcu_read(lp);
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        pd = g_new0(PageDesc, V_L2_SIZE);
        atomic_rcu_set(lp, pd);
    }

    return pd + (index & (V_L2_SIZE - 1));
}

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc will be used. */
/* ??? 64-bit hosts ought to have no problem mmaping data outside the
   region in which the guest needs to run.  Revisit this.  */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

/* Minimum size of the code gen buffer.  This number is randomly chosen,
   but not so small that we can't have a fair number of TB's live.  */
#define MIN_CODE_GEN_BUFFER_SIZE     (1024u * 1024)

/* Maximum size of the code gen buffer we'd like to use.  Unless otherwise
   indicated, this is constrained by the range of direct branches on the
   host cpu, as used by the TCG implementation of goto_tb.  */
#if defined(__x86_64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__sparc__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__powerpc64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__powerpc__)
# define MAX_CODE_GEN_BUFFER_SIZE  (32u * 1024 * 1024)
#elif defined(__aarch64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (128ul * 1024 * 1024)
#elif defined(__arm__)
# define MAX_CODE_GEN_BUFFER_SIZE  (16u * 1024 * 1024)
#elif defined(__s390x__)
  /* We have a +- 4GB range on the branches; leave some slop.  */
# define MAX_CODE_GEN_BUFFER_SIZE  (3ul * 1024 * 1024 * 1024)
#elif defined(__mips__)
  /* We have a 256MB branch region, but leave room to make sure the
     main executable is also within that region.  */
# define MAX_CODE_GEN_BUFFER_SIZE  (128ul * 1024 * 1024)
#else
# define MAX_CODE_GEN_BUFFER_SIZE  ((size_t)-1)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32u * 1024 * 1024)

#define DEFAULT_CODE_GEN_BUFFER_SIZE \
  (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
   ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)
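
/*
 * For example, on an x86-64 host the default works out to
 * min(32 MB, 2 GB) = 32 MB, while on a 32-bit ARM host the 16 MB
 * MAX_CODE_GEN_BUFFER_SIZE cap wins and the default shrinks to 16 MB.
 */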

static inline size_t size_code_gen_buffer(size_t tb_size)
{
    /* Size the buffer.  */
    if (tb_size == 0) {
#ifdef USE_STATIC_CODE_GEN_BUFFER
        tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* ??? Needs adjustments.  */
        /* ??? If we relax the requirement that CONFIG_USER_ONLY use the
           static buffer, we could size this on RESERVED_VA, on the text
           segment size of the executable, or continue to use the default.  */
        tb_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
        tb_size = MIN_CODE_GEN_BUFFER_SIZE;
    }
    if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
        tb_size = MAX_CODE_GEN_BUFFER_SIZE;
    }
    return tb_size;
}

#ifdef __mips__
/* In order to use J and JAL within the code_gen_buffer, we require
   that the buffer not cross a 256MB boundary.  */
static inline bool cross_256mb(void *addr, size_t size)
{
    return ((uintptr_t)addr ^ ((uintptr_t)addr + size)) & ~0x0ffffffful;
}

/* We weren't able to allocate a buffer without crossing that boundary,
   so make do with the larger portion of the buffer that doesn't cross.
   Returns the new base of the buffer, and adjusts code_gen_buffer_size.  */
static inline void *split_cross_256mb(void *buf1, size_t size1)
{
    void *buf2 = (void *)(((uintptr_t)buf1 + size1) & ~0x0ffffffful);
    size_t size2 = buf1 + size1 - buf2;

    size1 = buf2 - buf1;
    if (size1 < size2) {
        size1 = size2;
        buf1 = buf2;
    }

    tcg_ctx.code_gen_buffer_size = size1;
    return buf1;
}
#endif
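
/*
 * Worked example for cross_256mb()/split_cross_256mb() above: with
 * addr = 0x0fe00000 and size = 0x00300000 (3MB), addr ^ (addr + size)
 * = 0x0fe00000 ^ 0x10100000 = 0x1ff00000, which still has bits set after
 * masking with ~0x0ffffffful, so the buffer straddles the 256MB boundary.
 * split_cross_256mb() would then keep the larger 2MB piece that lies
 * below 0x10000000 and shrink code_gen_buffer_size accordingly.
 */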

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
    __attribute__((aligned(CODE_GEN_ALIGN)));

# ifdef _WIN32
static inline void do_protect(void *addr, long size, int prot)
{
    DWORD old_protect;
    VirtualProtect(addr, size, prot, &old_protect);
}

static inline void map_exec(void *addr, long size)
{
    do_protect(addr, size, PAGE_EXECUTE_READWRITE);
}

static inline void map_none(void *addr, long size)
{
    do_protect(addr, size, PAGE_NOACCESS);
}
# else
static inline void do_protect(void *addr, long size, int prot)
{
    uintptr_t start, end;

    start = (uintptr_t)addr;
    start &= qemu_real_host_page_mask;

    end = (uintptr_t)addr + size;
    end = ROUND_UP(end, qemu_real_host_page_size);

    mprotect((void *)start, end - start, prot);
}

static inline void map_exec(void *addr, long size)
{
    do_protect(addr, size, PROT_READ | PROT_WRITE | PROT_EXEC);
}

static inline void map_none(void *addr, long size)
{
    do_protect(addr, size, PROT_NONE);
}
# endif /* WIN32 */

static inline void *alloc_code_gen_buffer(void)
{
    void *buf = static_code_gen_buffer;
    size_t full_size, size;

    /* The size of the buffer, rounded down to end on a page boundary.  */
    full_size = (((uintptr_t)buf + sizeof(static_code_gen_buffer))
                 & qemu_real_host_page_mask) - (uintptr_t)buf;

    /* Reserve a guard page.  */
    size = full_size - qemu_real_host_page_size;

    /* Honor a command-line option limiting the size of the buffer.  */
    if (size > tcg_ctx.code_gen_buffer_size) {
        size = (((uintptr_t)buf + tcg_ctx.code_gen_buffer_size)
                & qemu_real_host_page_mask) - (uintptr_t)buf;
    }
    tcg_ctx.code_gen_buffer_size = size;

#ifdef __mips__
    if (cross_256mb(buf, size)) {
        buf = split_cross_256mb(buf, size);
        size = tcg_ctx.code_gen_buffer_size;
    }
#endif

    map_exec(buf, size);
    map_none(buf + size, qemu_real_host_page_size);
    qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);

    return buf;
}

#elif defined(_WIN32)
static inline void *alloc_code_gen_buffer(void)
{
    size_t size = tcg_ctx.code_gen_buffer_size;
    void *buf1, *buf2;

    /* Perform the allocation in two steps, so that the guard page
       is reserved but uncommitted.  */
    buf1 = VirtualAlloc(NULL, size + qemu_real_host_page_size,
                        MEM_RESERVE, PAGE_NOACCESS);
    if (buf1 != NULL) {
        buf2 = VirtualAlloc(buf1, size, MEM_COMMIT, PAGE_EXECUTE_READWRITE);
        assert(buf1 == buf2);
    }

    return buf1;
}

#else
static inline void *alloc_code_gen_buffer(void)
{
    int flags = MAP_PRIVATE | MAP_ANONYMOUS;
    uintptr_t start = 0;
    size_t size = tcg_ctx.code_gen_buffer_size;
    void *buf;

    /* Constrain the position of the buffer based on the host cpu.
       Note that these addresses are chosen in concert with the
       addresses assigned in the relevant linker script file.  */
# if defined(__PIE__) || defined(__PIC__)
    /* Don't bother setting a preferred location if we're building
       a position-independent executable.  We're more likely to get
       an address near the main executable if we let the kernel
       choose the address.  */
# elif defined(__x86_64__) && defined(MAP_32BIT)
    /* Force the memory down into low memory with the executable.
       Leave the choice of exact location with the kernel.  */
    flags |= MAP_32BIT;
    /* Cannot expect to map more than 800MB in low memory.  */
    if (size > 800u * 1024 * 1024) {
        tcg_ctx.code_gen_buffer_size = size = 800u * 1024 * 1024;
    }
# elif defined(__sparc__)
    start = 0x40000000ul;
# elif defined(__s390x__)
    start = 0x90000000ul;
# elif defined(__mips__)
#  if _MIPS_SIM == _ABI64
    start = 0x128000000ul;
#  else
    start = 0x08000000ul;
#  endif
# endif

    buf = mmap((void *)start, size + qemu_real_host_page_size,
               PROT_NONE, flags, -1, 0);
    if (buf == MAP_FAILED) {
        return NULL;
    }

#ifdef __mips__
    if (cross_256mb(buf, size)) {
        /* Try again, with the original still mapped, to avoid re-acquiring
           that 256mb crossing.  This time don't specify an address.  */
        size_t size2;
        void *buf2 = mmap(NULL, size + qemu_real_host_page_size,
                          PROT_NONE, flags, -1, 0);
        switch (buf2 != MAP_FAILED) {
        case 1:
            if (!cross_256mb(buf2, size)) {
                /* Success!  Use the new buffer.  */
                munmap(buf, size + qemu_real_host_page_size);
                break;
            }
            /* Failure.  Work with what we had.  */
            munmap(buf2, size + qemu_real_host_page_size);
            /* fallthru */
        default:
            /* Split the original buffer.  Free the smaller half.  */
            buf2 = split_cross_256mb(buf, size);
            size2 = tcg_ctx.code_gen_buffer_size;
            if (buf == buf2) {
                munmap(buf + size2 + qemu_real_host_page_size, size - size2);
            } else {
                munmap(buf, size - size2);
            }
            size = size2;
            break;
        }
        buf = buf2;
    }
#endif

    /* Make the final buffer accessible.  The guard page at the end
       will remain inaccessible with PROT_NONE.  */
    mprotect(buf, size, PROT_WRITE | PROT_READ | PROT_EXEC);

    /* Request large pages for the buffer.  */
    qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);

    return buf;
}
#endif /* USE_STATIC_CODE_GEN_BUFFER, WIN32, POSIX */

static inline void code_gen_alloc(size_t tb_size)
{
    tcg_ctx.code_gen_buffer_size = size_code_gen_buffer(tb_size);
    tcg_ctx.code_gen_buffer = alloc_code_gen_buffer();
    if (tcg_ctx.code_gen_buffer == NULL) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
        exit(1);
    }

    /* Estimate a good size for the number of TBs we can support.  We
       still haven't deducted the prologue from the buffer size here,
       but that's minimal and won't affect the estimate much.  */
    tcg_ctx.code_gen_max_blocks
        = tcg_ctx.code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tcg_ctx.tb_ctx.tbs = g_new(TranslationBlock, tcg_ctx.code_gen_max_blocks);

    qemu_mutex_init(&tcg_ctx.tb_ctx.tb_lock);
}

static void tb_htable_init(void)
{
    unsigned int mode = QHT_MODE_AUTO_RESIZE;

    qht_init(&tcg_ctx.tb_ctx.htable, CODE_GEN_HTABLE_SIZE, mode);
}

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size)
{
    cpu_gen_init();
    page_init();
    tb_htable_init();
    code_gen_alloc(tb_size);
#if defined(CONFIG_SOFTMMU)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(&tcg_ctx);
#endif
}

bool tcg_enabled(void)
{
    return tcg_ctx.code_gen_buffer != NULL;
}

/*
 * Allocate a new translation block. Flush the translation buffer if
 * too many translation blocks or too much generated code.
 *
 * Called with tb_lock held.
 */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    assert_tb_lock();

    if (tcg_ctx.tb_ctx.nb_tbs >= tcg_ctx.code_gen_max_blocks) {
        return NULL;
    }
    tb = &tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    tb->invalid = false;
    return tb;
}

/* Called with tb_lock held.  */
void tb_free(TranslationBlock *tb)
{
    assert_tb_lock();

    /* In practice this is mostly used for single use temporary TB
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (tcg_ctx.tb_ctx.nb_tbs > 0 &&
            tb == &tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs - 1]) {
        tcg_ctx.code_gen_ptr = tb->tc_ptr;
        tcg_ctx.tb_ctx.nb_tbs--;
    }
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
#ifdef CONFIG_SOFTMMU
    g_free(p->code_bitmap);
    p->code_bitmap = NULL;
    p->code_write_count = 0;
#endif
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */
static void page_flush_tb_1(int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            page_flush_tb_1(level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i, l1_sz = v_l1_size;

    for (i = 0; i < l1_sz; i++) {
        page_flush_tb_1(v_l2_levels, l1_map + i);
    }
}

/* flush all the translation blocks */
static void do_tb_flush(CPUState *cpu, run_on_cpu_data tb_flush_count)
{
    tb_lock();

    /* If it is already been done on request of another CPU,
     * just retry.
     */
    if (tcg_ctx.tb_ctx.tb_flush_count != tb_flush_count.host_int) {
        goto done;
    }

#if defined(DEBUG_TB_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer),
           tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.tb_ctx.nb_tbs > 0 ?
           ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)) /
           tcg_ctx.tb_ctx.nb_tbs : 0);
#endif
    if ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)
        > tcg_ctx.code_gen_buffer_size) {
        cpu_abort(cpu, "Internal error: code buffer overflow\n");
    }

    CPU_FOREACH(cpu) {
        int i;

        for (i = 0; i < TB_JMP_CACHE_SIZE; ++i) {
            atomic_set(&cpu->tb_jmp_cache[i], NULL);
        }
    }

    tcg_ctx.tb_ctx.nb_tbs = 0;
    qht_reset_size(&tcg_ctx.tb_ctx.htable, CODE_GEN_HTABLE_SIZE);
    page_flush_tb();

    tcg_ctx.code_gen_ptr = tcg_ctx.code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    atomic_mb_set(&tcg_ctx.tb_ctx.tb_flush_count,
                  tcg_ctx.tb_ctx.tb_flush_count + 1);

done:
    tb_unlock();
}

void tb_flush(CPUState *cpu)
{
    if (tcg_enabled()) {
        unsigned tb_flush_count = atomic_mb_read(&tcg_ctx.tb_ctx.tb_flush_count);
        async_safe_run_on_cpu(cpu, do_tb_flush,
                              RUN_ON_CPU_HOST_INT(tb_flush_count));
    }
}

#ifdef DEBUG_TB_CHECK

static void
do_tb_invalidate_check(struct qht *ht, void *p, uint32_t hash, void *userp)
{
    TranslationBlock *tb = p;
    target_ulong addr = *(target_ulong *)userp;

    if (!(addr + TARGET_PAGE_SIZE <= tb->pc || addr >= tb->pc + tb->size)) {
        printf("ERROR invalidate: address=" TARGET_FMT_lx
               " PC=%08lx size=%04x\n", addr, (long)tb->pc, tb->size);
    }
}

/* verify that all the pages have correct rights for code
 *
 * Called with tb_lock held.
 */
static void tb_invalidate_check(target_ulong address)
{
    address &= TARGET_PAGE_MASK;
    qht_iter(&tcg_ctx.tb_ctx.htable, do_tb_invalidate_check, &address);
}

static void
do_tb_page_check(struct qht *ht, void *p, uint32_t hash, void *userp)
{
    TranslationBlock *tb = p;
    int flags1, flags2;

    flags1 = page_get_flags(tb->pc);
    flags2 = page_get_flags(tb->pc + tb->size - 1);
    if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
        printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
               (long)tb->pc, tb->size, flags1, flags2);
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    qht_iter(&tcg_ctx.tb_ctx.htable, do_tb_page_check, NULL);
}

#endif

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for (;;) {
        tb1 = *ptb;
        n1 = (uintptr_t)tb1 & 3;
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

/* remove the TB from a list of TBs jumping to the n-th jump target of the TB */
static inline void tb_remove_from_jmp_list(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1;
    uintptr_t *ptb, ntb;
    unsigned int n1;

    ptb = &tb->jmp_list_next[n];
    if (*ptb) {
        /* find tb(n) in circular list */
        for (;;) {
            ntb = *ptb;
            n1 = ntb & 3;
            tb1 = (TranslationBlock *)(ntb & ~3);
            if (n1 == n && tb1 == tb) {
                break;
            }
            if (n1 == 2) {
                ptb = &tb1->jmp_list_first;
            } else {
                ptb = &tb1->jmp_list_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_list_next[n];

        tb->jmp_list_next[n] = (uintptr_t)NULL;
    }
}
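
/*
 * Note on the encoding walked above and in tb_jmp_unlink() below: the low
 * two bits of each jmp_list pointer tag which outgoing jump of the
 * pointed-to TB the link belongs to -- 0 or 1 for the two possible direct
 * jumps, while the value 2 marks the list head stored in jmp_list_first.
 * That is why the walkers mask with ~3 to recover the TranslationBlock
 * pointer and stop (or switch to jmp_list_first) when the tag equals 2.
 */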

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    uintptr_t addr = (uintptr_t)(tb->tc_ptr + tb->jmp_reset_offset[n]);
    tb_set_jmp_target(tb, n, addr);
}

/* remove any jumps to the TB */
static inline void tb_jmp_unlink(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    uintptr_t *ptb, ntb;
    unsigned int n1;

    ptb = &tb->jmp_list_first;
    for (;;) {
        ntb = *ptb;
        n1 = ntb & 3;
        tb1 = (TranslationBlock *)(ntb & ~3);
        if (n1 == 2) {
            break;
        }
        tb_reset_jump(tb1, n1);
        *ptb = tb1->jmp_list_next[n1];
        tb1->jmp_list_next[n1] = (uintptr_t)NULL;
    }
}

/* invalidate one TB
 *
 * Called with tb_lock held.
 */
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *cpu;
    PageDesc *p;
    uint32_t h;
    tb_page_addr_t phys_pc;

    assert_tb_lock();

    atomic_set(&tb->invalid, true);

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_hash_func(phys_pc, tb->pc, tb->flags);
    qht_remove(&tcg_ctx.tb_ctx.htable, tb, h);

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    CPU_FOREACH(cpu) {
        if (atomic_read(&cpu->tb_jmp_cache[h]) == tb) {
            atomic_set(&cpu->tb_jmp_cache[h], NULL);
        }
    }

    /* suppress this TB from the two jump lists */
    tb_remove_from_jmp_list(tb, 0);
    tb_remove_from_jmp_list(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb_jmp_unlink(tb);

    tcg_ctx.tb_ctx.tb_phys_invalidate_count++;
}

#ifdef CONFIG_SOFTMMU
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = bitmap_new(TARGET_PAGE_SIZE);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE) {
                tb_end = TARGET_PAGE_SIZE;
            }
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        bitmap_set(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
#endif

/* add the tb in the target page and protect it if necessary
 *
 * Called with mmap_lock held for user-mode emulation.
 */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;
#endif

    assert_memory_lock();

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    page_already_protected = p->first_tb != NULL;
#endif
    p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);
    invalidate_page_bitmap(p);

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for (addr = page_addr; addr < page_addr + qemu_host_page_size;
             addr += TARGET_PAGE_SIZE) {

            p2 = page_find(addr >> TARGET_PAGE_BITS);
            if (!p2) {
                continue;
            }
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);
    }
#endif
}

/* add a new TB and link it to the physical page tables. phys_page2 is
 * (-1) to indicate that only one page contains the TB.
 *
 * Called with mmap_lock held for user-mode emulation.
 */
static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
                         tb_page_addr_t phys_page2)
{
    uint32_t h;

    assert_memory_lock();

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1) {
        tb_alloc_page(tb, 1, phys_page2);
    } else {
        tb->page_addr[1] = -1;
    }

    /* add in the hash table */
    h = tb_hash_func(phys_pc, tb->pc, tb->flags);
    qht_insert(&tcg_ctx.tb_ctx.htable, tb, h);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
}

/* Called with mmap_lock held for user mode emulation.  */
TranslationBlock *tb_gen_code(CPUState *cpu,
                              target_ulong pc, target_ulong cs_base,
                              uint32_t flags, int cflags)
{
    CPUArchState *env = cpu->env_ptr;
    TranslationBlock *tb;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    tcg_insn_unit *gen_code_buf;
    int gen_code_size, search_size;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif
    assert_memory_lock();

    phys_pc = get_page_addr_code(env, pc);
    if (use_icount && !(cflags & CF_IGNORE_ICOUNT)) {
        cflags |= CF_USE_ICOUNT;
    }

    tb = tb_alloc(pc);
    if (unlikely(!tb)) {
 buffer_overflow:
        /* flush must be done */
        tb_flush(cpu);
        mmap_unlock();
        cpu_loop_exit(cpu);
    }

    gen_code_buf = tcg_ctx.code_gen_ptr;
    tb->tc_ptr = gen_code_buf;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;

#ifdef CONFIG_PROFILER
    tcg_ctx.tb_count1++; /* includes aborted translations because of
                            exceptions */
    ti = profile_getclock();
#endif

    tcg_func_start(&tcg_ctx);

    tcg_ctx.cpu = ENV_GET_CPU(env);
    gen_intermediate_code(env, tb);
    tcg_ctx.cpu = NULL;

    trace_translate_block(tb, tb->pc, tb->tc_ptr);

    /* generate machine code */
    tb->jmp_reset_offset[0] = TB_JMP_RESET_OFFSET_INVALID;
    tb->jmp_reset_offset[1] = TB_JMP_RESET_OFFSET_INVALID;
    tcg_ctx.tb_jmp_reset_offset = tb->jmp_reset_offset;
#ifdef USE_DIRECT_JUMP
    tcg_ctx.tb_jmp_insn_offset = tb->jmp_insn_offset;
    tcg_ctx.tb_jmp_target_addr = NULL;
#else
    tcg_ctx.tb_jmp_insn_offset = NULL;
    tcg_ctx.tb_jmp_target_addr = tb->jmp_target_addr;
#endif

#ifdef CONFIG_PROFILER
    tcg_ctx.tb_count++;
    tcg_ctx.interm_time += profile_getclock() - ti;
    tcg_ctx.code_time -= profile_getclock();
#endif

    /* ??? Overflow could be handled better here.  In particular, we
       don't need to re-do gen_intermediate_code, nor should we re-do
       the tcg optimization currently hidden inside tcg_gen_code.  All
       that should be required is to flush the TBs, allocate a new TB,
       re-initialize it per above, and re-do the actual code generation.  */
    gen_code_size = tcg_gen_code(&tcg_ctx, tb);
    if (unlikely(gen_code_size < 0)) {
        goto buffer_overflow;
    }
    search_size = encode_search(tb, (void *)gen_code_buf + gen_code_size);
    if (unlikely(search_size < 0)) {
        goto buffer_overflow;
    }

#ifdef CONFIG_PROFILER
    tcg_ctx.code_time += profile_getclock();
    tcg_ctx.code_in_len += tb->size;
    tcg_ctx.code_out_len += gen_code_size;
    tcg_ctx.search_out_len += search_size;
#endif

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM) &&
        qemu_log_in_addr_range(tb->pc)) {
        qemu_log("OUT: [size=%d]\n", gen_code_size);
        log_disas(tb->tc_ptr, gen_code_size);
        qemu_log("\n");
        qemu_log_flush();
    }
#endif

    tcg_ctx.code_gen_ptr = (void *)
        ROUND_UP((uintptr_t)gen_code_buf + gen_code_size + search_size,
                 CODE_GEN_ALIGN);

    /* init jump list */
    assert(((uintptr_t)tb & 3) == 0);
    tb->jmp_list_first = (uintptr_t)tb | 2;
    tb->jmp_list_next[0] = (uintptr_t)NULL;
    tb->jmp_list_next[1] = (uintptr_t)NULL;

    /* init original jump addresses which have been set during tcg_gen_code() */
    if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
        tb_reset_jump(tb, 0);
    }
    if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
        tb_reset_jump(tb, 1);
    }

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    /* As long as consistency of the TB stuff is provided by tb_lock in user
     * mode and is implicit in single-threaded softmmu emulation, no explicit
     * memory barrier is required before tb_link_page() makes the TB visible
     * through the physical hash table and physical page list.
     */
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}

/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[. NOTE: start and end may refer to *different* physical pages.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 *
 * Called with mmap_lock held for user-mode emulation, grabs tb_lock
 * Called with tb_lock held for system-mode emulation
 */
static void tb_invalidate_phys_range_1(tb_page_addr_t start, tb_page_addr_t end)
{
    while (start < end) {
        tb_invalidate_phys_page_range(start, end, 0);
        start &= TARGET_PAGE_MASK;
        start += TARGET_PAGE_SIZE;
    }
}

#ifdef CONFIG_SOFTMMU
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
{
    assert_tb_lock();
    tb_invalidate_phys_range_1(start, end);
}
#else
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
{
    assert_memory_lock();
    tb_lock();
    tb_invalidate_phys_range_1(start, end);
    tb_unlock();
}
#endif

/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[. NOTE: start and end must refer to the *same* physical page.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 *
 * Called with tb_lock/mmap_lock held for user-mode emulation
 * Called with tb_lock held for system-mode emulation
 */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next;
#if defined(TARGET_HAS_PRECISE_SMC)
    CPUState *cpu = current_cpu;
    CPUArchState *env = NULL;
#endif
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    uint32_t current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    assert_memory_lock();
    assert_tb_lock();

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }
#if defined(TARGET_HAS_PRECISE_SMC)
    if (cpu != NULL) {
        env = cpu->env_ptr;
    }
#endif

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all
       the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (cpu->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(cpu->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state_from_tb(cpu, current_tb, cpu->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            tb_phys_invalidate(tb, -1);
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        tlb_unprotect_code(start);
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1);
        cpu_loop_exit_noexc(cpu);
    }
#endif
}

#ifdef CONFIG_SOFTMMU
/* len must be <= 8 and start must be a multiple of len.
 * Called via softmmu_template.h when code areas are written to with
 * iothread mutex not held.
 */
void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;

#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                  cpu_single_env->mem_io_vaddr, len,
                  cpu_single_env->eip,
                  cpu_single_env->eip +
                  (intptr_t)cpu_single_env->segs[R_CS].base);
    }
#endif
    assert_memory_lock();

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD) {
        /* build code bitmap.  FIXME: writes should be protected by
         * tb_lock, reads by tb_lock or RCU.
         */
        build_page_bitmap(p);
    }
    if (p->code_bitmap) {
        unsigned int nr;
        unsigned long b;

        nr = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG - 1));
        if (b & ((1 << len) - 1)) {
            goto do_invalidate;
        }
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
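
/*
 * Worked example for the bitmap check above: a 4-byte write at page offset
 * 0x123 computes nr = 0x123, shifts the containing bitmap word right by
 * nr % BITS_PER_LONG, and tests the low (1 << 4) - 1 = 0xf bits.  If none
 * of bytes 0x123..0x126 belong to translated code, the invalidation is
 * skipped; otherwise the whole range is invalidated via
 * tb_invalidate_phys_page_range().
 */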
#else
/* Called with mmap_lock held. If pc is not 0 then it indicates the
 * host PC of the faulting store instruction that caused this invalidate.
 * Returns true if the caller needs to abort execution of the current
 * TB (because it was modified by this store and the guest CPU has
 * precise-SMC semantics).
 */
static bool tb_invalidate_phys_page(tb_page_addr_t addr, uintptr_t pc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *cpu = current_cpu;
    CPUArchState *env = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    uint32_t current_flags = 0;
#endif

    assert_memory_lock();

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        return false;
    }

    tb_lock();
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
    if (cpu != NULL) {
        env = cpu->env_ptr;
    }
#endif
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state_from_tb(cpu, current_tb, pc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1);
        /* tb_lock will be reset after cpu_loop_exit_noexc longjmps
         * back into the cpu_exec loop. */
        return true;
    }
#endif
    tb_unlock();

    return false;
}
#endif

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
static TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
{
    int m_min, m_max, m;
    uintptr_t v;
    TranslationBlock *tb;

    if (tcg_ctx.tb_ctx.nb_tbs <= 0) {
        return NULL;
    }
    if (tc_ptr < (uintptr_t)tcg_ctx.code_gen_buffer ||
        tc_ptr >= (uintptr_t)tcg_ctx.code_gen_ptr) {
        return NULL;
    }
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = tcg_ctx.tb_ctx.nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tcg_ctx.tb_ctx.tbs[m];
        v = (uintptr_t)tb->tc_ptr;
        if (v == tc_ptr) {
            return tb;
        } else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tcg_ctx.tb_ctx.tbs[m_max];
}

#if !defined(CONFIG_USER_ONLY)
void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr)
{
    ram_addr_t ram_addr;
    MemoryRegion *mr;
    hwaddr l = 1;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr, &l, false);
    if (!(memory_region_is_ram(mr)
          || memory_region_is_romd(mr))) {
        rcu_read_unlock();
        return;
    }
    ram_addr = memory_region_get_ram_addr(mr) + addr;
    tb_lock();
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
    tb_unlock();
    rcu_read_unlock();
}
#endif /* !defined(CONFIG_USER_ONLY) */

/* Called with tb_lock held.  */
void tb_check_watchpoint(CPUState *cpu)
{
    TranslationBlock *tb;

    tb = tb_find_pc(cpu->mem_io_pc);
    if (tb) {
        /* We can use retranslation to find the PC.  */
        cpu_restore_state_from_tb(cpu, tb, cpu->mem_io_pc);
        tb_phys_invalidate(tb, -1);
    } else {
        /* The exception probably happened in a helper.  The CPU state should
           have been saved before calling it. Fetch the PC from there.  */
        CPUArchState *env = cpu->env_ptr;
        target_ulong pc, cs_base;
        tb_page_addr_t addr;
        uint32_t flags;

        cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
        addr = get_page_addr_code(env, pc);
        tb_invalidate_phys_range(addr, addr + 1);
    }
}

#ifndef CONFIG_USER_ONLY
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
{
#if defined(TARGET_MIPS) || defined(TARGET_SH4)
    CPUArchState *env = cpu->env_ptr;
#endif
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint32_t flags;

    tb_lock();
    tb = tb_find_pc(retaddr);
    if (!tb) {
        cpu_abort(cpu, "cpu_io_recompile: could not find TB for pc=%p",
                  (void *)retaddr);
    }
    n = cpu->icount_decr.u16.low + tb->icount;
    cpu_restore_state_from_tb(cpu, tb, retaddr);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - cpu->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= (env->hflags & MIPS_HFLAG_B16 ? 2 : 4);
        cpu->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
            && n > 1) {
        env->pc -= 2;
        cpu->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK) {
        cpu_abort(cpu, "TB too big during recompile");
    }

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    if (tb->cflags & CF_NOCACHE) {
        if (tb->orig_tb) {
            /* Invalidate original TB if this TB was generated in
             * cpu_exec_nocache() */
            tb_phys_invalidate(tb->orig_tb, -1);
        }
        tb_free(tb);
    }
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(cpu, pc, cs_base, flags, cflags);

    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
     * the first in the TB) then we end up generating a whole new TB and
     * repeating the fault, which is horribly inefficient.
     * Better would be to execute just this insn uncached, or generate a
     * second new TB.
     *
     * cpu_loop_exit_noexc will longjmp back to cpu_exec where the
     * tb_lock gets reset.
     */
    cpu_loop_exit_noexc(cpu);
}

void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset(&cpu->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset(&cpu->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}

static void print_qht_statistics(FILE *f, fprintf_function cpu_fprintf,
                                 struct qht_stats hst)
{
    uint32_t hgram_opts;
    size_t hgram_bins;
    char *hgram;

    if (!hst.head_buckets) {
        return;
    }
    cpu_fprintf(f, "TB hash buckets     %zu/%zu (%0.2f%% head buckets used)\n",
                hst.used_head_buckets, hst.head_buckets,
                (double)hst.used_head_buckets / hst.head_buckets * 100);

    hgram_opts =  QDIST_PR_BORDER | QDIST_PR_LABELS;
    hgram_opts |= QDIST_PR_100X   | QDIST_PR_PERCENT;
    if (qdist_xmax(&hst.occupancy) - qdist_xmin(&hst.occupancy) == 1) {
        hgram_opts |= QDIST_PR_NODECIMAL;
    }
    hgram = qdist_pr(&hst.occupancy, 10, hgram_opts);
    cpu_fprintf(f, "TB hash occupancy   %0.2f%% avg chain occ. Histogram: %s\n",
                qdist_avg(&hst.occupancy) * 100, hgram);
    g_free(hgram);

    hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS;
    hgram_bins = qdist_xmax(&hst.chain) - qdist_xmin(&hst.chain);
    if (hgram_bins > 10) {
        hgram_bins = 10;
    } else {
        hgram_bins = 0;
        hgram_opts |= QDIST_PR_NODECIMAL | QDIST_PR_NOBINRANGE;
    }
    hgram = qdist_pr(&hst.chain, hgram_bins, hgram_opts);
    cpu_fprintf(f, "TB hash avg chain   %0.3f buckets. Histogram: %s\n",
                qdist_avg(&hst.chain), hgram);
    g_free(hgram);
}

void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;
    struct qht_stats hst;

    tb_lock();

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for (i = 0; i < tcg_ctx.tb_ctx.nb_tbs; i++) {
        tb = &tcg_ctx.tb_ctx.tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size) {
            max_target_code_size = tb->size;
        }
        if (tb->page_addr[1] != -1) {
            cross_page++;
        }
        if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
            direct_jmp_count++;
            if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %td/%zd\n",
                tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer,
                tcg_ctx.code_gen_highwater - tcg_ctx.code_gen_buffer);
    cpu_fprintf(f, "TB count            %d/%d\n",
                tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                tcg_ctx.tb_ctx.nb_tbs ? target_code_size /
                        tcg_ctx.tb_ctx.nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %td bytes (expansion ratio: %0.1f)\n",
                tcg_ctx.tb_ctx.nb_tbs ? (tcg_ctx.code_gen_ptr -
                                         tcg_ctx.code_gen_buffer) /
                                         tcg_ctx.tb_ctx.nb_tbs : 0,
                target_code_size ? (double) (tcg_ctx.code_gen_ptr -
                                             tcg_ctx.code_gen_buffer) /
                                             target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n", cross_page,
                tcg_ctx.tb_ctx.nb_tbs ? (cross_page * 100) /
                                        tcg_ctx.tb_ctx.nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp_count * 100) /
                        tcg_ctx.tb_ctx.nb_tbs : 0,
                direct_jmp2_count,
                tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp2_count * 100) /
                        tcg_ctx.tb_ctx.nb_tbs : 0);

    qht_statistics_init(&tcg_ctx.tb_ctx.htable, &hst);
    print_qht_statistics(f, cpu_fprintf, hst);
    qht_statistics_destroy(&hst);

    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %u\n",
                atomic_read(&tcg_ctx.tb_ctx.tb_flush_count));
    cpu_fprintf(f, "TB invalidate count %d\n",
                tcg_ctx.tb_ctx.tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);

    tb_unlock();
}

void dump_opcount_info(FILE *f, fprintf_function cpu_fprintf)
{
    tcg_dump_op_count(f, cpu_fprintf);
}

#else /* CONFIG_USER_ONLY */

void cpu_interrupt(CPUState *cpu, int mask)
{
    cpu->interrupt_request |= mask;
    cpu->tcg_exit_req = 1;
}

/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */
struct walk_memory_regions_data {
    walk_memory_regions_fn fn;
    void *priv;
    target_ulong start;
    int prot;
};

static int walk_memory_regions_end(struct walk_memory_regions_data *data,
                                   target_ulong end, int new_prot)
{
    if (data->start != -1u) {
        int rc = data->fn(data->priv, data->start, end, data->prot);
        if (rc != 0) {
            return rc;
        }
    }

    data->start = (new_prot ? end : -1u);
    data->prot = new_prot;

    return 0;
}

static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 target_ulong base, int level, void **lp)
{
    target_ulong pa;
    int i, rc;

    if (*lp == NULL) {
        return walk_memory_regions_end(data, base, 0);
    }

    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            if (prot != data->prot) {
                rc = walk_memory_regions_end(data, pa, prot);
                if (rc != 0) {
                    return rc;
                }
            }
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            pa = base | ((target_ulong)i <<
                (TARGET_PAGE_BITS + V_L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
            if (rc != 0) {
                return rc;
            }
        }
    }

    return 0;
}

int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    struct walk_memory_regions_data data;
    uintptr_t i, l1_sz = v_l1_size;

    data.fn = fn;
    data.priv = priv;
    data.start = -1u;
    data.prot = 0;

    for (i = 0; i < l1_sz; i++) {
        target_ulong base = i << (v_l1_shift + TARGET_PAGE_BITS);
        int rc = walk_memory_regions_1(&data, base, v_l2_levels, l1_map + i);
        if (rc != 0) {
            return rc;
        }
    }

    return walk_memory_regions_end(&data, 0, 0);
}

static int dump_region(void *priv, target_ulong start,
    target_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    (void) fprintf(f, TARGET_FMT_lx"-"TARGET_FMT_lx
        " "TARGET_FMT_lx" %c%c%c\n",
        start, end, end - start,
        ((prot & PAGE_READ) ? 'r' : '-'),
        ((prot & PAGE_WRITE) ? 'w' : '-'),
        ((prot & PAGE_EXEC) ? 'x' : '-'));

    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    const int length = sizeof(target_ulong) * 2;
    (void) fprintf(f, "%-*s %-*s %-*s %s\n",
            length, "start", length, "end", length, "size", "prot");
    walk_memory_regions(f, dump_region);
}

int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        return 0;
    }
    return p->flags;
}

/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is positioned automatically depending
   on PAGE_WRITE.  The mmap_lock should already be held.  */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
    assert(start < end);
    assert_memory_lock();

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        flags |= PAGE_WRITE_ORG;
    }

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate
           the code inside.  */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0);
        }
        p->flags = flags;
    }
}
, target_ulong len
, int flags
)
2128 /* This function should never be called with addresses outside the
2129 guest address space. If this assert fires, it probably indicates
2130 a missing call to h2g_valid. */
2131 #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2132 assert(start
< ((target_ulong
)1 << L1_MAP_ADDR_SPACE_BITS
));
2138 if (start
+ len
- 1 < start
) {
2139 /* We've wrapped around. */
2143 /* must do before we loose bits in the next step */
2144 end
= TARGET_PAGE_ALIGN(start
+ len
);
2145 start
= start
& TARGET_PAGE_MASK
;
2147 for (addr
= start
, len
= end
- start
;
2149 len
-= TARGET_PAGE_SIZE
, addr
+= TARGET_PAGE_SIZE
) {
2150 p
= page_find(addr
>> TARGET_PAGE_BITS
);
2154 if (!(p
->flags
& PAGE_VALID
)) {
2158 if ((flags
& PAGE_READ
) && !(p
->flags
& PAGE_READ
)) {
2161 if (flags
& PAGE_WRITE
) {
2162 if (!(p
->flags
& PAGE_WRITE_ORG
)) {
2165 /* unprotect the page if it was put read-only because it
2166 contains translated code */
2167 if (!(p
->flags
& PAGE_WRITE
)) {
2168 if (!page_unprotect(addr
, 0)) {

/* called from signal handler: invalidate the code and unprotect the
 * page. Return 0 if the fault was not handled, 1 if it was handled,
 * and 2 if it was handled but the caller must cause the TB to be
 * immediately exited. (We can only return 2 if the 'pc' argument is
 * nonzero.)
 */
int page_unprotect(target_ulong address, uintptr_t pc)
{
    unsigned int prot;
    bool current_tb_invalidated;
    PageDesc *p;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
    mmap_lock();

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        mmap_unlock();
        return 0;
    }

    /* if the page was really writable, then we change its
       protection back to writable */
    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
        host_start = address & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;

        prot = 0;
        current_tb_invalidated = false;
        for (addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
            p = page_find(addr >> TARGET_PAGE_BITS);
            p->flags |= PAGE_WRITE;
            prot |= p->flags;

            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            current_tb_invalidated |= tb_invalidate_phys_page(addr, pc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(addr);
#endif
        }
        mprotect((void *)g2h(host_start), qemu_host_page_size,
                 prot & PAGE_BITS);

        mmap_unlock();
        /* If current TB was invalidated return to main loop */
        return current_tb_invalidated ? 2 : 1;
    }
    mmap_unlock();
    return 0;
}
#endif /* CONFIG_USER_ONLY */