/*
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"

#include "qemu-common.h"
#define NO_CPU_IO_DEFS
#include "disas/disas.h"
#include "exec/exec-all.h"

#if defined(CONFIG_USER_ONLY)
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <machine/profile.h>
#endif
#endif
#else
#include "exec/address-spaces.h"
#endif

#include "exec/cputlb.h"
#include "exec/tb-hash.h"
#include "translate-all.h"
#include "qemu/bitmap.h"
#include "qemu/timer.h"

//#define DEBUG_TB_INVALIDATE
/* make various TB consistency checks */
//#define DEBUG_TB_CHECK

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10
typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
#ifdef CONFIG_SOFTMMU
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    unsigned long *code_bitmap;
#else
    unsigned long flags;
#endif
} PageDesc;
/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define V_L2_BITS 10
#define V_L2_SIZE (1 << V_L2_BITS)

/* The bits remaining after N lower levels of page tables.  */
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % V_L2_BITS)

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + V_L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
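
/* Worked example (illustration only; the real values depend on the
 * configured host and target): with TARGET_PAGE_BITS == 12,
 * V_L2_BITS == 10 and L1_MAP_ADDR_SPACE_BITS == 64, the remainder is
 * (64 - 12) % 10 == 2, which is < 4, so V_L1_BITS == 12 and
 * V_L1_SHIFT == 64 - 12 - 12 == 40.  The radix tree then has a
 * 4096-entry root (l1_map) followed by four 1024-entry levels,
 * covering 12 + 4 * 10 + 12 == 64 address bits.
 */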
uintptr_t qemu_host_page_size;
intptr_t qemu_host_page_mask;

/* The bottom level has pointers to PageDesc */
static void *l1_map[V_L1_SIZE];

/* code generation context */
TCGContext tcg_ctx;

/* translation block context */
#ifdef CONFIG_USER_ONLY
__thread int have_tb_lock;
#endif
void tb_lock(void)
{
#ifdef CONFIG_USER_ONLY
    assert(!have_tb_lock);
    qemu_mutex_lock(&tcg_ctx.tb_ctx.tb_lock);
    have_tb_lock++;
#endif
}

void tb_unlock(void)
{
#ifdef CONFIG_USER_ONLY
    assert(have_tb_lock);
    have_tb_lock--;
    qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
#endif
}

void tb_lock_reset(void)
{
#ifdef CONFIG_USER_ONLY
    if (have_tb_lock) {
        qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
        have_tb_lock = 0;
    }
#endif
}

static TranslationBlock *tb_find_pc(uintptr_t tc_ptr);

void cpu_gen_init(void)
{
    tcg_context_init(&tcg_ctx);
}
/* Encode VAL as a signed leb128 sequence at P.
   Return P incremented past the encoded value.  */
static uint8_t *encode_sleb128(uint8_t *p, target_long val)
{
    int more, byte;

    do {
        byte = val & 0x7f;
        val >>= 7;
        more = !((val == 0 && (byte & 0x40) == 0)
                 || (val == -1 && (byte & 0x40) != 0));
        if (more) {
            byte |= 0x80;
        }
        *p++ = byte;
    } while (more);

    return p;
}

/* Decode a signed leb128 sequence at *PP; increment *PP past the
   decoded value.  Return the decoded value.  */
static target_long decode_sleb128(uint8_t **pp)
{
    uint8_t *p = *pp;
    target_long val = 0;
    int byte, shift = 0;

    do {
        byte = *p++;
        val |= (target_ulong)(byte & 0x7f) << shift;
        shift += 7;
    } while (byte & 0x80);
    if (shift < TARGET_LONG_BITS && (byte & 0x40)) {
        val |= -(target_ulong)1 << shift;
    }

    *pp = p;
    return val;
}
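
/* Illustrative example of the sleb128 format used above: a delta of -2
 * encodes as the single byte 0x7e (bit 6 is set, so the decoder
 * sign-extends it back to -2), while a delta of 200 needs two bytes,
 * 0xc8 0x01: 0xc8 carries the low seven bits (0x48) with the
 * continuation bit set, and 0x01 supplies bit 7 (0x48 | 0x80 == 200).
 */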
/* Encode the data collected about the instructions while compiling TB.
   Place the data at BLOCK, and return the number of bytes consumed.

   The logical table consists of TARGET_INSN_START_WORDS target_ulong's,
   which come from the target's insn_start data, followed by a uintptr_t
   which comes from the host pc of the end of the code implementing the insn.

   Each line of the table is encoded as sleb128 deltas from the previous
   line.  The seed for the first line is { tb->pc, 0..., tb->tc_ptr }.
   That is, the first column is seeded with the guest pc, the last column
   with the host pc, and the middle columns with zeros.  */

static int encode_search(TranslationBlock *tb, uint8_t *block)
{
    uint8_t *highwater = tcg_ctx.code_gen_highwater;
    uint8_t *p = block;
    int i, j, n;

    tb->tc_search = block;

    for (i = 0, n = tb->icount; i < n; ++i) {
        target_ulong prev;

        for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
            if (i == 0) {
                prev = (j == 0 ? tb->pc : 0);
            } else {
                prev = tcg_ctx.gen_insn_data[i - 1][j];
            }
            p = encode_sleb128(p, tcg_ctx.gen_insn_data[i][j] - prev);
        }
        prev = (i == 0 ? 0 : tcg_ctx.gen_insn_end_off[i - 1]);
        p = encode_sleb128(p, tcg_ctx.gen_insn_end_off[i] - prev);

        /* Test for (pending) buffer overflow.  The assumption is that any
           one row beginning below the high water mark cannot overrun
           the buffer completely.  Thus we can test for overflow after
           encoding a row without having to check during encoding.  */
        if (unlikely(p > highwater)) {
            return -1;
        }
    }

    return p - block;
}
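
/* For illustration (hypothetical numbers): a two-insn TB at guest pc 0x1000,
 * whose generated code for the insns ends at host offsets 40 and 72 and whose
 * second insn starts at 0x1004, has the rows { 0x1000, ..., 40 } and
 * { 0x1004, ..., 72 }.  Against the seeds described above these are stored
 * as the sleb128 deltas { 0, ..., 40 } and { 4, ..., 32 }.
 */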
/* The cpu state corresponding to 'searched_pc' is restored.  */
static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
                                     uintptr_t searched_pc)
{
    target_ulong data[TARGET_INSN_START_WORDS] = { tb->pc };
    uintptr_t host_pc = (uintptr_t)tb->tc_ptr;
    CPUArchState *env = cpu->env_ptr;
    uint8_t *p = tb->tc_search;
    int i, j, num_insns = tb->icount;
#ifdef CONFIG_PROFILER
    int64_t ti = profile_getclock();
#endif

    searched_pc -= GETPC_ADJ;

    if (searched_pc < host_pc) {
        return -1;
    }

    /* Reconstruct the stored insn data while looking for the point at
       which the end of the insn exceeds the searched_pc.  */
    for (i = 0; i < num_insns; ++i) {
        for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
            data[j] += decode_sleb128(&p);
        }
        host_pc += decode_sleb128(&p);
        if (host_pc > searched_pc) {
            goto found;
        }
    }
    return -1;

 found:
    if (tb->cflags & CF_USE_ICOUNT) {
        assert(use_icount);
        /* Reset the cycle counter to the start of the block.  */
        cpu->icount_decr.u16.low += num_insns;
        /* Clear the IO flag.  */
        cpu->can_do_io = 0;
    }
    cpu->icount_decr.u16.low -= i;
    restore_state_to_opc(env, tb, data);

#ifdef CONFIG_PROFILER
    tcg_ctx.restore_time += profile_getclock() - ti;
    tcg_ctx.restore_count++;
#endif
    return 0;
}
bool cpu_restore_state(CPUState *cpu, uintptr_t retaddr)
{
    TranslationBlock *tb;

    tb = tb_find_pc(retaddr);
    if (tb) {
        cpu_restore_state_from_tb(cpu, tb, retaddr);
        if (tb->cflags & CF_NOCACHE) {
            /* one-shot translation, invalidate it immediately */
            tb_phys_invalidate(tb, -1);
            tb_free(tb);
        }
        return true;
    }
    return false;
}
void page_size_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
    qemu_real_host_page_size = getpagesize();
    qemu_real_host_page_mask = -(intptr_t)qemu_real_host_page_size;
    if (qemu_host_page_size == 0) {
        qemu_host_page_size = qemu_real_host_page_size;
    }
    if (qemu_host_page_size < TARGET_PAGE_SIZE) {
        qemu_host_page_size = TARGET_PAGE_SIZE;
    }
    qemu_host_page_mask = -(intptr_t)qemu_host_page_size;
}
static void page_init(void)
{
    page_size_init();

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}
/* Called with mmap_lock held for user-mode emulation.  */
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / V_L2_BITS - 1; i > 0; i--) {
        void **p = atomic_rcu_read(lp);

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            p = g_new0(void *, V_L2_SIZE);
            atomic_rcu_set(lp, p);
        }

        lp = p + ((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1));
    }

    pd = atomic_rcu_read(lp);
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        pd = g_new0(PageDesc, V_L2_SIZE);
        atomic_rcu_set(lp, pd);
    }

    return pd + (index & (V_L2_SIZE - 1));
}

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}
#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc will be used. */
/* ??? 64-bit hosts ought to have no problem mmaping data outside the
   region in which the guest needs to run.  Revisit this.  */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

/* Minimum size of the code gen buffer.  This number is randomly chosen,
   but not so small that we can't have a fair number of TB's live.  */
#define MIN_CODE_GEN_BUFFER_SIZE     (1024u * 1024)

/* Maximum size of the code gen buffer we'd like to use.  Unless otherwise
   indicated, this is constrained by the range of direct branches on the
   host cpu, as used by the TCG implementation of goto_tb.  */
#if defined(__x86_64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__sparc__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__powerpc64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__powerpc__)
# define MAX_CODE_GEN_BUFFER_SIZE  (32u * 1024 * 1024)
#elif defined(__aarch64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (128ul * 1024 * 1024)
#elif defined(__arm__)
# define MAX_CODE_GEN_BUFFER_SIZE  (16u * 1024 * 1024)
#elif defined(__s390x__)
  /* We have a +- 4GB range on the branches; leave some slop.  */
# define MAX_CODE_GEN_BUFFER_SIZE  (3ul * 1024 * 1024 * 1024)
#elif defined(__mips__)
  /* We have a 256MB branch region, but leave room to make sure the
     main executable is also within that region.  */
# define MAX_CODE_GEN_BUFFER_SIZE  (128ul * 1024 * 1024)
#else
# define MAX_CODE_GEN_BUFFER_SIZE  ((size_t)-1)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32u * 1024 * 1024)

#define DEFAULT_CODE_GEN_BUFFER_SIZE \
  (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
   ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)
static inline size_t size_code_gen_buffer(size_t tb_size)
{
    /* Size the buffer.  */
    if (tb_size == 0) {
#ifdef USE_STATIC_CODE_GEN_BUFFER
        tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* ??? Needs adjustments.  */
        /* ??? If we relax the requirement that CONFIG_USER_ONLY use the
           static buffer, we could size this on RESERVED_VA, on the text
           segment size of the executable, or continue to use the default.  */
        tb_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
        tb_size = MIN_CODE_GEN_BUFFER_SIZE;
    }
    if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
        tb_size = MAX_CODE_GEN_BUFFER_SIZE;
    }
    return tb_size;
}
#ifdef __mips__
/* In order to use J and JAL within the code_gen_buffer, we require
   that the buffer not cross a 256MB boundary.  */
static inline bool cross_256mb(void *addr, size_t size)
{
    return ((uintptr_t)addr ^ ((uintptr_t)addr + size)) & ~0x0ffffffful;
}
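
/* For illustration: a buffer at 0x0ff00000 of size 0x00200000 ends at
 * 0x10100000; the XOR of start and end is 0x1fe00000, and masking with
 * ~0x0fffffff leaves 0x10000000, so the buffer crosses a 256MB boundary.
 * Placing the same buffer at 0x10000000 instead leaves 0 after masking.
 */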
/* We weren't able to allocate a buffer without crossing that boundary,
   so make do with the larger portion of the buffer that doesn't cross.
   Returns the new base of the buffer, and adjusts code_gen_buffer_size.  */
static inline void *split_cross_256mb(void *buf1, size_t size1)
{
    void *buf2 = (void *)(((uintptr_t)buf1 + size1) & ~0x0ffffffful);
    size_t size2 = buf1 + size1 - buf2;

    size1 = buf2 - buf1;
    if (size1 < size2) {
        size1 = size2;
        buf1 = buf2;
    }

    tcg_ctx.code_gen_buffer_size = size1;
    return buf1;
}
#endif
#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
    __attribute__((aligned(CODE_GEN_ALIGN)));

# ifdef _WIN32
static inline void do_protect(void *addr, long size, int prot)
{
    DWORD old_protect;
    VirtualProtect(addr, size, prot, &old_protect);
}

static inline void map_exec(void *addr, long size)
{
    do_protect(addr, size, PAGE_EXECUTE_READWRITE);
}

static inline void map_none(void *addr, long size)
{
    do_protect(addr, size, PAGE_NOACCESS);
}
# else
static inline void do_protect(void *addr, long size, int prot)
{
    uintptr_t start, end;

    start = (uintptr_t)addr;
    start &= qemu_real_host_page_mask;

    end = (uintptr_t)addr + size;
    end = ROUND_UP(end, qemu_real_host_page_size);

    mprotect((void *)start, end - start, prot);
}

static inline void map_exec(void *addr, long size)
{
    do_protect(addr, size, PROT_READ | PROT_WRITE | PROT_EXEC);
}

static inline void map_none(void *addr, long size)
{
    do_protect(addr, size, PROT_NONE);
}
# endif
static inline void *alloc_code_gen_buffer(void)
{
    void *buf = static_code_gen_buffer;
    size_t full_size, size;

    /* The size of the buffer, rounded down to end on a page boundary.  */
    full_size = (((uintptr_t)buf + sizeof(static_code_gen_buffer))
                 & qemu_real_host_page_mask) - (uintptr_t)buf;

    /* Reserve a guard page.  */
    size = full_size - qemu_real_host_page_size;

    /* Honor a command-line option limiting the size of the buffer.  */
    if (size > tcg_ctx.code_gen_buffer_size) {
        size = (((uintptr_t)buf + tcg_ctx.code_gen_buffer_size)
                & qemu_real_host_page_mask) - (uintptr_t)buf;
    }
    tcg_ctx.code_gen_buffer_size = size;

#ifdef __mips__
    if (cross_256mb(buf, size)) {
        buf = split_cross_256mb(buf, size);
        size = tcg_ctx.code_gen_buffer_size;
    }
#endif

    map_exec(buf, size);
    map_none(buf + size, qemu_real_host_page_size);
    qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);

    return buf;
}
#elif defined(_WIN32)
static inline void *alloc_code_gen_buffer(void)
{
    size_t size = tcg_ctx.code_gen_buffer_size;
    void *buf1, *buf2;

    /* Perform the allocation in two steps, so that the guard page
       is reserved but uncommitted.  */
    buf1 = VirtualAlloc(NULL, size + qemu_real_host_page_size,
                        MEM_RESERVE, PAGE_NOACCESS);
    if (buf1 != NULL) {
        buf2 = VirtualAlloc(buf1, size, MEM_COMMIT, PAGE_EXECUTE_READWRITE);
        assert(buf1 == buf2);
    }

    return buf1;
}
#else
static inline void *alloc_code_gen_buffer(void)
{
    int flags = MAP_PRIVATE | MAP_ANONYMOUS;
    uintptr_t start = 0;
    size_t size = tcg_ctx.code_gen_buffer_size;
    void *buf;

    /* Constrain the position of the buffer based on the host cpu.
       Note that these addresses are chosen in concert with the
       addresses assigned in the relevant linker script file.  */
# if defined(__PIE__) || defined(__PIC__)
    /* Don't bother setting a preferred location if we're building
       a position-independent executable.  We're more likely to get
       an address near the main executable if we let the kernel
       choose the address.  */
# elif defined(__x86_64__) && defined(MAP_32BIT)
    /* Force the memory down into low memory with the executable.
       Leave the choice of exact location with the kernel.  */
    flags |= MAP_32BIT;
    /* Cannot expect to map more than 800MB in low memory.  */
    if (size > 800u * 1024 * 1024) {
        tcg_ctx.code_gen_buffer_size = size = 800u * 1024 * 1024;
    }
# elif defined(__sparc__)
    start = 0x40000000ul;
# elif defined(__s390x__)
    start = 0x90000000ul;
# elif defined(__mips__)
#  if _MIPS_SIM == _ABI64
    start = 0x128000000ul;
#  else
    start = 0x08000000ul;
#  endif
# endif

    buf = mmap((void *)start, size + qemu_real_host_page_size,
               PROT_NONE, flags, -1, 0);
    if (buf == MAP_FAILED) {
        return NULL;
    }

#ifdef __mips__
    if (cross_256mb(buf, size)) {
        /* Try again, with the original still mapped, to avoid re-acquiring
           that 256mb crossing.  This time don't specify an address.  */
        size_t size2;
        void *buf2 = mmap(NULL, size + qemu_real_host_page_size,
                          PROT_NONE, flags, -1, 0);
        switch (buf2 != MAP_FAILED) {
        case 1:
            if (!cross_256mb(buf2, size)) {
                /* Success!  Use the new buffer.  */
                munmap(buf, size + qemu_real_host_page_size);
                break;
            }
            /* Failure.  Work with what we had.  */
            munmap(buf2, size + qemu_real_host_page_size);
            /* fallthru */
        default:
            /* Split the original buffer.  Free the smaller half.  */
            buf2 = split_cross_256mb(buf, size);
            size2 = tcg_ctx.code_gen_buffer_size;
            if (buf == buf2) {
                munmap(buf + size2 + qemu_real_host_page_size, size - size2);
            } else {
                munmap(buf, size - size2);
            }
            size = size2;
            break;
        }
        buf = buf2;
    }
#endif

    /* Make the final buffer accessible.  The guard page at the end
       will remain inaccessible with PROT_NONE.  */
    mprotect(buf, size, PROT_WRITE | PROT_READ | PROT_EXEC);

    /* Request large pages for the buffer.  */
    qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);

    return buf;
}
#endif /* USE_STATIC_CODE_GEN_BUFFER, WIN32, POSIX */
static inline void code_gen_alloc(size_t tb_size)
{
    tcg_ctx.code_gen_buffer_size = size_code_gen_buffer(tb_size);
    tcg_ctx.code_gen_buffer = alloc_code_gen_buffer();
    if (tcg_ctx.code_gen_buffer == NULL) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
        exit(1);
    }

    /* Estimate a good size for the number of TBs we can support.  We
       still haven't deducted the prologue from the buffer size here,
       but that's minimal and won't affect the estimate much.  */
    tcg_ctx.code_gen_max_blocks
        = tcg_ctx.code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tcg_ctx.tb_ctx.tbs = g_new(TranslationBlock, tcg_ctx.code_gen_max_blocks);

    qemu_mutex_init(&tcg_ctx.tb_ctx.tb_lock);
}

static void tb_htable_init(void)
{
    unsigned int mode = QHT_MODE_AUTO_RESIZE;

    qht_init(&tcg_ctx.tb_ctx.htable, CODE_GEN_HTABLE_SIZE, mode);
}
/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size)
{
    cpu_gen_init();
    page_init();
    tb_htable_init();
    code_gen_alloc(tb_size);
#if defined(CONFIG_SOFTMMU)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(&tcg_ctx);
#endif
}

bool tcg_enabled(void)
{
    return tcg_ctx.code_gen_buffer != NULL;
}
/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (tcg_ctx.tb_ctx.nb_tbs >= tcg_ctx.code_gen_max_blocks) {
        return NULL;
    }
    tb = &tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    tb->invalid = false;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single use temporary TB
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (tcg_ctx.tb_ctx.nb_tbs > 0 &&
            tb == &tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs - 1]) {
        tcg_ctx.code_gen_ptr = tb->tc_ptr;
        tcg_ctx.tb_ctx.nb_tbs--;
    }
}
static inline void invalidate_page_bitmap(PageDesc *p)
{
#ifdef CONFIG_SOFTMMU
    g_free(p->code_bitmap);
    p->code_bitmap = NULL;
    p->code_write_count = 0;
#endif
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */
static void page_flush_tb_1(int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            page_flush_tb_1(level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;

    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / V_L2_BITS - 1, l1_map + i);
    }
}
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *cpu)
{
    if (!tcg_enabled()) {
        return;
    }
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer),
           tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.tb_ctx.nb_tbs > 0 ?
           ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)) /
           tcg_ctx.tb_ctx.nb_tbs : 0);
#endif
    if ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)
        > tcg_ctx.code_gen_buffer_size) {
        cpu_abort(cpu, "Internal error: code buffer overflow\n");
    }

    CPU_FOREACH(cpu) {
        int i;

        for (i = 0; i < TB_JMP_CACHE_SIZE; ++i) {
            atomic_set(&cpu->tb_jmp_cache[i], NULL);
        }
        atomic_mb_set(&cpu->tb_flushed, true);
    }

    tcg_ctx.tb_ctx.nb_tbs = 0;
    qht_reset_size(&tcg_ctx.tb_ctx.htable, CODE_GEN_HTABLE_SIZE);
    page_flush_tb();

    tcg_ctx.code_gen_ptr = tcg_ctx.code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tcg_ctx.tb_ctx.tb_flush_count++;
}
#ifdef DEBUG_TB_CHECK

static void
do_tb_invalidate_check(struct qht *ht, void *p, uint32_t hash, void *userp)
{
    TranslationBlock *tb = p;
    target_ulong addr = *(target_ulong *)userp;

    if (!(addr + TARGET_PAGE_SIZE <= tb->pc || addr >= tb->pc + tb->size)) {
        printf("ERROR invalidate: address=" TARGET_FMT_lx
               " PC=%08lx size=%04x\n", addr, (long)tb->pc, tb->size);
    }
}

static void tb_invalidate_check(target_ulong address)
{
    address &= TARGET_PAGE_MASK;
    qht_iter(&tcg_ctx.tb_ctx.htable, do_tb_invalidate_check, &address);
}

static void
do_tb_page_check(struct qht *ht, void *p, uint32_t hash, void *userp)
{
    TranslationBlock *tb = p;
    int flags1, flags2;

    flags1 = page_get_flags(tb->pc);
    flags2 = page_get_flags(tb->pc + tb->size - 1);
    if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
        printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
               (long)tb->pc, tb->size, flags1, flags2);
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    qht_iter(&tcg_ctx.tb_ctx.htable, do_tb_page_check, NULL);
}

#endif
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for (;;) {
        tb1 = *ptb;
        n1 = (uintptr_t)tb1 & 3;
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

/* remove the TB from a list of TBs jumping to the n-th jump target of the TB */
static inline void tb_remove_from_jmp_list(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1;
    uintptr_t *ptb, ntb;
    unsigned int n1;

    ptb = &tb->jmp_list_next[n];
    if (*ptb) {
        /* find tb(n) in circular list */
        for (;;) {
            ntb = *ptb;
            n1 = ntb & 3;
            tb1 = (TranslationBlock *)(ntb & ~3);
            if (n1 == n && tb1 == tb) {
                break;
            }
            if (n1 == 2) {
                ptb = &tb1->jmp_list_first;
            } else {
                ptb = &tb1->jmp_list_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_list_next[n];

        tb->jmp_list_next[n] = (uintptr_t)NULL;
    }
}
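
/* Note on the encoding used by the jump lists above: each list word is a
 * tagged pointer whose low two bits select which slot of the pointed-to TB
 * carries the next link, 0 or 1 for jmp_list_next[0]/[1], and 2 for the
 * head word jmp_list_first (which is also how tb_jmp_unlink() below detects
 * the end of the chain).
 */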
/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    uintptr_t addr = (uintptr_t)(tb->tc_ptr + tb->jmp_reset_offset[n]);
    tb_set_jmp_target(tb, n, addr);
}

/* remove any jumps to the TB */
static inline void tb_jmp_unlink(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    uintptr_t *ptb, ntb;
    unsigned int n1;

    ptb = &tb->jmp_list_first;
    for (;;) {
        ntb = *ptb;
        n1 = ntb & 3;
        tb1 = (TranslationBlock *)(ntb & ~3);
        if (n1 == 2) {
            break;
        }
        tb_reset_jump(tb1, n1);
        *ptb = tb1->jmp_list_next[n1];
        tb1->jmp_list_next[n1] = (uintptr_t)NULL;
    }
}
/* invalidate one TB */
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *cpu;
    PageDesc *p;
    uint32_t h;
    tb_page_addr_t phys_pc;

    atomic_set(&tb->invalid, true);

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_hash_func(phys_pc, tb->pc, tb->flags);
    qht_remove(&tcg_ctx.tb_ctx.htable, tb, h);

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    CPU_FOREACH(cpu) {
        if (atomic_read(&cpu->tb_jmp_cache[h]) == tb) {
            atomic_set(&cpu->tb_jmp_cache[h], NULL);
        }
    }

    /* suppress this TB from the two jump lists */
    tb_remove_from_jmp_list(tb, 0);
    tb_remove_from_jmp_list(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb_jmp_unlink(tb);

    tcg_ctx.tb_ctx.tb_phys_invalidate_count++;
}
#ifdef CONFIG_SOFTMMU
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = bitmap_new(TARGET_PAGE_SIZE);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE) {
                tb_end = TARGET_PAGE_SIZE;
            }
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        bitmap_set(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
#endif
/* add the tb in the target page and protect it if necessary
 *
 * Called with mmap_lock held for user-mode emulation.
 */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;
#endif

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    page_already_protected = p->first_tb != NULL;
#endif
    p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);
    invalidate_page_bitmap(p);

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for (addr = page_addr; addr < page_addr + qemu_host_page_size;
             addr += TARGET_PAGE_SIZE) {

            p2 = page_find(addr >> TARGET_PAGE_BITS);
            if (!p2) {
                continue;
            }
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);
    }
#endif
}
/* add a new TB and link it to the physical page tables. phys_page2 is
 * (-1) to indicate that only one page contains the TB.
 *
 * Called with mmap_lock held for user-mode emulation.
 */
static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
                         tb_page_addr_t phys_page2)
{
    uint32_t h;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1) {
        tb_alloc_page(tb, 1, phys_page2);
    } else {
        tb->page_addr[1] = -1;
    }

    /* add in the hash table */
    h = tb_hash_func(phys_pc, tb->pc, tb->flags);
    qht_insert(&tcg_ctx.tb_ctx.htable, tb, h);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
}
/* Called with mmap_lock held for user mode emulation.  */
TranslationBlock *tb_gen_code(CPUState *cpu,
                              target_ulong pc, target_ulong cs_base,
                              uint32_t flags, int cflags)
{
    CPUArchState *env = cpu->env_ptr;
    TranslationBlock *tb;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    tcg_insn_unit *gen_code_buf;
    int gen_code_size, search_size;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

    phys_pc = get_page_addr_code(env, pc);
    if (use_icount && !(cflags & CF_IGNORE_ICOUNT)) {
        cflags |= CF_USE_ICOUNT;
    }

    tb = tb_alloc(pc);
    if (unlikely(!tb)) {
 buffer_overflow:
        /* flush must be done */
        tb_flush(cpu);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        assert(tb != NULL);
    }

    gen_code_buf = tcg_ctx.code_gen_ptr;
    tb->tc_ptr = gen_code_buf;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;

#ifdef CONFIG_PROFILER
    tcg_ctx.tb_count1++; /* includes aborted translations because of
                            exceptions */
    ti = profile_getclock();
#endif

    tcg_func_start(&tcg_ctx);

    tcg_ctx.cpu = ENV_GET_CPU(env);
    gen_intermediate_code(env, tb);
    tcg_ctx.cpu = NULL;

    trace_translate_block(tb, tb->pc, tb->tc_ptr);

    /* generate machine code */
    tb->jmp_reset_offset[0] = TB_JMP_RESET_OFFSET_INVALID;
    tb->jmp_reset_offset[1] = TB_JMP_RESET_OFFSET_INVALID;
    tcg_ctx.tb_jmp_reset_offset = tb->jmp_reset_offset;
#ifdef USE_DIRECT_JUMP
    tcg_ctx.tb_jmp_insn_offset = tb->jmp_insn_offset;
    tcg_ctx.tb_jmp_target_addr = NULL;
#else
    tcg_ctx.tb_jmp_insn_offset = NULL;
    tcg_ctx.tb_jmp_target_addr = tb->jmp_target_addr;
#endif

#ifdef CONFIG_PROFILER
    tcg_ctx.tb_count++;
    tcg_ctx.interm_time += profile_getclock() - ti;
    tcg_ctx.code_time -= profile_getclock();
#endif

    /* ??? Overflow could be handled better here.  In particular, we
       don't need to re-do gen_intermediate_code, nor should we re-do
       the tcg optimization currently hidden inside tcg_gen_code.  All
       that should be required is to flush the TBs, allocate a new TB,
       re-initialize it per above, and re-do the actual code generation.  */
    gen_code_size = tcg_gen_code(&tcg_ctx, tb);
    if (unlikely(gen_code_size < 0)) {
        goto buffer_overflow;
    }
    search_size = encode_search(tb, (void *)gen_code_buf + gen_code_size);
    if (unlikely(search_size < 0)) {
        goto buffer_overflow;
    }

#ifdef CONFIG_PROFILER
    tcg_ctx.code_time += profile_getclock();
    tcg_ctx.code_in_len += tb->size;
    tcg_ctx.code_out_len += gen_code_size;
    tcg_ctx.search_out_len += search_size;
#endif

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM) &&
        qemu_log_in_addr_range(tb->pc)) {
        qemu_log("OUT: [size=%d]\n", gen_code_size);
        log_disas(tb->tc_ptr, gen_code_size);
        qemu_log("\n");
        qemu_log_flush();
    }
#endif

    tcg_ctx.code_gen_ptr = (void *)
        ROUND_UP((uintptr_t)gen_code_buf + gen_code_size + search_size,
                 CODE_GEN_ALIGN);

    /* init jump list */
    assert(((uintptr_t)tb & 3) == 0);
    tb->jmp_list_first = (uintptr_t)tb | 2;
    tb->jmp_list_next[0] = (uintptr_t)NULL;
    tb->jmp_list_next[1] = (uintptr_t)NULL;

    /* init original jump addresses which have been set during tcg_gen_code() */
    if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
        tb_reset_jump(tb, 0);
    }
    if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
        tb_reset_jump(tb, 1);
    }

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    /* As long as consistency of the TB stuff is provided by tb_lock in user
     * mode and is implicit in single-threaded softmmu emulation, no explicit
     * memory barrier is required before tb_link_page() makes the TB visible
     * through the physical hash table and physical page list.
     */
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}
/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[. NOTE: start and end may refer to *different* physical pages.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 *
 * Called with mmap_lock held for user-mode emulation
 */
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
{
    while (start < end) {
        tb_invalidate_phys_page_range(start, end, 0);
        start &= TARGET_PAGE_MASK;
        start += TARGET_PAGE_SIZE;
    }
}
/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[. NOTE: start and end must refer to the *same* physical page.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 *
 * Called with mmap_lock held for user-mode emulation
 */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next;
#if defined(TARGET_HAS_PRECISE_SMC)
    CPUState *cpu = current_cpu;
    CPUArchState *env = NULL;
#endif
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    uint32_t current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }
#if defined(TARGET_HAS_PRECISE_SMC)
    if (cpu != NULL) {
        env = cpu->env_ptr;
    }
#endif

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all
       the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (cpu->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(cpu->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state_from_tb(cpu, current_tb, cpu->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            tb_phys_invalidate(tb, -1);
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        tlb_unprotect_code(start);
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1);
        cpu_loop_exit_noexc(cpu);
    }
#endif
}
#ifdef CONFIG_SOFTMMU
/* len must be <= 8 and start must be a multiple of len */
void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;

#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                  cpu_single_env->mem_io_vaddr, len,
                  cpu_single_env->eip,
                  cpu_single_env->eip +
                  (intptr_t)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD) {
        /* build code bitmap */
        build_page_bitmap(p);
    }
    if (p->code_bitmap) {
        unsigned int nr;
        unsigned long b;

        nr = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG - 1));
        if (b & ((1 << len) - 1)) {
            goto do_invalidate;
        }
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
#else
/* Called with mmap_lock held. If pc is not 0 then it indicates the
 * host PC of the faulting store instruction that caused this invalidate.
 * Returns true if the caller needs to abort execution of the current
 * TB (because it was modified by this store and the guest CPU has
 * precise-SMC semantics).
 */
static bool tb_invalidate_phys_page(tb_page_addr_t addr, uintptr_t pc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *cpu = current_cpu;
    CPUArchState *env = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    uint32_t current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        return false;
    }
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
    if (cpu != NULL) {
        env = cpu->env_ptr;
    }
#endif
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state_from_tb(cpu, current_tb, pc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1);
        return true;
    }
#endif

    return false;
}
#endif
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
static TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
{
    int m_min, m_max, m;
    uintptr_t v;
    TranslationBlock *tb;

    if (tcg_ctx.tb_ctx.nb_tbs <= 0) {
        return NULL;
    }
    if (tc_ptr < (uintptr_t)tcg_ctx.code_gen_buffer ||
        tc_ptr >= (uintptr_t)tcg_ctx.code_gen_ptr) {
        return NULL;
    }
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = tcg_ctx.tb_ctx.nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tcg_ctx.tb_ctx.tbs[m];
        v = (uintptr_t)tb->tc_ptr;
        if (v == tc_ptr) {
            return tb;
        } else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tcg_ctx.tb_ctx.tbs[m_max];
}
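
/* Note: the binary search above relies on tbs[] holding TBs in ascending
 * tc_ptr order, which holds because tb_alloc() hands out entries in
 * allocation order and code_gen_ptr only moves forward between flushes,
 * so the predecessor found by the search is the TB containing tc_ptr.
 */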
#if !defined(CONFIG_USER_ONLY)
void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr)
{
    ram_addr_t ram_addr;
    MemoryRegion *mr;
    hwaddr l = 1;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr, &l, false);
    if (!(memory_region_is_ram(mr)
          || memory_region_is_romd(mr))) {
        rcu_read_unlock();
        return;
    }
    ram_addr = memory_region_get_ram_addr(mr) + addr;
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
    rcu_read_unlock();
}
#endif /* !defined(CONFIG_USER_ONLY) */
void tb_check_watchpoint(CPUState *cpu)
{
    TranslationBlock *tb;

    tb = tb_find_pc(cpu->mem_io_pc);
    if (tb) {
        /* We can use retranslation to find the PC.  */
        cpu_restore_state_from_tb(cpu, tb, cpu->mem_io_pc);
        tb_phys_invalidate(tb, -1);
    } else {
        /* The exception probably happened in a helper.  The CPU state should
           have been saved before calling it. Fetch the PC from there.  */
        CPUArchState *env = cpu->env_ptr;
        target_ulong pc, cs_base;
        tb_page_addr_t addr;
        uint32_t flags;

        cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
        addr = get_page_addr_code(env, pc);
        tb_invalidate_phys_range(addr, addr + 1);
    }
}
#ifndef CONFIG_USER_ONLY
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
{
#if defined(TARGET_MIPS) || defined(TARGET_SH4)
    CPUArchState *env = cpu->env_ptr;
#endif
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint32_t flags;

    tb = tb_find_pc(retaddr);
    if (!tb) {
        cpu_abort(cpu, "cpu_io_recompile: could not find TB for pc=%p",
                  (void *)retaddr);
    }
    n = cpu->icount_decr.u16.low + tb->icount;
    cpu_restore_state_from_tb(cpu, tb, retaddr);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - cpu->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= (env->hflags & MIPS_HFLAG_B16 ? 2 : 4);
        cpu->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
        && n > 1) {
        env->pc -= 2;
        cpu->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK) {
        cpu_abort(cpu, "TB too big during recompile");
    }

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    if (tb->cflags & CF_NOCACHE) {
        if (tb->orig_tb) {
            /* Invalidate original TB if this TB was generated in
             * cpu_exec_nocache() */
            tb_phys_invalidate(tb->orig_tb, -1);
        }
        tb_free(tb);
    }
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(cpu, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_loop_exit_noexc(cpu);
}
void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset(&cpu->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset(&cpu->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}
static void print_qht_statistics(FILE *f, fprintf_function cpu_fprintf,
                                 struct qht_stats hst)
{
    uint32_t hgram_opts;
    size_t hgram_bins;
    char *hgram;

    if (!hst.head_buckets) {
        return;
    }
    cpu_fprintf(f, "TB hash buckets %zu/%zu (%0.2f%% head buckets used)\n",
                hst.used_head_buckets, hst.head_buckets,
                (double)hst.used_head_buckets / hst.head_buckets * 100);

    hgram_opts  = QDIST_PR_BORDER | QDIST_PR_LABELS;
    hgram_opts |= QDIST_PR_100X   | QDIST_PR_PERCENT;
    if (qdist_xmax(&hst.occupancy) - qdist_xmin(&hst.occupancy) == 1) {
        hgram_opts |= QDIST_PR_NODECIMAL;
    }
    hgram = qdist_pr(&hst.occupancy, 10, hgram_opts);
    cpu_fprintf(f, "TB hash occupancy %0.2f%% avg chain occ. Histogram: %s\n",
                qdist_avg(&hst.occupancy) * 100, hgram);
    g_free(hgram);

    hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS;
    hgram_bins = qdist_xmax(&hst.chain) - qdist_xmin(&hst.chain);
    if (hgram_bins > 10) {
        hgram_bins = 10;
    } else {
        hgram_bins = 0;
        hgram_opts |= QDIST_PR_NODECIMAL | QDIST_PR_NOBINRANGE;
    }
    hgram = qdist_pr(&hst.chain, hgram_bins, hgram_opts);
    cpu_fprintf(f, "TB hash avg chain %0.3f buckets. Histogram: %s\n",
                qdist_avg(&hst.chain), hgram);
    g_free(hgram);
}
void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;
    struct qht_stats hst;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for (i = 0; i < tcg_ctx.tb_ctx.nb_tbs; i++) {
        tb = &tcg_ctx.tb_ctx.tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size) {
            max_target_code_size = tb->size;
        }
        if (tb->page_addr[1] != -1) {
            cross_page++;
        }
        if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
            direct_jmp_count++;
            if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size %td/%zd\n",
                tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer,
                tcg_ctx.code_gen_highwater - tcg_ctx.code_gen_buffer);
    cpu_fprintf(f, "TB count %d/%d\n",
                tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
                tcg_ctx.tb_ctx.nb_tbs ? target_code_size /
                        tcg_ctx.tb_ctx.nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
                tcg_ctx.tb_ctx.nb_tbs ? (tcg_ctx.code_gen_ptr -
                                         tcg_ctx.code_gen_buffer) /
                        tcg_ctx.tb_ctx.nb_tbs : 0,
                target_code_size ? (double) (tcg_ctx.code_gen_ptr -
                                             tcg_ctx.code_gen_buffer) /
                        target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n", cross_page,
                tcg_ctx.tb_ctx.nb_tbs ? (cross_page * 100) /
                        tcg_ctx.tb_ctx.nb_tbs : 0);
    cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp_count * 100) /
                        tcg_ctx.tb_ctx.nb_tbs : 0,
                direct_jmp2_count,
                tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp2_count * 100) /
                        tcg_ctx.tb_ctx.nb_tbs : 0);

    qht_statistics_init(&tcg_ctx.tb_ctx.htable, &hst);
    print_qht_statistics(f, cpu_fprintf, hst);
    qht_statistics_destroy(&hst);

    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count %d\n", tcg_ctx.tb_ctx.tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n",
                tcg_ctx.tb_ctx.tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}

void dump_opcount_info(FILE *f, fprintf_function cpu_fprintf)
{
    tcg_dump_op_count(f, cpu_fprintf);
}
#else /* CONFIG_USER_ONLY */

void cpu_interrupt(CPUState *cpu, int mask)
{
    cpu->interrupt_request |= mask;
    cpu->tcg_exit_req = 1;
}
/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */
struct walk_memory_regions_data {
    walk_memory_regions_fn fn;
    void *priv;
    target_ulong start;
    int prot;
};

static int walk_memory_regions_end(struct walk_memory_regions_data *data,
                                   target_ulong end, int new_prot)
{
    if (data->start != -1u) {
        int rc = data->fn(data->priv, data->start, end, data->prot);
        if (rc != 0) {
            return rc;
        }
    }

    data->start = (new_prot ? end : -1u);
    data->prot = new_prot;

    return 0;
}

static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 target_ulong base, int level, void **lp)
{
    target_ulong pa;
    int i, rc;

    if (*lp == NULL) {
        return walk_memory_regions_end(data, base, 0);
    }

    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            if (prot != data->prot) {
                rc = walk_memory_regions_end(data, pa, prot);
                if (rc != 0) {
                    return rc;
                }
            }
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            pa = base | ((target_ulong)i <<
                (TARGET_PAGE_BITS + V_L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
            if (rc != 0) {
                return rc;
            }
        }
    }

    return 0;
}

int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    struct walk_memory_regions_data data;
    uintptr_t i;

    data.fn = fn;
    data.priv = priv;
    data.start = -1u;
    data.prot = 0;

    for (i = 0; i < V_L1_SIZE; i++) {
        int rc = walk_memory_regions_1(&data,
                                       (target_ulong)i << (V_L1_SHIFT + TARGET_PAGE_BITS),
                                       V_L1_SHIFT / V_L2_BITS - 1, l1_map + i);
        if (rc != 0) {
            return rc;
        }
    }

    return walk_memory_regions_end(&data, 0, 0);
}
static int dump_region(void *priv, target_ulong start,
                       target_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    (void) fprintf(f, TARGET_FMT_lx"-"TARGET_FMT_lx
                   " "TARGET_FMT_lx" %c%c%c\n",
                   start, end, end - start,
                   ((prot & PAGE_READ) ? 'r' : '-'),
                   ((prot & PAGE_WRITE) ? 'w' : '-'),
                   ((prot & PAGE_EXEC) ? 'x' : '-'));

    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    const int length = sizeof(target_ulong) * 2;
    (void) fprintf(f, "%-*s %-*s %-*s %s\n",
                   length, "start", length, "end", length, "size", "prot");
    walk_memory_regions(f, dump_region);
}

int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        return 0;
    }
    return p->flags;
}
/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is positioned automatically depending
   on PAGE_WRITE.  The mmap_lock should already be held.  */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
    assert(start < end);

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        flags |= PAGE_WRITE_ORG;
    }

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate
           the code inside.  */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0);
        }
        p->flags = flags;
    }
}
int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(start < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif

    if (len == 0) {
        return 0;
    }
    if (start + len - 1 < start) {
        /* We've wrapped around.  */
        return -1;
    }

    /* must do before we lose bits in the next step */
    end = TARGET_PAGE_ALIGN(start + len);
    start = start & TARGET_PAGE_MASK;

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p) {
            return -1;
        }
        if (!(p->flags & PAGE_VALID)) {
            return -1;
        }

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) {
            return -1;
        }
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG)) {
                return -1;
            }
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0)) {
                    return -1;
                }
            }
        }
    }
    return 0;
}
/* called from signal handler: invalidate the code and unprotect the
 * page. Return 0 if the fault was not handled, 1 if it was handled,
 * and 2 if it was handled but the caller must cause the TB to be
 * immediately exited. (We can only return 2 if the 'pc' argument is
 * non-zero.)
 */
int page_unprotect(target_ulong address, uintptr_t pc)
{
    unsigned int prot;
    bool current_tb_invalidated;
    PageDesc *p;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
    mmap_lock();

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        mmap_unlock();
        return 0;
    }

    /* if the page was really writable, then we change its
       protection back to writable */
    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
        host_start = address & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;

        prot = 0;
        current_tb_invalidated = false;
        for (addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
            p = page_find(addr >> TARGET_PAGE_BITS);
            p->flags |= PAGE_WRITE;
            prot |= p->flags;

            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            current_tb_invalidated |= tb_invalidate_phys_page(addr, pc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(addr);
#endif
        }
        mprotect((void *)g2h(host_start), qemu_host_page_size,
                 prot & PAGE_BITS);

        mmap_unlock();
        /* If current TB was invalidated return to main loop */
        return current_tb_invalidated ? 2 : 1;
    }
    mmap_unlock();
    return 0;
}
#endif /* CONFIG_USER_ONLY */