/*
 *  Host code generation
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <sys/types.h>

#include "qemu-common.h"
#define NO_CPU_IO_DEFS
#include "disas/disas.h"

#if defined(CONFIG_USER_ONLY)
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <machine/profile.h>
#endif
#endif
#else
#include "exec/address-spaces.h"
#endif

#include "exec/cputlb.h"
#include "exec/tb-hash.h"
#include "translate-all.h"
#include "qemu/bitmap.h"
#include "qemu/timer.h"

//#define DEBUG_TB_INVALIDATE
/* make various TB consistency checks */
//#define DEBUG_TB_CHECK

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    unsigned long *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;
/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define V_L2_BITS 10
#define V_L2_SIZE (1 << V_L2_BITS)

/* The bits remaining after N lower levels of page tables.  */
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % V_L2_BITS)

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + V_L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
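/*
 * Illustrative note (added; not in the original source): a page index is
 * consumed from the top down -- the top V_L1_BITS select the l1_map slot
 * and each lower level consumes another V_L2_BITS, exactly as
 * page_find_alloc() walks the tree below.  The helper name v_l1_index()
 * is hypothetical and exists only for this sketch.
 */
#if 0   /* example only, not compiled */
static inline size_t v_l1_index(tb_page_addr_t index)
{
    /* top V_L1_BITS of the page index select the level-1 slot */
    return (index >> V_L1_SHIFT) & (V_L1_SIZE - 1);
}
#endif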
uintptr_t qemu_host_page_size;
uintptr_t qemu_host_page_mask;

/* The bottom level has pointers to PageDesc */
static void *l1_map[V_L1_SIZE];

/* code generation context */
TCGContext tcg_ctx;

/* translation block context */
#ifdef CONFIG_USER_ONLY
__thread int have_tb_lock;
#endif

void tb_lock(void)
{
#ifdef CONFIG_USER_ONLY
    assert(!have_tb_lock);
    qemu_mutex_lock(&tcg_ctx.tb_ctx.tb_lock);
    have_tb_lock++;
#endif
}

void tb_unlock(void)
{
#ifdef CONFIG_USER_ONLY
    assert(have_tb_lock);
    have_tb_lock--;
    qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
#endif
}

void tb_lock_reset(void)
{
#ifdef CONFIG_USER_ONLY
    if (have_tb_lock) {
        qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
        have_tb_lock = 0;
    }
#endif
}

static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
                         tb_page_addr_t phys_page2);
static TranslationBlock *tb_find_pc(uintptr_t tc_ptr);

void cpu_gen_init(void)
{
    tcg_context_init(&tcg_ctx);
}

/* Encode VAL as a signed leb128 sequence at P.
   Return P incremented past the encoded value.  */
static uint8_t *encode_sleb128(uint8_t *p, target_long val)
{
    int more, byte;

    do {
        byte = val & 0x7f;
        val >>= 7;
        more = !((val == 0 && (byte & 0x40) == 0)
                 || (val == -1 && (byte & 0x40) != 0));
        if (more) {
            byte |= 0x80;
        }
        *p++ = byte;
    } while (more);

    return p;
}

/* Decode a signed leb128 sequence at *PP; increment *PP past the
   decoded value.  Return the decoded value.  */
static target_long decode_sleb128(uint8_t **pp)
{
    uint8_t *p = *pp;
    target_long val = 0;
    int byte, shift = 0;

    do {
        byte = *p++;
        val |= (target_ulong)(byte & 0x7f) << shift;
        shift += 7;
    } while (byte & 0x80);
    if (shift < TARGET_LONG_BITS && (byte & 0x40)) {
        val |= -(target_ulong)1 << shift;
    }

    *pp = p;
    return val;
}
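/*
 * Example (added): with the encoding above, -1 packs into the single byte
 * 0x7f (sign bit 0x40 set, no continuation), while +64 needs the two
 * bytes 0xc0 0x00, because bit 6 of a final group would otherwise be
 * misread as a sign bit.
 */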
/* Encode the data collected about the instructions while compiling TB.
   Place the data at BLOCK, and return the number of bytes consumed.

   The logical table consists of TARGET_INSN_START_WORDS target_ulong's,
   which come from the target's insn_start data, followed by a uintptr_t
   which comes from the host pc of the end of the code implementing the insn.

   Each line of the table is encoded as sleb128 deltas from the previous
   line.  The seed for the first line is { tb->pc, 0..., tb->tc_ptr }.
   That is, the first column is seeded with the guest pc, the last column
   with the host pc, and the middle columns with zeros.  */
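/*
 * Worked example (added): with TARGET_INSN_START_WORDS == 1 and 4-byte
 * guest insns (an assumption for illustration), a TB at guest pc 0x1000
 * whose two insns end at host offsets 0x20 and 0x34 is logically
 *     { 0x1000, tc_ptr + 0x20 }
 *     { 0x1004, tc_ptr + 0x34 }
 * and is stored as the sleb128 delta rows { 0x0, 0x20 } and { 0x4, 0x14 },
 * each row relative to the previous one (the first relative to the seed).
 */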
static int encode_search(TranslationBlock *tb, uint8_t *block)
{
    uint8_t *p = block;
    int i, j, n;

    tb->tc_search = block;

    for (i = 0, n = tb->icount; i < n; ++i) {
        target_ulong prev;

        for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
            if (i == 0) {
                prev = (j == 0 ? tb->pc : 0);
            } else {
                prev = tcg_ctx.gen_insn_data[i - 1][j];
            }
            p = encode_sleb128(p, tcg_ctx.gen_insn_data[i][j] - prev);
        }
        prev = (i == 0 ? 0 : tcg_ctx.gen_insn_end_off[i - 1]);
        p = encode_sleb128(p, tcg_ctx.gen_insn_end_off[i] - prev);
    }

    return p - block;
}

/* The cpu state corresponding to 'searched_pc' is restored.  */
static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
                                     uintptr_t searched_pc)
{
    target_ulong data[TARGET_INSN_START_WORDS] = { tb->pc };
    uintptr_t host_pc = (uintptr_t)tb->tc_ptr;
    CPUArchState *env = cpu->env_ptr;
    uint8_t *p = tb->tc_search;
    int i, j, num_insns = tb->icount;
#ifdef CONFIG_PROFILER
    int64_t ti = profile_getclock();
#endif

    if (searched_pc < host_pc) {
        return -1;
    }

    /* Reconstruct the stored insn data while looking for the point at
       which the end of the insn exceeds the searched_pc.  */
    for (i = 0; i < num_insns; ++i) {
        for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
            data[j] += decode_sleb128(&p);
        }
        host_pc += decode_sleb128(&p);
        if (host_pc > searched_pc) {
            goto found;
        }
    }
    return -1;

 found:
    if (tb->cflags & CF_USE_ICOUNT) {
        assert(use_icount);
        /* Reset the cycle counter to the start of the block.  */
        cpu->icount_decr.u16.low += num_insns;
        /* Clear the IO flag.  */
        cpu->can_do_io = 0;
    }
    cpu->icount_decr.u16.low -= i;
    restore_state_to_opc(env, tb, data);

#ifdef CONFIG_PROFILER
    tcg_ctx.restore_time += profile_getclock() - ti;
    tcg_ctx.restore_count++;
#endif
    return 0;
}

bool cpu_restore_state(CPUState *cpu, uintptr_t retaddr)
{
    TranslationBlock *tb;

    tb = tb_find_pc(retaddr);
    if (tb) {
        cpu_restore_state_from_tb(cpu, tb, retaddr);
        if (tb->cflags & CF_NOCACHE) {
            /* one-shot translation, invalidate it immediately */
            cpu->current_tb = NULL;
            tb_phys_invalidate(tb, -1);
            tb_free(tb);
        }
        return true;
    }
    return false;
}

#ifdef _WIN32
static __attribute__((unused)) void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static __attribute__((unused)) void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif
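/*
 * Example of the alignment arithmetic above (added): with a 4096-byte
 * page, addr = 0x40001234 and size = 0x100 give start = 0x40001000 and
 * end = 0x40002000, so every host page touched by the region is remapped
 * read/write/execute.
 */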
void page_size_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
    qemu_real_host_page_size = getpagesize();
    qemu_real_host_page_mask = ~(qemu_real_host_page_size - 1);
    if (qemu_host_page_size == 0) {
        qemu_host_page_size = qemu_real_host_page_size;
    }
    if (qemu_host_page_size < TARGET_PAGE_SIZE) {
        qemu_host_page_size = TARGET_PAGE_SIZE;
    }
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
}

static void page_init(void)
{
    page_size_init();
#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}

/* If alloc=1:
 * Called with mmap_lock held for user-mode emulation.
 */
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / V_L2_BITS - 1; i > 0; i--) {
        void **p = atomic_rcu_read(lp);

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            p = g_new0(void *, V_L2_SIZE);
            atomic_rcu_set(lp, p);
        }

        lp = p + ((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1));
    }

    pd = atomic_rcu_read(lp);
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        pd = g_new0(PageDesc, V_L2_SIZE);
        atomic_rcu_set(lp, pd);
    }

    return pd + (index & (V_L2_SIZE - 1));
}
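/*
 * Note added for clarity: each new level is published with
 * atomic_rcu_set() only after g_new0() has fully zeroed it, so lock-free
 * readers using atomic_rcu_read() never observe a partially initialized
 * level; concurrent allocators are serialized by the mmap_lock mentioned
 * above.
 */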
static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode.  It will change when a dedicated libc will be used.  */
/* ??? 64-bit hosts ought to have no problem mmaping data outside the
   region in which the guest needs to run.  Revisit this.  */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

/* ??? Should configure for this, not list operating systems here.  */
#if (defined(__linux__) \
    || defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__) \
    || defined(__NetBSD__))
# define USE_MMAP
#endif

/* Minimum size of the code gen buffer.  This number is randomly chosen,
   but not so small that we can't have a fair number of TB's live.  */
#define MIN_CODE_GEN_BUFFER_SIZE     (1024u * 1024)

/* Maximum size of the code gen buffer we'd like to use.  Unless otherwise
   indicated, this is constrained by the range of direct branches on the
   host cpu, as used by the TCG implementation of goto_tb.  */
#if defined(__x86_64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__sparc__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__aarch64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (128ul * 1024 * 1024)
#elif defined(__arm__)
# define MAX_CODE_GEN_BUFFER_SIZE  (16u * 1024 * 1024)
#elif defined(__s390x__)
  /* We have a +- 4GB range on the branches; leave some slop.  */
# define MAX_CODE_GEN_BUFFER_SIZE  (3ul * 1024 * 1024 * 1024)
#elif defined(__mips__)
  /* We have a 256MB branch region, but leave room to make sure the
     main executable is also within that region.  */
# define MAX_CODE_GEN_BUFFER_SIZE  (128ul * 1024 * 1024)
#else
# define MAX_CODE_GEN_BUFFER_SIZE  ((size_t)-1)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32u * 1024 * 1024)

#define DEFAULT_CODE_GEN_BUFFER_SIZE \
  (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
   ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)

static inline size_t size_code_gen_buffer(size_t tb_size)
{
    /* Size the buffer.  */
    if (tb_size == 0) {
#ifdef USE_STATIC_CODE_GEN_BUFFER
        tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* ??? Needs adjustments.  */
        /* ??? If we relax the requirement that CONFIG_USER_ONLY use the
           static buffer, we could size this on RESERVED_VA, on the text
           segment size of the executable, or continue to use the default.  */
        tb_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
        tb_size = MIN_CODE_GEN_BUFFER_SIZE;
    }
    if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
        tb_size = MAX_CODE_GEN_BUFFER_SIZE;
    }
    tcg_ctx.code_gen_buffer_size = tb_size;
    return tb_size;
}

#ifdef __mips__
/* In order to use J and JAL within the code_gen_buffer, we require
   that the buffer not cross a 256MB boundary.  */
static inline bool cross_256mb(void *addr, size_t size)
{
    return ((uintptr_t)addr ^ ((uintptr_t)addr + size)) & 0xf0000000;
}
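/*
 * Worked example (added): for addr = 0x0fff0000 and size = 0x20000, the
 * XOR of start (0x0fff0000) and end (0x10010000) is 0x1fee0000; masking
 * with 0xf0000000 leaves 0x10000000, i.e. a bit above the 256MB boundary
 * differs, so the buffer crosses it and J/JAL could not reach everywhere.
 */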
/* We weren't able to allocate a buffer without crossing that boundary,
   so make do with the larger portion of the buffer that doesn't cross.
   Returns the new base of the buffer, and adjusts code_gen_buffer_size.  */
static inline void *split_cross_256mb(void *buf1, size_t size1)
{
    void *buf2 = (void *)(((uintptr_t)buf1 + size1) & 0xf0000000);
    size_t size2 = buf1 + size1 - buf2;

    size1 = buf2 - buf1;
    if (size1 < size2) {
        size1 = size2;
        buf1 = buf2;
    }

    tcg_ctx.code_gen_buffer_size = size1;
    return buf1;
}
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
    __attribute__((aligned(CODE_GEN_ALIGN)));

static inline void *alloc_code_gen_buffer(void)
{
    void *buf = static_code_gen_buffer;
#ifdef __mips__
    if (cross_256mb(buf, tcg_ctx.code_gen_buffer_size)) {
        buf = split_cross_256mb(buf, tcg_ctx.code_gen_buffer_size);
    }
#endif
    map_exec(buf, tcg_ctx.code_gen_buffer_size);
    return buf;
}
#elif defined(USE_MMAP)
static inline void *alloc_code_gen_buffer(void)
{
    int flags = MAP_PRIVATE | MAP_ANONYMOUS;
    uintptr_t start = 0;
    void *buf;

    /* Constrain the position of the buffer based on the host cpu.
       Note that these addresses are chosen in concert with the
       addresses assigned in the relevant linker script file.  */
# if defined(__PIE__) || defined(__PIC__)
    /* Don't bother setting a preferred location if we're building
       a position-independent executable.  We're more likely to get
       an address near the main executable if we let the kernel
       choose the address.  */
# elif defined(__x86_64__) && defined(MAP_32BIT)
    /* Force the memory down into low memory with the executable.
       Leave the choice of exact location with the kernel.  */
    flags |= MAP_32BIT;
    /* Cannot expect to map more than 800MB in low memory.  */
    if (tcg_ctx.code_gen_buffer_size > 800u * 1024 * 1024) {
        tcg_ctx.code_gen_buffer_size = 800u * 1024 * 1024;
    }
# elif defined(__sparc__)
    start = 0x40000000ul;
# elif defined(__s390x__)
    start = 0x90000000ul;
# elif defined(__mips__)
    /* ??? We ought to more explicitly manage layout for softmmu too.  */
#  ifdef CONFIG_USER_ONLY
    start = 0x68000000ul;
#  elif _MIPS_SIM == _ABI64
    start = 0x128000000ul;
#  else
    start = 0x08000000ul;
#  endif
# endif

    buf = mmap((void *)start, tcg_ctx.code_gen_buffer_size,
               PROT_WRITE | PROT_READ | PROT_EXEC, flags, -1, 0);
    if (buf == MAP_FAILED) {
        return NULL;
    }

#ifdef __mips__
    if (cross_256mb(buf, tcg_ctx.code_gen_buffer_size)) {
        /* Try again, with the original still mapped, to avoid re-acquiring
           that 256mb crossing.  This time don't specify an address.  */
        size_t size2, size1 = tcg_ctx.code_gen_buffer_size;
        void *buf2 = mmap(NULL, size1, PROT_WRITE | PROT_READ | PROT_EXEC,
                          flags, -1, 0);
        if (buf2 != MAP_FAILED) {
            if (!cross_256mb(buf2, size1)) {
                /* Success!  Use the new buffer.  */
                munmap(buf, size1);
                return buf2;
            }
            /* Failure.  Work with what we had.  */
            munmap(buf2, size1);
        }

        /* Split the original buffer.  Free the smaller half.  */
        buf2 = split_cross_256mb(buf, size1);
        size2 = tcg_ctx.code_gen_buffer_size;
        munmap(buf + (buf == buf2 ? size2 : 0), size1 - size2);

        /* Keep the part of the buffer that survived the split.  */
        buf = buf2;
    }
#endif

    return buf;
}
#else
static inline void *alloc_code_gen_buffer(void)
{
    void *buf = g_try_malloc(tcg_ctx.code_gen_buffer_size);

    if (buf == NULL) {
        return NULL;
    }

#ifdef __mips__
    if (cross_256mb(buf, tcg_ctx.code_gen_buffer_size)) {
        void *buf2 = g_malloc(tcg_ctx.code_gen_buffer_size);
        if (buf2 != NULL && !cross_256mb(buf2, tcg_ctx.code_gen_buffer_size)) {
            /* Success!  Use the new buffer.  */
            free(buf);
            buf = buf2;
        } else {
            /* Failure.  Work with what we had.  Since this is malloc
               and not mmap, we can't free the other half.  */
            free(buf2);
            buf = split_cross_256mb(buf, tcg_ctx.code_gen_buffer_size);
        }
    }
#endif

    map_exec(buf, tcg_ctx.code_gen_buffer_size);
    return buf;
}
#endif /* USE_STATIC_CODE_GEN_BUFFER, USE_MMAP */

static inline void code_gen_alloc(size_t tb_size)
{
    tcg_ctx.code_gen_buffer_size = size_code_gen_buffer(tb_size);
    tcg_ctx.code_gen_buffer = alloc_code_gen_buffer();
    if (tcg_ctx.code_gen_buffer == NULL) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
        exit(1);
    }

    qemu_madvise(tcg_ctx.code_gen_buffer, tcg_ctx.code_gen_buffer_size,
                 QEMU_MADV_HUGEPAGE);

    /* Steal room for the prologue at the end of the buffer.  This ensures
       (via the MAX_CODE_GEN_BUFFER_SIZE limits above) that direct branches
       from TB's to the prologue are going to be in range.  It also means
       that we don't need to mark (additional) portions of the data segment
       as executable.  */
    tcg_ctx.code_gen_prologue = tcg_ctx.code_gen_buffer +
            tcg_ctx.code_gen_buffer_size - 1024;
    tcg_ctx.code_gen_buffer_size -= 1024;

    tcg_ctx.code_gen_buffer_max_size = tcg_ctx.code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
    tcg_ctx.code_gen_max_blocks = tcg_ctx.code_gen_buffer_size /
            CODE_GEN_AVG_BLOCK_SIZE;
    tcg_ctx.tb_ctx.tbs =
            g_malloc(tcg_ctx.code_gen_max_blocks * sizeof(TranslationBlock));
    qemu_mutex_init(&tcg_ctx.tb_ctx.tb_lock);
}

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    tcg_ctx.code_gen_ptr = tcg_ctx.code_gen_buffer;
    tcg_register_jit(tcg_ctx.code_gen_buffer, tcg_ctx.code_gen_buffer_size);
    page_init();
#if defined(CONFIG_SOFTMMU)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(&tcg_ctx);
#endif
}

bool tcg_enabled(void)
{
    return tcg_ctx.code_gen_buffer != NULL;
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (tcg_ctx.tb_ctx.nb_tbs >= tcg_ctx.code_gen_max_blocks ||
        (tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer) >=
         tcg_ctx.code_gen_buffer_max_size) {
        return NULL;
    }
    tb = &tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single use temporary TB
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (tcg_ctx.tb_ctx.nb_tbs > 0 &&
            tb == &tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs - 1]) {
        tcg_ctx.code_gen_ptr = tb->tc_ptr;
        tcg_ctx.tb_ctx.nb_tbs--;
    }
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    g_free(p->code_bitmap);
    p->code_bitmap = NULL;
    p->code_write_count = 0;
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */
static void page_flush_tb_1(int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            page_flush_tb_1(level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;

    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / V_L2_BITS - 1, l1_map + i);
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *cpu)
{
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer),
           tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.tb_ctx.nb_tbs > 0 ?
           ((unsigned long)(tcg_ctx.code_gen_ptr -
                            tcg_ctx.code_gen_buffer)) /
           tcg_ctx.tb_ctx.nb_tbs : 0);
#endif
    if ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)
        > tcg_ctx.code_gen_buffer_size) {
        cpu_abort(cpu, "Internal error: code buffer overflow\n");
    }
    tcg_ctx.tb_ctx.nb_tbs = 0;

    CPU_FOREACH(cpu) {
        memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));
    }

    memset(tcg_ctx.tb_ctx.tb_phys_hash, 0, sizeof(tcg_ctx.tb_ctx.tb_phys_hash));
    page_flush_tb();

    tcg_ctx.code_gen_ptr = tcg_ctx.code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tcg_ctx.tb_ctx.tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;

    address &= TARGET_PAGE_MASK;
    for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for (tb = tcg_ctx.tb_ctx.tb_phys_hash[i]; tb != NULL;
             tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for (tb = tcg_ctx.tb_ctx.tb_phys_hash[i]; tb != NULL;
                tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif

static inline void tb_hash_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;

    for (;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = tb1->phys_hash_next;
            break;
        }
        ptb = &tb1->phys_hash_next;
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for (;;) {
        tb1 = *ptb;
        n1 = (uintptr_t)tb1 & 3;
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for (;;) {
            tb1 = *ptb;
            n1 = (uintptr_t)tb1 & 3;
            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
            if (n1 == n && tb1 == tb) {
                break;
            }
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}
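/*
 * Note added for clarity: the low two bits of the pointers walked above
 * are a tag, not address bits.  A tag of 0 or 1 names which jmp_next slot
 * of the pointed-to TB the link lives in, and 2 marks the list head in
 * jmp_first.  Masking with ~3 recovers the real pointer:
 */
#if 0   /* illustrative sketch only, not compiled */
static inline TranslationBlock *jmp_list_tb(TranslationBlock *tagged)
{
    return (TranslationBlock *)((uintptr_t)tagged & ~3);  /* strip the tag */
}
#endif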
/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[n]));
}

/* invalidate one TB */
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *cpu;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_hash_remove(&tcg_ctx.tb_ctx.tb_phys_hash[h], tb);

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tcg_ctx.tb_ctx.tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    CPU_FOREACH(cpu) {
        if (cpu->tb_jmp_cache[h] == tb) {
            cpu->tb_jmp_cache[h] = NULL;
        }
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for (;;) {
        n1 = (uintptr_t)tb1 & 3;
        if (n1 == 2) {
            break;
        }
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2); /* fail safe */

    tcg_ctx.tb_ctx.tb_phys_invalidate_count++;
}

static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = bitmap_new(TARGET_PAGE_SIZE);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE) {
                tb_end = TARGET_PAGE_SIZE;
            }
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        bitmap_set(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
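/*
 * Example (added): a TB whose first byte sits at offset 0xff0 of a page
 * and whose size is 0x30 contributes bits [0xff0, 0x1000) to that page's
 * bitmap (n == 0, clamped at the page end), while the overflow onto the
 * next page is recorded there under n == 1 as bits [0, 0x20).
 */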
/* Called with mmap_lock held for user mode emulation.  */
TranslationBlock *tb_gen_code(CPUState *cpu,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    CPUArchState *env = cpu->env_ptr;
    TranslationBlock *tb;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    tcg_insn_unit *gen_code_buf;
    int gen_code_size, search_size;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

    phys_pc = get_page_addr_code(env, pc);
    if (use_icount) {
        cflags |= CF_USE_ICOUNT;
    }

    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(cpu);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info.  */
        tcg_ctx.tb_ctx.tb_invalidated_flag = 1;
    }

    gen_code_buf = tcg_ctx.code_gen_ptr;
    tb->tc_ptr = gen_code_buf;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;

#ifdef CONFIG_PROFILER
    tcg_ctx.tb_count1++; /* includes aborted translations because of
                            exceptions */
    ti = profile_getclock();
#endif

    tcg_func_start(&tcg_ctx);

    gen_intermediate_code(env, tb);

    trace_translate_block(tb, tb->pc, tb->tc_ptr);

    /* generate machine code */
    tb->tb_next_offset[0] = 0xffff;
    tb->tb_next_offset[1] = 0xffff;
    tcg_ctx.tb_next_offset = tb->tb_next_offset;
#ifdef USE_DIRECT_JUMP
    tcg_ctx.tb_jmp_offset = tb->tb_jmp_offset;
    tcg_ctx.tb_next = NULL;
#else
    tcg_ctx.tb_jmp_offset = NULL;
    tcg_ctx.tb_next = tb->tb_next;
#endif

#ifdef CONFIG_PROFILER
    tcg_ctx.tb_count++;
    tcg_ctx.interm_time += profile_getclock() - ti;
    tcg_ctx.code_time -= profile_getclock();
#endif

    gen_code_size = tcg_gen_code(&tcg_ctx, gen_code_buf);
    search_size = encode_search(tb, (void *)gen_code_buf + gen_code_size);

#ifdef CONFIG_PROFILER
    tcg_ctx.code_time += profile_getclock();
    tcg_ctx.code_in_len += tb->size;
    tcg_ctx.code_out_len += gen_code_size;
    tcg_ctx.search_out_len += search_size;
#endif

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM)) {
        qemu_log("OUT: [size=%d]\n", gen_code_size);
        log_disas(tb->tc_ptr, gen_code_size);
        qemu_log("\n");
        qemu_log_flush();
    }
#endif

    tcg_ctx.code_gen_ptr = (void *)
        ROUND_UP((uintptr_t)gen_code_buf + gen_code_size + search_size,
                 CODE_GEN_ALIGN);

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}

/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[. NOTE: start and end may refer to *different* physical pages.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 *
 * Called with mmap_lock held for user-mode emulation
 */
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
{
    while (start < end) {
        tb_invalidate_phys_page_range(start, end, 0);
        start &= TARGET_PAGE_MASK;
        start += TARGET_PAGE_SIZE;
    }
}

/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[. NOTE: start and end must refer to the *same* physical page.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 *
 * Called with mmap_lock held for user-mode emulation
 */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *cpu = current_cpu;
#if defined(TARGET_HAS_PRECISE_SMC)
    CPUArchState *env = NULL;
#endif
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }
#if defined(TARGET_HAS_PRECISE_SMC)
    if (cpu != NULL) {
        env = cpu->env_ptr;
    }
#endif

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all
       the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (cpu->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(cpu->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state_from_tb(cpu, current_tb, cpu->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (cpu != NULL) {
                saved_tb = cpu->current_tb;
                cpu->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (cpu != NULL) {
                cpu->current_tb = saved_tb;
                if (cpu->interrupt_request && cpu->current_tb) {
                    cpu_interrupt(cpu, cpu->interrupt_request);
                }
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        tlb_unprotect_code(start);
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        cpu->current_tb = NULL;
        tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(cpu, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;

#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                 cpu_single_env->mem_io_vaddr, len,
                 cpu_single_env->eip,
                 cpu_single_env->eip +
                 (intptr_t)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD) {
        /* build code bitmap */
        build_page_bitmap(p);
    }
    if (p->code_bitmap) {
        unsigned int nr;
        unsigned long b;

        nr = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG - 1));
        if (b & ((1 << len) - 1)) {
            goto do_invalidate;
        }
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
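/*
 * Example of the bitmap probe above (added): for a 2-byte write at page
 * offset nr = 0x105, BIT_WORD(nr) selects the long word holding bit 0x105
 * and the shift moves that bit to position 0; (b & 0x3) != 0 then means
 * one of the two written bytes overlaps translated code, so the slow
 * range invalidation must run.
 */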
#if !defined(CONFIG_SOFTMMU)
/* Called with mmap_lock held.  */
static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                    uintptr_t pc, void *puc,
                                    bool locked)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *cpu = current_cpu;
    CPUArchState *env = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
    if (cpu != NULL) {
        env = cpu->env_ptr;
    }
#endif
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state_from_tb(cpu, current_tb, pc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        cpu->current_tb = NULL;
        tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1);
        if (locked) {
            mmap_unlock();
        }
        cpu_resume_from_signal(cpu, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary
 *
 * Called with mmap_lock held for user-mode emulation.
 */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;
#endif

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    page_already_protected = p->first_tb != NULL;
#endif
    p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);
    invalidate_page_bitmap(p);

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for (addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find(addr >> TARGET_PAGE_BITS);
            if (!p2) {
                continue;
            }
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);
    }
#endif
}

/* add a new TB and link it to the physical page tables. phys_page2 is
 * (-1) to indicate that only one page contains the TB.
 *
 * Called with mmap_lock held for user-mode emulation.
 */
static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
                         tb_page_addr_t phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tcg_ctx.tb_ctx.tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1) {
        tb_alloc_page(tb, 1, phys_page2);
    } else {
        tb->page_addr[1] = -1;
    }

    tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff) {
        tb_reset_jump(tb, 0);
    }
    if (tb->tb_next_offset[1] != 0xffff) {
        tb_reset_jump(tb, 1);
    }

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
}

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
static TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
{
    int m_min, m_max, m;
    uintptr_t v;
    TranslationBlock *tb;

    if (tcg_ctx.tb_ctx.nb_tbs <= 0) {
        return NULL;
    }
    if (tc_ptr < (uintptr_t)tcg_ctx.code_gen_buffer ||
        tc_ptr >= (uintptr_t)tcg_ctx.code_gen_ptr) {
        return NULL;
    }
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = tcg_ctx.tb_ctx.nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tcg_ctx.tb_ctx.tbs[m];
        v = (uintptr_t)tb->tc_ptr;
        if (v == tc_ptr) {
            return tb;
        } else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tcg_ctx.tb_ctx.tbs[m_max];
}
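/*
 * Note added for clarity: the search works because tbs[] is filled in
 * ascending tc_ptr order (tb_alloc hands out code buffer space
 * sequentially).  When the loop falls through, m_max indexes the last TB
 * whose tc_ptr lies below tc_ptr, which by the range check above must be
 * the enclosing block.
 */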
#if !defined(CONFIG_USER_ONLY)
void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr)
{
    ram_addr_t ram_addr;
    MemoryRegion *mr;
    hwaddr l = 1;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr, &l, false);
    if (!(memory_region_is_ram(mr)
          || memory_region_is_romd(mr))) {
        rcu_read_unlock();
        return;
    }
    ram_addr = (memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK)
        + addr;
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
    rcu_read_unlock();
}
#endif /* !defined(CONFIG_USER_ONLY) */

void tb_check_watchpoint(CPUState *cpu)
{
    TranslationBlock *tb;

    tb = tb_find_pc(cpu->mem_io_pc);
    if (tb) {
        /* We can use retranslation to find the PC.  */
        cpu_restore_state_from_tb(cpu, tb, cpu->mem_io_pc);
        tb_phys_invalidate(tb, -1);
    } else {
        /* The exception probably happened in a helper.  The CPU state should
           have been saved before calling it. Fetch the PC from there.  */
        CPUArchState *env = cpu->env_ptr;
        target_ulong pc, cs_base;
        tb_page_addr_t addr;
        int flags;

        cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
        addr = get_page_addr_code(env, pc);
        tb_invalidate_phys_range(addr, addr + 1);
    }
}

#ifndef CONFIG_USER_ONLY
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
{
#if defined(TARGET_MIPS) || defined(TARGET_SH4)
    CPUArchState *env = cpu->env_ptr;
#endif
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc(retaddr);
    if (!tb) {
        cpu_abort(cpu, "cpu_io_recompile: could not find TB for pc=%p",
                  (void *)retaddr);
    }
    n = cpu->icount_decr.u16.low + tb->icount;
    cpu_restore_state_from_tb(cpu, tb, retaddr);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - cpu->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= (env->hflags & MIPS_HFLAG_B16 ? 2 : 4);
        cpu->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
        && n > 1) {
        env->pc -= 2;
        cpu->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK) {
        cpu_abort(cpu, "TB too big during recompile");
    }

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    if (tb->cflags & CF_NOCACHE) {
        if (tb->orig_tb) {
            /* Invalidate original TB if this TB was generated in
             * cpu_exec_nocache() */
            tb_phys_invalidate(tb->orig_tb, -1);
        }
        tb_free(tb);
    }
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(cpu, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(cpu, NULL);
}

void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset(&cpu->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset(&cpu->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}

void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for (i = 0; i < tcg_ctx.tb_ctx.nb_tbs; i++) {
        tb = &tcg_ctx.tb_ctx.tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size) {
            max_target_code_size = tb->size;
        }
        if (tb->page_addr[1] != -1) {
            cross_page++;
        }
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %td/%zd\n",
                tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer,
                tcg_ctx.code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
            tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
            tcg_ctx.tb_ctx.nb_tbs ? target_code_size /
                    tcg_ctx.tb_ctx.nb_tbs : 0,
            max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %td bytes (expansion ratio: %0.1f)\n",
            tcg_ctx.tb_ctx.nb_tbs ? (tcg_ctx.code_gen_ptr -
                                     tcg_ctx.code_gen_buffer) /
                                     tcg_ctx.tb_ctx.nb_tbs : 0,
            target_code_size ? (double) (tcg_ctx.code_gen_ptr -
                                         tcg_ctx.code_gen_buffer) /
                                         target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n", cross_page,
            tcg_ctx.tb_ctx.nb_tbs ? (cross_page * 100) /
                                    tcg_ctx.tb_ctx.nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp_count * 100) /
                        tcg_ctx.tb_ctx.nb_tbs : 0,
                direct_jmp2_count,
                tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp2_count * 100) /
                        tcg_ctx.tb_ctx.nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tcg_ctx.tb_ctx.tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n",
            tcg_ctx.tb_ctx.tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}

void dump_opcount_info(FILE *f, fprintf_function cpu_fprintf)
{
    tcg_dump_op_count(f, cpu_fprintf);
}

#else /* CONFIG_USER_ONLY */

void cpu_interrupt(CPUState *cpu, int mask)
{
    cpu->interrupt_request |= mask;
    cpu->tcg_exit_req = 1;
}

/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */
struct walk_memory_regions_data {
    walk_memory_regions_fn fn;
    void *priv;
    target_ulong start;
    int prot;
};

static int walk_memory_regions_end(struct walk_memory_regions_data *data,
                                   target_ulong end, int new_prot)
{
    if (data->start != -1u) {
        int rc = data->fn(data->priv, data->start, end, data->prot);
        if (rc != 0) {
            return rc;
        }
    }

    data->start = (new_prot ? end : -1u);
    data->prot = new_prot;

    return 0;
}

static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 target_ulong base, int level, void **lp)
{
    target_ulong pa;
    int i, rc;

    if (*lp == NULL) {
        return walk_memory_regions_end(data, base, 0);
    }

    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            if (prot != data->prot) {
                rc = walk_memory_regions_end(data, pa, prot);
                if (rc != 0) {
                    return rc;
                }
            }
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            pa = base | ((target_ulong)i <<
                (TARGET_PAGE_BITS + V_L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
            if (rc != 0) {
                return rc;
            }
        }
    }

    return 0;
}

int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    struct walk_memory_regions_data data;
    uintptr_t i;

    data.fn = fn;
    data.priv = priv;
    data.start = -1u;
    data.prot = 0;

    for (i = 0; i < V_L1_SIZE; i++) {
        int rc = walk_memory_regions_1(&data,
                (target_ulong)i << (V_L1_SHIFT + TARGET_PAGE_BITS),
                V_L1_SHIFT / V_L2_BITS - 1, l1_map + i);
        if (rc != 0) {
            return rc;
        }
    }

    return walk_memory_regions_end(&data, 0, 0);
}

static int dump_region(void *priv, target_ulong start,
    target_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    (void) fprintf(f, TARGET_FMT_lx"-"TARGET_FMT_lx
        " "TARGET_FMT_lx" %c%c%c\n",
        start, end, end - start,
        ((prot & PAGE_READ) ? 'r' : '-'),
        ((prot & PAGE_WRITE) ? 'w' : '-'),
        ((prot & PAGE_EXEC) ? 'x' : '-'));

    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    const int length = sizeof(target_ulong) * 2;
    (void) fprintf(f, "%-*s %-*s %-*s %s\n",
            length, "start", length, "end", length, "size", "prot");
    walk_memory_regions(f, dump_region);
}

int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        return 0;
    }
    return p->flags;
}

/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is positioned automatically depending
   on PAGE_WRITE.  The mmap_lock should already be held.  */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
    assert(start < end);

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        flags |= PAGE_WRITE_ORG;
    }

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate
           the code inside.  */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL, false);
        }
        p->flags = flags;
    }
}

int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(start < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif

    if (len == 0) {
        return 0;
    }
    if (start + len - 1 < start) {
        /* We've wrapped around.  */
        return -1;
    }

    /* must do before we lose bits in the next step */
    end = TARGET_PAGE_ALIGN(start + len);
    start = start & TARGET_PAGE_MASK;

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p) {
            return -1;
        }
        if (!(p->flags & PAGE_VALID)) {
            return -1;
        }

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) {
            return -1;
        }
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG)) {
                return -1;
            }
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL)) {
                    return -1;
                }
            }
        }
    }
    return 0;
}

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, uintptr_t pc, void *puc)
{
    unsigned int prot;
    PageDesc *p;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
    mmap_lock();

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        mmap_unlock();
        return 0;
    }

    /* if the page was really writable, then we change its
       protection back to writable */
    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
        host_start = address & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;

        prot = 0;
        for (addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
            p = page_find(addr >> TARGET_PAGE_BITS);
            p->flags |= PAGE_WRITE;
            prot |= p->flags;

            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(addr, pc, puc, true);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(addr);
#endif
        }
        mprotect((void *)g2h(host_start), qemu_host_page_size,
                 prot & PAGE_BITS);

        mmap_unlock();
        return 1;
    }
    mmap_unlock();
    return 0;
}
#endif /* CONFIG_USER_ONLY */