/*
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <sys/types.h>

#include "qemu-common.h"
#define NO_CPU_IO_DEFS
#include "disas/disas.h"
#if defined(CONFIG_USER_ONLY)
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <machine/profile.h>
#endif
#endif
#else
#include "exec/address-spaces.h"
#endif

#include "exec/cputlb.h"
#include "exec/tb-hash.h"
#include "translate-all.h"
#include "qemu/bitmap.h"
#include "qemu/timer.h"
//#define DEBUG_TB_INVALIDATE
/* make various TB consistency checks */
//#define DEBUG_TB_CHECK

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10
typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    unsigned long *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;
/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define V_L2_BITS 10
#define V_L2_SIZE (1 << V_L2_BITS)

/* The bits remaining after N lower levels of page tables.  */
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % V_L2_BITS)

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + V_L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
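
/* Worked example (illustrative only, values assumed for the sake of the
 * example): with L1_MAP_ADDR_SPACE_BITS == 42 and TARGET_PAGE_BITS == 12
 * there are 30 bits of page index to decode.  V_L1_BITS_REM = 30 % 10 = 0,
 * which is < 4, so V_L1_BITS = 0 + 10 = 10, V_L1_SIZE = 1024 entries,
 * and V_L1_SHIFT = 42 - 12 - 10 = 20.  page_find_alloc() below then walks
 * V_L1_SHIFT / V_L2_BITS = 2 further levels of 1024 entries each,
 * consuming 10 index bits per level.  The "< 4" test merely folds a tiny
 * remainder into the level below it, so the root table never has fewer
 * than 16 entries. */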
uintptr_t qemu_real_host_page_size;
uintptr_t qemu_host_page_size;
uintptr_t qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];
/* code generation context */
TCGContext tcg_ctx;

static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
                         tb_page_addr_t phys_page2);
static TranslationBlock *tb_find_pc(uintptr_t tc_ptr);

void cpu_gen_init(void)
{
    tcg_context_init(&tcg_ctx);
}
/* return non zero if the very first instruction is invalid so that
   the virtual CPU can trigger an exception.

   '*gen_code_size_ptr' contains the size of the generated code (host
   code).
*/
int cpu_gen_code(CPUArchState *env, TranslationBlock *tb, int *gen_code_size_ptr)
{
    TCGContext *s = &tcg_ctx;
    tcg_insn_unit *gen_code_buf;
    int gen_code_size;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

#ifdef CONFIG_PROFILER
    s->tb_count1++; /* includes aborted translations because of
                       exceptions */
    ti = profile_getclock();
#endif
    tcg_func_start(s);

    gen_intermediate_code(env, tb);

    trace_translate_block(tb, tb->pc, tb->tc_ptr);

    /* generate machine code */
    gen_code_buf = tb->tc_ptr;
    tb->tb_next_offset[0] = 0xffff;
    tb->tb_next_offset[1] = 0xffff;
    s->tb_next_offset = tb->tb_next_offset;
#ifdef USE_DIRECT_JUMP
    s->tb_jmp_offset = tb->tb_jmp_offset;
    s->tb_next = NULL;
#else
    s->tb_jmp_offset = NULL;
    s->tb_next = tb->tb_next;
#endif

#ifdef CONFIG_PROFILER
    s->tb_count++;
    s->interm_time += profile_getclock() - ti;
    s->code_time -= profile_getclock();
#endif
    gen_code_size = tcg_gen_code(s, gen_code_buf);
    *gen_code_size_ptr = gen_code_size;
#ifdef CONFIG_PROFILER
    s->code_time += profile_getclock();
    s->code_in_len += tb->size;
    s->code_out_len += gen_code_size;
#endif

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM)) {
        qemu_log("OUT: [size=%d]\n", gen_code_size);
        log_disas(tb->tc_ptr, gen_code_size);
        qemu_log("\n");
        qemu_log_flush();
    }
#endif
    return 0;
}
/* The cpu state corresponding to 'searched_pc' is restored.
 */
static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
                                     uintptr_t searched_pc)
{
    CPUArchState *env = cpu->env_ptr;
    TCGContext *s = &tcg_ctx;
    int j;
    uintptr_t tc_ptr;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

#ifdef CONFIG_PROFILER
    ti = profile_getclock();
#endif
    tcg_func_start(s);

    gen_intermediate_code_pc(env, tb);

    if (tb->cflags & CF_USE_ICOUNT) {
        /* Reset the cycle counter to the start of the block.  */
        cpu->icount_decr.u16.low += tb->icount;
        /* Clear the IO flag.  */
        cpu->can_do_io = 0;
    }

    /* find opc index corresponding to search_pc */
    tc_ptr = (uintptr_t)tb->tc_ptr;
    if (searched_pc < tc_ptr) {
        return -1;
    }

    s->tb_next_offset = tb->tb_next_offset;
#ifdef USE_DIRECT_JUMP
    s->tb_jmp_offset = tb->tb_jmp_offset;
    s->tb_next = NULL;
#else
    s->tb_jmp_offset = NULL;
    s->tb_next = tb->tb_next;
#endif
    j = tcg_gen_code_search_pc(s, (tcg_insn_unit *)tc_ptr,
                               searched_pc - tc_ptr);
    if (j < 0) {
        return -1;
    }
    /* now find start of instruction before */
    while (s->gen_opc_instr_start[j] == 0) {
        j--;
    }
    cpu->icount_decr.u16.low -= s->gen_opc_icount[j];

    restore_state_to_opc(env, tb, j);

#ifdef CONFIG_PROFILER
    s->restore_time += profile_getclock() - ti;
    s->restore_count++;
#endif
    return 0;
}
bool cpu_restore_state(CPUState *cpu, uintptr_t retaddr)
{
    TranslationBlock *tb;

    tb = tb_find_pc(retaddr);
    if (tb) {
        cpu_restore_state_from_tb(cpu, tb, retaddr);
        if (tb->cflags & CF_NOCACHE) {
            /* one-shot translation, invalidate it immediately */
            cpu->current_tb = NULL;
            tb_phys_invalidate(tb, -1);
            tb_free(tb);
        }
        return true;
    }
    return false;
}
#ifdef _WIN32
static __attribute__((unused)) void map_exec(void *addr, long size)
{
    DWORD old_protect;

    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static __attribute__((unused)) void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif
void page_size_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
    qemu_real_host_page_size = getpagesize();
    if (qemu_host_page_size == 0) {
        qemu_host_page_size = qemu_real_host_page_size;
    }
    if (qemu_host_page_size < TARGET_PAGE_SIZE) {
        qemu_host_page_size = TARGET_PAGE_SIZE;
    }
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
}
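
/* Illustrative note (values assumed for the example): on a host with
 * 4 KiB pages emulating a guest with TARGET_PAGE_SIZE == 8192, the
 * clamp above raises qemu_host_page_size to 8192 so that one guest
 * page never spans a partially protected host range;
 * qemu_host_page_mask then becomes ~0x1fff. */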
static void page_init(void)
{
    page_size_init();
#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / V_L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            p = g_new0(void *, V_L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        pd = g_new0(PageDesc, V_L2_SIZE);
        *lp = pd;
    }

    return pd + (index & (V_L2_SIZE - 1));
}

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}
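
/* Walk example (illustrative, assuming the 10/10/10-bit layout sketched
 * above): looking up page index 0x00401003 consumes bits [29:20] = 0x004
 * at level 1, bits [19:10] = 0x004 at the intermediate level, and bits
 * [9:0] = 0x003 inside the final PageDesc array, allocating each
 * 1024-entry table on first touch when 'alloc' is non-zero. */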
#if !defined(CONFIG_USER_ONLY)
#define mmap_lock() do { } while (0)
#define mmap_unlock() do { } while (0)
#endif

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc will be used. */
/* ??? 64-bit hosts ought to have no problem mmaping data outside the
   region in which the guest needs to run.  Revisit this.  */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

/* ??? Should configure for this, not list operating systems here.  */
#if (defined(__linux__) \
    || defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__) \
    || defined(__NetBSD__))
# define USE_MMAP
#endif
/* Minimum size of the code gen buffer.  This number is randomly chosen,
   but not so small that we can't have a fair number of TB's live.  */
#define MIN_CODE_GEN_BUFFER_SIZE     (1024u * 1024)

/* Maximum size of the code gen buffer we'd like to use.  Unless otherwise
   indicated, this is constrained by the range of direct branches on the
   host cpu, as used by the TCG implementation of goto_tb.  */
#if defined(__x86_64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__sparc__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__aarch64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (128ul * 1024 * 1024)
#elif defined(__arm__)
# define MAX_CODE_GEN_BUFFER_SIZE  (16u * 1024 * 1024)
#elif defined(__s390x__)
  /* We have a +- 4GB range on the branches; leave some slop.  */
# define MAX_CODE_GEN_BUFFER_SIZE  (3ul * 1024 * 1024 * 1024)
#elif defined(__mips__)
  /* We have a 256MB branch region, but leave room to make sure the
     main executable is also within that region.  */
# define MAX_CODE_GEN_BUFFER_SIZE  (128ul * 1024 * 1024)
#else
# define MAX_CODE_GEN_BUFFER_SIZE  ((size_t)-1)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32u * 1024 * 1024)

#define DEFAULT_CODE_GEN_BUFFER_SIZE \
  (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
   ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)
static inline size_t size_code_gen_buffer(size_t tb_size)
{
    /* Size the buffer.  */
    if (tb_size == 0) {
#ifdef USE_STATIC_CODE_GEN_BUFFER
        tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* ??? Needs adjustments.  */
        /* ??? If we relax the requirement that CONFIG_USER_ONLY use the
           static buffer, we could size this on RESERVED_VA, on the text
           segment size of the executable, or continue to use the default.  */
        tb_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
        tb_size = MIN_CODE_GEN_BUFFER_SIZE;
    }
    if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
        tb_size = MAX_CODE_GEN_BUFFER_SIZE;
    }
    tcg_ctx.code_gen_buffer_size = tb_size;
    return tb_size;
}
#ifdef __mips__
/* In order to use J and JAL within the code_gen_buffer, we require
   that the buffer not cross a 256MB boundary.  */
static inline bool cross_256mb(void *addr, size_t size)
{
    return ((uintptr_t)addr ^ ((uintptr_t)addr + size)) & 0xf0000000;
}
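
/* Worked example (illustrative): a buffer at 0x0ff00000 of size
 * 0x00200000 ends at 0x10100000.  0x0ff00000 ^ 0x10100000 = 0x1fe00000,
 * which intersects the 0xf0000000 mask, so the buffer straddles a 256MB
 * boundary.  Two addresses inside the same 256MB region always agree on
 * bits 28 and up, making the XOR vanish under the mask. */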
/* We weren't able to allocate a buffer without crossing that boundary,
   so make do with the larger portion of the buffer that doesn't cross.
   Returns the new base of the buffer, and adjusts code_gen_buffer_size.  */
static inline void *split_cross_256mb(void *buf1, size_t size1)
{
    void *buf2 = (void *)(((uintptr_t)buf1 + size1) & 0xf0000000);
    size_t size2 = buf1 + size1 - buf2;

    size1 = buf2 - buf1;
    if (size1 < size2) {
        size1 = size2;
        buf1 = buf2;
    }

    tcg_ctx.code_gen_buffer_size = size1;
    return buf1;
}
#endif
#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
    __attribute__((aligned(CODE_GEN_ALIGN)));

static inline void *alloc_code_gen_buffer(void)
{
    void *buf = static_code_gen_buffer;
#ifdef __mips__
    if (cross_256mb(buf, tcg_ctx.code_gen_buffer_size)) {
        buf = split_cross_256mb(buf, tcg_ctx.code_gen_buffer_size);
    }
#endif
    map_exec(buf, tcg_ctx.code_gen_buffer_size);
    return buf;
}
#elif defined(USE_MMAP)
static inline void *alloc_code_gen_buffer(void)
{
    int flags = MAP_PRIVATE | MAP_ANONYMOUS;
    uintptr_t start = 0;
    void *buf;

    /* Constrain the position of the buffer based on the host cpu.
       Note that these addresses are chosen in concert with the
       addresses assigned in the relevant linker script file.  */
# if defined(__PIE__) || defined(__PIC__)
    /* Don't bother setting a preferred location if we're building
       a position-independent executable.  We're more likely to get
       an address near the main executable if we let the kernel
       choose the address.  */
# elif defined(__x86_64__) && defined(MAP_32BIT)
    /* Force the memory down into low memory with the executable.
       Leave the choice of exact location with the kernel.  */
    flags |= MAP_32BIT;
    /* Cannot expect to map more than 800MB in low memory.  */
    if (tcg_ctx.code_gen_buffer_size > 800u * 1024 * 1024) {
        tcg_ctx.code_gen_buffer_size = 800u * 1024 * 1024;
    }
# elif defined(__sparc__)
    start = 0x40000000ul;
# elif defined(__s390x__)
    start = 0x90000000ul;
# elif defined(__mips__)
    /* ??? We ought to more explicitly manage layout for softmmu too.  */
#  ifdef CONFIG_USER_ONLY
    start = 0x68000000ul;
#  elif _MIPS_SIM == _ABI64
    start = 0x128000000ul;
#  else
    start = 0x08000000ul;
#  endif
# endif

    buf = mmap((void *)start, tcg_ctx.code_gen_buffer_size,
               PROT_WRITE | PROT_READ | PROT_EXEC, flags, -1, 0);
    if (buf == MAP_FAILED) {
        return NULL;
    }

#ifdef __mips__
    if (cross_256mb(buf, tcg_ctx.code_gen_buffer_size)) {
        /* Try again, with the original still mapped, to avoid re-acquiring
           that 256mb crossing.  This time don't specify an address.  */
        size_t size2, size1 = tcg_ctx.code_gen_buffer_size;
        void *buf2 = mmap(NULL, size1, PROT_WRITE | PROT_READ | PROT_EXEC,
                          flags, -1, 0);
        if (buf2 != MAP_FAILED) {
            if (!cross_256mb(buf2, size1)) {
                /* Success!  Use the new buffer.  */
                munmap(buf, size1);
                return buf2;
            }
            /* Failure.  Work with what we had.  */
            munmap(buf2, size1);
        }

        /* Split the original buffer.  Free the smaller half.  */
        buf2 = split_cross_256mb(buf, size1);
        size2 = tcg_ctx.code_gen_buffer_size;
        munmap(buf + (buf == buf2 ? size2 : 0), size1 - size2);
        buf = buf2;
    }
#endif

    return buf;
}
#else
static inline void *alloc_code_gen_buffer(void)
{
    void *buf = g_try_malloc(tcg_ctx.code_gen_buffer_size);

    if (buf == NULL) {
        return NULL;
    }

#ifdef __mips__
    if (cross_256mb(buf, tcg_ctx.code_gen_buffer_size)) {
        void *buf2 = g_malloc(tcg_ctx.code_gen_buffer_size);
        if (buf2 != NULL && !cross_256mb(buf2, tcg_ctx.code_gen_buffer_size)) {
            /* Success!  Use the new buffer.  */
            free(buf);
            buf = buf2;
        } else {
            /* Failure.  Work with what we had.  Since this is malloc
               and not mmap, we can't free the other half.  */
            free(buf2);
            buf = split_cross_256mb(buf, tcg_ctx.code_gen_buffer_size);
        }
    }
#endif

    map_exec(buf, tcg_ctx.code_gen_buffer_size);
    return buf;
}
#endif /* USE_STATIC_CODE_GEN_BUFFER, USE_MMAP */
static inline void code_gen_alloc(size_t tb_size)
{
    tcg_ctx.code_gen_buffer_size = size_code_gen_buffer(tb_size);
    tcg_ctx.code_gen_buffer = alloc_code_gen_buffer();
    if (tcg_ctx.code_gen_buffer == NULL) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
        exit(1);
    }

    qemu_madvise(tcg_ctx.code_gen_buffer, tcg_ctx.code_gen_buffer_size,
                 QEMU_MADV_HUGEPAGE);

    /* Steal room for the prologue at the end of the buffer.  This ensures
       (via the MAX_CODE_GEN_BUFFER_SIZE limits above) that direct branches
       from TB's to the prologue are going to be in range.  It also means
       that we don't need to mark (additional) portions of the data segment
       as executable.  */
    tcg_ctx.code_gen_prologue = tcg_ctx.code_gen_buffer +
            tcg_ctx.code_gen_buffer_size - 1024;
    tcg_ctx.code_gen_buffer_size -= 1024;

    tcg_ctx.code_gen_buffer_max_size = tcg_ctx.code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
    tcg_ctx.code_gen_max_blocks = tcg_ctx.code_gen_buffer_size /
            CODE_GEN_AVG_BLOCK_SIZE;
    tcg_ctx.tb_ctx.tbs =
            g_malloc(tcg_ctx.code_gen_max_blocks * sizeof(TranslationBlock));
}
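
/* Resulting layout (illustrative sketch):
 *
 *   code_gen_buffer                                   buffer_size
 *   |<----- translated code, filled via code_gen_ptr ----->| 1024 |
 *   |  TBs, each aligned to CODE_GEN_ALIGN     | op slack | prologue |
 *
 * code_gen_buffer_max_size stops tb_alloc() early enough that one
 * worst-case block (TCG_MAX_OP_SIZE * OPC_BUF_SIZE bytes) still fits
 * in front of the stolen prologue area. */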
/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    tcg_ctx.code_gen_ptr = tcg_ctx.code_gen_buffer;
    tcg_register_jit(tcg_ctx.code_gen_buffer, tcg_ctx.code_gen_buffer_size);
    page_init();
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(&tcg_ctx);
#endif
}
bool tcg_enabled(void)
{
    return tcg_ctx.code_gen_buffer != NULL;
}
/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (tcg_ctx.tb_ctx.nb_tbs >= tcg_ctx.code_gen_max_blocks ||
        (tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer) >=
         tcg_ctx.code_gen_buffer_max_size) {
        return NULL;
    }
    tb = &tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}
void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single use temporary TB
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (tcg_ctx.tb_ctx.nb_tbs > 0 &&
            tb == &tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs - 1]) {
        tcg_ctx.code_gen_ptr = tb->tc_ptr;
        tcg_ctx.tb_ctx.nb_tbs--;
    }
}
static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        g_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}
/* Set to NULL all the 'first_tb' fields in all PageDescs. */
static void page_flush_tb_1(int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            page_flush_tb_1(level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;

    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / V_L2_BITS - 1, l1_map + i);
    }
}
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUArchState *env1)
{
    CPUState *cpu = ENV_GET_CPU(env1);

#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer),
           tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.tb_ctx.nb_tbs > 0 ?
           ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)) /
           tcg_ctx.tb_ctx.nb_tbs : 0);
#endif
    if ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)
        > tcg_ctx.code_gen_buffer_size) {
        cpu_abort(cpu, "Internal error: code buffer overflow\n");
    }
    tcg_ctx.tb_ctx.nb_tbs = 0;

    CPU_FOREACH(cpu) {
        memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));
    }

    memset(tcg_ctx.tb_ctx.tb_phys_hash, 0, sizeof(tcg_ctx.tb_ctx.tb_phys_hash));
    page_flush_tb();

    tcg_ctx.code_gen_ptr = tcg_ctx.code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tcg_ctx.tb_ctx.tb_flush_count++;
}
#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;

    address &= TARGET_PAGE_MASK;
    for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for (tb = tb_ctx.tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}
/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for (tb = tcg_ctx.tb_ctx.tb_phys_hash[i]; tb != NULL;
                tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif
static inline void tb_hash_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;

    for (;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = tb1->phys_hash_next;
            break;
        }
        ptb = &tb1->phys_hash_next;
    }
}
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for (;;) {
        tb1 = *ptb;
        n1 = (uintptr_t)tb1 & 3;
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}
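
/* Note on the "& 3" tagging used throughout these lists (explanatory):
 * page_next links and p->first_tb store a TranslationBlock pointer with
 * a small tag packed into the two low bits, which are free because TBs
 * are at least 4-byte aligned.  On page lists the tag is the index (0
 * or 1) of the TB's physical page the link belongs to; on the jump
 * lists below, tag 2 marks the end of the circular list.  Masking with
 * ~3 recovers the pointer, masking with 3 recovers the tag. */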
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for (;;) {
            tb1 = *ptb;
            n1 = (uintptr_t)tb1 & 3;
            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
            if (n1 == n && tb1 == tb) {
                break;
            }
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}
/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[n]));
}
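
/* Explanatory note: tb_next_offset[n] records where, inside this TB's
 * generated code, execution continues when jump n is not chained to
 * another TB.  Pointing the jump at tc_ptr + tb_next_offset[n]
 * therefore turns it into a fall-through to the TB's own exit code,
 * which returns to the exec loop to look up the next TB the slow way. */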
/* invalidate one TB */
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *cpu;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_hash_remove(&tcg_ctx.tb_ctx.tb_phys_hash[h], tb);

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tcg_ctx.tb_ctx.tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    CPU_FOREACH(cpu) {
        if (cpu->tb_jmp_cache[h] == tb) {
            cpu->tb_jmp_cache[h] = NULL;
        }
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for (;;) {
        n1 = (uintptr_t)tb1 & 3;
        if (n1 == 2) {
            break;
        }
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2); /* fail safe */

    tcg_ctx.tb_ctx.tb_phys_invalidate_count++;
}
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = bitmap_new(TARGET_PAGE_SIZE);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE) {
                tb_end = TARGET_PAGE_SIZE;
            }
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        bitmap_set(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
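
/* Example of the bitmap's use (illustrative): if a page holds one TB
 * whose code occupies bytes 0x100..0x17f, bits 0x100-0x17f of
 * code_bitmap are set.  A guest write to offset 0x80 then misses every
 * set bit and can skip TB invalidation entirely, while a write to
 * 0x104 hits and forces tb_invalidate_phys_page_range(). */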
TranslationBlock *tb_gen_code(CPUState *cpu,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    CPUArchState *env = cpu->env_ptr;
    TranslationBlock *tb;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    if (use_icount) {
        cflags |= CF_USE_ICOUNT;
    }
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info. */
        tcg_ctx.tb_ctx.tb_invalidated_flag = 1;
    }
    tb->tc_ptr = tcg_ctx.code_gen_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    tcg_ctx.code_gen_ptr = (void *)(((uintptr_t)tcg_ctx.code_gen_ptr +
            code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}
/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[. NOTE: start and end may refer to *different* physical pages.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 */
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
{
    while (start < end) {
        tb_invalidate_phys_page_range(start, end, 0);
        start &= TARGET_PAGE_MASK;
        start += TARGET_PAGE_SIZE;
    }
}
/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[. NOTE: start and end must refer to the *same* physical page.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *cpu = current_cpu;
#if defined(TARGET_HAS_PRECISE_SMC)
    CPUArchState *env = NULL;
#endif
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }
#if defined(TARGET_HAS_PRECISE_SMC)
    if (cpu != NULL) {
        env = cpu->env_ptr;
    }
#endif

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all
       the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (cpu->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(cpu->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                its execution. We could be more precise by checking
                that the modification is after the current PC, but it
                would require a specialized function to partially
                restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state_from_tb(cpu, current_tb, cpu->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (cpu != NULL) {
                saved_tb = cpu->current_tb;
                cpu->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (cpu != NULL) {
                cpu->current_tb = saved_tb;
                if (cpu->interrupt_request && cpu->current_tb) {
                    cpu_interrupt(cpu, cpu->interrupt_request);
                }
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        tlb_unprotect_code(start);
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        cpu->current_tb = NULL;
        tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(cpu, NULL);
    }
#endif
}
/* len must be <= 8 and start must be a multiple of len */
void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;

#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                  cpu_single_env->mem_io_vaddr, len,
                  cpu_single_env->eip,
                  cpu_single_env->eip +
                  (intptr_t)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD) {
        /* build code bitmap */
        build_page_bitmap(p);
    }
    if (p->code_bitmap) {
        unsigned int nr;
        unsigned long b;

        nr = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG - 1));
        if (b & ((1 << len) - 1)) {
            goto do_invalidate;
        }
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
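
/* Bit-window check, worked through (illustrative): for a 4-byte write
 * at page offset nr = 0x104, BIT_WORD(nr) selects the long containing
 * bit 0x104 and the shift aligns that bit to position 0;
 * "b & ((1 << len) - 1)" with len == 4 then tests exactly the 4 bitmap
 * bits covering the written bytes.  This is why callers must keep
 * len <= 8 and start len-aligned: the window can never straddle two
 * longs. */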
#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                    uintptr_t pc, void *puc,
                                    bool locked)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *cpu = current_cpu;
    CPUArchState *env = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
    if (cpu != NULL) {
        env = cpu->env_ptr;
    }
#endif
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state_from_tb(cpu, current_tb, pc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        cpu->current_tb = NULL;
        tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1);
        if (locked) {
            mmap_unlock();
        }
        cpu_resume_from_signal(cpu, puc);
    }
#endif
}
#endif
/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;
#endif

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    page_already_protected = p->first_tb != NULL;
#endif
    p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);
    invalidate_page_bitmap(p);

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for (addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find(addr >> TARGET_PAGE_BITS);
            if (!p2) {
                continue;
            }
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);
    }
#endif
}
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
                         tb_page_addr_t phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tcg_ctx.tb_ctx.tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1) {
        tb_alloc_page(tb, 1, phys_page2);
    } else {
        tb->page_addr[1] = -1;
    }

    tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff) {
        tb_reset_jump(tb, 0);
    }
    if (tb->tb_next_offset[1] != 0xffff) {
        tb_reset_jump(tb, 1);
    }

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
static TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
{
    int m_min, m_max, m;
    uintptr_t v;
    TranslationBlock *tb;

    if (tcg_ctx.tb_ctx.nb_tbs <= 0) {
        return NULL;
    }
    if (tc_ptr < (uintptr_t)tcg_ctx.code_gen_buffer ||
        tc_ptr >= (uintptr_t)tcg_ctx.code_gen_ptr) {
        return NULL;
    }
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = tcg_ctx.tb_ctx.nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tcg_ctx.tb_ctx.tbs[m];
        v = (uintptr_t)tb->tc_ptr;
        if (v == tc_ptr) {
            return tb;
        } else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tcg_ctx.tb_ctx.tbs[m_max];
}
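
/* Search invariant (explanatory): tbs[] is filled in code_gen_ptr
 * order, so tc_ptr values are strictly increasing.  When the loop
 * exits without an exact match, m_max indexes the last TB whose
 * tc_ptr lies below the searched address, and since the range check
 * above already ruled out addresses outside the generated-code
 * region, that TB is the one containing tc_ptr. */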
#if !defined(CONFIG_USER_ONLY)
void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr)
{
    ram_addr_t ram_addr;
    MemoryRegion *mr;
    hwaddr l = 1;

    mr = address_space_translate(as, addr, &addr, &l, false);
    if (!(memory_region_is_ram(mr)
          || memory_region_is_romd(mr))) {
        return;
    }
    ram_addr = (memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK)
        + addr;
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif /* !defined(CONFIG_USER_ONLY) */
void tb_check_watchpoint(CPUState *cpu)
{
    TranslationBlock *tb;

    tb = tb_find_pc(cpu->mem_io_pc);
    if (tb) {
        /* We can use retranslation to find the PC.  */
        cpu_restore_state_from_tb(cpu, tb, cpu->mem_io_pc);
        tb_phys_invalidate(tb, -1);
    } else {
        /* The exception probably happened in a helper.  The CPU state should
           have been saved before calling it. Fetch the PC from there.  */
        CPUArchState *env = cpu->env_ptr;
        target_ulong pc, cs_base;
        tb_page_addr_t addr;
        int flags;

        cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
        addr = get_page_addr_code(env, pc);
        tb_invalidate_phys_range(addr, addr + 1);
    }
}
#ifndef CONFIG_USER_ONLY
/* mask must never be zero, except for A20 change call */
static void tcg_handle_interrupt(CPUState *cpu, int mask)
{
    int old_mask;

    old_mask = cpu->interrupt_request;
    cpu->interrupt_request |= mask;

    /*
     * If called from iothread context, wake the target cpu in
     * case its halted.
     */
    if (!qemu_cpu_is_self(cpu)) {
        qemu_cpu_kick(cpu);
        return;
    }

    if (use_icount) {
        cpu->icount_decr.u16.high = 0xffff;
        if (!cpu_can_do_io(cpu)
            && (mask & ~old_mask) != 0) {
            cpu_abort(cpu, "Raised interrupt while not in I/O function");
        }
    } else {
        cpu->tcg_exit_req = 1;
    }
}

CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
{
#if defined(TARGET_MIPS) || defined(TARGET_SH4)
    CPUArchState *env = cpu->env_ptr;
#endif
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc(retaddr);
    if (!tb) {
        cpu_abort(cpu, "cpu_io_recompile: could not find TB for pc=%p",
                  (void *)retaddr);
    }
    n = cpu->icount_decr.u16.low + tb->icount;
    cpu_restore_state_from_tb(cpu, tb, retaddr);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - cpu->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= (env->hflags & MIPS_HFLAG_B16 ? 2 : 4);
        cpu->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
        && n > 1) {
        env->pc -= 2;
        cpu->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK) {
        cpu_abort(cpu, "TB too big during recompile");
    }

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(cpu, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(cpu, NULL);
}
void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset(&cpu->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset(&cpu->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}
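
/* Why two hash-page ranges are cleared (explanatory): tb_jmp_cache is
 * indexed by a hash of the TB's *starting* pc, and a TB that starts
 * near the end of the preceding page can spill into the page being
 * flushed.  Clearing the bucket range for addr - TARGET_PAGE_SIZE as
 * well catches those straddling TBs. */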
void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for (i = 0; i < tcg_ctx.tb_ctx.nb_tbs; i++) {
        tb = &tcg_ctx.tb_ctx.tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size) {
            max_target_code_size = tb->size;
        }
        if (tb->page_addr[1] != -1) {
            cross_page++;
        }
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %td/%zd\n",
                tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer,
                tcg_ctx.code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
            tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
            tcg_ctx.tb_ctx.nb_tbs ? target_code_size /
                    tcg_ctx.tb_ctx.nb_tbs : 0,
            max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %td bytes (expansion ratio: %0.1f)\n",
            tcg_ctx.tb_ctx.nb_tbs ? (tcg_ctx.code_gen_ptr -
                                     tcg_ctx.code_gen_buffer) /
                                     tcg_ctx.tb_ctx.nb_tbs : 0,
            target_code_size ? (double) (tcg_ctx.code_gen_ptr -
                                         tcg_ctx.code_gen_buffer) /
                                         target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n", cross_page,
            tcg_ctx.tb_ctx.nb_tbs ? (cross_page * 100) /
                                    tcg_ctx.tb_ctx.nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp_count * 100) /
                        tcg_ctx.tb_ctx.nb_tbs : 0,
                direct_jmp2_count,
                tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp2_count * 100) /
                        tcg_ctx.tb_ctx.nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tcg_ctx.tb_ctx.tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n",
            tcg_ctx.tb_ctx.tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}
void dump_opcount_info(FILE *f, fprintf_function cpu_fprintf)
{
    tcg_dump_op_count(f, cpu_fprintf);
}
#else /* CONFIG_USER_ONLY */

void cpu_interrupt(CPUState *cpu, int mask)
{
    cpu->interrupt_request |= mask;
    cpu->tcg_exit_req = 1;
}
/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */
struct walk_memory_regions_data {
    walk_memory_regions_fn fn;
    void *priv;
    target_ulong start;
    int prot;
};

static int walk_memory_regions_end(struct walk_memory_regions_data *data,
                                   target_ulong end, int new_prot)
{
    if (data->start != -1u) {
        int rc = data->fn(data->priv, data->start, end, data->prot);
        if (rc != 0) {
            return rc;
        }
    }

    data->start = (new_prot ? end : -1u);
    data->prot = new_prot;

    return 0;
}
static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 target_ulong base, int level, void **lp)
{
    target_ulong pa;
    int i, rc;

    if (*lp == NULL) {
        return walk_memory_regions_end(data, base, 0);
    }

    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            if (prot != data->prot) {
                rc = walk_memory_regions_end(data, pa, prot);
                if (rc != 0) {
                    return rc;
                }
            }
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            pa = base | ((target_ulong)i <<
                (TARGET_PAGE_BITS + V_L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
            if (rc != 0) {
                return rc;
            }
        }
    }

    return 0;
}
int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    struct walk_memory_regions_data data;
    uintptr_t i;

    data.fn = fn;
    data.priv = priv;
    data.start = -1u;
    data.prot = 0;

    for (i = 0; i < V_L1_SIZE; i++) {
        int rc = walk_memory_regions_1(&data,
                                       (target_ulong)i << (V_L1_SHIFT + TARGET_PAGE_BITS),
                                       V_L1_SHIFT / V_L2_BITS - 1, l1_map + i);
        if (rc != 0) {
            return rc;
        }
    }

    return walk_memory_regions_end(&data, 0, 0);
}
static int dump_region(void *priv, target_ulong start,
    target_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    (void) fprintf(f, TARGET_FMT_lx"-"TARGET_FMT_lx
        " "TARGET_FMT_lx" %c%c%c\n",
        start, end, end - start,
        ((prot & PAGE_READ) ? 'r' : '-'),
        ((prot & PAGE_WRITE) ? 'w' : '-'),
        ((prot & PAGE_EXEC) ? 'x' : '-'));

    return 0;
}
/* dump memory mappings */
void page_dump(FILE *f)
{
    const int length = sizeof(target_ulong) * 2;

    (void) fprintf(f, "%-*s %-*s %-*s %s\n",
                   length, "start", length, "end", length, "size", "prot");
    walk_memory_regions(f, dump_region);
}
int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        return 0;
    }
    return p->flags;
}
/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is positioned automatically depending
   on PAGE_WRITE.  The mmap_lock should already be held.  */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
    assert(start < end);

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        flags |= PAGE_WRITE_ORG;
    }

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate
           the code inside.  */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL, false);
        }
        p->flags = flags;
    }
}
int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(start < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif

    if (len == 0) {
        return 0;
    }
    if (start + len - 1 < start) {
        /* We've wrapped around.  */
        return -1;
    }

    /* must do before we lose bits in the next step */
    end = TARGET_PAGE_ALIGN(start + len);
    start = start & TARGET_PAGE_MASK;

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p) {
            return -1;
        }
        if (!(p->flags & PAGE_VALID)) {
            return -1;
        }

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) {
            return -1;
        }
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG)) {
                return -1;
            }
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL)) {
                    return -1;
                }
            }
        }
    }
    return 0;
}
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, uintptr_t pc, void *puc)
{
    unsigned int prot;
    PageDesc *p;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
    mmap_lock();

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        mmap_unlock();
        return 0;
    }

    /* if the page was really writable, then we change its
       protection back to writable */
    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
        host_start = address & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;

        prot = 0;
        for (addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
            p = page_find(addr >> TARGET_PAGE_BITS);
            p->flags |= PAGE_WRITE;
            prot |= p->flags;

            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(addr, pc, puc, true);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(addr);
#endif
        }
        mprotect((void *)g2h(host_start), qemu_host_page_size,
                 prot & PAGE_BITS);

        mmap_unlock();
        return 1;
    }
    mmap_unlock();
    return 0;
}
#endif /* CONFIG_USER_ONLY */