/*
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <sys/types.h>

#include "qemu-common.h"
#define NO_CPU_IO_DEFS
#include "disas/disas.h"

#if defined(CONFIG_USER_ONLY)
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <machine/profile.h>
#endif
#endif
#else
#include "exec/address-spaces.h"
#endif

#include "exec/cputlb.h"
#include "translate-all.h"
#include "qemu/timer.h"
//#define DEBUG_TB_INVALIDATE
/* make various TB consistency checks */
//#define DEBUG_TB_CHECK

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;
/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define V_L2_BITS 10
#define V_L2_SIZE (1 << V_L2_BITS)

/* The bits remaining after N lower levels of page tables.  */
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % V_L2_BITS)

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + V_L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
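
/* Worked example (illustrative figures, assuming V_L2_BITS = 10 as
   defined above): with L1_MAP_ADDR_SPACE_BITS = 64 and
   TARGET_PAGE_BITS = 12, 52 index bits remain; 52 % 10 = 2 is below
   the threshold of 4, so the remainder is folded into the first level:
   V_L1_BITS = 12, V_L1_SIZE = 4096 entries, V_L1_SHIFT = 40, i.e. four
   full V_L2_SIZE levels sit below the L1 table. */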
uintptr_t qemu_real_host_page_size;
uintptr_t qemu_host_page_size;
uintptr_t qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];
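
/* Lookup sketch: for a page index 'index', the L1 slot is
   l1_map[(index >> V_L1_SHIFT) & (V_L1_SIZE - 1)]; each level below
   consumes a further V_L2_BITS-sized chunk of the index until a
   PageDesc array is reached.  page_find_alloc() below implements this
   walk. */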
/* code generation context */
TCGContext tcg_ctx;

static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
                         tb_page_addr_t phys_page2);
static TranslationBlock *tb_find_pc(uintptr_t tc_ptr);

void cpu_gen_init(void)
{
    tcg_context_init(&tcg_ctx);
}
/* return non zero if the very first instruction is invalid so that
   the virtual CPU can trigger an exception.

   '*gen_code_size_ptr' contains the size of the generated code (host
   code).
*/
int cpu_gen_code(CPUArchState *env, TranslationBlock *tb, int *gen_code_size_ptr)
{
    TCGContext *s = &tcg_ctx;
    uint8_t *gen_code_buf;
    int gen_code_size;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

#ifdef CONFIG_PROFILER
    s->tb_count1++; /* includes aborted translations because of
                       exceptions */
    ti = profile_getclock();
#endif
    tcg_func_start(s);

    gen_intermediate_code(env, tb);

    /* generate machine code */
    gen_code_buf = tb->tc_ptr;
    tb->tb_next_offset[0] = 0xffff;
    tb->tb_next_offset[1] = 0xffff;
    s->tb_next_offset = tb->tb_next_offset;
#ifdef USE_DIRECT_JUMP
    s->tb_jmp_offset = tb->tb_jmp_offset;
    s->tb_next = NULL;
#else
    s->tb_jmp_offset = NULL;
    s->tb_next = tb->tb_next;
#endif

#ifdef CONFIG_PROFILER
    s->tb_count++;
    s->interm_time += profile_getclock() - ti;
    s->code_time -= profile_getclock();
#endif
    gen_code_size = tcg_gen_code(s, gen_code_buf);
    *gen_code_size_ptr = gen_code_size;
#ifdef CONFIG_PROFILER
    s->code_time += profile_getclock();
    s->code_in_len += tb->size;
    s->code_out_len += gen_code_size;
#endif

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM)) {
        qemu_log("OUT: [size=%d]\n", *gen_code_size_ptr);
        log_disas(tb->tc_ptr, *gen_code_size_ptr);
        qemu_log("\n");
        qemu_log_flush();
    }
#endif
    return 0;
}
/* The cpu state corresponding to 'searched_pc' is restored.
 */
static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
                                     uintptr_t searched_pc)
{
    CPUArchState *env = cpu->env_ptr;
    TCGContext *s = &tcg_ctx;
    int j;
    uintptr_t tc_ptr;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

#ifdef CONFIG_PROFILER
    ti = profile_getclock();
#endif
    tcg_func_start(s);

    gen_intermediate_code_pc(env, tb);

    if (use_icount) {
        /* Reset the cycle counter to the start of the block.  */
        cpu->icount_decr.u16.low += tb->icount;
        /* Clear the IO flag.  */
        cpu->can_do_io = 0;
    }

    /* find opc index corresponding to search_pc */
    tc_ptr = (uintptr_t)tb->tc_ptr;
    if (searched_pc < tc_ptr) {
        return -1;
    }

    s->tb_next_offset = tb->tb_next_offset;
#ifdef USE_DIRECT_JUMP
    s->tb_jmp_offset = tb->tb_jmp_offset;
    s->tb_next = NULL;
#else
    s->tb_jmp_offset = NULL;
    s->tb_next = tb->tb_next;
#endif
    j = tcg_gen_code_search_pc(s, (uint8_t *)tc_ptr, searched_pc - tc_ptr);
    if (j < 0) {
        return -1;
    }
    /* now find start of instruction before */
    while (s->gen_opc_instr_start[j] == 0) {
        j--;
    }
    cpu->icount_decr.u16.low -= s->gen_opc_icount[j];

    restore_state_to_opc(env, tb, j);

#ifdef CONFIG_PROFILER
    s->restore_time += profile_getclock() - ti;
    s->restore_count++;
#endif
    return 0;
}
bool cpu_restore_state(CPUState *cpu, uintptr_t retaddr)
{
    TranslationBlock *tb;

    tb = tb_find_pc(retaddr);
    if (tb) {
        cpu_restore_state_from_tb(cpu, tb, retaddr);
        return true;
    }
    return false;
}
#ifdef _WIN32
static inline void map_exec(void *addr, long size)
{
    DWORD old_protect;

    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static inline void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif
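
/* Usage note: a call such as map_exec(buf + 10, 100) on a host with
   4096-byte pages rounds 'start' down and 'end' up to page boundaries,
   so the whole containing page becomes RWX even for a small, unaligned
   request. */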
void page_size_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    SYSTEM_INFO system_info;

    GetSystemInfo(&system_info);
    qemu_real_host_page_size = system_info.dwPageSize;
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0) {
        qemu_host_page_size = qemu_real_host_page_size;
    }
    if (qemu_host_page_size < TARGET_PAGE_SIZE) {
        qemu_host_page_size = TARGET_PAGE_SIZE;
    }
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
}
static void page_init(void)
{
    page_size_init();
#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use g_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = g_malloc0(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / V_L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * V_L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * V_L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (V_L2_SIZE - 1));
}

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}
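
/* page_find() is the read-only variant: a NULL result simply means no
   PageDesc was ever allocated for that page, which callers treat as
   "no TBs, no flags" rather than as an error. */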
#if !defined(CONFIG_USER_ONLY)
#define mmap_lock() do { } while (0)
#define mmap_unlock() do { } while (0)
#endif

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc will be used. */
/* ??? 64-bit hosts ought to have no problem mmaping data outside the
   region in which the guest needs to run.  Revisit this.  */
#define USE_STATIC_CODE_GEN_BUFFER
#endif
/* ??? Should configure for this, not list operating systems here.  */
#if (defined(__linux__) \
    || defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__) \
    || defined(__NetBSD__))
# define USE_MMAP
#endif

/* Minimum size of the code gen buffer.  This number is randomly chosen,
   but not so small that we can't have a fair number of TB's live.  */
#define MIN_CODE_GEN_BUFFER_SIZE     (1024u * 1024)

/* Maximum size of the code gen buffer we'd like to use.  Unless otherwise
   indicated, this is constrained by the range of direct branches on the
   host cpu, as used by the TCG implementation of goto_tb.  */
#if defined(__x86_64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__sparc__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__aarch64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (128ul * 1024 * 1024)
#elif defined(__arm__)
# define MAX_CODE_GEN_BUFFER_SIZE  (16u * 1024 * 1024)
#elif defined(__s390x__)
  /* We have a +- 4GB range on the branches; leave some slop.  */
# define MAX_CODE_GEN_BUFFER_SIZE  (3ul * 1024 * 1024 * 1024)
#else
# define MAX_CODE_GEN_BUFFER_SIZE  ((size_t)-1)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32u * 1024 * 1024)

#define DEFAULT_CODE_GEN_BUFFER_SIZE \
  (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
   ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)
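
/* Example of how the limits combine (illustrative): on an x86-64 host,
   direct branches reach +-2GB, so MAX_CODE_GEN_BUFFER_SIZE is 2GB and
   the default is min(32MB, 2GB) = 32MB; a user-supplied size is later
   clamped into [MIN_CODE_GEN_BUFFER_SIZE, MAX_CODE_GEN_BUFFER_SIZE] by
   size_code_gen_buffer() below. */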
static inline size_t size_code_gen_buffer(size_t tb_size)
{
    /* Size the buffer.  */
    if (tb_size == 0) {
#ifdef USE_STATIC_CODE_GEN_BUFFER
        tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* ??? Needs adjustments.  */
        /* ??? If we relax the requirement that CONFIG_USER_ONLY use the
           static buffer, we could size this on RESERVED_VA, on the text
           segment size of the executable, or continue to use the default.  */
        tb_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
        tb_size = MIN_CODE_GEN_BUFFER_SIZE;
    }
    if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
        tb_size = MAX_CODE_GEN_BUFFER_SIZE;
    }
    tcg_ctx.code_gen_buffer_size = tb_size;
    return tb_size;
}
#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
    __attribute__((aligned(CODE_GEN_ALIGN)));

static inline void *alloc_code_gen_buffer(void)
{
    map_exec(static_code_gen_buffer, tcg_ctx.code_gen_buffer_size);
    return static_code_gen_buffer;
}
#elif defined(USE_MMAP)
static inline void *alloc_code_gen_buffer(void)
{
    int flags = MAP_PRIVATE | MAP_ANONYMOUS;
    uintptr_t start = 0;
    void *buf;

    /* Constrain the position of the buffer based on the host cpu.
       Note that these addresses are chosen in concert with the
       addresses assigned in the relevant linker script file.  */
# if defined(__PIE__) || defined(__PIC__)
    /* Don't bother setting a preferred location if we're building
       a position-independent executable.  We're more likely to get
       an address near the main executable if we let the kernel
       choose the address.  */
# elif defined(__x86_64__) && defined(MAP_32BIT)
    /* Force the memory down into low memory with the executable.
       Leave the choice of exact location with the kernel.  */
    flags |= MAP_32BIT;
    /* Cannot expect to map more than 800MB in low memory.  */
    if (tcg_ctx.code_gen_buffer_size > 800u * 1024 * 1024) {
        tcg_ctx.code_gen_buffer_size = 800u * 1024 * 1024;
    }
# elif defined(__sparc__)
    start = 0x40000000ul;
# elif defined(__s390x__)
    start = 0x90000000ul;
# endif

    buf = mmap((void *)start, tcg_ctx.code_gen_buffer_size,
               PROT_WRITE | PROT_READ | PROT_EXEC, flags, -1, 0);
    return buf == MAP_FAILED ? NULL : buf;
}
#else
static inline void *alloc_code_gen_buffer(void)
{
    void *buf = g_malloc(tcg_ctx.code_gen_buffer_size);

    if (buf) {
        map_exec(buf, tcg_ctx.code_gen_buffer_size);
    }
    return buf;
}
#endif /* USE_STATIC_CODE_GEN_BUFFER, USE_MMAP */
static inline void code_gen_alloc(size_t tb_size)
{
    tcg_ctx.code_gen_buffer_size = size_code_gen_buffer(tb_size);
    tcg_ctx.code_gen_buffer = alloc_code_gen_buffer();
    if (tcg_ctx.code_gen_buffer == NULL) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
        exit(1);
    }

    qemu_madvise(tcg_ctx.code_gen_buffer, tcg_ctx.code_gen_buffer_size,
                 QEMU_MADV_HUGEPAGE);

    /* Steal room for the prologue at the end of the buffer.  This ensures
       (via the MAX_CODE_GEN_BUFFER_SIZE limits above) that direct branches
       from TB's to the prologue are going to be in range.  It also means
       that we don't need to mark (additional) portions of the data segment
       as executable.  */
    tcg_ctx.code_gen_prologue = tcg_ctx.code_gen_buffer +
            tcg_ctx.code_gen_buffer_size - 1024;
    tcg_ctx.code_gen_buffer_size -= 1024;

    tcg_ctx.code_gen_buffer_max_size = tcg_ctx.code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
    tcg_ctx.code_gen_max_blocks = tcg_ctx.code_gen_buffer_size /
            CODE_GEN_AVG_BLOCK_SIZE;
    tcg_ctx.tb_ctx.tbs =
            g_malloc(tcg_ctx.code_gen_max_blocks * sizeof(TranslationBlock));
}
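
/* Resulting layout (sketch):
     [ translated code ... | worst-case op slack | 1024-byte prologue ]
   code_gen_buffer_max_size leaves TCG_MAX_OP_SIZE * OPC_BUF_SIZE bytes
   of headroom so that a maximal block started just under the limit
   still fits in front of the prologue stolen from the end. */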
/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    tcg_ctx.code_gen_ptr = tcg_ctx.code_gen_buffer;
    tcg_register_jit(tcg_ctx.code_gen_buffer, tcg_ctx.code_gen_buffer_size);
    page_init();
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(&tcg_ctx);
#endif
}

bool tcg_enabled(void)
{
    return tcg_ctx.code_gen_buffer != NULL;
}
/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (tcg_ctx.tb_ctx.nb_tbs >= tcg_ctx.code_gen_max_blocks ||
        (tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer) >=
         tcg_ctx.code_gen_buffer_max_size) {
        return NULL;
    }
    tb = &tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}
void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single use temporary TB
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (tcg_ctx.tb_ctx.nb_tbs > 0 &&
            tb == &tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs - 1]) {
        tcg_ctx.code_gen_ptr = tb->tc_ptr;
        tcg_ctx.tb_ctx.nb_tbs--;
    }
}
static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        g_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}
/* Set to NULL all the 'first_tb' fields in all PageDescs. */
static void page_flush_tb_1(int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            page_flush_tb_1(level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;

    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / V_L2_BITS - 1, l1_map + i);
    }
}
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUArchState *env1)
{
    CPUState *cpu;

#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer),
           tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.tb_ctx.nb_tbs > 0 ?
           ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)) /
           tcg_ctx.tb_ctx.nb_tbs : 0);
#endif
    if ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)
        > tcg_ctx.code_gen_buffer_size) {
        cpu_abort(env1, "Internal error: code buffer overflow\n");
    }
    tcg_ctx.tb_ctx.nb_tbs = 0;

    CPU_FOREACH(cpu) {
        memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));
    }

    memset(tcg_ctx.tb_ctx.tb_phys_hash, 0, sizeof(tcg_ctx.tb_ctx.tb_phys_hash));
    page_flush_tb();

    tcg_ctx.code_gen_ptr = tcg_ctx.code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tcg_ctx.tb_ctx.tb_flush_count++;
}
#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;

    address &= TARGET_PAGE_MASK;
    for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for (tb = tb_ctx.tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}
/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for (tb = tcg_ctx.tb_ctx.tb_phys_hash[i]; tb != NULL;
                tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif
static inline void tb_hash_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;

    for (;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = tb1->phys_hash_next;
            break;
        }
        ptb = &tb1->phys_hash_next;
    }
}
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for (;;) {
        tb1 = *ptb;
        n1 = (uintptr_t)tb1 & 3;
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}
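
/* Pointer-tagging convention used by these lists: the low two bits of a
   TranslationBlock link encode which of the TB's page slots (0 or 1)
   the link belongs to, and the value 2 marks the head of the circular
   jump list; hence the recurring n1 = (uintptr_t)tb1 & 3 followed by
   masking with ~3 to recover the real pointer. */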
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for (;;) {
            tb1 = *ptb;
            n1 = (uintptr_t)tb1 & 3;
            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
            if (n1 == n && tb1 == tb) {
                break;
            }
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}
/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[n]));
}
/* invalidate one TB */
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *cpu;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_hash_remove(&tcg_ctx.tb_ctx.tb_phys_hash[h], tb);

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tcg_ctx.tb_ctx.tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    CPU_FOREACH(cpu) {
        if (cpu->tb_jmp_cache[h] == tb) {
            cpu->tb_jmp_cache[h] = NULL;
        }
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for (;;) {
        n1 = (uintptr_t)tb1 & 3;
        if (n1 == 2) {
            break;
        }
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2); /* fail safe */

    tcg_ctx.tb_ctx.tb_phys_invalidate_count++;
}
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
*p
)
914 int n
, tb_start
, tb_end
;
915 TranslationBlock
*tb
;
917 p
->code_bitmap
= g_malloc0(TARGET_PAGE_SIZE
/ 8);
921 n
= (uintptr_t)tb
& 3;
922 tb
= (TranslationBlock
*)((uintptr_t)tb
& ~3);
923 /* NOTE: this is subtle as a TB may span two physical pages */
925 /* NOTE: tb_end may be after the end of the page, but
926 it is not a problem */
927 tb_start
= tb
->pc
& ~TARGET_PAGE_MASK
;
928 tb_end
= tb_start
+ tb
->size
;
929 if (tb_end
> TARGET_PAGE_SIZE
) {
930 tb_end
= TARGET_PAGE_SIZE
;
934 tb_end
= ((tb
->pc
+ tb
->size
) & ~TARGET_PAGE_MASK
);
936 set_bits(p
->code_bitmap
, tb_start
, tb_end
- tb_start
);
937 tb
= tb
->page_next
[n
];
TranslationBlock *tb_gen_code(CPUState *cpu,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    CPUArchState *env = cpu->env_ptr;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info.  */
        tcg_ctx.tb_ctx.tb_invalidated_flag = 1;
    }
    tc_ptr = tcg_ctx.code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    tcg_ctx.code_gen_ptr = (void *)(((uintptr_t)tcg_ctx.code_gen_ptr +
            code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}
/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[. NOTE: start and end may refer to *different* physical pages.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 */
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end,
                              int is_cpu_write_access)
{
    while (start < end) {
        tb_invalidate_phys_page_range(start, end, is_cpu_write_access);
        start &= TARGET_PAGE_MASK;
        start += TARGET_PAGE_SIZE;
    }
}
/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[. NOTE: start and end must refer to the *same* physical page.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *cpu = current_cpu;
#if defined(TARGET_HAS_PRECISE_SMC) || !defined(CONFIG_USER_ONLY)
    CPUArchState *env = NULL;
#endif
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }
#if defined(TARGET_HAS_PRECISE_SMC) || !defined(CONFIG_USER_ONLY)
    if (cpu != NULL) {
        env = cpu->env_ptr;
    }
#endif

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all
       the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (cpu->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(cpu->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                its execution. We could be more precise by checking
                that the modification is after the current PC, but it
                would require a specialized function to partially
                restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state_from_tb(cpu, current_tb, cpu->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (cpu != NULL) {
                saved_tb = cpu->current_tb;
                cpu->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (cpu != NULL) {
                cpu->current_tb = saved_tb;
                if (cpu->interrupt_request && cpu->current_tb) {
                    cpu_interrupt(cpu, cpu->interrupt_request);
                }
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, cpu->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        cpu->current_tb = NULL;
        tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}
/* len must be <= 8 and start must be a multiple of len */
void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;

#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                  cpu_single_env->mem_io_vaddr, len,
                  cpu_single_env->eip,
                  cpu_single_env->eip +
                  (intptr_t)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1)) {
            goto do_invalidate;
        }
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
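
/* Fast-path sketch: the code bitmap holds one bit per target byte of
   the page, so offset >> 3 selects the bitmap byte and (1 << len) - 1
   the run of bits to test; only if translated code overlaps the bytes
   being written does the slow full-page scan above run. */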
#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                    uintptr_t pc, void *puc,
                                    bool locked)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *cpu = current_cpu;
    CPUArchState *env = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
    if (cpu != NULL) {
        env = cpu->env_ptr;
    }
#endif
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state_from_tb(cpu, current_tb, pc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        cpu->current_tb = NULL;
        tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1);
        if (locked) {
            mmap_unlock();
        }
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif
/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;
#endif

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    page_already_protected = p->first_tb != NULL;
#endif
    p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for (addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find(addr >> TARGET_PAGE_BITS);
            if (!p2) {
                continue;
            }
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
                         tb_page_addr_t phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tcg_ctx.tb_ctx.tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1) {
        tb_alloc_page(tb, 1, phys_page2);
    } else {
        tb->page_addr[1] = -1;
    }

    tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff) {
        tb_reset_jump(tb, 0);
    }
    if (tb->tb_next_offset[1] != 0xffff) {
        tb_reset_jump(tb, 1);
    }

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
static TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
{
    int m_min, m_max, m;
    uintptr_t v;
    TranslationBlock *tb;

    if (tcg_ctx.tb_ctx.nb_tbs <= 0) {
        return NULL;
    }
    if (tc_ptr < (uintptr_t)tcg_ctx.code_gen_buffer ||
        tc_ptr >= (uintptr_t)tcg_ctx.code_gen_ptr) {
        return NULL;
    }
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = tcg_ctx.tb_ctx.nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tcg_ctx.tb_ctx.tbs[m];
        v = (uintptr_t)tb->tc_ptr;
        if (v == tc_ptr) {
            return tb;
        } else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tcg_ctx.tb_ctx.tbs[m_max];
}
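
/* Falling out of the loop means no exact tc_ptr match; since tbs[] is
   filled in ascending tc_ptr order (blocks are carved sequentially from
   code_gen_buffer), tbs[m_max] is the last TB starting at or below
   tc_ptr, i.e. the one containing the searched host PC. */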
#if defined(TARGET_HAS_ICE) && !defined(CONFIG_USER_ONLY)
void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr)
{
    ram_addr_t ram_addr;
    MemoryRegion *mr;
    hwaddr l = 1;

    mr = address_space_translate(as, addr, &addr, &l, false);
    if (!(memory_region_is_ram(mr)
          || memory_region_is_romd(mr))) {
        return;
    }
    ram_addr = (memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK)
        + addr;
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif /* TARGET_HAS_ICE && !defined(CONFIG_USER_ONLY) */
void tb_check_watchpoint(CPUState *cpu)
{
    CPUArchState *env = cpu->env_ptr;
    TranslationBlock *tb;

    tb = tb_find_pc(cpu->mem_io_pc);
    if (!tb) {
        cpu_abort(env, "check_watchpoint: could not find TB for pc=%p",
                  (void *)cpu->mem_io_pc);
    }
    cpu_restore_state_from_tb(cpu, tb, cpu->mem_io_pc);
    tb_phys_invalidate(tb, -1);
}
#ifndef CONFIG_USER_ONLY
/* mask must never be zero, except for A20 change call */
static void tcg_handle_interrupt(CPUState *cpu, int mask)
{
    CPUArchState *env = cpu->env_ptr;
    int old_mask;

    old_mask = cpu->interrupt_request;
    cpu->interrupt_request |= mask;

    /*
     * If called from iothread context, wake the target cpu in
     * case its halted.
     */
    if (!qemu_cpu_is_self(cpu)) {
        qemu_cpu_kick(cpu);
        return;
    }

    if (use_icount) {
        cpu->icount_decr.u16.high = 0xffff;
        if (!cpu_can_do_io(cpu)
            && (mask & ~old_mask) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
    } else {
        cpu->tcg_exit_req = 1;
    }
}

CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
{
    CPUArchState *env = cpu->env_ptr;
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc(retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  (void *)retaddr);
    }
    n = cpu->icount_decr.u16.low + tb->icount;
    cpu_restore_state_from_tb(cpu, tb, retaddr);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - cpu->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        cpu->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
        && n > 1) {
        env->pc -= 2;
        cpu->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK) {
        cpu_abort(env, "TB too big during recompile");
    }

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(cpu, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}
void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset(&cpu->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset(&cpu->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}
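
/* Two flushes are needed because entries are hashed by a TB's starting
   page: a TB that begins on the preceding page can spill into the
   flushed one, so the bucket for 'addr - TARGET_PAGE_SIZE' is cleared
   as well. */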
void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for (i = 0; i < tcg_ctx.tb_ctx.nb_tbs; i++) {
        tb = &tcg_ctx.tb_ctx.tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size) {
            max_target_code_size = tb->size;
        }
        if (tb->page_addr[1] != -1) {
            cross_page++;
        }
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %td/%zd\n",
                tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer,
                tcg_ctx.code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
            tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
            tcg_ctx.tb_ctx.nb_tbs ? target_code_size /
                    tcg_ctx.tb_ctx.nb_tbs : 0,
            max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %td bytes (expansion ratio: %0.1f)\n",
            tcg_ctx.tb_ctx.nb_tbs ? (tcg_ctx.code_gen_ptr -
                                     tcg_ctx.code_gen_buffer) /
                                     tcg_ctx.tb_ctx.nb_tbs : 0,
            target_code_size ? (double) (tcg_ctx.code_gen_ptr -
                                         tcg_ctx.code_gen_buffer) /
                                         target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n", cross_page,
            tcg_ctx.tb_ctx.nb_tbs ? (cross_page * 100) /
                                    tcg_ctx.tb_ctx.nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp_count * 100) /
                        tcg_ctx.tb_ctx.nb_tbs : 0,
                direct_jmp2_count,
                tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp2_count * 100) /
                        tcg_ctx.tb_ctx.nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tcg_ctx.tb_ctx.tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n",
            tcg_ctx.tb_ctx.tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}
#else /* CONFIG_USER_ONLY */

void cpu_interrupt(CPUState *cpu, int mask)
{
    cpu->interrupt_request |= mask;
    cpu->tcg_exit_req = 1;
}
/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */
struct walk_memory_regions_data {
    walk_memory_regions_fn fn;
    void *priv;
    uintptr_t start;
    int prot;
};

static int walk_memory_regions_end(struct walk_memory_regions_data *data,
                                   abi_ulong end, int new_prot)
{
    if (data->start != -1ul) {
        int rc = data->fn(data->priv, data->start, end, data->prot);
        if (rc != 0) {
            return rc;
        }
    }

    data->start = (new_prot ? end : -1ul);
    data->prot = new_prot;

    return 0;
}
static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 abi_ulong base, int level, void **lp)
{
    abi_ulong pa;
    int i, rc;

    if (*lp == NULL) {
        return walk_memory_regions_end(data, base, 0);
    }

    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            if (prot != data->prot) {
                rc = walk_memory_regions_end(data, pa, prot);
                if (rc != 0) {
                    return rc;
                }
            }
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            pa = base | ((abi_ulong)i <<
                (TARGET_PAGE_BITS + V_L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
            if (rc != 0) {
                return rc;
            }
        }
    }

    return 0;
}
int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    struct walk_memory_regions_data data;
    uintptr_t i;

    data.fn = fn;
    data.priv = priv;
    data.start = -1ul;
    data.prot = 0;

    for (i = 0; i < V_L1_SIZE; i++) {
        int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
                                       V_L1_SHIFT / V_L2_BITS - 1, l1_map + i);
        if (rc != 0) {
            return rc;
        }
    }

    return walk_memory_regions_end(&data, 0, 0);
}
static int dump_region(void *priv, abi_ulong start,
    abi_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
        " "TARGET_ABI_FMT_lx" %c%c%c\n",
        start, end, end - start,
        ((prot & PAGE_READ) ? 'r' : '-'),
        ((prot & PAGE_WRITE) ? 'w' : '-'),
        ((prot & PAGE_EXEC) ? 'x' : '-'));

    return 0;
}
/* dump memory mappings */
void page_dump(FILE *f)
{
    const int length = sizeof(abi_ulong) * 2;

    (void) fprintf(f, "%-*s %-*s %-*s %s\n",
            length, "start", length, "end", length, "size", "prot");
    walk_memory_regions(f, dump_region);
}
int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        return 0;
    }
    return p->flags;
}
/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is positioned automatically depending
   on PAGE_WRITE.  The mmap_lock should already be held.  */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
    assert(start < end);

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        flags |= PAGE_WRITE_ORG;
    }

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate
           the code inside.  */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL, false);
        }
        p->flags = flags;
    }
}
int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif

    if (len == 0) {
        return 0;
    }
    if (start + len - 1 < start) {
        /* We've wrapped around.  */
        return -1;
    }

    /* must do before we loose bits in the next step */
    end = TARGET_PAGE_ALIGN(start + len);
    start = start & TARGET_PAGE_MASK;

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p) {
            return -1;
        }
        if (!(p->flags & PAGE_VALID)) {
            return -1;
        }

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) {
            return -1;
        }
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG)) {
                return -1;
            }
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL)) {
                    return -1;
                }
            }
        }
    }
    return 0;
}
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, uintptr_t pc, void *puc)
{
    unsigned int prot;
    PageDesc *p;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
    mmap_lock();

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        mmap_unlock();
        return 0;
    }

    /* if the page was really writable, then we change its
       protection back to writable */
    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
        host_start = address & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;

        prot = 0;
        for (addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
            p = page_find(addr >> TARGET_PAGE_BITS);
            p->flags |= PAGE_WRITE;
            prot |= p->flags;

            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(addr, pc, puc, true);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(addr);
#endif
        }
        mprotect((void *)g2h(host_start), qemu_host_page_size,
                 prot & PAGE_BITS);

        mmap_unlock();
        return 1;
    }
    mmap_unlock();
    return 0;
}
#endif /* CONFIG_USER_ONLY */