/*
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <sys/types.h>

#include "qemu-common.h"
#define NO_CPU_IO_DEFS
#include "disas/disas.h"

#if defined(CONFIG_USER_ONLY)
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <machine/profile.h>
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else
#include "exec/address-spaces.h"
#endif

#include "exec/cputlb.h"
#include "translate-all.h"
#include "qemu/timer.h"
//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
/* make various TB consistency checks */
//#define DEBUG_TB_CHECK

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10
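
/* Once a page has seen this many code-invalidating writes, a per-page
   bitmap of the bytes occupied by translated code is built (see
   build_page_bitmap below), so that later writes which miss the bitmap
   can skip TB invalidation entirely (see tb_invalidate_phys_page_fast). */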
typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;
/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* The bits remaining after N lower levels of page tables.  */
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
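
/* A worked example with illustrative values (L2_BITS comes from the exec
   headers, not this file): with L1_MAP_ADDR_SPACE_BITS = 32,
   TARGET_PAGE_BITS = 12 and L2_BITS = 10, there are 32 - 12 = 20 index
   bits to map; 20 % 10 = 0, so V_L1_BITS_REM = 0 < 4 and V_L1_BITS
   becomes 0 + 10 = 10.  The level-1 table then has V_L1_SIZE = 1024
   entries, a page index is shifted right by V_L1_SHIFT = 32 - 12 - 10
   = 10 to pick the level-1 slot, and the remaining 10 bits select the
   PageDesc in the leaf (see page_find_alloc below). */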
uintptr_t qemu_real_host_page_size;
uintptr_t qemu_host_page_size;
uintptr_t qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];
/* code generation context */
TCGContext tcg_ctx;

static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
                         tb_page_addr_t phys_page2);
static TranslationBlock *tb_find_pc(uintptr_t tc_ptr);

void cpu_gen_init(void)
{
    tcg_context_init(&tcg_ctx);
}
/* return non zero if the very first instruction is invalid so that
   the virtual CPU can trigger an exception.

   '*gen_code_size_ptr' contains the size of the generated code (host
   code).
*/
int cpu_gen_code(CPUArchState *env, TranslationBlock *tb, int *gen_code_size_ptr)
{
    TCGContext *s = &tcg_ctx;
    uint8_t *gen_code_buf;
    int gen_code_size;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

#ifdef CONFIG_PROFILER
    s->tb_count1++; /* includes aborted translations because of
                       exceptions */
    ti = profile_getclock();
#endif
    tcg_func_start(s);

    gen_intermediate_code(env, tb);

    /* generate machine code */
    gen_code_buf = tb->tc_ptr;
    tb->tb_next_offset[0] = 0xffff;
    tb->tb_next_offset[1] = 0xffff;
    s->tb_next_offset = tb->tb_next_offset;
#ifdef USE_DIRECT_JUMP
    s->tb_jmp_offset = tb->tb_jmp_offset;
    s->tb_next = NULL;
#else
    s->tb_jmp_offset = NULL;
    s->tb_next = tb->tb_next;
#endif

#ifdef CONFIG_PROFILER
    s->tb_count++;
    s->interm_time += profile_getclock() - ti;
    s->code_time -= profile_getclock();
#endif
    gen_code_size = tcg_gen_code(s, gen_code_buf);
    *gen_code_size_ptr = gen_code_size;
#ifdef CONFIG_PROFILER
    s->code_time += profile_getclock();
    s->code_in_len += tb->size;
    s->code_out_len += gen_code_size;
#endif

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM)) {
        qemu_log("OUT: [size=%d]\n", *gen_code_size_ptr);
        log_disas(tb->tc_ptr, *gen_code_size_ptr);
        qemu_log("\n");
        qemu_log_flush();
    }
#endif
    return 0;
}
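
/* Translation thus happens in two phases: gen_intermediate_code() walks
   the guest instructions and emits target-independent TCG ops, and
   tcg_gen_code() then lowers those ops to host machine code placed at
   tb->tc_ptr inside the code generation buffer. */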
/* The cpu state corresponding to 'searched_pc' is restored.
 */
static int cpu_restore_state_from_tb(TranslationBlock *tb, CPUArchState *env,
                                     uintptr_t searched_pc)
{
    TCGContext *s = &tcg_ctx;
    int j;
    uintptr_t tc_ptr;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

#ifdef CONFIG_PROFILER
    ti = profile_getclock();
#endif
    tcg_func_start(s);
    gen_intermediate_code_pc(env, tb);

    if (use_icount) {
        /* Reset the cycle counter to the start of the block.  */
        env->icount_decr.u16.low += tb->icount;
        /* Clear the IO flag.  */
        env->can_do_io = 0;
    }

    /* find opc index corresponding to search_pc */
    tc_ptr = (uintptr_t)tb->tc_ptr;
    if (searched_pc < tc_ptr) {
        return -1;
    }

    s->tb_next_offset = tb->tb_next_offset;
#ifdef USE_DIRECT_JUMP
    s->tb_jmp_offset = tb->tb_jmp_offset;
    s->tb_next = NULL;
#else
    s->tb_jmp_offset = NULL;
    s->tb_next = tb->tb_next;
#endif
    j = tcg_gen_code_search_pc(s, (uint8_t *)tc_ptr, searched_pc - tc_ptr);
    if (j < 0) {
        return -1;
    }
    /* now find start of instruction before */
    while (s->gen_opc_instr_start[j] == 0) {
        j--;
    }
    env->icount_decr.u16.low -= s->gen_opc_icount[j];

    restore_state_to_opc(env, tb, j);

#ifdef CONFIG_PROFILER
    s->restore_time += profile_getclock() - ti;
    s->restore_count++;
#endif
    return 0;
}
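
/* State restoration works by translating the TB a second time: the
   search variant records, for each guest instruction, where its host
   code begins (gen_opc_instr_start) and its icount position, so the host
   'searched_pc' can be mapped back to a guest-instruction boundary
   without keeping a per-TB side table at normal translation time. */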
bool cpu_restore_state(CPUArchState *env, uintptr_t retaddr)
{
    TranslationBlock *tb;

    tb = tb_find_pc(retaddr);
    if (tb) {
        cpu_restore_state_from_tb(tb, env, retaddr);
        return true;
    }
    return false;
}
#ifdef _WIN32
static inline void map_exec(void *addr, long size)
{
    DWORD old_protect;

    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static inline void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0) {
        qemu_host_page_size = qemu_real_host_page_size;
    }
    if (qemu_host_page_size < TARGET_PAGE_SIZE) {
        qemu_host_page_size = TARGET_PAGE_SIZE;
    }
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use g_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = g_malloc0(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}
#if !defined(CONFIG_USER_ONLY)
#define mmap_lock() do { } while (0)
#define mmap_unlock() do { } while (0)
#endif
#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc will be used. */
/* ??? 64-bit hosts ought to have no problem mmaping data outside the
   region in which the guest needs to run.  Revisit this.  */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

/* ??? Should configure for this, not list operating systems here.  */
#if (defined(__linux__) \
    || defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__) \
    || defined(__NetBSD__))
# define USE_MMAP
#endif

/* Minimum size of the code gen buffer.  This number is randomly chosen,
   but not so small that we can't have a fair number of TB's live.  */
#define MIN_CODE_GEN_BUFFER_SIZE     (1024u * 1024)

/* Maximum size of the code gen buffer we'd like to use.  Unless otherwise
   indicated, this is constrained by the range of direct branches on the
   host cpu, as used by the TCG implementation of goto_tb.  */
#if defined(__x86_64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__sparc__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__aarch64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (128ul * 1024 * 1024)
#elif defined(__arm__)
# define MAX_CODE_GEN_BUFFER_SIZE  (16u * 1024 * 1024)
#elif defined(__s390x__)
  /* We have a +- 4GB range on the branches; leave some slop.  */
# define MAX_CODE_GEN_BUFFER_SIZE  (3ul * 1024 * 1024 * 1024)
#else
# define MAX_CODE_GEN_BUFFER_SIZE  ((size_t)-1)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32u * 1024 * 1024)

#define DEFAULT_CODE_GEN_BUFFER_SIZE \
  (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
   ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)
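
/* For example, on __arm__ the 32MB DEFAULT_CODE_GEN_BUFFER_SIZE_1 exceeds
   the 16MB MAX_CODE_GEN_BUFFER_SIZE imposed by the reach of direct
   branches, so the default collapses to 16MB; on __x86_64__ the 2GB cap
   leaves the 32MB default untouched. */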
static inline size_t size_code_gen_buffer(size_t tb_size)
{
    /* Size the buffer.  */
    if (tb_size == 0) {
#ifdef USE_STATIC_CODE_GEN_BUFFER
        tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* ??? Needs adjustments.  */
        /* ??? If we relax the requirement that CONFIG_USER_ONLY use the
           static buffer, we could size this on RESERVED_VA, on the text
           segment size of the executable, or continue to use the default.  */
        tb_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
        tb_size = MIN_CODE_GEN_BUFFER_SIZE;
    }
    if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
        tb_size = MAX_CODE_GEN_BUFFER_SIZE;
    }
    tcg_ctx.code_gen_buffer_size = tb_size;
    return tb_size;
}
#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
    __attribute__((aligned(CODE_GEN_ALIGN)));

static inline void *alloc_code_gen_buffer(void)
{
    map_exec(static_code_gen_buffer, tcg_ctx.code_gen_buffer_size);
    return static_code_gen_buffer;
}
#elif defined(USE_MMAP)
static inline void *alloc_code_gen_buffer(void)
{
    int flags = MAP_PRIVATE | MAP_ANONYMOUS;
    uintptr_t start = 0;
    void *buf;

    /* Constrain the position of the buffer based on the host cpu.
       Note that these addresses are chosen in concert with the
       addresses assigned in the relevant linker script file.  */
# if defined(__PIE__) || defined(__PIC__)
    /* Don't bother setting a preferred location if we're building
       a position-independent executable.  We're more likely to get
       an address near the main executable if we let the kernel
       choose the address.  */
# elif defined(__x86_64__) && defined(MAP_32BIT)
    /* Force the memory down into low memory with the executable.
       Leave the choice of exact location with the kernel.  */
    flags |= MAP_32BIT;
    /* Cannot expect to map more than 800MB in low memory.  */
    if (tcg_ctx.code_gen_buffer_size > 800u * 1024 * 1024) {
        tcg_ctx.code_gen_buffer_size = 800u * 1024 * 1024;
    }
# elif defined(__sparc__)
    start = 0x40000000ul;
# elif defined(__s390x__)
    start = 0x90000000ul;
# endif

    buf = mmap((void *)start, tcg_ctx.code_gen_buffer_size,
               PROT_WRITE | PROT_READ | PROT_EXEC, flags, -1, 0);
    return buf == MAP_FAILED ? NULL : buf;
}
#else
static inline void *alloc_code_gen_buffer(void)
{
    void *buf = g_malloc(tcg_ctx.code_gen_buffer_size);

    if (buf) {
        map_exec(buf, tcg_ctx.code_gen_buffer_size);
    }
    return buf;
}
#endif /* USE_STATIC_CODE_GEN_BUFFER, USE_MMAP */
static inline void code_gen_alloc(size_t tb_size)
{
    tcg_ctx.code_gen_buffer_size = size_code_gen_buffer(tb_size);
    tcg_ctx.code_gen_buffer = alloc_code_gen_buffer();
    if (tcg_ctx.code_gen_buffer == NULL) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
        exit(1);
    }

    qemu_madvise(tcg_ctx.code_gen_buffer, tcg_ctx.code_gen_buffer_size,
                 QEMU_MADV_HUGEPAGE);

    /* Steal room for the prologue at the end of the buffer.  This ensures
       (via the MAX_CODE_GEN_BUFFER_SIZE limits above) that direct branches
       from TB's to the prologue are going to be in range.  It also means
       that we don't need to mark (additional) portions of the data segment
       as executable.  */
    tcg_ctx.code_gen_prologue = tcg_ctx.code_gen_buffer +
            tcg_ctx.code_gen_buffer_size - 1024;
    tcg_ctx.code_gen_buffer_size -= 1024;

    tcg_ctx.code_gen_buffer_max_size = tcg_ctx.code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
    tcg_ctx.code_gen_max_blocks = tcg_ctx.code_gen_buffer_size /
            CODE_GEN_AVG_BLOCK_SIZE;
    tcg_ctx.tb_ctx.tbs =
            g_malloc(tcg_ctx.code_gen_max_blocks * sizeof(TranslationBlock));
}
/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    tcg_ctx.code_gen_ptr = tcg_ctx.code_gen_buffer;
    tcg_register_jit(tcg_ctx.code_gen_buffer, tcg_ctx.code_gen_buffer_size);
    page_init();
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(&tcg_ctx);
#endif
}
bool tcg_enabled(void)
{
    return tcg_ctx.code_gen_buffer != NULL;
}
/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (tcg_ctx.tb_ctx.nb_tbs >= tcg_ctx.code_gen_max_blocks ||
        (tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer) >=
         tcg_ctx.code_gen_buffer_max_size) {
        return NULL;
    }
    tb = &tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}
void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single use temporary TB
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (tcg_ctx.tb_ctx.nb_tbs > 0 &&
            tb == &tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs - 1]) {
        tcg_ctx.code_gen_ptr = tb->tc_ptr;
        tcg_ctx.tb_ctx.nb_tbs--;
    }
}
static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        g_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}
/* Set to NULL all the 'first_tb' fields in all PageDescs. */
static void page_flush_tb_1(int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1(level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;

    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
    }
}
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUArchState *env1)
{
    CPUState *cpu;

#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer),
           tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.tb_ctx.nb_tbs > 0 ?
           ((unsigned long)(tcg_ctx.code_gen_ptr -
                            tcg_ctx.code_gen_buffer)) /
           tcg_ctx.tb_ctx.nb_tbs : 0);
#endif
    if ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)
        > tcg_ctx.code_gen_buffer_size) {
        cpu_abort(env1, "Internal error: code buffer overflow\n");
    }
    tcg_ctx.tb_ctx.nb_tbs = 0;

    CPU_FOREACH(cpu) {
        CPUArchState *env = cpu->env_ptr;

        memset(env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof(void *));
    }

    memset(tcg_ctx.tb_ctx.tb_phys_hash, 0,
           CODE_GEN_PHYS_HASH_SIZE * sizeof(void *));
    page_flush_tb();

    tcg_ctx.code_gen_ptr = tcg_ctx.code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tcg_ctx.tb_ctx.tb_flush_count++;
}
#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;

    address &= TARGET_PAGE_MASK;
    for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for (tb = tcg_ctx.tb_ctx.tb_phys_hash[i]; tb != NULL;
             tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}
/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for (tb = tcg_ctx.tb_ctx.tb_phys_hash[i]; tb != NULL;
                tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif
static inline void tb_hash_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;

    for (;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = tb1->phys_hash_next;
            break;
        }
        ptb = &tb1->phys_hash_next;
    }
}
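
/* In the page_next[] and jmp_next[] lists that follow, the low two bits
   of each TranslationBlock pointer encode which slot of the pointed-to TB
   the chain continues through: n = (uintptr_t)tb & 3 recovers the slot
   and (uintptr_t)tb & ~3 the real pointer.  The value 2 marks the head of
   a circular jump list (see tb->jmp_first below). */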
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for (;;) {
        tb1 = *ptb;
        n1 = (uintptr_t)tb1 & 3;
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for (;;) {
            tb1 = *ptb;
            n1 = (uintptr_t)tb1 & 3;
            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
            if (n1 == n && tb1 == tb) {
                break;
            }
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}
/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[n]));
}
/* invalidate one TB */
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *cpu;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_hash_remove(&tcg_ctx.tb_ctx.tb_phys_hash[h], tb);

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tcg_ctx.tb_ctx.tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    CPU_FOREACH(cpu) {
        CPUArchState *env = cpu->env_ptr;

        if (env->tb_jmp_cache[h] == tb) {
            env->tb_jmp_cache[h] = NULL;
        }
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for (;;) {
        n1 = (uintptr_t)tb1 & 3;
        if (n1 == 2) {
            break;
        }
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2); /* fail safe */

    tcg_ctx.tb_ctx.tb_phys_invalidate_count++;
}
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
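
/* Worked example: set_bits(tab, 3, 10) must set bits 3..12.  end = 13;
   start and end land in different bytes, so the else branch runs:
   byte 0 gets mask 0xff << 3 = 0xf8 (bits 3-7), no full 0xff bytes follow
   (end1 = 8), and byte 1 gets ~(0xff << 5) = 0x1f (bits 8-12). */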
/* set the bits corresponding to the code present on the page */
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE) {
                tb_end = TARGET_PAGE_SIZE;
            }
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
TranslationBlock *tb_gen_code(CPUArchState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info. */
        tcg_ctx.tb_ctx.tb_invalidated_flag = 1;
    }
    tc_ptr = tcg_ctx.code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    tcg_ctx.code_gen_ptr = (void *)(((uintptr_t)tcg_ctx.code_gen_ptr +
            code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}
/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[. NOTE: start and end may refer to *different* physical pages.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 */
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end,
                              int is_cpu_write_access)
{
    while (start < end) {
        tb_invalidate_phys_page_range(start, end, is_cpu_write_access);
        start &= TARGET_PAGE_MASK;
        start += TARGET_PAGE_SIZE;
    }
}
/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[. NOTE: start and end must refer to the *same* physical page.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *cpu = current_cpu;
#if defined(TARGET_HAS_PRECISE_SMC) || !defined(CONFIG_USER_ONLY)
    CPUArchState *env = NULL;
#endif
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }
#if defined(TARGET_HAS_PRECISE_SMC) || !defined(CONFIG_USER_ONLY)
    if (cpu != NULL) {
        env = cpu->env_ptr;
    }
#endif

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all
       the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                its execution. We could be more precise by checking
                that the modification is after the current PC, but it
                would require a specialized function to partially
                restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state_from_tb(current_tb, env, env->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (cpu != NULL) {
                saved_tb = cpu->current_tb;
                cpu->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (cpu != NULL) {
                cpu->current_tb = saved_tb;
                if (cpu->interrupt_request && cpu->current_tb) {
                    cpu_interrupt(cpu, cpu->interrupt_request);
                }
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        cpu->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}
/* len must be <= 8 and start must be a multiple of len */
void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;

#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                  cpu_single_env->mem_io_vaddr, len,
                  cpu_single_env->eip,
                  cpu_single_env->eip +
                  (intptr_t)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1)) {
            goto do_invalidate;
        }
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
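
/* Example of the fast path above: for a 4-byte write at page offset
   0x124, offset >> 3 = 0x24 selects the bitmap byte, offset & 7 = 4
   aligns it, and b & ((1 << 4) - 1) is non-zero only if one of those four
   bytes overlaps translated code, in which case the slow path runs. */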
#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                    uintptr_t pc, void *puc,
                                    bool locked)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *cpu = current_cpu;
    CPUArchState *env = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
    if (cpu != NULL) {
        env = cpu->env_ptr;
    }
#endif
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state_from_tb(current_tb, env, pc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        cpu->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        if (locked) {
            mmap_unlock();
        }
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif
/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;
#endif

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    page_already_protected = p->first_tb != NULL;
#endif
    p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for (addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find(addr >> TARGET_PAGE_BITS);
            if (!p2) {
                continue;
            }
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
                         tb_page_addr_t phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tcg_ctx.tb_ctx.tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1) {
        tb_alloc_page(tb, 1, phys_page2);
    } else {
        tb->page_addr[1] = -1;
    }

    tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff) {
        tb_reset_jump(tb, 0);
    }
    if (tb->tb_next_offset[1] != 0xffff) {
        tb_reset_jump(tb, 1);
    }

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}
#if defined(CONFIG_QEMU_LDST_OPTIMIZATION) && defined(CONFIG_SOFTMMU)
/* check whether the given addr is in TCG generated code buffer or not */
bool is_tcg_gen_code(uintptr_t tc_ptr)
{
    /* This can be called during code generation, code_gen_buffer_size
       is used instead of code_gen_ptr for upper boundary checking */
    return (tc_ptr >= (uintptr_t)tcg_ctx.code_gen_buffer &&
            tc_ptr < (uintptr_t)(tcg_ctx.code_gen_buffer +
                                 tcg_ctx.code_gen_buffer_size));
}
#endif
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
static TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
{
    int m_min, m_max, m;
    uintptr_t v;
    TranslationBlock *tb;

    if (tcg_ctx.tb_ctx.nb_tbs <= 0) {
        return NULL;
    }
    if (tc_ptr < (uintptr_t)tcg_ctx.code_gen_buffer ||
        tc_ptr >= (uintptr_t)tcg_ctx.code_gen_ptr) {
        return NULL;
    }
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = tcg_ctx.tb_ctx.nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tcg_ctx.tb_ctx.tbs[m];
        v = (uintptr_t)tb->tc_ptr;
        if (v == tc_ptr) {
            return tb;
        } else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tcg_ctx.tb_ctx.tbs[m_max];
}
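
/* Note the fall-through return: when no TB starts exactly at tc_ptr, the
   loop exits with m_max naming the last TB whose tc_ptr lies below the
   search address, i.e. the block whose generated code contains tc_ptr.
   This relies on tbs[] being filled in ascending tc_ptr order by the bump
   allocation in tb_alloc/tb_gen_code. */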
#if defined(TARGET_HAS_ICE) && !defined(CONFIG_USER_ONLY)
void tb_invalidate_phys_addr(hwaddr addr)
{
    ram_addr_t ram_addr;
    MemoryRegion *mr;
    hwaddr l = 1;

    mr = address_space_translate(&address_space_memory, addr, &addr, &l, false);
    if (!(memory_region_is_ram(mr)
          || memory_region_is_romd(mr))) {
        return;
    }
    ram_addr = (memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK)
        + addr;
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif /* TARGET_HAS_ICE && !defined(CONFIG_USER_ONLY) */
void tb_check_watchpoint(CPUArchState *env)
{
    TranslationBlock *tb;

    tb = tb_find_pc(env->mem_io_pc);
    if (!tb) {
        cpu_abort(env, "check_watchpoint: could not find TB for pc=%p",
                  (void *)env->mem_io_pc);
    }
    cpu_restore_state_from_tb(tb, env, env->mem_io_pc);
    tb_phys_invalidate(tb, -1);
}
#ifndef CONFIG_USER_ONLY
/* mask must never be zero, except for A20 change call */
static void tcg_handle_interrupt(CPUState *cpu, int mask)
{
    CPUArchState *env = cpu->env_ptr;
    int old_mask;

    old_mask = cpu->interrupt_request;
    cpu->interrupt_request |= mask;

    /*
     * If called from iothread context, wake the target cpu in
     * case its halted.
     */
    if (!qemu_cpu_is_self(cpu)) {
        qemu_cpu_kick(cpu);
        return;
    }

    if (use_icount) {
        env->icount_decr.u16.high = 0xffff;
        if (!can_do_io(env)
            && (mask & ~old_mask) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
    } else {
        cpu->tcg_exit_req = 1;
    }
}

CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUArchState *env, uintptr_t retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc(retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  (void *)retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state_from_tb(tb, env, retaddr);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
            && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK) {
        cpu_abort(env, "TB too big during recompile");
    }

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}
void tb_flush_jmp_cache(CPUArchState *env, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset(&env->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset(&env->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}
void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for (i = 0; i < tcg_ctx.tb_ctx.nb_tbs; i++) {
        tb = &tcg_ctx.tb_ctx.tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size) {
            max_target_code_size = tb->size;
        }
        if (tb->page_addr[1] != -1) {
            cross_page++;
        }
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %td/%zd\n",
                tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer,
                tcg_ctx.code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                tcg_ctx.tb_ctx.nb_tbs ? target_code_size /
                        tcg_ctx.tb_ctx.nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %td bytes (expansion ratio: %0.1f)\n",
                tcg_ctx.tb_ctx.nb_tbs ? (tcg_ctx.code_gen_ptr -
                        tcg_ctx.code_gen_buffer) /
                        tcg_ctx.tb_ctx.nb_tbs : 0,
                target_code_size ? (double) (tcg_ctx.code_gen_ptr -
                        tcg_ctx.code_gen_buffer) /
                        target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n", cross_page,
                tcg_ctx.tb_ctx.nb_tbs ? (cross_page * 100) /
                        tcg_ctx.tb_ctx.nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp_count * 100) /
                        tcg_ctx.tb_ctx.nb_tbs : 0,
                direct_jmp2_count,
                tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp2_count * 100) /
                        tcg_ctx.tb_ctx.nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tcg_ctx.tb_ctx.tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n",
                tcg_ctx.tb_ctx.tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}
#else /* CONFIG_USER_ONLY */

void cpu_interrupt(CPUState *cpu, int mask)
{
    cpu->interrupt_request |= mask;
    cpu->tcg_exit_req = 1;
}
/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */
struct walk_memory_regions_data {
    walk_memory_regions_fn fn;
    void *priv;
    uintptr_t start;
    int prot;
};

static int walk_memory_regions_end(struct walk_memory_regions_data *data,
                                   abi_ulong end, int new_prot)
{
    if (data->start != -1ul) {
        int rc = data->fn(data->priv, data->start, end, data->prot);
        if (rc != 0) {
            return rc;
        }
    }

    data->start = (new_prot ? end : -1ul);
    data->prot = new_prot;

    return 0;
}
static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 abi_ulong base, int level, void **lp)
{
    abi_ulong pa;
    int i, rc;

    if (*lp == NULL) {
        return walk_memory_regions_end(data, base, 0);
    }

    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            if (prot != data->prot) {
                rc = walk_memory_regions_end(data, pa, prot);
                if (rc != 0) {
                    return rc;
                }
            }
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < L2_SIZE; ++i) {
            pa = base | ((abi_ulong)i <<
                         (TARGET_PAGE_BITS + L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
            if (rc != 0) {
                return rc;
            }
        }
    }

    return 0;
}
int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    struct walk_memory_regions_data data;
    uintptr_t i;

    data.fn = fn;
    data.priv = priv;
    data.start = -1ul;
    data.prot = 0;

    for (i = 0; i < V_L1_SIZE; i++) {
        int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
                                       V_L1_SHIFT / L2_BITS - 1, l1_map + i);
        if (rc != 0) {
            return rc;
        }
    }

    return walk_memory_regions_end(&data, 0, 0);
}
static int dump_region(void *priv, abi_ulong start,
                       abi_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
                   " "TARGET_ABI_FMT_lx" %c%c%c\n",
                   start, end, end - start,
                   ((prot & PAGE_READ) ? 'r' : '-'),
                   ((prot & PAGE_WRITE) ? 'w' : '-'),
                   ((prot & PAGE_EXEC) ? 'x' : '-'));

    return 0;
}
/* dump memory mappings */
void page_dump(FILE *f)
{
    const int length = sizeof(abi_ulong) * 2;

    (void) fprintf(f, "%-*s %-*s %-*s %s\n",
                   length, "start", length, "end", length, "size", "prot");
    walk_memory_regions(f, dump_region);
}
int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        return 0;
    }
    return p->flags;
}
/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is positioned automatically depending
   on PAGE_WRITE.  The mmap_lock should already be held.  */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
    assert(start < end);

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        flags |= PAGE_WRITE_ORG;
    }

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate
           the code inside.  */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL, false);
        }
        p->flags = flags;
    }
}
int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif

    if (len == 0) {
        return 0;
    }
    if (start + len - 1 < start) {
        /* We've wrapped around.  */
        return -1;
    }

    /* must do before we loose bits in the next step */
    end = TARGET_PAGE_ALIGN(start + len);
    start = start & TARGET_PAGE_MASK;

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p) {
            return -1;
        }
        if (!(p->flags & PAGE_VALID)) {
            return -1;
        }

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) {
            return -1;
        }
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG)) {
                return -1;
            }
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL)) {
                    return -1;
                }
            }
        }
    }
    return 0;
}
1793 /* called from signal handler: invalidate the code and unprotect the
1794 page. Return TRUE if the fault was successfully handled. */
1795 int page_unprotect(target_ulong address
, uintptr_t pc
, void *puc
)
1799 target_ulong host_start
, host_end
, addr
;
1801 /* Technically this isn't safe inside a signal handler. However we
1802 know this only ever happens in a synchronous SEGV handler, so in
1803 practice it seems to be ok. */
1806 p
= page_find(address
>> TARGET_PAGE_BITS
);
1812 /* if the page was really writable, then we change its
1813 protection back to writable */
1814 if ((p
->flags
& PAGE_WRITE_ORG
) && !(p
->flags
& PAGE_WRITE
)) {
1815 host_start
= address
& qemu_host_page_mask
;
1816 host_end
= host_start
+ qemu_host_page_size
;
1819 for (addr
= host_start
; addr
< host_end
; addr
+= TARGET_PAGE_SIZE
) {
1820 p
= page_find(addr
>> TARGET_PAGE_BITS
);
1821 p
->flags
|= PAGE_WRITE
;
1824 /* and since the content will be modified, we must invalidate
1825 the corresponding translated code. */
1826 tb_invalidate_phys_page(addr
, pc
, puc
, true);
1827 #ifdef DEBUG_TB_CHECK
1828 tb_invalidate_check(addr
);
1831 mprotect((void *)g2h(host_start
), qemu_host_page_size
,
1840 #endif /* CONFIG_USER_ONLY */