/*
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <sys/types.h>

#include "qemu-common.h"
#define NO_CPU_IO_DEFS
#include "qemu-timer.h"
#include "exec-memory.h"
#if defined(CONFIG_USER_ONLY)
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#endif

#include "translate-all.h"
//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
/* make various TB consistency checks */
//#define DEBUG_TB_CHECK

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10
/* Code generation and translation blocks */
static TranslationBlock *tbs;
static int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t *code_gen_prologue;
static uint8_t *code_gen_buffer;
static size_t code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static size_t code_gen_buffer_max_size;
static uint8_t *code_gen_ptr;
typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;
/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* The bits remaining after N lower levels of page tables.  */
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
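
/* Illustrative geometry (actual values depend on the target
   configuration; L2_BITS is assumed to be 10 here): with
   L1_MAP_ADDR_SPACE_BITS = 32 and TARGET_PAGE_BITS = 12, 20 bits of
   page index remain.  V_L1_BITS_REM is then 0, so the root level gets
   V_L1_BITS = 10 bits and exactly one 10-bit level sits below it:
   l1_map is a 1024-entry array whose leaves are PageDesc arrays. */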
uintptr_t qemu_real_host_page_size;
uintptr_t qemu_host_page_size;
uintptr_t qemu_host_page_mask;
/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];

static int tb_flush_count;
static int tb_phys_invalidate_count;
/* code generation context */
TCGContext tcg_ctx;

static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
                         tb_page_addr_t phys_page2);
static TranslationBlock *tb_find_pc(uintptr_t tc_ptr);
void cpu_gen_init(void)
{
    tcg_context_init(&tcg_ctx);
}
/* return non zero if the very first instruction is invalid so that
   the virtual CPU can trigger an exception.

   '*gen_code_size_ptr' contains the size of the generated code (host
   code).
*/
int cpu_gen_code(CPUArchState *env, TranslationBlock *tb, int *gen_code_size_ptr)
{
    TCGContext *s = &tcg_ctx;
    uint8_t *gen_code_buf;
    int gen_code_size;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

#ifdef CONFIG_PROFILER
    s->tb_count1++; /* includes aborted translations because of
                       exceptions */
    ti = profile_getclock();
#endif
    tcg_func_start(s);

    gen_intermediate_code(env, tb);

    /* generate machine code */
    gen_code_buf = tb->tc_ptr;
    tb->tb_next_offset[0] = 0xffff;
    tb->tb_next_offset[1] = 0xffff;
    s->tb_next_offset = tb->tb_next_offset;
#ifdef USE_DIRECT_JUMP
    s->tb_jmp_offset = tb->tb_jmp_offset;
    s->tb_next = NULL;
#else
    s->tb_jmp_offset = NULL;
    s->tb_next = tb->tb_next;
#endif

#ifdef CONFIG_PROFILER
    s->tb_count++;
    s->interm_time += profile_getclock() - ti;
    s->code_time -= profile_getclock();
#endif
    gen_code_size = tcg_gen_code(s, gen_code_buf);
    *gen_code_size_ptr = gen_code_size;
#ifdef CONFIG_PROFILER
    s->code_time += profile_getclock();
    s->code_in_len += tb->size;
    s->code_out_len += gen_code_size;
#endif

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM)) {
        qemu_log("OUT: [size=%d]\n", *gen_code_size_ptr);
        log_disas(tb->tc_ptr, *gen_code_size_ptr);
        qemu_log("\n");
        qemu_log_flush();
    }
#endif
    return 0;
}
/* The cpu state corresponding to 'searched_pc' is restored.
 */
static int cpu_restore_state_from_tb(TranslationBlock *tb, CPUArchState *env,
                                     uintptr_t searched_pc)
{
    TCGContext *s = &tcg_ctx;
    int j;
    uintptr_t tc_ptr;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

#ifdef CONFIG_PROFILER
    ti = profile_getclock();
#endif
    tcg_func_start(s);

    gen_intermediate_code_pc(env, tb);

    if (use_icount) {
        /* Reset the cycle counter to the start of the block.  */
        env->icount_decr.u16.low += tb->icount;
        /* Clear the IO flag.  */
        env->can_do_io = 0;
    }

    /* find opc index corresponding to search_pc */
    tc_ptr = (uintptr_t)tb->tc_ptr;
    if (searched_pc < tc_ptr) {
        return -1;
    }

    s->tb_next_offset = tb->tb_next_offset;
#ifdef USE_DIRECT_JUMP
    s->tb_jmp_offset = tb->tb_jmp_offset;
    s->tb_next = NULL;
#else
    s->tb_jmp_offset = NULL;
    s->tb_next = tb->tb_next;
#endif
    j = tcg_gen_code_search_pc(s, (uint8_t *)tc_ptr, searched_pc - tc_ptr);
    if (j < 0) {
        return -1;
    }
    /* now find start of instruction before */
    while (s->gen_opc_instr_start[j] == 0) {
        j--;
    }
    env->icount_decr.u16.low -= s->gen_opc_icount[j];

    restore_state_to_opc(env, tb, j);

#ifdef CONFIG_PROFILER
    s->restore_time += profile_getclock() - ti;
    s->restore_count++;
#endif
    return 0;
}
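
/* Restore the guest CPU state for the TB containing 'retaddr', a host
   PC inside generated code.  Returns false when no TB covers that
   address. */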
bool cpu_restore_state(CPUArchState *env, uintptr_t retaddr)
{
    TranslationBlock *tb;

    tb = tb_find_pc(retaddr);
    if (tb) {
        cpu_restore_state_from_tb(tb, env, retaddr);
        return true;
    }
    return false;
}
#ifdef _WIN32
static inline void map_exec(void *addr, long size)
{
    DWORD old_protect;

    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static inline void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0) {
        qemu_host_page_size = qemu_real_host_page_size;
    }
    if (qemu_host_page_size < TARGET_PAGE_SIZE) {
        qemu_host_page_size = TARGET_PAGE_SIZE;
    }
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use g_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = g_malloc0(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}
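
/* Example lookup (using the illustrative sizes noted above): the top
   V_L1_BITS of the page index select an l1_map slot, each
   intermediate level consumes L2_BITS more bits, and the low L2_BITS
   index into the leaf PageDesc array. */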
#if !defined(CONFIG_USER_ONLY)
#define mmap_lock() do { } while (0)
#define mmap_unlock() do { } while (0)
#endif
#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc will be used. */
/* ??? 64-bit hosts ought to have no problem mmaping data outside the
   region in which the guest needs to run.  Revisit this.  */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

/* ??? Should configure for this, not list operating systems here.  */
#if (defined(__linux__) \
    || defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__) \
    || defined(__NetBSD__))
# define USE_MMAP
#endif

/* Minimum size of the code gen buffer.  This number is randomly chosen,
   but not so small that we can't have a fair number of TB's live.  */
#define MIN_CODE_GEN_BUFFER_SIZE     (1024u * 1024)

/* Maximum size of the code gen buffer we'd like to use.  Unless otherwise
   indicated, this is constrained by the range of direct branches on the
   host cpu, as used by the TCG implementation of goto_tb.  */
#if defined(__x86_64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__sparc__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__arm__)
# define MAX_CODE_GEN_BUFFER_SIZE  (16u * 1024 * 1024)
#elif defined(__s390x__)
  /* We have a +- 4GB range on the branches; leave some slop.  */
# define MAX_CODE_GEN_BUFFER_SIZE  (3ul * 1024 * 1024 * 1024)
#else
# define MAX_CODE_GEN_BUFFER_SIZE  ((size_t)-1)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32u * 1024 * 1024)

#define DEFAULT_CODE_GEN_BUFFER_SIZE \
  (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
   ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)
static inline size_t size_code_gen_buffer(size_t tb_size)
{
    /* Size the buffer.  */
    if (tb_size == 0) {
#ifdef USE_STATIC_CODE_GEN_BUFFER
        tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* ??? Needs adjustments.  */
        /* ??? If we relax the requirement that CONFIG_USER_ONLY use the
           static buffer, we could size this on RESERVED_VA, on the text
           segment size of the executable, or continue to use the default.  */
        tb_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
        tb_size = MIN_CODE_GEN_BUFFER_SIZE;
    }
    if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
        tb_size = MAX_CODE_GEN_BUFFER_SIZE;
    }
    code_gen_buffer_size = tb_size;
    return tb_size;
}
#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
    __attribute__((aligned(CODE_GEN_ALIGN)));

static inline void *alloc_code_gen_buffer(void)
{
    map_exec(static_code_gen_buffer, code_gen_buffer_size);
    return static_code_gen_buffer;
}
#elif defined(USE_MMAP)
static inline void *alloc_code_gen_buffer(void)
{
    int flags = MAP_PRIVATE | MAP_ANONYMOUS;
    uintptr_t start = 0;
    void *buf;

    /* Constrain the position of the buffer based on the host cpu.
       Note that these addresses are chosen in concert with the
       addresses assigned in the relevant linker script file.  */
# if defined(__PIE__) || defined(__PIC__)
    /* Don't bother setting a preferred location if we're building
       a position-independent executable.  We're more likely to get
       an address near the main executable if we let the kernel
       choose the address.  */
# elif defined(__x86_64__) && defined(MAP_32BIT)
    /* Force the memory down into low memory with the executable.
       Leave the choice of exact location with the kernel.  */
    flags |= MAP_32BIT;
    /* Cannot expect to map more than 800MB in low memory.  */
    if (code_gen_buffer_size > 800u * 1024 * 1024) {
        code_gen_buffer_size = 800u * 1024 * 1024;
    }
# elif defined(__sparc__)
    start = 0x40000000ul;
# elif defined(__s390x__)
    start = 0x90000000ul;
# endif

    buf = mmap((void *)start, code_gen_buffer_size,
               PROT_WRITE | PROT_READ | PROT_EXEC, flags, -1, 0);
    return buf == MAP_FAILED ? NULL : buf;
}
#else
static inline void *alloc_code_gen_buffer(void)
{
    void *buf = g_malloc(code_gen_buffer_size);

    if (buf) {
        map_exec(buf, code_gen_buffer_size);
    }
    return buf;
}
#endif /* USE_STATIC_CODE_GEN_BUFFER, USE_MMAP */
static inline void code_gen_alloc(size_t tb_size)
{
    code_gen_buffer_size = size_code_gen_buffer(tb_size);
    code_gen_buffer = alloc_code_gen_buffer();
    if (code_gen_buffer == NULL) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
        exit(1);
    }

    qemu_madvise(code_gen_buffer, code_gen_buffer_size, QEMU_MADV_HUGEPAGE);

    /* Steal room for the prologue at the end of the buffer.  This ensures
       (via the MAX_CODE_GEN_BUFFER_SIZE limits above) that direct branches
       from TB's to the prologue are going to be in range.  It also means
       that we don't need to mark (additional) portions of the data segment
       as executable.  */
    code_gen_prologue = code_gen_buffer + code_gen_buffer_size - 1024;
    code_gen_buffer_size -= 1024;

    code_gen_buffer_max_size = code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = g_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}
/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    tcg_register_jit(code_gen_buffer, code_gen_buffer_size);
    page_init();
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(&tcg_ctx);
#endif
}
bool tcg_enabled(void)
{
    return code_gen_buffer != NULL;
}
/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size) {
        return NULL;
    }
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}
void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for a single-use temporary TB.
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}
static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        g_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}
/* Set to NULL all the 'first_tb' fields in all PageDescs. */
static void page_flush_tb_1(int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1(level - 1, pp + i);
        }
    }
}
static void page_flush_tb(void)
{
    int i;

    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
    }
}
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUArchState *env1)
{
    CPUArchState *env;

#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer)
        > code_gen_buffer_size) {
        cpu_abort(env1, "Internal error: code buffer overflow\n");
    }
    nb_tbs = 0;

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        memset(env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof(void *));
    }

    memset(tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof(void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}
#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;

    address &= TARGET_PAGE_MASK;
    for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for (tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}
/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for (tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif
/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;

    for (;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}
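
/* The page_next[] and jmp_next[] links store a small tag in the low
   two bits of each pointer: for page lists it is the index (0 or 1)
   of the physical page the TB occupies within that list, and for jump
   lists the value 2 marks the list head kept in jmp_first.  The
   helpers below mask with ~3 to recover the real pointer. */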
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for (;;) {
        tb1 = *ptb;
        n1 = (uintptr_t)tb1 & 3;
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for (;;) {
            tb1 = *ptb;
            n1 = (uintptr_t)tb1 & 3;
            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
            if (n1 == n && tb1 == tb) {
                break;
            }
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}
/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[n]));
}
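
/* Each TB can chain directly to at most two destination TBs; the TBs
   that jump into a given destination are kept on a circular list
   rooted at that destination's jmp_first, which lets
   tb_phys_invalidate() below find and unpatch every incoming direct
   jump when the destination dies. */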
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUArchState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb) {
            env->tb_jmp_cache[h] = NULL;
        }
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for (;;) {
        n1 = (uintptr_t)tb1 & 3;
        if (n1 == 2) {
            break;
        }
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
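
/* For example, set_bits(tab, 3, 7) marks bits 3..9: bits 3..7 of
   tab[0] via the first mask, then bits 0..1 of tab[1] via the final
   partial-byte mask. */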
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE) {
                tb_end = TARGET_PAGE_SIZE;
            }
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
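
/* The resulting code_bitmap has one bit per byte of the page; a set
   bit means that byte is covered by at least one TB.  It lets
   tb_invalidate_phys_page_fast() below dismiss writes that cannot
   touch translated code without walking the TB list. */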
TranslationBlock *tb_gen_code(CPUArchState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info.  */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((uintptr_t)code_gen_ptr + code_gen_size +
                             CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}
/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[. NOTE: start and end may refer to *different* physical pages.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 */
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end,
                              int is_cpu_write_access)
{
    while (start < end) {
        tb_invalidate_phys_page_range(start, end, is_cpu_write_access);
        start &= TARGET_PAGE_MASK;
        start += TARGET_PAGE_SIZE;
    }
}
/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[. NOTE: start and end must refer to the *same* physical page.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUArchState *env = cpu_single_env;
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all
       the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                its execution. We could be more precise by checking
                that the modification is after the current PC, but it
                would require a specialized function to partially
                restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state_from_tb(current_tb, env, env->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb) {
                    cpu_interrupt(env, env->interrupt_request);
                }
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}
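
/* Fast-path check for a small write into a page that holds translated
   code: when a code_bitmap is available, a write that overlaps no
   translated byte is simply ignored; otherwise the full page-range
   invalidation above is performed. */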
/* len must be <= 8 and start must be a multiple of len */
void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;

#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                 cpu_single_env->mem_io_vaddr, len,
                 cpu_single_env->eip,
                 cpu_single_env->eip +
                 (intptr_t)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1)) {
            goto do_invalidate;
        }
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                    uintptr_t pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUArchState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state_from_tb(current_tb, env, pc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif
/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;
#endif

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    page_already_protected = p->first_tb != NULL;
#endif
    p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for (addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find(addr >> TARGET_PAGE_BITS);
            if (!p2) {
                continue;
            }
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
                         tb_page_addr_t phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1) {
        tb_alloc_page(tb, 1, phys_page2);
    } else {
        tb->page_addr[1] = -1;
    }

    tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff) {
        tb_reset_jump(tb, 0);
    }
    if (tb->tb_next_offset[1] != 0xffff) {
        tb_reset_jump(tb, 1);
    }

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}
#if defined(CONFIG_QEMU_LDST_OPTIMIZATION) && defined(CONFIG_SOFTMMU)
/* check whether the given addr is in TCG generated code buffer or not */
bool is_tcg_gen_code(uintptr_t tc_ptr)
{
    /* This can be called during code generation, code_gen_buffer_max_size
       is used instead of code_gen_ptr for upper boundary checking */
    return (tc_ptr >= (uintptr_t)code_gen_buffer &&
            tc_ptr < (uintptr_t)(code_gen_buffer + code_gen_buffer_max_size));
}
#endif
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
static TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
{
    int m_min, m_max, m;
    uintptr_t v;
    TranslationBlock *tb;

    if (nb_tbs <= 0) {
        return NULL;
    }
    if (tc_ptr < (uintptr_t)code_gen_buffer ||
        tc_ptr >= (uintptr_t)code_gen_ptr) {
        return NULL;
    }
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (uintptr_t)tb->tc_ptr;
        if (v == tc_ptr) {
            return tb;
        } else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}
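
/* The binary search above relies on tbs[] being filled in order of
   ascending tc_ptr: code_gen_ptr only grows between flushes, and each
   new TB is carved from the buffer past its predecessor. */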
static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for (;;) {
            n1 = (uintptr_t)tb1 & 3;
            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
            if (n1 == 2) {
                break;
            }
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for (;;) {
            tb1 = *ptb;
            n1 = (uintptr_t)tb1 & 3;
            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
            if (n1 == n && tb1 == tb) {
                break;
            }
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}
#if defined(TARGET_HAS_ICE) && !defined(CONFIG_USER_ONLY)
void tb_invalidate_phys_addr(hwaddr addr)
{
    ram_addr_t ram_addr;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch,
                             addr >> TARGET_PAGE_BITS);
    if (!(memory_region_is_ram(section->mr)
          || (section->mr->rom_device && section->mr->readable))) {
        return;
    }
    ram_addr = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
        + memory_region_section_addr(section, addr);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif /* TARGET_HAS_ICE && !defined(CONFIG_USER_ONLY) */
void cpu_unlink_tb(CPUArchState *env)
{
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    spin_lock(&interrupt_lock);
    tb = env->current_tb;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    if (tb) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
    }
    spin_unlock(&interrupt_lock);
}
void tb_check_watchpoint(CPUArchState *env)
{
    TranslationBlock *tb;

    tb = tb_find_pc(env->mem_io_pc);
    if (!tb) {
        cpu_abort(env, "check_watchpoint: could not find TB for pc=%p",
                  (void *)env->mem_io_pc);
    }
    cpu_restore_state_from_tb(tb, env, env->mem_io_pc);
    tb_phys_invalidate(tb, -1);
}
#ifndef CONFIG_USER_ONLY
/* mask must never be zero, except for A20 change call */
static void tcg_handle_interrupt(CPUArchState *env, int mask)
{
    CPUState *cpu = ENV_GET_CPU(env);
    int old_mask;

    old_mask = env->interrupt_request;
    env->interrupt_request |= mask;

    /*
     * If called from iothread context, wake the target cpu in
     * case it's halted.
     */
    if (!qemu_cpu_is_self(cpu)) {
        qemu_cpu_kick(cpu);
        return;
    }

    if (use_icount) {
        env->icount_decr.u16.high = 0xffff;
        if (!can_do_io(env)
            && (mask & ~old_mask) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
    } else {
        cpu_unlink_tb(env);
    }
}

CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUArchState *env, uintptr_t retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc(retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  (void *)retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state_from_tb(tb, env, retaddr);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
        && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK) {
        cpu_abort(env, "TB too big during recompile");
    }

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}
void tb_flush_jmp_cache(CPUArchState *env, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset(&env->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset(&env->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}
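
/* Two hash ranges are cleared above because a TB may straddle a page
   boundary: a block whose starting pc is on the page preceding 'addr'
   can still contain code from the flushed page, so its cache entries
   (hashed by starting pc) must be dropped as well. */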
void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for (i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size) {
            max_target_code_size = tb->size;
        }
        if (tb->page_addr[1] != -1) {
            cross_page++;
        }
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %td/%zd\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %td bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer)
                / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}
#else /* CONFIG_USER_ONLY */

void cpu_interrupt(CPUArchState *env, int mask)
{
    env->interrupt_request |= mask;
    cpu_unlink_tb(env);
}
/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */
struct walk_memory_regions_data {
    walk_memory_regions_fn fn;
    void *priv;
    uintptr_t start;
    int prot;
};

static int walk_memory_regions_end(struct walk_memory_regions_data *data,
                                   abi_ulong end, int new_prot)
{
    if (data->start != -1ul) {
        int rc = data->fn(data->priv, data->start, end, data->prot);

        if (rc != 0) {
            return rc;
        }
    }

    data->start = (new_prot ? end : -1ul);
    data->prot = new_prot;

    return 0;
}
static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 abi_ulong base, int level, void **lp)
{
    abi_ulong pa;
    int i, rc;

    if (*lp == NULL) {
        return walk_memory_regions_end(data, base, 0);
    }

    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            if (prot != data->prot) {
                rc = walk_memory_regions_end(data, pa, prot);
                if (rc != 0) {
                    return rc;
                }
            }
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < L2_SIZE; ++i) {
            pa = base | ((abi_ulong)i <<
                (TARGET_PAGE_BITS + L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
            if (rc != 0) {
                return rc;
            }
        }
    }

    return 0;
}
int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    struct walk_memory_regions_data data;
    uintptr_t i;

    data.fn = fn;
    data.priv = priv;
    data.start = -1ul;
    data.prot = 0;

    for (i = 0; i < V_L1_SIZE; i++) {
        int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
                                       V_L1_SHIFT / L2_BITS - 1, l1_map + i);

        if (rc != 0) {
            return rc;
        }
    }

    return walk_memory_regions_end(&data, 0, 0);
}
static int dump_region(void *priv, abi_ulong start,
    abi_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
        " "TARGET_ABI_FMT_lx" %c%c%c\n",
        start, end, end - start,
        ((prot & PAGE_READ) ? 'r' : '-'),
        ((prot & PAGE_WRITE) ? 'w' : '-'),
        ((prot & PAGE_EXEC) ? 'x' : '-'));

    return 0;
}
/* dump memory mappings */
void page_dump(FILE *f)
{
    (void) fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    walk_memory_regions(f, dump_region);
}
int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        return 0;
    }
    return p->flags;
}
/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is positioned automatically depending
   on PAGE_WRITE.  The mmap_lock should already be held.  */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
    assert(start < end);

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        flags |= PAGE_WRITE_ORG;
    }

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate
           the code inside.  */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
}
int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif

    if (len == 0) {
        return 0;
    }
    if (start + len - 1 < start) {
        /* We've wrapped around.  */
        return -1;
    }

    /* must do before we lose bits in the next step */
    end = TARGET_PAGE_ALIGN(start + len);
    start = start & TARGET_PAGE_MASK;

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p) {
            return -1;
        }
        if (!(p->flags & PAGE_VALID)) {
            return -1;
        }

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) {
            return -1;
        }
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG)) {
                return -1;
            }
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL)) {
                    return -1;
                }
            }
            return 0;
        }
    }
    return 0;
}
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, uintptr_t pc, void *puc)
{
    unsigned int prot;
    PageDesc *p;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
    mmap_lock();

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        mmap_unlock();
        return 0;
    }

    /* if the page was really writable, then we change its
       protection back to writable */
    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
        host_start = address & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;

        prot = 0;
        for (addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
            p = page_find(addr >> TARGET_PAGE_BITS);
            p->flags |= PAGE_WRITE;
            prot |= p->flags;

            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(addr, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(addr);
#endif
        }
        mprotect((void *)g2h(host_start), qemu_host_page_size,
                 prot & PAGE_BITS);

        mmap_unlock();
        return 1;
    }
    mmap_unlock();
    return 0;
}
#endif /* CONFIG_USER_ONLY */