/*
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <sys/types.h>

#include "qemu-common.h"
#define NO_CPU_IO_DEFS
#include "disas/disas.h"
#include "qemu/timer.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <machine/profile.h>
#endif
#endif
#endif
#include "exec/cputlb.h"
#include "translate-all.h"
//#define DEBUG_TB_INVALIDATE
/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10
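/* Once a page of code has taken this many write accesses,
   build_page_bitmap() records which bytes of the page hold translated
   code, so later writes that miss every TB can skip the slow
   invalidation path (see tb_invalidate_phys_page_fast below). */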
/* Translation blocks */
static TranslationBlock *tbs;
static int nb_tbs;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];

/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;
/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif
/* The bits remaining after N lower levels of page tables.  */
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
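/* Worked example (assuming, for illustration, L2_BITS == 10): with
   L1_MAP_ADDR_SPACE_BITS == 32 and TARGET_PAGE_BITS == 12 there are 20 bits
   of page index; V_L1_BITS_REM == 0, so V_L1_BITS == 10, V_L1_SIZE == 1024
   and V_L1_SHIFT == 10.  l1_map then has 1024 slots, each pointing (here
   directly) at a leaf array of L2_SIZE PageDesc entries. */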
uintptr_t qemu_real_host_page_size;
uintptr_t qemu_host_page_size;
uintptr_t qemu_host_page_mask;
/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];
static int tb_flush_count;
static int tb_phys_invalidate_count;
/* code generation context */
TCGContext tcg_ctx;
static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
                         tb_page_addr_t phys_page2);
static TranslationBlock *tb_find_pc(uintptr_t tc_ptr);
void cpu_gen_init(void)
{
    tcg_context_init(&tcg_ctx);
}
/* return non zero if the very first instruction is invalid so that
   the virtual CPU can trigger an exception.

   '*gen_code_size_ptr' contains the size of the generated code (host
   code).
*/
int cpu_gen_code(CPUArchState *env, TranslationBlock *tb, int *gen_code_size_ptr)
{
    TCGContext *s = &tcg_ctx;
    uint8_t *gen_code_buf;
    int gen_code_size;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

#ifdef CONFIG_PROFILER
    s->tb_count1++; /* includes aborted translations because of
                       exceptions */
    ti = profile_getclock();
#endif

    gen_intermediate_code(env, tb);
    /* generate machine code */
    gen_code_buf = tb->tc_ptr;
    tb->tb_next_offset[0] = 0xffff;
    tb->tb_next_offset[1] = 0xffff;
    s->tb_next_offset = tb->tb_next_offset;
#ifdef USE_DIRECT_JUMP
    s->tb_jmp_offset = tb->tb_jmp_offset;
#else
    s->tb_jmp_offset = NULL;
    s->tb_next = tb->tb_next;
#endif
#ifdef CONFIG_PROFILER
    s->interm_time += profile_getclock() - ti;
    s->code_time -= profile_getclock();
#endif
    gen_code_size = tcg_gen_code(s, gen_code_buf);
    *gen_code_size_ptr = gen_code_size;
#ifdef CONFIG_PROFILER
    s->code_time += profile_getclock();
    s->code_in_len += tb->size;
    s->code_out_len += gen_code_size;
#endif
    if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM)) {
        qemu_log("OUT: [size=%d]\n", *gen_code_size_ptr);
        log_disas(tb->tc_ptr, *gen_code_size_ptr);
    }
    return 0;
}
/* The cpu state corresponding to 'searched_pc' is restored. */
static int cpu_restore_state_from_tb(TranslationBlock *tb, CPUArchState *env,
                                     uintptr_t searched_pc)
{
    TCGContext *s = &tcg_ctx;
    int j;
    uintptr_t tc_ptr;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

#ifdef CONFIG_PROFILER
    ti = profile_getclock();
#endif
    gen_intermediate_code_pc(env, tb);
    if (use_icount) {
        /* Reset the cycle counter to the start of the block.  */
        env->icount_decr.u16.low += tb->icount;
        /* Clear the IO flag.  */
        env->can_do_io = 0;
    }
    /* find opc index corresponding to search_pc */
    tc_ptr = (uintptr_t)tb->tc_ptr;
    if (searched_pc < tc_ptr) {
        return -1;
    }
    s->tb_next_offset = tb->tb_next_offset;
#ifdef USE_DIRECT_JUMP
    s->tb_jmp_offset = tb->tb_jmp_offset;
#else
    s->tb_jmp_offset = NULL;
    s->tb_next = tb->tb_next;
#endif
    j = tcg_gen_code_search_pc(s, (uint8_t *)tc_ptr, searched_pc - tc_ptr);
    if (j < 0) {
        return -1;
    }
    /* now find start of instruction before */
    while (s->gen_opc_instr_start[j] == 0) {
        j--;
    }
    env->icount_decr.u16.low -= s->gen_opc_icount[j];
    restore_state_to_opc(env, tb, j);
#ifdef CONFIG_PROFILER
    s->restore_time += profile_getclock() - ti;
#endif
    return 0;
}
bool cpu_restore_state(CPUArchState *env, uintptr_t retaddr)
{
    TranslationBlock *tb;

    tb = tb_find_pc(retaddr);
    if (tb) {
        cpu_restore_state_from_tb(tb, env, retaddr);
        return true;
    }
    return false;
}
#ifdef _WIN32
static inline void map_exec(void *addr, long size)
{
    DWORD old_protect;

    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static inline void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0) {
        qemu_host_page_size = qemu_real_host_page_size;
    }
    if (qemu_host_page_size < TARGET_PAGE_SIZE) {
        qemu_host_page_size = TARGET_PAGE_SIZE;
    }
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;
#if defined(CONFIG_USER_ONLY)
    /* We can't use g_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = g_malloc0(SIZE); } while (0)
#endif
    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));
    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }
        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }
    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC
    return pd + (index & (L2_SIZE - 1));
}
static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}
#if !defined(CONFIG_USER_ONLY)
#define mmap_lock() do { } while (0)
#define mmap_unlock() do { } while (0)
#endif
#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc will be used. */
/* ??? 64-bit hosts ought to have no problem mmaping data outside the
   region in which the guest needs to run.  Revisit this.  */
#define USE_STATIC_CODE_GEN_BUFFER
#endif
/* ??? Should configure for this, not list operating systems here.  */
#if (defined(__linux__) \
    || defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__) \
    || defined(__NetBSD__))
# define USE_MMAP
#endif
/* Minimum size of the code gen buffer.  This number is randomly chosen,
   but not so small that we can't have a fair number of TB's live.  */
#define MIN_CODE_GEN_BUFFER_SIZE     (1024u * 1024)
/* Maximum size of the code gen buffer we'd like to use.  Unless otherwise
   indicated, this is constrained by the range of direct branches on the
   host cpu, as used by the TCG implementation of goto_tb.  */
#if defined(__x86_64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__sparc__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__arm__)
# define MAX_CODE_GEN_BUFFER_SIZE  (16u * 1024 * 1024)
#elif defined(__s390x__)
  /* We have a +- 4GB range on the branches; leave some slop.  */
# define MAX_CODE_GEN_BUFFER_SIZE  (3ul * 1024 * 1024 * 1024)
#else
# define MAX_CODE_GEN_BUFFER_SIZE  ((size_t)-1)
#endif
#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32u * 1024 * 1024)

#define DEFAULT_CODE_GEN_BUFFER_SIZE \
  (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
   ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)
static inline size_t size_code_gen_buffer(size_t tb_size)
{
    /* Size the buffer.  */
    if (tb_size == 0) {
#ifdef USE_STATIC_CODE_GEN_BUFFER
        tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* ??? Needs adjustments.  */
        /* ??? If we relax the requirement that CONFIG_USER_ONLY use the
           static buffer, we could size this on RESERVED_VA, on the text
           segment size of the executable, or continue to use the default.  */
        tb_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
        tb_size = MIN_CODE_GEN_BUFFER_SIZE;
    }
    if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
        tb_size = MAX_CODE_GEN_BUFFER_SIZE;
    }
    tcg_ctx.code_gen_buffer_size = tb_size;
    return tb_size;
}
#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
    __attribute__((aligned(CODE_GEN_ALIGN)));
static inline void *alloc_code_gen_buffer(void)
{
    map_exec(static_code_gen_buffer, tcg_ctx.code_gen_buffer_size);
    return static_code_gen_buffer;
}
#elif defined(USE_MMAP)
static inline void *alloc_code_gen_buffer(void)
{
    int flags = MAP_PRIVATE | MAP_ANONYMOUS;
    uintptr_t start = 0;
    void *buf;
    /* Constrain the position of the buffer based on the host cpu.
       Note that these addresses are chosen in concert with the
       addresses assigned in the relevant linker script file.  */
# if defined(__PIE__) || defined(__PIC__)
    /* Don't bother setting a preferred location if we're building
       a position-independent executable.  We're more likely to get
       an address near the main executable if we let the kernel
       choose the address.  */
# elif defined(__x86_64__) && defined(MAP_32BIT)
    /* Force the memory down into low memory with the executable.
       Leave the choice of exact location with the kernel.  */
    flags |= MAP_32BIT;
    /* Cannot expect to map more than 800MB in low memory.  */
    if (tcg_ctx.code_gen_buffer_size > 800u * 1024 * 1024) {
        tcg_ctx.code_gen_buffer_size = 800u * 1024 * 1024;
    }
# elif defined(__sparc__)
    start = 0x40000000ul;
# elif defined(__s390x__)
    start = 0x90000000ul;
# endif
    buf = mmap((void *)start, tcg_ctx.code_gen_buffer_size,
               PROT_WRITE | PROT_READ | PROT_EXEC, flags, -1, 0);
    return buf == MAP_FAILED ? NULL : buf;
}
#else
static inline void *alloc_code_gen_buffer(void)
{
    void *buf = g_malloc(tcg_ctx.code_gen_buffer_size);

    if (buf) {
        map_exec(buf, tcg_ctx.code_gen_buffer_size);
    }
    return buf;
}
#endif /* USE_STATIC_CODE_GEN_BUFFER, USE_MMAP */
static inline void code_gen_alloc(size_t tb_size)
{
    tcg_ctx.code_gen_buffer_size = size_code_gen_buffer(tb_size);
    tcg_ctx.code_gen_buffer = alloc_code_gen_buffer();
    if (tcg_ctx.code_gen_buffer == NULL) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
        exit(1);
    }

    qemu_madvise(tcg_ctx.code_gen_buffer, tcg_ctx.code_gen_buffer_size,
                 QEMU_MADV_HUGEPAGE);
    /* Steal room for the prologue at the end of the buffer.  This ensures
       (via the MAX_CODE_GEN_BUFFER_SIZE limits above) that direct branches
       from TB's to the prologue are going to be in range.  It also means
       that we don't need to mark (additional) portions of the data segment
       as executable.  */
    tcg_ctx.code_gen_prologue = tcg_ctx.code_gen_buffer +
            tcg_ctx.code_gen_buffer_size - 1024;
    tcg_ctx.code_gen_buffer_size -= 1024;
    tcg_ctx.code_gen_buffer_max_size = tcg_ctx.code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
    tcg_ctx.code_gen_max_blocks = tcg_ctx.code_gen_buffer_size /
            CODE_GEN_AVG_BLOCK_SIZE;
    tbs = g_malloc(tcg_ctx.code_gen_max_blocks * sizeof(TranslationBlock));
}
/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    tcg_ctx.code_gen_ptr = tcg_ctx.code_gen_buffer;
    tcg_register_jit(tcg_ctx.code_gen_buffer, tcg_ctx.code_gen_buffer_size);
    page_init();
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(&tcg_ctx);
#endif
}
bool tcg_enabled(void)
{
    return tcg_ctx.code_gen_buffer != NULL;
}
/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= tcg_ctx.code_gen_max_blocks ||
        (tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer) >=
         tcg_ctx.code_gen_buffer_max_size) {
        return NULL;
    }
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}
void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single use temporary TBs.
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        tcg_ctx.code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}
static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        g_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}
/* Set to NULL all the 'first_tb' fields in all PageDescs. */
static void page_flush_tb_1(int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1(level - 1, pp + i);
        }
    }
}
static void page_flush_tb(void)
{
    int i;

    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
    }
}
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUArchState *env1)
{
    CPUArchState *env;

#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)) /
           nb_tbs : 0);
#endif
    if ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)
        > tcg_ctx.code_gen_buffer_size) {
        cpu_abort(env1, "Internal error: code buffer overflow\n");
    }
    nb_tbs = 0;
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        memset(env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof(void *));
    }

    memset(tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof(void *));
    page_flush_tb();
    tcg_ctx.code_gen_ptr = tcg_ctx.code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}
#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;

    address &= TARGET_PAGE_MASK;
    for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for (tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}
/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for (tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif
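/* In the per-page TB lists (PageDesc.first_tb and
   TranslationBlock.page_next[]) the low two bits of each pointer encode
   which of the TB's pages (0 or 1) the link belongs to; masking with ~3
   recovers the real TranslationBlock pointer. */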
static inline void tb_hash_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;

    for (;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = tb1->phys_hash_next;
            break;
        }
        ptb = &tb1->phys_hash_next;
    }
}
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for (;;) {
        tb1 = *ptb;
        n1 = (uintptr_t)tb1 & 3;
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}
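/* A TB has two outgoing jump slots.  jmp_next[n] threads the TB onto a
   circular list of all TBs whose slot n jumps to the same destination;
   the destination TB heads that list via jmp_first.  A pointer whose low
   two bits equal 2 marks the list head. */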
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for (;;) {
            tb1 = *ptb;
            n1 = (uintptr_t)tb1 & 3;
            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
            if (n1 == n && tb1 == tb) {
                break;
            }
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}
/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[n]));
}
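/* tb_next_offset[n] is the offset, within this TB's generated code, of the
   instruction that follows jump n; retargeting the jump there makes it a
   fall-through, so the TB exits to the execution loop instead of chaining
   directly to another TB. */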
/* invalidate one TB */
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUArchState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;
    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_hash_remove(&tb_phys_hash[h], tb);
    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    tb_invalidated_flag = 1;
    /* remove the TB from the per-CPU jump cache */
    h = tb_jmp_cache_hash_func(tb->pc);
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb) {
            env->tb_jmp_cache[h] = NULL;
        }
    }
    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);
    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for (;;) {
        n1 = (uintptr_t)tb1 & 3;
        if (n1 == 2) {
            break;
        }
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
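/* set_bits() sets bit positions [start, start + len) in the bitmap; it is
   used below to mark which bytes of a guest page are covered by translated
   code. */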
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE) {
                tb_end = TARGET_PAGE_SIZE;
            }
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
TranslationBlock *tb_gen_code(CPUArchState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info. */
        tb_invalidated_flag = 1;
    }
    tc_ptr = tcg_ctx.code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    tcg_ctx.code_gen_ptr = (void *)(((uintptr_t)tcg_ctx.code_gen_ptr +
            code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}
/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[. NOTE: start and end may refer to *different* physical pages.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 */
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end,
                              int is_cpu_write_access)
{
    while (start < end) {
        tb_invalidate_phys_page_range(start, end, is_cpu_write_access);
        start &= TARGET_PAGE_MASK;
        start += TARGET_PAGE_SIZE;
    }
}
/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[. NOTE: start and end must refer to the *same* physical page.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUArchState *env = cpu_single_env;
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }
    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all
       the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state_from_tb(current_tb, env, env->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb) {
                    cpu_interrupt(env, env->interrupt_request);
                }
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}
/* len must be <= 8 and start must be a multiple of len */
void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;

#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                 cpu_single_env->mem_io_vaddr, len,
                 cpu_single_env->eip,
                 cpu_single_env->eip +
                 (intptr_t)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1)) {
            goto do_invalidate;
        }
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                    uintptr_t pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUArchState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif
    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state_from_tb(current_tb, env, pc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif
/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;
#endif
    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    page_already_protected = p->first_tb != NULL;
#endif
    p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);
    invalidate_page_bitmap(p);
#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for (addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find(addr >> TARGET_PAGE_BITS);
            if (!p2) {
                continue;
            }
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
                         tb_page_addr_t phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;
    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;
    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1) {
        tb_alloc_page(tb, 1, phys_page2);
    } else {
        tb->page_addr[1] = -1;
    }
    tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;
    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff) {
        tb_reset_jump(tb, 0);
    }
    if (tb->tb_next_offset[1] != 0xffff) {
        tb_reset_jump(tb, 1);
    }
#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}
#if defined(CONFIG_QEMU_LDST_OPTIMIZATION) && defined(CONFIG_SOFTMMU)
/* check whether the given addr is in TCG generated code buffer or not */
bool is_tcg_gen_code(uintptr_t tc_ptr)
{
    /* This can be called during code generation, code_gen_buffer_max_size
       is used instead of code_gen_ptr for upper boundary checking */
    return (tc_ptr >= (uintptr_t)tcg_ctx.code_gen_buffer &&
            tc_ptr < (uintptr_t)(tcg_ctx.code_gen_buffer +
                                 tcg_ctx.code_gen_buffer_max_size));
}
#endif
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
static TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
{
    int m_min, m_max, m;
    uintptr_t v;
    TranslationBlock *tb;
    if (tc_ptr < (uintptr_t)tcg_ctx.code_gen_buffer ||
        tc_ptr >= (uintptr_t)tcg_ctx.code_gen_ptr) {
        return NULL;
    }
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (uintptr_t)tb->tc_ptr;
        if (v == tc_ptr) {
            return tb;
        } else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}
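/* The binary search is valid because tb_alloc() hands out tbs[] entries in
   the same order that code_gen_ptr advances, so tc_ptr values are ascending. */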
static void tb_reset_jump_recursive(TranslationBlock *tb);
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for (;;) {
            n1 = (uintptr_t)tb1 & 3;
            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
            if (n1 == 2) {
                break;
            }
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for (;;) {
            tb1 = *ptb;
            n1 = (uintptr_t)tb1 & 3;
            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
            if (n1 == n && tb1 == tb) {
                break;
            }
            ptb = &tb1->jmp_next[n1];
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}
static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}
#if defined(TARGET_HAS_ICE) && !defined(CONFIG_USER_ONLY)
void tb_invalidate_phys_addr(hwaddr addr)
{
    ram_addr_t ram_addr;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch,
                             addr >> TARGET_PAGE_BITS);
->mr
)
1409 || (section
->mr
->rom_device
&& section
->mr
->readable
))) {
1412 ram_addr
= (memory_region_get_ram_addr(section
->mr
) & TARGET_PAGE_MASK
)
1413 + memory_region_section_addr(section
, addr
);
1414 tb_invalidate_phys_page_range(ram_addr
, ram_addr
+ 1, 0);
1416 #endif /* TARGET_HAS_ICE && !defined(CONFIG_USER_ONLY) */
void cpu_unlink_tb(CPUArchState *env)
{
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    spin_lock(&interrupt_lock);
    tb = env->current_tb;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    if (tb) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
    }
    spin_unlock(&interrupt_lock);
}
void tb_check_watchpoint(CPUArchState *env)
{
    TranslationBlock *tb;

    tb = tb_find_pc(env->mem_io_pc);
    if (!tb) {
        cpu_abort(env, "check_watchpoint: could not find TB for pc=%p",
                  (void *)env->mem_io_pc);
    }
    cpu_restore_state_from_tb(tb, env, env->mem_io_pc);
    tb_phys_invalidate(tb, -1);
}
#ifndef CONFIG_USER_ONLY
/* mask must never be zero, except for A20 change call */
static void tcg_handle_interrupt(CPUArchState *env, int mask)
{
    CPUState *cpu = ENV_GET_CPU(env);
    int old_mask;
    old_mask = env->interrupt_request;
    env->interrupt_request |= mask;
    /*
     * If called from iothread context, wake the target cpu in
     * case it's halted.
     */
    if (!qemu_cpu_is_self(cpu)) {
        qemu_cpu_kick(cpu);
        return;
    }

    if (use_icount) {
        env->icount_decr.u16.high = 0xffff;
        if (!can_do_io(env)
            && (mask & ~old_mask) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
    } else {
        cpu_unlink_tb(env);
    }
}
CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUArchState *env, uintptr_t retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;
    tb = tb_find_pc(retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  (void *)retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state_from_tb(tb, env, retaddr);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
        && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK) {
        cpu_abort(env, "TB too big during recompile");
    }
    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}
void tb_flush_jmp_cache(CPUArchState *env, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset(&env->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset(&env->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}
void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for (i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size) {
            max_target_code_size = tb->size;
        }
        if (tb->page_addr[1] != -1) {
            cross_page++;
        }
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %td/%zd\n",
                tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer,
                tcg_ctx.code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, tcg_ctx.code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %td bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer) /
                        nb_tbs : 0,
                target_code_size ?
                    (double) (tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer) /
                    target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n", cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}
#else /* CONFIG_USER_ONLY */

void cpu_interrupt(CPUArchState *env, int mask)
{
    env->interrupt_request |= mask;
    cpu_unlink_tb(env);
}
/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */
struct walk_memory_regions_data {
    walk_memory_regions_fn fn;
    void *priv;
    uintptr_t start;
    int prot;
};
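/* walk_memory_regions_end() flushes the region accumulated so far; a new
   region is opened whenever the protection bits change, so 'fn' is called
   once per maximal run of identically-protected pages. */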
static int walk_memory_regions_end(struct walk_memory_regions_data *data,
                                   abi_ulong end, int new_prot)
{
    if (data->start != -1ul) {
        int rc = data->fn(data->priv, data->start, end, data->prot);
        if (rc != 0) {
            return rc;
        }
    }

    data->start = (new_prot ? end : -1ul);
    data->prot = new_prot;

    return 0;
}
static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 abi_ulong base, int level, void **lp)
{
    abi_ulong pa;
    int i, rc;

    if (*lp == NULL) {
        return walk_memory_regions_end(data, base, 0);
    }
    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            if (prot != data->prot) {
                rc = walk_memory_regions_end(data, pa, prot);
                if (rc != 0) {
                    return rc;
                }
            }
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < L2_SIZE; ++i) {
            pa = base | ((abi_ulong)i <<
                (TARGET_PAGE_BITS + L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
            if (rc != 0) {
                return rc;
            }
        }
    }

    return 0;
}
int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    struct walk_memory_regions_data data;
    uintptr_t i;

    data.fn = fn;
    data.priv = priv;
    data.start = -1ul;
    data.prot = 0;

    for (i = 0; i < V_L1_SIZE; i++) {
        int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
                                       V_L1_SHIFT / L2_BITS - 1, l1_map + i);

        if (rc != 0) {
            return rc;
        }
    }

    return walk_memory_regions_end(&data, 0, 0);
}
static int dump_region(void *priv, abi_ulong start,
                       abi_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
                   " "TARGET_ABI_FMT_lx" %c%c%c\n",
                   start, end, end - start,
                   ((prot & PAGE_READ) ? 'r' : '-'),
                   ((prot & PAGE_WRITE) ? 'w' : '-'),
                   ((prot & PAGE_EXEC) ? 'x' : '-'));

    return 0;
}
/* dump memory mappings */
void page_dump(FILE *f)
{
    (void) fprintf(f, "%-8s %-8s %-8s %s\n",
                   "start", "end", "size", "prot");
    walk_memory_regions(f, dump_region);
}
int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        return 0;
    }
    return p->flags;
}
/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is positioned automatically depending
   on PAGE_WRITE.  The mmap_lock should already be held. */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;
    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
    assert(start < end);
    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        flags |= PAGE_WRITE_ORG;
    }
    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate
           the code inside.  */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
}
int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
    if (start + len - 1 < start) {
        /* We've wrapped around. */
        return -1;
    }
    /* must do before we lose bits in the next step */
    end = TARGET_PAGE_ALIGN(start + len);
    start = start & TARGET_PAGE_MASK;
    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p) {
            return -1;
        }
        if (!(p->flags & PAGE_VALID)) {
            return -1;
        }

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) {
            return -1;
        }
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG)) {
                return -1;
            }
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL)) {
                    return -1;
                }
            }
        }
    }
    return 0;
}
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, uintptr_t pc, void *puc)
{
    unsigned int prot;
    PageDesc *p;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
    mmap_lock();

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        mmap_unlock();
        return 0;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
        host_start = address & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;

        prot = 0;
        for (addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
            p = page_find(addr >> TARGET_PAGE_BITS);
            p->flags |= PAGE_WRITE;
            prot |= p->flags;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(addr, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(addr);
#endif
        }
        mprotect((void *)g2h(host_start), qemu_host_page_size,
                 prot & PAGE_BITS);

        mmap_unlock();
        return 1;
    }
    mmap_unlock();
    return 0;
}

#endif /* CONFIG_USER_ONLY */