2 * virtual page mapping and translated block handling
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 #define WIN32_LEAN_AND_MEAN
25 #include <sys/types.h>
38 #include "qemu-common.h"
40 #if defined(CONFIG_USER_ONLY)
44 //#define DEBUG_TB_INVALIDATE
47 //#define DEBUG_UNASSIGNED
49 /* make various TB consistency checks */
50 //#define DEBUG_TB_CHECK
51 //#define DEBUG_TLB_CHECK
53 //#define DEBUG_IOPORT
54 //#define DEBUG_SUBPAGE
56 #if !defined(CONFIG_USER_ONLY)
57 /* TB consistency checks only implemented for usermode emulation. */
61 #define SMC_BITMAP_USE_THRESHOLD 10
63 #define MMAP_AREA_START 0x00000000
64 #define MMAP_AREA_END 0xa8000000
66 #if defined(TARGET_SPARC64)
67 #define TARGET_PHYS_ADDR_SPACE_BITS 41
68 #elif defined(TARGET_SPARC)
69 #define TARGET_PHYS_ADDR_SPACE_BITS 36
70 #elif defined(TARGET_ALPHA)
71 #define TARGET_PHYS_ADDR_SPACE_BITS 42
72 #define TARGET_VIRT_ADDR_SPACE_BITS 42
73 #elif defined(TARGET_PPC64)
74 #define TARGET_PHYS_ADDR_SPACE_BITS 42
75 #elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
76 #define TARGET_PHYS_ADDR_SPACE_BITS 42
77 #elif defined(TARGET_I386) && !defined(USE_KQEMU)
78 #define TARGET_PHYS_ADDR_SPACE_BITS 36
80 /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
81 #define TARGET_PHYS_ADDR_SPACE_BITS 32
/* Array of all translation blocks; allocated in code_gen_alloc()
   with room for code_gen_max_blocks entries. */
84 TranslationBlock
*tbs
;
/* capacity of the tbs array, derived from the code buffer size
   (code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE) */
85 int code_gen_max_blocks
;
/* hash table mapping a physical-PC hash to chains of TBs */
86 TranslationBlock
*tb_phys_hash
[CODE_GEN_PHYS_HASH_SIZE
];
88 /* any access to the tbs or the page table must use this lock */
89 spinlock_t tb_lock
= SPIN_LOCK_UNLOCKED
;
/* statically allocated prologue for generated code; made executable
   via map_exec() in code_gen_alloc() */
91 uint8_t code_gen_prologue
[1024] __attribute__((aligned (32)));
/* buffer holding the generated host code */
92 uint8_t *code_gen_buffer
;
93 unsigned long code_gen_buffer_size
;
94 /* threshold to flush the translated code buffer */
95 unsigned long code_gen_buffer_max_size
;
/* current allocation pointer inside code_gen_buffer; reset to the
   buffer start by tb_flush() */
96 uint8_t *code_gen_ptr
;
98 #if !defined(CONFIG_USER_ONLY)
/* size of the emulated physical RAM (system emulation only) */
99 ram_addr_t phys_ram_size
;
/* host pointer to the emulated physical RAM block -- presumably its
   base address; allocation site not visible in this chunk */
101 uint8_t *phys_ram_base
;
/* dirty-state tracking for physical RAM (usage not visible here) */
102 uint8_t *phys_ram_dirty
;
/* next free offset when handing out pieces of the RAM block */
103 static ram_addr_t phys_ram_alloc_offset
= 0;
107 /* current CPU in the current thread. It is only valid inside
109 CPUState
*cpu_single_env
;
111 typedef struct PageDesc
{
112 /* list of TBs intersecting this ram page */
113 TranslationBlock
*first_tb
;
114 /* in order to optimize self modifying code, we count the number
115 of lookups we do to a given page to use a bitmap */
116 unsigned int code_write_count
;
117 uint8_t *code_bitmap
;
118 #if defined(CONFIG_USER_ONLY)
123 typedef struct PhysPageDesc
{
124 /* offset in host memory of the page + io_index in the low 12 bits */
125 ram_addr_t phys_offset
;
129 #if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
130 /* XXX: this is a temporary hack for alpha target.
131 * In the future, this is to be replaced by a multi-level table
132 * to actually be able to handle the complete 64 bits address space.
134 #define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
136 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
139 #define L1_SIZE (1 << L1_BITS)
140 #define L2_SIZE (1 << L2_BITS)
/* page size reported by the host OS (getpagesize() on POSIX,
   GetSystemInfo() dwPageSize on Win32) -- set in page_init() */
142 unsigned long qemu_real_host_page_size
;
/* log2 of qemu_host_page_size, computed in page_init() */
143 unsigned long qemu_host_page_bits
;
/* host page size used by QEMU; clamped to at least TARGET_PAGE_SIZE
   in page_init() */
144 unsigned long qemu_host_page_size
;
/* ~(qemu_host_page_size - 1): mask to align an address down to a
   host page boundary */
145 unsigned long qemu_host_page_mask
;
147 /* XXX: for system emulation, it could just be an array */
/* first level of the two-level virtual page descriptor table;
   second-level chunks are allocated lazily by page_find_alloc() */
148 static PageDesc
*l1_map
[L1_SIZE
];
/* first level of the physical page descriptor table; allocated and
   zeroed in page_init(), walked by phys_page_find_alloc() */
149 PhysPageDesc
**l1_phys_map
;
151 #if !defined(CONFIG_USER_ONLY)
152 static void io_mem_init(void);
154 /* io memory support */
/* registered MMIO write callbacks, indexed by io slot and by an
   access-size slot (4 entries per slot; exact meaning not shown here) */
155 CPUWriteMemoryFunc
*io_mem_write
[IO_MEM_NB_ENTRIES
][4];
/* registered MMIO read callbacks, same indexing as io_mem_write */
156 CPUReadMemoryFunc
*io_mem_read
[IO_MEM_NB_ENTRIES
][4];
/* opaque pointer handed back to each io slot's callbacks */
157 void *io_mem_opaque
[IO_MEM_NB_ENTRIES
];
/* number of io slots currently in use -- TODO confirm; the code that
   updates it is not visible in this chunk */
158 static int io_mem_nb
;
/* io slot reserved for watchpoint handling (usage not in this chunk) */
159 static int io_mem_watch
;
/* default log file name; replaced by cpu_set_log_filename() */
163 char *logfilename
= "/tmp/qemu.log";
/* when non-zero, cpu_set_log() opens the log file in append mode */
166 static int log_append
= 0;
/* statistics counters (tb_phys_invalidate_count is bumped in
   tb_phys_invalidate()) */
169 static int tlb_flush_count
;
170 static int tb_flush_count
;
171 static int tb_phys_invalidate_count
;
173 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
174 typedef struct subpage_t
{
175 target_phys_addr_t base
;
176 CPUReadMemoryFunc
**mem_read
[TARGET_PAGE_SIZE
][4];
177 CPUWriteMemoryFunc
**mem_write
[TARGET_PAGE_SIZE
][4];
178 void *opaque
[TARGET_PAGE_SIZE
][2][4];
182 static void map_exec(void *addr
, long size
)
185 VirtualProtect(addr
, size
,
186 PAGE_EXECUTE_READWRITE
, &old_protect
);
190 static void map_exec(void *addr
, long size
)
192 unsigned long start
, end
, page_size
;
194 page_size
= getpagesize();
195 start
= (unsigned long)addr
;
196 start
&= ~(page_size
- 1);
198 end
= (unsigned long)addr
+ size
;
199 end
+= page_size
- 1;
200 end
&= ~(page_size
- 1);
202 mprotect((void *)start
, end
- start
,
203 PROT_READ
| PROT_WRITE
| PROT_EXEC
);
207 static void page_init(void)
209 /* NOTE: we can always suppose that qemu_host_page_size >=
213 SYSTEM_INFO system_info
;
216 GetSystemInfo(&system_info
);
217 qemu_real_host_page_size
= system_info
.dwPageSize
;
220 qemu_real_host_page_size
= getpagesize();
222 if (qemu_host_page_size
== 0)
223 qemu_host_page_size
= qemu_real_host_page_size
;
224 if (qemu_host_page_size
< TARGET_PAGE_SIZE
)
225 qemu_host_page_size
= TARGET_PAGE_SIZE
;
226 qemu_host_page_bits
= 0;
227 while ((1 << qemu_host_page_bits
) < qemu_host_page_size
)
228 qemu_host_page_bits
++;
229 qemu_host_page_mask
= ~(qemu_host_page_size
- 1);
230 l1_phys_map
= qemu_vmalloc(L1_SIZE
* sizeof(void *));
231 memset(l1_phys_map
, 0, L1_SIZE
* sizeof(void *));
233 #if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
235 long long startaddr
, endaddr
;
240 last_brk
= (unsigned long)sbrk(0);
241 f
= fopen("/proc/self/maps", "r");
244 n
= fscanf (f
, "%llx-%llx %*[^\n]\n", &startaddr
, &endaddr
);
246 startaddr
= MIN(startaddr
,
247 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS
) - 1);
248 endaddr
= MIN(endaddr
,
249 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS
) - 1);
250 page_set_flags(startaddr
& TARGET_PAGE_MASK
,
251 TARGET_PAGE_ALIGN(endaddr
),
262 static inline PageDesc
*page_find_alloc(target_ulong index
)
266 lp
= &l1_map
[index
>> L2_BITS
];
269 /* allocate if not found */
270 p
= qemu_malloc(sizeof(PageDesc
) * L2_SIZE
);
271 memset(p
, 0, sizeof(PageDesc
) * L2_SIZE
);
274 return p
+ (index
& (L2_SIZE
- 1));
277 static inline PageDesc
*page_find(target_ulong index
)
281 p
= l1_map
[index
>> L2_BITS
];
284 return p
+ (index
& (L2_SIZE
- 1));
287 static PhysPageDesc
*phys_page_find_alloc(target_phys_addr_t index
, int alloc
)
292 p
= (void **)l1_phys_map
;
293 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
295 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
296 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
298 lp
= p
+ ((index
>> (L1_BITS
+ L2_BITS
)) & (L1_SIZE
- 1));
301 /* allocate if not found */
304 p
= qemu_vmalloc(sizeof(void *) * L1_SIZE
);
305 memset(p
, 0, sizeof(void *) * L1_SIZE
);
309 lp
= p
+ ((index
>> L2_BITS
) & (L1_SIZE
- 1));
313 /* allocate if not found */
316 pd
= qemu_vmalloc(sizeof(PhysPageDesc
) * L2_SIZE
);
318 for (i
= 0; i
< L2_SIZE
; i
++)
319 pd
[i
].phys_offset
= IO_MEM_UNASSIGNED
;
321 return ((PhysPageDesc
*)pd
) + (index
& (L2_SIZE
- 1));
324 static inline PhysPageDesc
*phys_page_find(target_phys_addr_t index
)
326 return phys_page_find_alloc(index
, 0);
329 #if !defined(CONFIG_USER_ONLY)
330 static void tlb_protect_code(ram_addr_t ram_addr
);
331 static void tlb_unprotect_code_phys(CPUState
*env
, ram_addr_t ram_addr
,
333 #define mmap_lock() do { } while(0)
334 #define mmap_unlock() do { } while(0)
337 #define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
339 #if defined(CONFIG_USER_ONLY)
340 /* Currently it is not recommanded to allocate big chunks of data in
341 user mode. It will change when a dedicated libc will be used */
342 #define USE_STATIC_CODE_GEN_BUFFER
345 #ifdef USE_STATIC_CODE_GEN_BUFFER
346 static uint8_t static_code_gen_buffer
[DEFAULT_CODE_GEN_BUFFER_SIZE
];
349 void code_gen_alloc(unsigned long tb_size
)
351 #ifdef USE_STATIC_CODE_GEN_BUFFER
352 code_gen_buffer
= static_code_gen_buffer
;
353 code_gen_buffer_size
= DEFAULT_CODE_GEN_BUFFER_SIZE
;
354 map_exec(code_gen_buffer
, code_gen_buffer_size
);
356 code_gen_buffer_size
= tb_size
;
357 if (code_gen_buffer_size
== 0) {
358 #if defined(CONFIG_USER_ONLY)
359 /* in user mode, phys_ram_size is not meaningful */
360 code_gen_buffer_size
= DEFAULT_CODE_GEN_BUFFER_SIZE
;
362 /* XXX: needs ajustments */
363 code_gen_buffer_size
= (int)(phys_ram_size
/ 4);
366 if (code_gen_buffer_size
< MIN_CODE_GEN_BUFFER_SIZE
)
367 code_gen_buffer_size
= MIN_CODE_GEN_BUFFER_SIZE
;
368 /* The code gen buffer location may have constraints depending on
369 the host cpu and OS */
370 #if defined(__linux__)
373 flags
= MAP_PRIVATE
| MAP_ANONYMOUS
;
374 #if defined(__x86_64__)
376 /* Cannot map more than that */
377 if (code_gen_buffer_size
> (800 * 1024 * 1024))
378 code_gen_buffer_size
= (800 * 1024 * 1024);
380 code_gen_buffer
= mmap(NULL
, code_gen_buffer_size
,
381 PROT_WRITE
| PROT_READ
| PROT_EXEC
,
383 if (code_gen_buffer
== MAP_FAILED
) {
384 fprintf(stderr
, "Could not allocate dynamic translator buffer\n");
389 code_gen_buffer
= qemu_malloc(code_gen_buffer_size
);
390 if (!code_gen_buffer
) {
391 fprintf(stderr
, "Could not allocate dynamic translator buffer\n");
394 map_exec(code_gen_buffer
, code_gen_buffer_size
);
396 #endif /* !USE_STATIC_CODE_GEN_BUFFER */
397 map_exec(code_gen_prologue
, sizeof(code_gen_prologue
));
398 code_gen_buffer_max_size
= code_gen_buffer_size
-
399 code_gen_max_block_size();
400 code_gen_max_blocks
= code_gen_buffer_size
/ CODE_GEN_AVG_BLOCK_SIZE
;
401 tbs
= qemu_malloc(code_gen_max_blocks
* sizeof(TranslationBlock
));
404 /* Must be called before using the QEMU cpus. 'tb_size' is the size
405 (in bytes) allocated to the translation buffer. Zero means default
407 void cpu_exec_init_all(unsigned long tb_size
)
410 code_gen_alloc(tb_size
);
411 code_gen_ptr
= code_gen_buffer
;
413 #if !defined(CONFIG_USER_ONLY)
418 void cpu_exec_init(CPUState
*env
)
423 env
->next_cpu
= NULL
;
426 while (*penv
!= NULL
) {
427 penv
= (CPUState
**)&(*penv
)->next_cpu
;
430 env
->cpu_index
= cpu_index
;
431 env
->nb_watchpoints
= 0;
435 static inline void invalidate_page_bitmap(PageDesc
*p
)
437 if (p
->code_bitmap
) {
438 qemu_free(p
->code_bitmap
);
439 p
->code_bitmap
= NULL
;
441 p
->code_write_count
= 0;
444 /* set to NULL all the 'first_tb' fields in all PageDescs */
445 static void page_flush_tb(void)
450 for(i
= 0; i
< L1_SIZE
; i
++) {
453 for(j
= 0; j
< L2_SIZE
; j
++) {
455 invalidate_page_bitmap(p
);
462 /* flush all the translation blocks */
463 /* XXX: tb_flush is currently not thread safe */
464 void tb_flush(CPUState
*env1
)
467 #if defined(DEBUG_FLUSH)
468 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
469 (unsigned long)(code_gen_ptr
- code_gen_buffer
),
471 ((unsigned long)(code_gen_ptr
- code_gen_buffer
)) / nb_tbs
: 0);
473 if ((unsigned long)(code_gen_ptr
- code_gen_buffer
) > code_gen_buffer_size
)
474 cpu_abort(env1
, "Internal error: code buffer overflow\n");
478 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
479 memset (env
->tb_jmp_cache
, 0, TB_JMP_CACHE_SIZE
* sizeof (void *));
482 memset (tb_phys_hash
, 0, CODE_GEN_PHYS_HASH_SIZE
* sizeof (void *));
485 code_gen_ptr
= code_gen_buffer
;
486 /* XXX: flush processor icache at this point if cache flush is
491 #ifdef DEBUG_TB_CHECK
493 static void tb_invalidate_check(target_ulong address
)
495 TranslationBlock
*tb
;
497 address
&= TARGET_PAGE_MASK
;
498 for(i
= 0;i
< CODE_GEN_PHYS_HASH_SIZE
; i
++) {
499 for(tb
= tb_phys_hash
[i
]; tb
!= NULL
; tb
= tb
->phys_hash_next
) {
500 if (!(address
+ TARGET_PAGE_SIZE
<= tb
->pc
||
501 address
>= tb
->pc
+ tb
->size
)) {
502 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
503 address
, (long)tb
->pc
, tb
->size
);
509 /* verify that all the pages have correct rights for code */
510 static void tb_page_check(void)
512 TranslationBlock
*tb
;
513 int i
, flags1
, flags2
;
515 for(i
= 0;i
< CODE_GEN_PHYS_HASH_SIZE
; i
++) {
516 for(tb
= tb_phys_hash
[i
]; tb
!= NULL
; tb
= tb
->phys_hash_next
) {
517 flags1
= page_get_flags(tb
->pc
);
518 flags2
= page_get_flags(tb
->pc
+ tb
->size
- 1);
519 if ((flags1
& PAGE_WRITE
) || (flags2
& PAGE_WRITE
)) {
520 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
521 (long)tb
->pc
, tb
->size
, flags1
, flags2
);
527 void tb_jmp_check(TranslationBlock
*tb
)
529 TranslationBlock
*tb1
;
532 /* suppress any remaining jumps to this TB */
536 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
539 tb1
= tb1
->jmp_next
[n1
];
541 /* check end of list */
543 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb
);
549 /* invalidate one TB */
550 static inline void tb_remove(TranslationBlock
**ptb
, TranslationBlock
*tb
,
553 TranslationBlock
*tb1
;
557 *ptb
= *(TranslationBlock
**)((char *)tb1
+ next_offset
);
560 ptb
= (TranslationBlock
**)((char *)tb1
+ next_offset
);
564 static inline void tb_page_remove(TranslationBlock
**ptb
, TranslationBlock
*tb
)
566 TranslationBlock
*tb1
;
572 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
574 *ptb
= tb1
->page_next
[n1
];
577 ptb
= &tb1
->page_next
[n1
];
581 static inline void tb_jmp_remove(TranslationBlock
*tb
, int n
)
583 TranslationBlock
*tb1
, **ptb
;
586 ptb
= &tb
->jmp_next
[n
];
589 /* find tb(n) in circular list */
593 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
594 if (n1
== n
&& tb1
== tb
)
597 ptb
= &tb1
->jmp_first
;
599 ptb
= &tb1
->jmp_next
[n1
];
602 /* now we can suppress tb(n) from the list */
603 *ptb
= tb
->jmp_next
[n
];
605 tb
->jmp_next
[n
] = NULL
;
609 /* reset the jump entry 'n' of a TB so that it is not chained to
611 static inline void tb_reset_jump(TranslationBlock
*tb
, int n
)
613 tb_set_jmp_target(tb
, n
, (unsigned long)(tb
->tc_ptr
+ tb
->tb_next_offset
[n
]));
616 static inline void tb_phys_invalidate(TranslationBlock
*tb
, target_ulong page_addr
)
621 target_phys_addr_t phys_pc
;
622 TranslationBlock
*tb1
, *tb2
;
624 /* remove the TB from the hash list */
625 phys_pc
= tb
->page_addr
[0] + (tb
->pc
& ~TARGET_PAGE_MASK
);
626 h
= tb_phys_hash_func(phys_pc
);
627 tb_remove(&tb_phys_hash
[h
], tb
,
628 offsetof(TranslationBlock
, phys_hash_next
));
630 /* remove the TB from the page list */
631 if (tb
->page_addr
[0] != page_addr
) {
632 p
= page_find(tb
->page_addr
[0] >> TARGET_PAGE_BITS
);
633 tb_page_remove(&p
->first_tb
, tb
);
634 invalidate_page_bitmap(p
);
636 if (tb
->page_addr
[1] != -1 && tb
->page_addr
[1] != page_addr
) {
637 p
= page_find(tb
->page_addr
[1] >> TARGET_PAGE_BITS
);
638 tb_page_remove(&p
->first_tb
, tb
);
639 invalidate_page_bitmap(p
);
642 tb_invalidated_flag
= 1;
644 /* remove the TB from the hash list */
645 h
= tb_jmp_cache_hash_func(tb
->pc
);
646 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
647 if (env
->tb_jmp_cache
[h
] == tb
)
648 env
->tb_jmp_cache
[h
] = NULL
;
651 /* suppress this TB from the two jump lists */
652 tb_jmp_remove(tb
, 0);
653 tb_jmp_remove(tb
, 1);
655 /* suppress any remaining jumps to this TB */
661 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
662 tb2
= tb1
->jmp_next
[n1
];
663 tb_reset_jump(tb1
, n1
);
664 tb1
->jmp_next
[n1
] = NULL
;
667 tb
->jmp_first
= (TranslationBlock
*)((long)tb
| 2); /* fail safe */
669 tb_phys_invalidate_count
++;
672 static inline void set_bits(uint8_t *tab
, int start
, int len
)
678 mask
= 0xff << (start
& 7);
679 if ((start
& ~7) == (end
& ~7)) {
681 mask
&= ~(0xff << (end
& 7));
686 start
= (start
+ 8) & ~7;
688 while (start
< end1
) {
693 mask
= ~(0xff << (end
& 7));
699 static void build_page_bitmap(PageDesc
*p
)
701 int n
, tb_start
, tb_end
;
702 TranslationBlock
*tb
;
704 p
->code_bitmap
= qemu_malloc(TARGET_PAGE_SIZE
/ 8);
707 memset(p
->code_bitmap
, 0, TARGET_PAGE_SIZE
/ 8);
712 tb
= (TranslationBlock
*)((long)tb
& ~3);
713 /* NOTE: this is subtle as a TB may span two physical pages */
715 /* NOTE: tb_end may be after the end of the page, but
716 it is not a problem */
717 tb_start
= tb
->pc
& ~TARGET_PAGE_MASK
;
718 tb_end
= tb_start
+ tb
->size
;
719 if (tb_end
> TARGET_PAGE_SIZE
)
720 tb_end
= TARGET_PAGE_SIZE
;
723 tb_end
= ((tb
->pc
+ tb
->size
) & ~TARGET_PAGE_MASK
);
725 set_bits(p
->code_bitmap
, tb_start
, tb_end
- tb_start
);
726 tb
= tb
->page_next
[n
];
730 #ifdef TARGET_HAS_PRECISE_SMC
732 static void tb_gen_code(CPUState
*env
,
733 target_ulong pc
, target_ulong cs_base
, int flags
,
736 TranslationBlock
*tb
;
738 target_ulong phys_pc
, phys_page2
, virt_page2
;
741 phys_pc
= get_phys_addr_code(env
, pc
);
744 /* flush must be done */
746 /* cannot fail at this point */
749 tc_ptr
= code_gen_ptr
;
751 tb
->cs_base
= cs_base
;
754 cpu_gen_code(env
, tb
, &code_gen_size
);
755 code_gen_ptr
= (void *)(((unsigned long)code_gen_ptr
+ code_gen_size
+ CODE_GEN_ALIGN
- 1) & ~(CODE_GEN_ALIGN
- 1));
757 /* check next page if needed */
758 virt_page2
= (pc
+ tb
->size
- 1) & TARGET_PAGE_MASK
;
760 if ((pc
& TARGET_PAGE_MASK
) != virt_page2
) {
761 phys_page2
= get_phys_addr_code(env
, virt_page2
);
763 tb_link_phys(tb
, phys_pc
, phys_page2
);
767 /* invalidate all TBs which intersect with the target physical page
768 starting in range [start;end[. NOTE: start and end must refer to
769 the same physical page. 'is_cpu_write_access' should be true if called
770 from a real cpu write access: the virtual CPU will exit the current
771 TB if code is modified inside this TB. */
772 void tb_invalidate_phys_page_range(target_phys_addr_t start
, target_phys_addr_t end
,
773 int is_cpu_write_access
)
775 int n
, current_tb_modified
, current_tb_not_found
, current_flags
;
776 CPUState
*env
= cpu_single_env
;
778 TranslationBlock
*tb
, *tb_next
, *current_tb
, *saved_tb
;
779 target_ulong tb_start
, tb_end
;
780 target_ulong current_pc
, current_cs_base
;
782 p
= page_find(start
>> TARGET_PAGE_BITS
);
785 if (!p
->code_bitmap
&&
786 ++p
->code_write_count
>= SMC_BITMAP_USE_THRESHOLD
&&
787 is_cpu_write_access
) {
788 /* build code bitmap */
789 build_page_bitmap(p
);
792 /* we remove all the TBs in the range [start, end[ */
793 /* XXX: see if in some cases it could be faster to invalidate all the code */
794 current_tb_not_found
= is_cpu_write_access
;
795 current_tb_modified
= 0;
796 current_tb
= NULL
; /* avoid warning */
797 current_pc
= 0; /* avoid warning */
798 current_cs_base
= 0; /* avoid warning */
799 current_flags
= 0; /* avoid warning */
803 tb
= (TranslationBlock
*)((long)tb
& ~3);
804 tb_next
= tb
->page_next
[n
];
805 /* NOTE: this is subtle as a TB may span two physical pages */
807 /* NOTE: tb_end may be after the end of the page, but
808 it is not a problem */
809 tb_start
= tb
->page_addr
[0] + (tb
->pc
& ~TARGET_PAGE_MASK
);
810 tb_end
= tb_start
+ tb
->size
;
812 tb_start
= tb
->page_addr
[1];
813 tb_end
= tb_start
+ ((tb
->pc
+ tb
->size
) & ~TARGET_PAGE_MASK
);
815 if (!(tb_end
<= start
|| tb_start
>= end
)) {
816 #ifdef TARGET_HAS_PRECISE_SMC
817 if (current_tb_not_found
) {
818 current_tb_not_found
= 0;
820 if (env
->mem_write_pc
) {
821 /* now we have a real cpu fault */
822 current_tb
= tb_find_pc(env
->mem_write_pc
);
825 if (current_tb
== tb
&&
826 !(current_tb
->cflags
& CF_SINGLE_INSN
)) {
827 /* If we are modifying the current TB, we must stop
828 its execution. We could be more precise by checking
829 that the modification is after the current PC, but it
830 would require a specialized function to partially
831 restore the CPU state */
833 current_tb_modified
= 1;
834 cpu_restore_state(current_tb
, env
,
835 env
->mem_write_pc
, NULL
);
836 #if defined(TARGET_I386)
837 current_flags
= env
->hflags
;
838 current_flags
|= (env
->eflags
& (IOPL_MASK
| TF_MASK
| VM_MASK
));
839 current_cs_base
= (target_ulong
)env
->segs
[R_CS
].base
;
840 current_pc
= current_cs_base
+ env
->eip
;
842 #error unsupported CPU
845 #endif /* TARGET_HAS_PRECISE_SMC */
846 /* we need to do that to handle the case where a signal
847 occurs while doing tb_phys_invalidate() */
850 saved_tb
= env
->current_tb
;
851 env
->current_tb
= NULL
;
853 tb_phys_invalidate(tb
, -1);
855 env
->current_tb
= saved_tb
;
856 if (env
->interrupt_request
&& env
->current_tb
)
857 cpu_interrupt(env
, env
->interrupt_request
);
862 #if !defined(CONFIG_USER_ONLY)
863 /* if no code remaining, no need to continue to use slow writes */
865 invalidate_page_bitmap(p
);
866 if (is_cpu_write_access
) {
867 tlb_unprotect_code_phys(env
, start
, env
->mem_write_vaddr
);
871 #ifdef TARGET_HAS_PRECISE_SMC
872 if (current_tb_modified
) {
873 /* we generate a block containing just the instruction
874 modifying the memory. It will ensure that it cannot modify
876 env
->current_tb
= NULL
;
877 tb_gen_code(env
, current_pc
, current_cs_base
, current_flags
,
879 cpu_resume_from_signal(env
, NULL
);
884 /* len must be <= 8 and start must be a multiple of len */
885 static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start
, int len
)
892 fprintf(logfile
, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
893 cpu_single_env
->mem_write_vaddr
, len
,
895 cpu_single_env
->eip
+ (long)cpu_single_env
->segs
[R_CS
].base
);
899 p
= page_find(start
>> TARGET_PAGE_BITS
);
902 if (p
->code_bitmap
) {
903 offset
= start
& ~TARGET_PAGE_MASK
;
904 b
= p
->code_bitmap
[offset
>> 3] >> (offset
& 7);
905 if (b
& ((1 << len
) - 1))
909 tb_invalidate_phys_page_range(start
, start
+ len
, 1);
913 #if !defined(CONFIG_SOFTMMU)
914 static void tb_invalidate_phys_page(target_phys_addr_t addr
,
915 unsigned long pc
, void *puc
)
917 int n
, current_flags
, current_tb_modified
;
918 target_ulong current_pc
, current_cs_base
;
920 TranslationBlock
*tb
, *current_tb
;
921 #ifdef TARGET_HAS_PRECISE_SMC
922 CPUState
*env
= cpu_single_env
;
925 addr
&= TARGET_PAGE_MASK
;
926 p
= page_find(addr
>> TARGET_PAGE_BITS
);
930 current_tb_modified
= 0;
932 current_pc
= 0; /* avoid warning */
933 current_cs_base
= 0; /* avoid warning */
934 current_flags
= 0; /* avoid warning */
935 #ifdef TARGET_HAS_PRECISE_SMC
937 current_tb
= tb_find_pc(pc
);
942 tb
= (TranslationBlock
*)((long)tb
& ~3);
943 #ifdef TARGET_HAS_PRECISE_SMC
944 if (current_tb
== tb
&&
945 !(current_tb
->cflags
& CF_SINGLE_INSN
)) {
946 /* If we are modifying the current TB, we must stop
947 its execution. We could be more precise by checking
948 that the modification is after the current PC, but it
949 would require a specialized function to partially
950 restore the CPU state */
952 current_tb_modified
= 1;
953 cpu_restore_state(current_tb
, env
, pc
, puc
);
954 #if defined(TARGET_I386)
955 current_flags
= env
->hflags
;
956 current_flags
|= (env
->eflags
& (IOPL_MASK
| TF_MASK
| VM_MASK
));
957 current_cs_base
= (target_ulong
)env
->segs
[R_CS
].base
;
958 current_pc
= current_cs_base
+ env
->eip
;
960 #error unsupported CPU
963 #endif /* TARGET_HAS_PRECISE_SMC */
964 tb_phys_invalidate(tb
, addr
);
965 tb
= tb
->page_next
[n
];
968 #ifdef TARGET_HAS_PRECISE_SMC
969 if (current_tb_modified
) {
970 /* we generate a block containing just the instruction
971 modifying the memory. It will ensure that it cannot modify
973 env
->current_tb
= NULL
;
974 tb_gen_code(env
, current_pc
, current_cs_base
, current_flags
,
976 cpu_resume_from_signal(env
, puc
);
982 /* add the tb in the target page and protect it if necessary */
983 static inline void tb_alloc_page(TranslationBlock
*tb
,
984 unsigned int n
, target_ulong page_addr
)
987 TranslationBlock
*last_first_tb
;
989 tb
->page_addr
[n
] = page_addr
;
990 p
= page_find_alloc(page_addr
>> TARGET_PAGE_BITS
);
991 tb
->page_next
[n
] = p
->first_tb
;
992 last_first_tb
= p
->first_tb
;
993 p
->first_tb
= (TranslationBlock
*)((long)tb
| n
);
994 invalidate_page_bitmap(p
);
996 #if defined(TARGET_HAS_SMC) || 1
998 #if defined(CONFIG_USER_ONLY)
999 if (p
->flags
& PAGE_WRITE
) {
1004 /* force the host page as non writable (writes will have a
1005 page fault + mprotect overhead) */
1006 page_addr
&= qemu_host_page_mask
;
1008 for(addr
= page_addr
; addr
< page_addr
+ qemu_host_page_size
;
1009 addr
+= TARGET_PAGE_SIZE
) {
1011 p2
= page_find (addr
>> TARGET_PAGE_BITS
);
1015 p2
->flags
&= ~PAGE_WRITE
;
1016 page_get_flags(addr
);
1018 mprotect(g2h(page_addr
), qemu_host_page_size
,
1019 (prot
& PAGE_BITS
) & ~PAGE_WRITE
);
1020 #ifdef DEBUG_TB_INVALIDATE
1021 printf("protecting code page: 0x" TARGET_FMT_lx
"\n",
1026 /* if some code is already present, then the pages are already
1027 protected. So we handle the case where only the first TB is
1028 allocated in a physical page */
1029 if (!last_first_tb
) {
1030 tlb_protect_code(page_addr
);
1034 #endif /* TARGET_HAS_SMC */
1037 /* Allocate a new translation block. Flush the translation buffer if
1038 too many translation blocks or too much generated code. */
1039 TranslationBlock
*tb_alloc(target_ulong pc
)
1041 TranslationBlock
*tb
;
1043 if (nb_tbs
>= code_gen_max_blocks
||
1044 (code_gen_ptr
- code_gen_buffer
) >= code_gen_buffer_max_size
)
1046 tb
= &tbs
[nb_tbs
++];
1052 /* add a new TB and link it to the physical page tables. phys_page2 is
1053 (-1) to indicate that only one page contains the TB. */
1054 void tb_link_phys(TranslationBlock
*tb
,
1055 target_ulong phys_pc
, target_ulong phys_page2
)
1058 TranslationBlock
**ptb
;
1060 /* Grab the mmap lock to stop another thread invalidating this TB
1061 before we are done. */
1063 /* add in the physical hash table */
1064 h
= tb_phys_hash_func(phys_pc
);
1065 ptb
= &tb_phys_hash
[h
];
1066 tb
->phys_hash_next
= *ptb
;
1069 /* add in the page list */
1070 tb_alloc_page(tb
, 0, phys_pc
& TARGET_PAGE_MASK
);
1071 if (phys_page2
!= -1)
1072 tb_alloc_page(tb
, 1, phys_page2
);
1074 tb
->page_addr
[1] = -1;
1076 tb
->jmp_first
= (TranslationBlock
*)((long)tb
| 2);
1077 tb
->jmp_next
[0] = NULL
;
1078 tb
->jmp_next
[1] = NULL
;
1080 /* init original jump addresses */
1081 if (tb
->tb_next_offset
[0] != 0xffff)
1082 tb_reset_jump(tb
, 0);
1083 if (tb
->tb_next_offset
[1] != 0xffff)
1084 tb_reset_jump(tb
, 1);
1086 #ifdef DEBUG_TB_CHECK
1092 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1093 tb[1].tc_ptr. Return NULL if not found */
1094 TranslationBlock
*tb_find_pc(unsigned long tc_ptr
)
1096 int m_min
, m_max
, m
;
1098 TranslationBlock
*tb
;
1102 if (tc_ptr
< (unsigned long)code_gen_buffer
||
1103 tc_ptr
>= (unsigned long)code_gen_ptr
)
1105 /* binary search (cf Knuth) */
1108 while (m_min
<= m_max
) {
1109 m
= (m_min
+ m_max
) >> 1;
1111 v
= (unsigned long)tb
->tc_ptr
;
1114 else if (tc_ptr
< v
) {
1123 static void tb_reset_jump_recursive(TranslationBlock
*tb
);
1125 static inline void tb_reset_jump_recursive2(TranslationBlock
*tb
, int n
)
1127 TranslationBlock
*tb1
, *tb_next
, **ptb
;
1130 tb1
= tb
->jmp_next
[n
];
1132 /* find head of list */
1135 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
1138 tb1
= tb1
->jmp_next
[n1
];
1140 /* we are now sure now that tb jumps to tb1 */
1143 /* remove tb from the jmp_first list */
1144 ptb
= &tb_next
->jmp_first
;
1148 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
1149 if (n1
== n
&& tb1
== tb
)
1151 ptb
= &tb1
->jmp_next
[n1
];
1153 *ptb
= tb
->jmp_next
[n
];
1154 tb
->jmp_next
[n
] = NULL
;
1156 /* suppress the jump to next tb in generated code */
1157 tb_reset_jump(tb
, n
);
1159 /* suppress jumps in the tb on which we could have jumped */
1160 tb_reset_jump_recursive(tb_next
);
1164 static void tb_reset_jump_recursive(TranslationBlock
*tb
)
1166 tb_reset_jump_recursive2(tb
, 0);
1167 tb_reset_jump_recursive2(tb
, 1);
1170 #if defined(TARGET_HAS_ICE)
1171 static void breakpoint_invalidate(CPUState
*env
, target_ulong pc
)
1173 target_phys_addr_t addr
;
1175 ram_addr_t ram_addr
;
1178 addr
= cpu_get_phys_page_debug(env
, pc
);
1179 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
1181 pd
= IO_MEM_UNASSIGNED
;
1183 pd
= p
->phys_offset
;
1185 ram_addr
= (pd
& TARGET_PAGE_MASK
) | (pc
& ~TARGET_PAGE_MASK
);
1186 tb_invalidate_phys_page_range(ram_addr
, ram_addr
+ 1, 0);
1190 /* Add a watchpoint. */
1191 int cpu_watchpoint_insert(CPUState
*env
, target_ulong addr
)
1195 for (i
= 0; i
< env
->nb_watchpoints
; i
++) {
1196 if (addr
== env
->watchpoint
[i
].vaddr
)
1199 if (env
->nb_watchpoints
>= MAX_WATCHPOINTS
)
1202 i
= env
->nb_watchpoints
++;
1203 env
->watchpoint
[i
].vaddr
= addr
;
1204 tlb_flush_page(env
, addr
);
1205 /* FIXME: This flush is needed because of the hack to make memory ops
1206 terminate the TB. It can be removed once the proper IO trap and
1207 re-execute bits are in. */
1212 /* Remove a watchpoint. */
1213 int cpu_watchpoint_remove(CPUState
*env
, target_ulong addr
)
1217 for (i
= 0; i
< env
->nb_watchpoints
; i
++) {
1218 if (addr
== env
->watchpoint
[i
].vaddr
) {
1219 env
->nb_watchpoints
--;
1220 env
->watchpoint
[i
] = env
->watchpoint
[env
->nb_watchpoints
];
1221 tlb_flush_page(env
, addr
);
1228 /* Remove all watchpoints. */
1229 void cpu_watchpoint_remove_all(CPUState
*env
) {
1232 for (i
= 0; i
< env
->nb_watchpoints
; i
++) {
1233 tlb_flush_page(env
, env
->watchpoint
[i
].vaddr
);
1235 env
->nb_watchpoints
= 0;
1238 /* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1239 breakpoint is reached */
1240 int cpu_breakpoint_insert(CPUState
*env
, target_ulong pc
)
1242 #if defined(TARGET_HAS_ICE)
1245 for(i
= 0; i
< env
->nb_breakpoints
; i
++) {
1246 if (env
->breakpoints
[i
] == pc
)
1250 if (env
->nb_breakpoints
>= MAX_BREAKPOINTS
)
1252 env
->breakpoints
[env
->nb_breakpoints
++] = pc
;
1254 breakpoint_invalidate(env
, pc
);
1261 /* remove all breakpoints */
1262 void cpu_breakpoint_remove_all(CPUState
*env
) {
1263 #if defined(TARGET_HAS_ICE)
1265 for(i
= 0; i
< env
->nb_breakpoints
; i
++) {
1266 breakpoint_invalidate(env
, env
->breakpoints
[i
]);
1268 env
->nb_breakpoints
= 0;
1272 /* remove a breakpoint */
1273 int cpu_breakpoint_remove(CPUState
*env
, target_ulong pc
)
1275 #if defined(TARGET_HAS_ICE)
1277 for(i
= 0; i
< env
->nb_breakpoints
; i
++) {
1278 if (env
->breakpoints
[i
] == pc
)
1283 env
->nb_breakpoints
--;
1284 if (i
< env
->nb_breakpoints
)
1285 env
->breakpoints
[i
] = env
->breakpoints
[env
->nb_breakpoints
];
1287 breakpoint_invalidate(env
, pc
);
1294 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1295 CPU loop after each instruction */
1296 void cpu_single_step(CPUState
*env
, int enabled
)
1298 #if defined(TARGET_HAS_ICE)
1299 if (env
->singlestep_enabled
!= enabled
) {
1300 env
->singlestep_enabled
= enabled
;
1301 /* must flush all the translated code to avoid inconsistancies */
1302 /* XXX: only flush what is necessary */
1308 /* enable or disable low levels log */
1309 void cpu_set_log(int log_flags
)
1311 loglevel
= log_flags
;
1312 if (loglevel
&& !logfile
) {
1313 logfile
= fopen(logfilename
, log_append
? "a" : "w");
1315 perror(logfilename
);
1318 #if !defined(CONFIG_SOFTMMU)
1319 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1321 static uint8_t logfile_buf
[4096];
1322 setvbuf(logfile
, logfile_buf
, _IOLBF
, sizeof(logfile_buf
));
1325 setvbuf(logfile
, NULL
, _IOLBF
, 0);
1329 if (!loglevel
&& logfile
) {
1335 void cpu_set_log_filename(const char *filename
)
1337 logfilename
= strdup(filename
);
1342 cpu_set_log(loglevel
);
1345 /* mask must never be zero, except for A20 change call */
1346 void cpu_interrupt(CPUState
*env
, int mask
)
1348 #if !defined(USE_NPTL)
1349 TranslationBlock
*tb
;
1350 static spinlock_t interrupt_lock
= SPIN_LOCK_UNLOCKED
;
1353 /* FIXME: This is probably not threadsafe. A different thread could
1354 be in the mittle of a read-modify-write operation. */
1355 env
->interrupt_request
|= mask
;
1356 #if defined(USE_NPTL)
1357 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1358 problem and hope the cpu will stop of its own accord. For userspace
1359 emulation this often isn't actually as bad as it sounds. Often
1360 signals are used primarily to interrupt blocking syscalls. */
1362 /* if the cpu is currently executing code, we must unlink it and
1363 all the potentially executing TB */
1364 tb
= env
->current_tb
;
1365 if (tb
&& !testandset(&interrupt_lock
)) {
1366 env
->current_tb
= NULL
;
1367 tb_reset_jump_recursive(tb
);
1368 resetlock(&interrupt_lock
);
1373 void cpu_reset_interrupt(CPUState
*env
, int mask
)
1375 env
->interrupt_request
&= ~mask
;
1378 CPULogItem cpu_log_items
[] = {
1379 { CPU_LOG_TB_OUT_ASM
, "out_asm",
1380 "show generated host assembly code for each compiled TB" },
1381 { CPU_LOG_TB_IN_ASM
, "in_asm",
1382 "show target assembly code for each compiled TB" },
1383 { CPU_LOG_TB_OP
, "op",
1384 "show micro ops for each compiled TB" },
1385 { CPU_LOG_TB_OP_OPT
, "op_opt",
1388 "before eflags optimization and "
1390 "after liveness analysis" },
1391 { CPU_LOG_INT
, "int",
1392 "show interrupts/exceptions in short format" },
1393 { CPU_LOG_EXEC
, "exec",
1394 "show trace before each executed TB (lots of logs)" },
1395 { CPU_LOG_TB_CPU
, "cpu",
1396 "show CPU state before block translation" },
1398 { CPU_LOG_PCALL
, "pcall",
1399 "show protected mode far calls/returns/exceptions" },
1402 { CPU_LOG_IOPORT
, "ioport",
1403 "show all i/o ports accesses" },
/* Compare the first 'n' characters of 's1' against the NUL-terminated
   string 's2'.  Returns 1 when 's2' is exactly 'n' characters long and
   matches, 0 otherwise.  Used to match comma-separated log-mask tokens. */
static int cmp1(const char *s1, int n, const char *s2)
{
    /* cast avoids a signed/unsigned comparison between size_t and int;
       a negative n can never match a valid length */
    if (n < 0 || strlen(s2) != (size_t)n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}
1415 /* takes a comma separated list of log masks. Return 0 if error. */
1416 int cpu_str_to_log_mask(const char *str
)
1425 p1
= strchr(p
, ',');
1428 if(cmp1(p
,p1
-p
,"all")) {
1429 for(item
= cpu_log_items
; item
->mask
!= 0; item
++) {
1433 for(item
= cpu_log_items
; item
->mask
!= 0; item
++) {
1434 if (cmp1(p
, p1
- p
, item
->name
))
1448 void cpu_abort(CPUState
*env
, const char *fmt
, ...)
1455 fprintf(stderr
, "qemu: fatal: ");
1456 vfprintf(stderr
, fmt
, ap
);
1457 fprintf(stderr
, "\n");
1459 cpu_dump_state(env
, stderr
, fprintf
, X86_DUMP_FPU
| X86_DUMP_CCOP
);
1461 cpu_dump_state(env
, stderr
, fprintf
, 0);
1464 fprintf(logfile
, "qemu: fatal: ");
1465 vfprintf(logfile
, fmt
, ap2
);
1466 fprintf(logfile
, "\n");
1468 cpu_dump_state(env
, logfile
, fprintf
, X86_DUMP_FPU
| X86_DUMP_CCOP
);
1470 cpu_dump_state(env
, logfile
, fprintf
, 0);
1480 CPUState
*cpu_copy(CPUState
*env
)
1482 CPUState
*new_env
= cpu_init(env
->cpu_model_str
);
1483 /* preserve chaining and index */
1484 CPUState
*next_cpu
= new_env
->next_cpu
;
1485 int cpu_index
= new_env
->cpu_index
;
1486 memcpy(new_env
, env
, sizeof(CPUState
));
1487 new_env
->next_cpu
= next_cpu
;
1488 new_env
->cpu_index
= cpu_index
;
1492 #if !defined(CONFIG_USER_ONLY)
1494 static inline void tlb_flush_jmp_cache(CPUState
*env
, target_ulong addr
)
1498 /* Discard jump cache entries for any tb which might potentially
1499 overlap the flushed page. */
1500 i
= tb_jmp_cache_hash_page(addr
- TARGET_PAGE_SIZE
);
1501 memset (&env
->tb_jmp_cache
[i
], 0,
1502 TB_JMP_PAGE_SIZE
* sizeof(TranslationBlock
*));
1504 i
= tb_jmp_cache_hash_page(addr
);
1505 memset (&env
->tb_jmp_cache
[i
], 0,
1506 TB_JMP_PAGE_SIZE
* sizeof(TranslationBlock
*));
1509 /* NOTE: if flush_global is true, also flush global entries (not
1511 void tlb_flush(CPUState
*env
, int flush_global
)
1515 #if defined(DEBUG_TLB)
1516 printf("tlb_flush:\n");
1518 /* must reset current TB so that interrupts cannot modify the
1519 links while we are modifying them */
1520 env
->current_tb
= NULL
;
1522 for(i
= 0; i
< CPU_TLB_SIZE
; i
++) {
1523 env
->tlb_table
[0][i
].addr_read
= -1;
1524 env
->tlb_table
[0][i
].addr_write
= -1;
1525 env
->tlb_table
[0][i
].addr_code
= -1;
1526 env
->tlb_table
[1][i
].addr_read
= -1;
1527 env
->tlb_table
[1][i
].addr_write
= -1;
1528 env
->tlb_table
[1][i
].addr_code
= -1;
1529 #if (NB_MMU_MODES >= 3)
1530 env
->tlb_table
[2][i
].addr_read
= -1;
1531 env
->tlb_table
[2][i
].addr_write
= -1;
1532 env
->tlb_table
[2][i
].addr_code
= -1;
1533 #if (NB_MMU_MODES == 4)
1534 env
->tlb_table
[3][i
].addr_read
= -1;
1535 env
->tlb_table
[3][i
].addr_write
= -1;
1536 env
->tlb_table
[3][i
].addr_code
= -1;
1541 memset (env
->tb_jmp_cache
, 0, TB_JMP_CACHE_SIZE
* sizeof (void *));
1544 if (env
->kqemu_enabled
) {
1545 kqemu_flush(env
, flush_global
);
1551 static inline void tlb_flush_entry(CPUTLBEntry
*tlb_entry
, target_ulong addr
)
1553 if (addr
== (tlb_entry
->addr_read
&
1554 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
)) ||
1555 addr
== (tlb_entry
->addr_write
&
1556 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
)) ||
1557 addr
== (tlb_entry
->addr_code
&
1558 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
))) {
1559 tlb_entry
->addr_read
= -1;
1560 tlb_entry
->addr_write
= -1;
1561 tlb_entry
->addr_code
= -1;
1565 void tlb_flush_page(CPUState
*env
, target_ulong addr
)
1569 #if defined(DEBUG_TLB)
1570 printf("tlb_flush_page: " TARGET_FMT_lx
"\n", addr
);
1572 /* must reset current TB so that interrupts cannot modify the
1573 links while we are modifying them */
1574 env
->current_tb
= NULL
;
1576 addr
&= TARGET_PAGE_MASK
;
1577 i
= (addr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
1578 tlb_flush_entry(&env
->tlb_table
[0][i
], addr
);
1579 tlb_flush_entry(&env
->tlb_table
[1][i
], addr
);
1580 #if (NB_MMU_MODES >= 3)
1581 tlb_flush_entry(&env
->tlb_table
[2][i
], addr
);
1582 #if (NB_MMU_MODES == 4)
1583 tlb_flush_entry(&env
->tlb_table
[3][i
], addr
);
1587 tlb_flush_jmp_cache(env
, addr
);
1590 if (env
->kqemu_enabled
) {
1591 kqemu_flush_page(env
, addr
);
1596 /* update the TLBs so that writes to code in the virtual page 'addr'
1598 static void tlb_protect_code(ram_addr_t ram_addr
)
1600 cpu_physical_memory_reset_dirty(ram_addr
,
1601 ram_addr
+ TARGET_PAGE_SIZE
,
1605 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1606 tested for self modifying code */
1607 static void tlb_unprotect_code_phys(CPUState
*env
, ram_addr_t ram_addr
,
1610 phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
] |= CODE_DIRTY_FLAG
;
1613 static inline void tlb_reset_dirty_range(CPUTLBEntry
*tlb_entry
,
1614 unsigned long start
, unsigned long length
)
1617 if ((tlb_entry
->addr_write
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
) {
1618 addr
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) + tlb_entry
->addend
;
1619 if ((addr
- start
) < length
) {
1620 tlb_entry
->addr_write
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) | IO_MEM_NOTDIRTY
;
1625 void cpu_physical_memory_reset_dirty(ram_addr_t start
, ram_addr_t end
,
1629 unsigned long length
, start1
;
1633 start
&= TARGET_PAGE_MASK
;
1634 end
= TARGET_PAGE_ALIGN(end
);
1636 length
= end
- start
;
1639 len
= length
>> TARGET_PAGE_BITS
;
1641 /* XXX: should not depend on cpu context */
1643 if (env
->kqemu_enabled
) {
1646 for(i
= 0; i
< len
; i
++) {
1647 kqemu_set_notdirty(env
, addr
);
1648 addr
+= TARGET_PAGE_SIZE
;
1652 mask
= ~dirty_flags
;
1653 p
= phys_ram_dirty
+ (start
>> TARGET_PAGE_BITS
);
1654 for(i
= 0; i
< len
; i
++)
1657 /* we modify the TLB cache so that the dirty bit will be set again
1658 when accessing the range */
1659 start1
= start
+ (unsigned long)phys_ram_base
;
1660 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
1661 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1662 tlb_reset_dirty_range(&env
->tlb_table
[0][i
], start1
, length
);
1663 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1664 tlb_reset_dirty_range(&env
->tlb_table
[1][i
], start1
, length
);
1665 #if (NB_MMU_MODES >= 3)
1666 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1667 tlb_reset_dirty_range(&env
->tlb_table
[2][i
], start1
, length
);
1668 #if (NB_MMU_MODES == 4)
1669 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1670 tlb_reset_dirty_range(&env
->tlb_table
[3][i
], start1
, length
);
1676 static inline void tlb_update_dirty(CPUTLBEntry
*tlb_entry
)
1678 ram_addr_t ram_addr
;
1680 if ((tlb_entry
->addr_write
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
) {
1681 ram_addr
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) +
1682 tlb_entry
->addend
- (unsigned long)phys_ram_base
;
1683 if (!cpu_physical_memory_is_dirty(ram_addr
)) {
1684 tlb_entry
->addr_write
|= IO_MEM_NOTDIRTY
;
1689 /* update the TLB according to the current state of the dirty bits */
1690 void cpu_tlb_update_dirty(CPUState
*env
)
1693 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1694 tlb_update_dirty(&env
->tlb_table
[0][i
]);
1695 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1696 tlb_update_dirty(&env
->tlb_table
[1][i
]);
1697 #if (NB_MMU_MODES >= 3)
1698 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1699 tlb_update_dirty(&env
->tlb_table
[2][i
]);
1700 #if (NB_MMU_MODES == 4)
1701 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1702 tlb_update_dirty(&env
->tlb_table
[3][i
]);
1707 static inline void tlb_set_dirty1(CPUTLBEntry
*tlb_entry
,
1708 unsigned long start
)
1711 if ((tlb_entry
->addr_write
& ~TARGET_PAGE_MASK
) == IO_MEM_NOTDIRTY
) {
1712 addr
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) + tlb_entry
->addend
;
1713 if (addr
== start
) {
1714 tlb_entry
->addr_write
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) | IO_MEM_RAM
;
1719 /* update the TLB corresponding to virtual page vaddr and phys addr
1720 addr so that it is no longer dirty */
1721 static inline void tlb_set_dirty(CPUState
*env
,
1722 unsigned long addr
, target_ulong vaddr
)
1726 addr
&= TARGET_PAGE_MASK
;
1727 i
= (vaddr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
1728 tlb_set_dirty1(&env
->tlb_table
[0][i
], addr
);
1729 tlb_set_dirty1(&env
->tlb_table
[1][i
], addr
);
1730 #if (NB_MMU_MODES >= 3)
1731 tlb_set_dirty1(&env
->tlb_table
[2][i
], addr
);
1732 #if (NB_MMU_MODES == 4)
1733 tlb_set_dirty1(&env
->tlb_table
[3][i
], addr
);
1738 /* add a new TLB entry. At most one entry for a given virtual address
1739 is permitted. Return 0 if OK or 2 if the page could not be mapped
1740 (can only happen in non SOFTMMU mode for I/O pages or pages
1741 conflicting with the host address space). */
1742 int tlb_set_page_exec(CPUState
*env
, target_ulong vaddr
,
1743 target_phys_addr_t paddr
, int prot
,
1744 int mmu_idx
, int is_softmmu
)
1749 target_ulong address
;
1750 target_phys_addr_t addend
;
1755 p
= phys_page_find(paddr
>> TARGET_PAGE_BITS
);
1757 pd
= IO_MEM_UNASSIGNED
;
1759 pd
= p
->phys_offset
;
1761 #if defined(DEBUG_TLB)
1762 printf("tlb_set_page: vaddr=" TARGET_FMT_lx
" paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1763 vaddr
, (int)paddr
, prot
, mmu_idx
, is_softmmu
, pd
);
1768 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
&& !(pd
& IO_MEM_ROMD
)) {
1769 /* IO memory case */
1770 address
= vaddr
| pd
;
1773 /* standard memory */
1775 addend
= (unsigned long)phys_ram_base
+ (pd
& TARGET_PAGE_MASK
);
1778 /* Make accesses to pages with watchpoints go via the
1779 watchpoint trap routines. */
1780 for (i
= 0; i
< env
->nb_watchpoints
; i
++) {
1781 if (vaddr
== (env
->watchpoint
[i
].vaddr
& TARGET_PAGE_MASK
)) {
1782 if (address
& ~TARGET_PAGE_MASK
) {
1783 env
->watchpoint
[i
].addend
= 0;
1784 address
= vaddr
| io_mem_watch
;
1786 env
->watchpoint
[i
].addend
= pd
- paddr
+
1787 (unsigned long) phys_ram_base
;
1788 /* TODO: Figure out how to make read watchpoints coexist
1790 pd
= (pd
& TARGET_PAGE_MASK
) | io_mem_watch
| IO_MEM_ROMD
;
1795 index
= (vaddr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
1797 te
= &env
->tlb_table
[mmu_idx
][index
];
1798 te
->addend
= addend
;
1799 if (prot
& PAGE_READ
) {
1800 te
->addr_read
= address
;
1805 if (prot
& PAGE_EXEC
) {
1806 te
->addr_code
= address
;
1810 if (prot
& PAGE_WRITE
) {
1811 if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_ROM
||
1812 (pd
& IO_MEM_ROMD
)) {
1813 /* write access calls the I/O callback */
1814 te
->addr_write
= vaddr
|
1815 (pd
& ~(TARGET_PAGE_MASK
| IO_MEM_ROMD
));
1816 } else if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
&&
1817 !cpu_physical_memory_is_dirty(pd
)) {
1818 te
->addr_write
= vaddr
| IO_MEM_NOTDIRTY
;
1820 te
->addr_write
= address
;
1823 te
->addr_write
= -1;
1831 void tlb_flush(CPUState
*env
, int flush_global
)
1835 void tlb_flush_page(CPUState
*env
, target_ulong addr
)
1839 int tlb_set_page_exec(CPUState
*env
, target_ulong vaddr
,
1840 target_phys_addr_t paddr
, int prot
,
1841 int mmu_idx
, int is_softmmu
)
1846 /* dump memory mappings */
1847 void page_dump(FILE *f
)
1849 unsigned long start
, end
;
1850 int i
, j
, prot
, prot1
;
1853 fprintf(f
, "%-8s %-8s %-8s %s\n",
1854 "start", "end", "size", "prot");
1858 for(i
= 0; i
<= L1_SIZE
; i
++) {
1863 for(j
= 0;j
< L2_SIZE
; j
++) {
1868 if (prot1
!= prot
) {
1869 end
= (i
<< (32 - L1_BITS
)) | (j
<< TARGET_PAGE_BITS
);
1871 fprintf(f
, "%08lx-%08lx %08lx %c%c%c\n",
1872 start
, end
, end
- start
,
1873 prot
& PAGE_READ
? 'r' : '-',
1874 prot
& PAGE_WRITE
? 'w' : '-',
1875 prot
& PAGE_EXEC
? 'x' : '-');
1889 int page_get_flags(target_ulong address
)
1893 p
= page_find(address
>> TARGET_PAGE_BITS
);
1899 /* modify the flags of a page and invalidate the code if
1900 necessary. The flag PAGE_WRITE_ORG is positionned automatically
1901 depending on PAGE_WRITE */
1902 void page_set_flags(target_ulong start
, target_ulong end
, int flags
)
1907 /* mmap_lock should already be held. */
1908 start
= start
& TARGET_PAGE_MASK
;
1909 end
= TARGET_PAGE_ALIGN(end
);
1910 if (flags
& PAGE_WRITE
)
1911 flags
|= PAGE_WRITE_ORG
;
1912 for(addr
= start
; addr
< end
; addr
+= TARGET_PAGE_SIZE
) {
1913 p
= page_find_alloc(addr
>> TARGET_PAGE_BITS
);
1914 /* if the write protection is set, then we invalidate the code
1916 if (!(p
->flags
& PAGE_WRITE
) &&
1917 (flags
& PAGE_WRITE
) &&
1919 tb_invalidate_phys_page(addr
, 0, NULL
);
1925 int page_check_range(target_ulong start
, target_ulong len
, int flags
)
1931 end
= TARGET_PAGE_ALIGN(start
+len
); /* must do before we loose bits in the next step */
1932 start
= start
& TARGET_PAGE_MASK
;
1935 /* we've wrapped around */
1937 for(addr
= start
; addr
< end
; addr
+= TARGET_PAGE_SIZE
) {
1938 p
= page_find(addr
>> TARGET_PAGE_BITS
);
1941 if( !(p
->flags
& PAGE_VALID
) )
1944 if ((flags
& PAGE_READ
) && !(p
->flags
& PAGE_READ
))
1946 if (flags
& PAGE_WRITE
) {
1947 if (!(p
->flags
& PAGE_WRITE_ORG
))
1949 /* unprotect the page if it was put read-only because it
1950 contains translated code */
1951 if (!(p
->flags
& PAGE_WRITE
)) {
1952 if (!page_unprotect(addr
, 0, NULL
))
1961 /* called from signal handler: invalidate the code and unprotect the
1962 page. Return TRUE if the fault was succesfully handled. */
1963 int page_unprotect(target_ulong address
, unsigned long pc
, void *puc
)
1965 unsigned int page_index
, prot
, pindex
;
1967 target_ulong host_start
, host_end
, addr
;
1969 /* Technically this isn't safe inside a signal handler. However we
1970 know this only ever happens in a synchronous SEGV handler, so in
1971 practice it seems to be ok. */
1974 host_start
= address
& qemu_host_page_mask
;
1975 page_index
= host_start
>> TARGET_PAGE_BITS
;
1976 p1
= page_find(page_index
);
1981 host_end
= host_start
+ qemu_host_page_size
;
1984 for(addr
= host_start
;addr
< host_end
; addr
+= TARGET_PAGE_SIZE
) {
1988 /* if the page was really writable, then we change its
1989 protection back to writable */
1990 if (prot
& PAGE_WRITE_ORG
) {
1991 pindex
= (address
- host_start
) >> TARGET_PAGE_BITS
;
1992 if (!(p1
[pindex
].flags
& PAGE_WRITE
)) {
1993 mprotect((void *)g2h(host_start
), qemu_host_page_size
,
1994 (prot
& PAGE_BITS
) | PAGE_WRITE
);
1995 p1
[pindex
].flags
|= PAGE_WRITE
;
1996 /* and since the content will be modified, we must invalidate
1997 the corresponding translated code. */
1998 tb_invalidate_phys_page(address
, pc
, puc
);
1999 #ifdef DEBUG_TB_CHECK
2000 tb_invalidate_check(address
);
2010 static inline void tlb_set_dirty(CPUState
*env
,
2011 unsigned long addr
, target_ulong vaddr
)
2014 #endif /* defined(CONFIG_USER_ONLY) */
2016 #if !defined(CONFIG_USER_ONLY)
2017 static int subpage_register (subpage_t
*mmio
, uint32_t start
, uint32_t end
,
2019 static void *subpage_init (target_phys_addr_t base
, ram_addr_t
*phys
,
2020 ram_addr_t orig_memory
);
2021 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2024 if (addr > start_addr) \
2027 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2028 if (start_addr2 > 0) \
2032 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2033 end_addr2 = TARGET_PAGE_SIZE - 1; \
2035 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2036 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2041 /* register physical memory. 'size' must be a multiple of the target
2042 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2044 void cpu_register_physical_memory(target_phys_addr_t start_addr
,
2046 ram_addr_t phys_offset
)
2048 target_phys_addr_t addr
, end_addr
;
2051 ram_addr_t orig_size
= size
;
2055 /* XXX: should not depend on cpu context */
2057 if (env
->kqemu_enabled
) {
2058 kqemu_set_phys_mem(start_addr
, size
, phys_offset
);
2061 size
= (size
+ TARGET_PAGE_SIZE
- 1) & TARGET_PAGE_MASK
;
2062 end_addr
= start_addr
+ (target_phys_addr_t
)size
;
2063 for(addr
= start_addr
; addr
!= end_addr
; addr
+= TARGET_PAGE_SIZE
) {
2064 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
2065 if (p
&& p
->phys_offset
!= IO_MEM_UNASSIGNED
) {
2066 ram_addr_t orig_memory
= p
->phys_offset
;
2067 target_phys_addr_t start_addr2
, end_addr2
;
2068 int need_subpage
= 0;
2070 CHECK_SUBPAGE(addr
, start_addr
, start_addr2
, end_addr
, end_addr2
,
2072 if (need_subpage
|| phys_offset
& IO_MEM_SUBWIDTH
) {
2073 if (!(orig_memory
& IO_MEM_SUBPAGE
)) {
2074 subpage
= subpage_init((addr
& TARGET_PAGE_MASK
),
2075 &p
->phys_offset
, orig_memory
);
2077 subpage
= io_mem_opaque
[(orig_memory
& ~TARGET_PAGE_MASK
)
2080 subpage_register(subpage
, start_addr2
, end_addr2
, phys_offset
);
2082 p
->phys_offset
= phys_offset
;
2083 if ((phys_offset
& ~TARGET_PAGE_MASK
) <= IO_MEM_ROM
||
2084 (phys_offset
& IO_MEM_ROMD
))
2085 phys_offset
+= TARGET_PAGE_SIZE
;
2088 p
= phys_page_find_alloc(addr
>> TARGET_PAGE_BITS
, 1);
2089 p
->phys_offset
= phys_offset
;
2090 if ((phys_offset
& ~TARGET_PAGE_MASK
) <= IO_MEM_ROM
||
2091 (phys_offset
& IO_MEM_ROMD
))
2092 phys_offset
+= TARGET_PAGE_SIZE
;
2094 target_phys_addr_t start_addr2
, end_addr2
;
2095 int need_subpage
= 0;
2097 CHECK_SUBPAGE(addr
, start_addr
, start_addr2
, end_addr
,
2098 end_addr2
, need_subpage
);
2100 if (need_subpage
|| phys_offset
& IO_MEM_SUBWIDTH
) {
2101 subpage
= subpage_init((addr
& TARGET_PAGE_MASK
),
2102 &p
->phys_offset
, IO_MEM_UNASSIGNED
);
2103 subpage_register(subpage
, start_addr2
, end_addr2
,
2110 /* since each CPU stores ram addresses in its TLB cache, we must
2111 reset the modified entries */
2113 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
2118 /* XXX: temporary until new memory mapping API */
2119 ram_addr_t
cpu_get_physical_page_desc(target_phys_addr_t addr
)
2123 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
2125 return IO_MEM_UNASSIGNED
;
2126 return p
->phys_offset
;
2129 /* XXX: better than nothing */
2130 ram_addr_t
qemu_ram_alloc(ram_addr_t size
)
2133 if ((phys_ram_alloc_offset
+ size
) > phys_ram_size
) {
2134 fprintf(stderr
, "Not enough memory (requested_size = %" PRIu64
", max memory = %" PRIu64
"\n",
2135 (uint64_t)size
, (uint64_t)phys_ram_size
);
2138 addr
= phys_ram_alloc_offset
;
2139 phys_ram_alloc_offset
= TARGET_PAGE_ALIGN(phys_ram_alloc_offset
+ size
);
2143 void qemu_ram_free(ram_addr_t addr
)
2147 static uint32_t unassigned_mem_readb(void *opaque
, target_phys_addr_t addr
)
2149 #ifdef DEBUG_UNASSIGNED
2150 printf("Unassigned mem read " TARGET_FMT_plx
"\n", addr
);
2153 do_unassigned_access(addr
, 0, 0, 0);
2155 do_unassigned_access(addr
, 0, 0, 0);
2160 static void unassigned_mem_writeb(void *opaque
, target_phys_addr_t addr
, uint32_t val
)
2162 #ifdef DEBUG_UNASSIGNED
2163 printf("Unassigned mem write " TARGET_FMT_plx
" = 0x%x\n", addr
, val
);
2166 do_unassigned_access(addr
, 1, 0, 0);
2168 do_unassigned_access(addr
, 1, 0, 0);
2172 static CPUReadMemoryFunc
*unassigned_mem_read
[3] = {
2173 unassigned_mem_readb
,
2174 unassigned_mem_readb
,
2175 unassigned_mem_readb
,
2178 static CPUWriteMemoryFunc
*unassigned_mem_write
[3] = {
2179 unassigned_mem_writeb
,
2180 unassigned_mem_writeb
,
2181 unassigned_mem_writeb
,
2184 static void notdirty_mem_writeb(void *opaque
, target_phys_addr_t addr
, uint32_t val
)
2186 unsigned long ram_addr
;
2188 ram_addr
= addr
- (unsigned long)phys_ram_base
;
2189 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2190 if (!(dirty_flags
& CODE_DIRTY_FLAG
)) {
2191 #if !defined(CONFIG_USER_ONLY)
2192 tb_invalidate_phys_page_fast(ram_addr
, 1);
2193 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2196 stb_p((uint8_t *)(long)addr
, val
);
2198 if (cpu_single_env
->kqemu_enabled
&&
2199 (dirty_flags
& KQEMU_MODIFY_PAGE_MASK
) != KQEMU_MODIFY_PAGE_MASK
)
2200 kqemu_modify_page(cpu_single_env
, ram_addr
);
2202 dirty_flags
|= (0xff & ~CODE_DIRTY_FLAG
);
2203 phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
] = dirty_flags
;
2204 /* we remove the notdirty callback only if the code has been
2206 if (dirty_flags
== 0xff)
2207 tlb_set_dirty(cpu_single_env
, addr
, cpu_single_env
->mem_write_vaddr
);
2210 static void notdirty_mem_writew(void *opaque
, target_phys_addr_t addr
, uint32_t val
)
2212 unsigned long ram_addr
;
2214 ram_addr
= addr
- (unsigned long)phys_ram_base
;
2215 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2216 if (!(dirty_flags
& CODE_DIRTY_FLAG
)) {
2217 #if !defined(CONFIG_USER_ONLY)
2218 tb_invalidate_phys_page_fast(ram_addr
, 2);
2219 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2222 stw_p((uint8_t *)(long)addr
, val
);
2224 if (cpu_single_env
->kqemu_enabled
&&
2225 (dirty_flags
& KQEMU_MODIFY_PAGE_MASK
) != KQEMU_MODIFY_PAGE_MASK
)
2226 kqemu_modify_page(cpu_single_env
, ram_addr
);
2228 dirty_flags
|= (0xff & ~CODE_DIRTY_FLAG
);
2229 phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
] = dirty_flags
;
2230 /* we remove the notdirty callback only if the code has been
2232 if (dirty_flags
== 0xff)
2233 tlb_set_dirty(cpu_single_env
, addr
, cpu_single_env
->mem_write_vaddr
);
2236 static void notdirty_mem_writel(void *opaque
, target_phys_addr_t addr
, uint32_t val
)
2238 unsigned long ram_addr
;
2240 ram_addr
= addr
- (unsigned long)phys_ram_base
;
2241 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2242 if (!(dirty_flags
& CODE_DIRTY_FLAG
)) {
2243 #if !defined(CONFIG_USER_ONLY)
2244 tb_invalidate_phys_page_fast(ram_addr
, 4);
2245 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2248 stl_p((uint8_t *)(long)addr
, val
);
2250 if (cpu_single_env
->kqemu_enabled
&&
2251 (dirty_flags
& KQEMU_MODIFY_PAGE_MASK
) != KQEMU_MODIFY_PAGE_MASK
)
2252 kqemu_modify_page(cpu_single_env
, ram_addr
);
2254 dirty_flags
|= (0xff & ~CODE_DIRTY_FLAG
);
2255 phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
] = dirty_flags
;
2256 /* we remove the notdirty callback only if the code has been
2258 if (dirty_flags
== 0xff)
2259 tlb_set_dirty(cpu_single_env
, addr
, cpu_single_env
->mem_write_vaddr
);
2262 static CPUReadMemoryFunc
*error_mem_read
[3] = {
2263 NULL
, /* never used */
2264 NULL
, /* never used */
2265 NULL
, /* never used */
2268 static CPUWriteMemoryFunc
*notdirty_mem_write
[3] = {
2269 notdirty_mem_writeb
,
2270 notdirty_mem_writew
,
2271 notdirty_mem_writel
,
2274 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2275 so these check for a hit then pass through to the normal out-of-line
2277 static uint32_t watch_mem_readb(void *opaque
, target_phys_addr_t addr
)
2279 return ldub_phys(addr
);
2282 static uint32_t watch_mem_readw(void *opaque
, target_phys_addr_t addr
)
2284 return lduw_phys(addr
);
2287 static uint32_t watch_mem_readl(void *opaque
, target_phys_addr_t addr
)
2289 return ldl_phys(addr
);
2292 /* Generate a debug exception if a watchpoint has been hit.
2293 Returns the real physical address of the access. addr will be a host
2294 address in case of a RAM location. */
2295 static target_ulong
check_watchpoint(target_phys_addr_t addr
)
2297 CPUState
*env
= cpu_single_env
;
2299 target_ulong retaddr
;
2303 for (i
= 0; i
< env
->nb_watchpoints
; i
++) {
2304 watch
= env
->watchpoint
[i
].vaddr
;
2305 if (((env
->mem_write_vaddr
^ watch
) & TARGET_PAGE_MASK
) == 0) {
2306 retaddr
= addr
- env
->watchpoint
[i
].addend
;
2307 if (((addr
^ watch
) & ~TARGET_PAGE_MASK
) == 0) {
2308 cpu_single_env
->watchpoint_hit
= i
+ 1;
2309 cpu_interrupt(cpu_single_env
, CPU_INTERRUPT_DEBUG
);
2317 static void watch_mem_writeb(void *opaque
, target_phys_addr_t addr
,
2320 addr
= check_watchpoint(addr
);
2321 stb_phys(addr
, val
);
2324 static void watch_mem_writew(void *opaque
, target_phys_addr_t addr
,
2327 addr
= check_watchpoint(addr
);
2328 stw_phys(addr
, val
);
2331 static void watch_mem_writel(void *opaque
, target_phys_addr_t addr
,
2334 addr
= check_watchpoint(addr
);
2335 stl_phys(addr
, val
);
2338 static CPUReadMemoryFunc
*watch_mem_read
[3] = {
2344 static CPUWriteMemoryFunc
*watch_mem_write
[3] = {
2350 static inline uint32_t subpage_readlen (subpage_t
*mmio
, target_phys_addr_t addr
,
2356 idx
= SUBPAGE_IDX(addr
- mmio
->base
);
2357 #if defined(DEBUG_SUBPAGE)
2358 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
" idx %d\n", __func__
,
2359 mmio
, len
, addr
, idx
);
2361 ret
= (**mmio
->mem_read
[idx
][len
])(mmio
->opaque
[idx
][0][len
], addr
);
2366 static inline void subpage_writelen (subpage_t
*mmio
, target_phys_addr_t addr
,
2367 uint32_t value
, unsigned int len
)
2371 idx
= SUBPAGE_IDX(addr
- mmio
->base
);
2372 #if defined(DEBUG_SUBPAGE)
2373 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
" idx %d value %08x\n", __func__
,
2374 mmio
, len
, addr
, idx
, value
);
2376 (**mmio
->mem_write
[idx
][len
])(mmio
->opaque
[idx
][1][len
], addr
, value
);
2379 static uint32_t subpage_readb (void *opaque
, target_phys_addr_t addr
)
2381 #if defined(DEBUG_SUBPAGE)
2382 printf("%s: addr " TARGET_FMT_plx
"\n", __func__
, addr
);
2385 return subpage_readlen(opaque
, addr
, 0);
2388 static void subpage_writeb (void *opaque
, target_phys_addr_t addr
,
2391 #if defined(DEBUG_SUBPAGE)
2392 printf("%s: addr " TARGET_FMT_plx
" val %08x\n", __func__
, addr
, value
);
2394 subpage_writelen(opaque
, addr
, value
, 0);
2397 static uint32_t subpage_readw (void *opaque
, target_phys_addr_t addr
)
2399 #if defined(DEBUG_SUBPAGE)
2400 printf("%s: addr " TARGET_FMT_plx
"\n", __func__
, addr
);
2403 return subpage_readlen(opaque
, addr
, 1);
2406 static void subpage_writew (void *opaque
, target_phys_addr_t addr
,
2409 #if defined(DEBUG_SUBPAGE)
2410 printf("%s: addr " TARGET_FMT_plx
" val %08x\n", __func__
, addr
, value
);
2412 subpage_writelen(opaque
, addr
, value
, 1);
2415 static uint32_t subpage_readl (void *opaque
, target_phys_addr_t addr
)
2417 #if defined(DEBUG_SUBPAGE)
2418 printf("%s: addr " TARGET_FMT_plx
"\n", __func__
, addr
);
2421 return subpage_readlen(opaque
, addr
, 2);
2424 static void subpage_writel (void *opaque
,
2425 target_phys_addr_t addr
, uint32_t value
)
2427 #if defined(DEBUG_SUBPAGE)
2428 printf("%s: addr " TARGET_FMT_plx
" val %08x\n", __func__
, addr
, value
);
2430 subpage_writelen(opaque
, addr
, value
, 2);
2433 static CPUReadMemoryFunc
*subpage_read
[] = {
2439 static CPUWriteMemoryFunc
*subpage_write
[] = {
2445 static int subpage_register (subpage_t
*mmio
, uint32_t start
, uint32_t end
,
2451 if (start
>= TARGET_PAGE_SIZE
|| end
>= TARGET_PAGE_SIZE
)
2453 idx
= SUBPAGE_IDX(start
);
2454 eidx
= SUBPAGE_IDX(end
);
2455 #if defined(DEBUG_SUBPAGE)
2456 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__
,
2457 mmio
, start
, end
, idx
, eidx
, memory
);
2459 memory
>>= IO_MEM_SHIFT
;
2460 for (; idx
<= eidx
; idx
++) {
2461 for (i
= 0; i
< 4; i
++) {
2462 if (io_mem_read
[memory
][i
]) {
2463 mmio
->mem_read
[idx
][i
] = &io_mem_read
[memory
][i
];
2464 mmio
->opaque
[idx
][0][i
] = io_mem_opaque
[memory
];
2466 if (io_mem_write
[memory
][i
]) {
2467 mmio
->mem_write
[idx
][i
] = &io_mem_write
[memory
][i
];
2468 mmio
->opaque
[idx
][1][i
] = io_mem_opaque
[memory
];
2476 static void *subpage_init (target_phys_addr_t base
, ram_addr_t
*phys
,
2477 ram_addr_t orig_memory
)
2482 mmio
= qemu_mallocz(sizeof(subpage_t
));
2485 subpage_memory
= cpu_register_io_memory(0, subpage_read
, subpage_write
, mmio
);
2486 #if defined(DEBUG_SUBPAGE)
2487 printf("%s: %p base " TARGET_FMT_plx
" len %08x %d\n", __func__
,
2488 mmio
, base
, TARGET_PAGE_SIZE
, subpage_memory
);
2490 *phys
= subpage_memory
| IO_MEM_SUBPAGE
;
2491 subpage_register(mmio
, 0, TARGET_PAGE_SIZE
- 1, orig_memory
);
2497 static void io_mem_init(void)
2499 cpu_register_io_memory(IO_MEM_ROM
>> IO_MEM_SHIFT
, error_mem_read
, unassigned_mem_write
, NULL
);
2500 cpu_register_io_memory(IO_MEM_UNASSIGNED
>> IO_MEM_SHIFT
, unassigned_mem_read
, unassigned_mem_write
, NULL
);
2501 cpu_register_io_memory(IO_MEM_NOTDIRTY
>> IO_MEM_SHIFT
, error_mem_read
, notdirty_mem_write
, NULL
);
2504 io_mem_watch
= cpu_register_io_memory(-1, watch_mem_read
,
2505 watch_mem_write
, NULL
);
2506 /* alloc dirty bits array */
2507 phys_ram_dirty
= qemu_vmalloc(phys_ram_size
>> TARGET_PAGE_BITS
);
2508 memset(phys_ram_dirty
, 0xff, phys_ram_size
>> TARGET_PAGE_BITS
);
2511 /* mem_read and mem_write are arrays of functions containing the
2512 function to access byte (index 0), word (index 1) and dword (index
2513 2). Functions can be omitted with a NULL function pointer. The
2514 registered functions may be modified dynamically later.
2515 If io_index is non zero, the corresponding io zone is
2516 modified. If it is zero, a new io zone is allocated. The return
2517 value can be used with cpu_register_physical_memory(). (-1) is
2518 returned if error. */
2519 int cpu_register_io_memory(int io_index
,
2520 CPUReadMemoryFunc
**mem_read
,
2521 CPUWriteMemoryFunc
**mem_write
,
2524 int i
, subwidth
= 0;
2526 if (io_index
<= 0) {
2527 if (io_mem_nb
>= IO_MEM_NB_ENTRIES
)
2529 io_index
= io_mem_nb
++;
2531 if (io_index
>= IO_MEM_NB_ENTRIES
)
2535 for(i
= 0;i
< 3; i
++) {
2536 if (!mem_read
[i
] || !mem_write
[i
])
2537 subwidth
= IO_MEM_SUBWIDTH
;
2538 io_mem_read
[io_index
][i
] = mem_read
[i
];
2539 io_mem_write
[io_index
][i
] = mem_write
[i
];
2541 io_mem_opaque
[io_index
] = opaque
;
2542 return (io_index
<< IO_MEM_SHIFT
) | subwidth
;
2545 CPUWriteMemoryFunc
**cpu_get_io_memory_write(int io_index
)
2547 return io_mem_write
[io_index
>> IO_MEM_SHIFT
];
2550 CPUReadMemoryFunc
**cpu_get_io_memory_read(int io_index
)
2552 return io_mem_read
[io_index
>> IO_MEM_SHIFT
];
2555 #endif /* !defined(CONFIG_USER_ONLY) */
2557 /* physical memory access (slow version, mainly for debug) */
2558 #if defined(CONFIG_USER_ONLY)
2559 void cpu_physical_memory_rw(target_phys_addr_t addr
, uint8_t *buf
,
2560 int len
, int is_write
)
2567 page
= addr
& TARGET_PAGE_MASK
;
2568 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
2571 flags
= page_get_flags(page
);
2572 if (!(flags
& PAGE_VALID
))
2575 if (!(flags
& PAGE_WRITE
))
2577 /* XXX: this code should not depend on lock_user */
2578 if (!(p
= lock_user(VERIFY_WRITE
, addr
, l
, 0)))
2579 /* FIXME - should this return an error rather than just fail? */
2582 unlock_user(p
, addr
, l
);
2584 if (!(flags
& PAGE_READ
))
2586 /* XXX: this code should not depend on lock_user */
2587 if (!(p
= lock_user(VERIFY_READ
, addr
, l
, 1)))
2588 /* FIXME - should this return an error rather than just fail? */
2591 unlock_user(p
, addr
, 0);
2600 void cpu_physical_memory_rw(target_phys_addr_t addr
, uint8_t *buf
,
2601 int len
, int is_write
)
2606 target_phys_addr_t page
;
2611 page
= addr
& TARGET_PAGE_MASK
;
2612 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
2615 p
= phys_page_find(page
>> TARGET_PAGE_BITS
);
2617 pd
= IO_MEM_UNASSIGNED
;
2619 pd
= p
->phys_offset
;
2623 if ((pd
& ~TARGET_PAGE_MASK
) != IO_MEM_RAM
) {
2624 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
2625 /* XXX: could force cpu_single_env to NULL to avoid
2627 if (l
>= 4 && ((addr
& 3) == 0)) {
2628 /* 32 bit write access */
2630 io_mem_write
[io_index
][2](io_mem_opaque
[io_index
], addr
, val
);
2632 } else if (l
>= 2 && ((addr
& 1) == 0)) {
2633 /* 16 bit write access */
2635 io_mem_write
[io_index
][1](io_mem_opaque
[io_index
], addr
, val
);
2638 /* 8 bit write access */
2640 io_mem_write
[io_index
][0](io_mem_opaque
[io_index
], addr
, val
);
2644 unsigned long addr1
;
2645 addr1
= (pd
& TARGET_PAGE_MASK
) + (addr
& ~TARGET_PAGE_MASK
);
2647 ptr
= phys_ram_base
+ addr1
;
2648 memcpy(ptr
, buf
, l
);
2649 if (!cpu_physical_memory_is_dirty(addr1
)) {
2650 /* invalidate code */
2651 tb_invalidate_phys_page_range(addr1
, addr1
+ l
, 0);
2653 phys_ram_dirty
[addr1
>> TARGET_PAGE_BITS
] |=
2654 (0xff & ~CODE_DIRTY_FLAG
);
2658 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
&&
2659 !(pd
& IO_MEM_ROMD
)) {
2661 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
2662 if (l
>= 4 && ((addr
& 3) == 0)) {
2663 /* 32 bit read access */
2664 val
= io_mem_read
[io_index
][2](io_mem_opaque
[io_index
], addr
);
2667 } else if (l
>= 2 && ((addr
& 1) == 0)) {
2668 /* 16 bit read access */
2669 val
= io_mem_read
[io_index
][1](io_mem_opaque
[io_index
], addr
);
2673 /* 8 bit read access */
2674 val
= io_mem_read
[io_index
][0](io_mem_opaque
[io_index
], addr
);
2680 ptr
= phys_ram_base
+ (pd
& TARGET_PAGE_MASK
) +
2681 (addr
& ~TARGET_PAGE_MASK
);
2682 memcpy(buf
, ptr
, l
);
2691 /* used for ROM loading : can write in RAM and ROM */
2692 void cpu_physical_memory_write_rom(target_phys_addr_t addr
,
2693 const uint8_t *buf
, int len
)
2697 target_phys_addr_t page
;
2702 page
= addr
& TARGET_PAGE_MASK
;
2703 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
2706 p
= phys_page_find(page
>> TARGET_PAGE_BITS
);
2708 pd
= IO_MEM_UNASSIGNED
;
2710 pd
= p
->phys_offset
;
2713 if ((pd
& ~TARGET_PAGE_MASK
) != IO_MEM_RAM
&&
2714 (pd
& ~TARGET_PAGE_MASK
) != IO_MEM_ROM
&&
2715 !(pd
& IO_MEM_ROMD
)) {
2718 unsigned long addr1
;
2719 addr1
= (pd
& TARGET_PAGE_MASK
) + (addr
& ~TARGET_PAGE_MASK
);
2721 ptr
= phys_ram_base
+ addr1
;
2722 memcpy(ptr
, buf
, l
);
2731 /* warning: addr must be aligned */
2732 uint32_t ldl_phys(target_phys_addr_t addr
)
2740 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
2742 pd
= IO_MEM_UNASSIGNED
;
2744 pd
= p
->phys_offset
;
2747 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
&&
2748 !(pd
& IO_MEM_ROMD
)) {
2750 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
2751 val
= io_mem_read
[io_index
][2](io_mem_opaque
[io_index
], addr
);
2754 ptr
= phys_ram_base
+ (pd
& TARGET_PAGE_MASK
) +
2755 (addr
& ~TARGET_PAGE_MASK
);
2761 /* warning: addr must be aligned */
2762 uint64_t ldq_phys(target_phys_addr_t addr
)
2770 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
2772 pd
= IO_MEM_UNASSIGNED
;
2774 pd
= p
->phys_offset
;
2777 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
&&
2778 !(pd
& IO_MEM_ROMD
)) {
2780 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
2781 #ifdef TARGET_WORDS_BIGENDIAN
2782 val
= (uint64_t)io_mem_read
[io_index
][2](io_mem_opaque
[io_index
], addr
) << 32;
2783 val
|= io_mem_read
[io_index
][2](io_mem_opaque
[io_index
], addr
+ 4);
2785 val
= io_mem_read
[io_index
][2](io_mem_opaque
[io_index
], addr
);
2786 val
|= (uint64_t)io_mem_read
[io_index
][2](io_mem_opaque
[io_index
], addr
+ 4) << 32;
2790 ptr
= phys_ram_base
+ (pd
& TARGET_PAGE_MASK
) +
2791 (addr
& ~TARGET_PAGE_MASK
);
2798 uint32_t ldub_phys(target_phys_addr_t addr
)
2801 cpu_physical_memory_read(addr
, &val
, 1);
2806 uint32_t lduw_phys(target_phys_addr_t addr
)
2809 cpu_physical_memory_read(addr
, (uint8_t *)&val
, 2);
2810 return tswap16(val
);
2813 /* warning: addr must be aligned. The ram page is not masked as dirty
2814 and the code inside is not invalidated. It is useful if the dirty
2815 bits are used to track modified PTEs */
2816 void stl_phys_notdirty(target_phys_addr_t addr
, uint32_t val
)
2823 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
2825 pd
= IO_MEM_UNASSIGNED
;
2827 pd
= p
->phys_offset
;
2830 if ((pd
& ~TARGET_PAGE_MASK
) != IO_MEM_RAM
) {
2831 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
2832 io_mem_write
[io_index
][2](io_mem_opaque
[io_index
], addr
, val
);
2834 ptr
= phys_ram_base
+ (pd
& TARGET_PAGE_MASK
) +
2835 (addr
& ~TARGET_PAGE_MASK
);
2840 void stq_phys_notdirty(target_phys_addr_t addr
, uint64_t val
)
2847 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
2849 pd
= IO_MEM_UNASSIGNED
;
2851 pd
= p
->phys_offset
;
2854 if ((pd
& ~TARGET_PAGE_MASK
) != IO_MEM_RAM
) {
2855 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
2856 #ifdef TARGET_WORDS_BIGENDIAN
2857 io_mem_write
[io_index
][2](io_mem_opaque
[io_index
], addr
, val
>> 32);
2858 io_mem_write
[io_index
][2](io_mem_opaque
[io_index
], addr
+ 4, val
);
2860 io_mem_write
[io_index
][2](io_mem_opaque
[io_index
], addr
, val
);
2861 io_mem_write
[io_index
][2](io_mem_opaque
[io_index
], addr
+ 4, val
>> 32);
2864 ptr
= phys_ram_base
+ (pd
& TARGET_PAGE_MASK
) +
2865 (addr
& ~TARGET_PAGE_MASK
);
2870 /* warning: addr must be aligned */
2871 void stl_phys(target_phys_addr_t addr
, uint32_t val
)
2878 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
2880 pd
= IO_MEM_UNASSIGNED
;
2882 pd
= p
->phys_offset
;
2885 if ((pd
& ~TARGET_PAGE_MASK
) != IO_MEM_RAM
) {
2886 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
2887 io_mem_write
[io_index
][2](io_mem_opaque
[io_index
], addr
, val
);
2889 unsigned long addr1
;
2890 addr1
= (pd
& TARGET_PAGE_MASK
) + (addr
& ~TARGET_PAGE_MASK
);
2892 ptr
= phys_ram_base
+ addr1
;
2894 if (!cpu_physical_memory_is_dirty(addr1
)) {
2895 /* invalidate code */
2896 tb_invalidate_phys_page_range(addr1
, addr1
+ 4, 0);
2898 phys_ram_dirty
[addr1
>> TARGET_PAGE_BITS
] |=
2899 (0xff & ~CODE_DIRTY_FLAG
);
2905 void stb_phys(target_phys_addr_t addr
, uint32_t val
)
2908 cpu_physical_memory_write(addr
, &v
, 1);
2912 void stw_phys(target_phys_addr_t addr
, uint32_t val
)
2914 uint16_t v
= tswap16(val
);
2915 cpu_physical_memory_write(addr
, (const uint8_t *)&v
, 2);
2919 void stq_phys(target_phys_addr_t addr
, uint64_t val
)
2922 cpu_physical_memory_write(addr
, (const uint8_t *)&val
, 8);
2927 /* virtual memory access for debug */
2928 int cpu_memory_rw_debug(CPUState
*env
, target_ulong addr
,
2929 uint8_t *buf
, int len
, int is_write
)
2932 target_phys_addr_t phys_addr
;
2936 page
= addr
& TARGET_PAGE_MASK
;
2937 phys_addr
= cpu_get_phys_page_debug(env
, page
);
2938 /* if no physical page mapped, return an error */
2939 if (phys_addr
== -1)
2941 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
2944 cpu_physical_memory_rw(phys_addr
+ (addr
& ~TARGET_PAGE_MASK
),
2953 void dump_exec_info(FILE *f
,
2954 int (*cpu_fprintf
)(FILE *f
, const char *fmt
, ...))
2956 int i
, target_code_size
, max_target_code_size
;
2957 int direct_jmp_count
, direct_jmp2_count
, cross_page
;
2958 TranslationBlock
*tb
;
2960 target_code_size
= 0;
2961 max_target_code_size
= 0;
2963 direct_jmp_count
= 0;
2964 direct_jmp2_count
= 0;
2965 for(i
= 0; i
< nb_tbs
; i
++) {
2967 target_code_size
+= tb
->size
;
2968 if (tb
->size
> max_target_code_size
)
2969 max_target_code_size
= tb
->size
;
2970 if (tb
->page_addr
[1] != -1)
2972 if (tb
->tb_next_offset
[0] != 0xffff) {
2974 if (tb
->tb_next_offset
[1] != 0xffff) {
2975 direct_jmp2_count
++;
2979 /* XXX: avoid using doubles ? */
2980 cpu_fprintf(f
, "Translation buffer state:\n");
2981 cpu_fprintf(f
, "gen code size %ld/%ld\n",
2982 code_gen_ptr
- code_gen_buffer
, code_gen_buffer_max_size
);
2983 cpu_fprintf(f
, "TB count %d/%d\n",
2984 nb_tbs
, code_gen_max_blocks
);
2985 cpu_fprintf(f
, "TB avg target size %d max=%d bytes\n",
2986 nb_tbs
? target_code_size
/ nb_tbs
: 0,
2987 max_target_code_size
);
2988 cpu_fprintf(f
, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
2989 nb_tbs
? (code_gen_ptr
- code_gen_buffer
) / nb_tbs
: 0,
2990 target_code_size
? (double) (code_gen_ptr
- code_gen_buffer
) / target_code_size
: 0);
2991 cpu_fprintf(f
, "cross page TB count %d (%d%%)\n",
2993 nb_tbs
? (cross_page
* 100) / nb_tbs
: 0);
2994 cpu_fprintf(f
, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
2996 nb_tbs
? (direct_jmp_count
* 100) / nb_tbs
: 0,
2998 nb_tbs
? (direct_jmp2_count
* 100) / nb_tbs
: 0);
2999 cpu_fprintf(f
, "\nStatistics:\n");
3000 cpu_fprintf(f
, "TB flush count %d\n", tb_flush_count
);
3001 cpu_fprintf(f
, "TB invalidate count %d\n", tb_phys_invalidate_count
);
3002 cpu_fprintf(f
, "TLB flush count %d\n", tlb_flush_count
);
3003 tcg_dump_info(f
, cpu_fprintf
);
3006 #if !defined(CONFIG_USER_ONLY)
3008 #define MMUSUFFIX _cmmu
3009 #define GETPC() NULL
3010 #define env cpu_single_env
3011 #define SOFTMMU_CODE_ACCESS
3014 #include "softmmu_template.h"
3017 #include "softmmu_template.h"
3020 #include "softmmu_template.h"
3023 #include "softmmu_template.h"