/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */
#define WIN32_LEAN_AND_MEAN

#include <sys/types.h>

#include "qemu-common.h"

#if defined(CONFIG_USER_ONLY)
#include "qemu.h"
#endif

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation. */
#undef DEBUG_TB_CHECK
#endif

/* threshold to flush the translated code buffer */
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - code_gen_max_block_size())

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START 0x00000000
#define MMAP_AREA_END   0xa8000000

#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_SPARC)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#elif defined(TARGET_ALPHA)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#define TARGET_VIRT_ADDR_SPACE_BITS 42
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_I386) && !defined(USE_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#else
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif

TranslationBlock *tbs;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];

/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_prologue[1024] __attribute__((aligned (32)));
uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
uint8_t *code_gen_ptr;

ram_addr_t phys_ram_size;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;
static ram_addr_t phys_ram_alloc_offset = 0;

/* current CPU in the current thread. It is only valid inside cpu_exec() */
CPUState *cpu_single_env;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low 12 bits */
    ram_addr_t phys_offset;
} PhysPageDesc;

#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
/* XXX: this is a temporary hack for the alpha target.
 * In the future, this is to be replaced by a multi-level table
 * to actually be able to handle the complete 64 bit address space.
 */
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
#else
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
#endif

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)
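
/* Illustrative sketch (not part of the original file): how a target page
   number splits across the two-level page table declared below.  This
   mirrors page_find() further down; the helper name is hypothetical. */
#if 0
static PageDesc *example_lookup(target_ulong addr)
{
    target_ulong index = addr >> TARGET_PAGE_BITS;   /* page number */
    PageDesc *level2 = l1_map[index >> L2_BITS];     /* L1 slot -> L2 table */
    if (!level2)
        return NULL;                                 /* never touched */
    return level2 + (index & (L2_SIZE - 1));         /* entry in the L2 table */
}
#endif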
static void io_mem_init(void);

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
PhysPageDesc **l1_phys_map;

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb;
#if defined(CONFIG_SOFTMMU)
static int io_mem_watch;
#endif

char *logfilename = "/tmp/qemu.log";
static int log_append = 0;

static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
    CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
    void *opaque[TARGET_PAGE_SIZE][2][4];
} subpage_t;
#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end;

    start = (unsigned long)addr;
    start &= ~(qemu_real_host_page_size - 1);

    end = (unsigned long)addr + size;
    end += qemu_real_host_page_size - 1;
    end &= ~(qemu_real_host_page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    map_exec(code_gen_buffer, sizeof(code_gen_buffer));
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));

    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));

#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
    {
        long long startaddr, endaddr;
        FILE *f;
        int n;

        f = fopen("/proc/self/maps", "r");
        if (f) {
            do {
                n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
                if (n == 2) {
                    startaddr = MIN(startaddr,
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    endaddr = MIN(endaddr,
                                  (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    page_set_flags(TARGET_PAGE_ALIGN(startaddr),
                                   TARGET_PAGE_ALIGN(endaddr),
                                   PAGE_RESERVED);
                }
            } while (!feof(f));
            fclose(f);
        }
    }
#endif
}
static inline PageDesc *page_find_alloc(target_ulong index)
{
    PageDesc **lp, *p;

    lp = &l1_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(target_ulong index)
{
    PageDesc *p;

    p = l1_map[index >> L2_BITS];
    if (!p)
        return NULL;
    return p + (index & (L2_SIZE - 1));
}

static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;
    PhysPageDesc *pd;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    pd = *lp;
    if (!pd) {
        int i;
        /* allocate if not found */
        if (!alloc)
            return NULL;
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        *lp = pd;
        for (i = 0; i < L2_SIZE; i++)
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
    }
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}
#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#endif

void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

    tbs = qemu_malloc(CODE_GEN_MAX_BLOCKS * sizeof(TranslationBlock));
    code_gen_ptr = code_gen_buffer;

    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = (CPUState **)&(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->nb_watchpoints = 0;
    *penv = env;
}
static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > CODE_GEN_BUFFER_SIZE)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}
#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;

    address &= TARGET_PAGE_MASK;
    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif
/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}
/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}
static inline void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    target_phys_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
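
/* Illustrative sketch (not part of the original file): marking a byte range
   of a page in its self-modifying-code bitmap, as build_page_bitmap() does
   just below.  One bit per page byte is assumed. */
#if 0
uint8_t bitmap[TARGET_PAGE_SIZE / 8];
memset(bitmap, 0, sizeof(bitmap));
set_bits(bitmap, 0x10, 0x20);   /* mark 0x20 bytes starting at offset 0x10 */
#endif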
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
    if (!p->code_bitmap)
        return;
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
#ifdef TARGET_HAS_PRECISE_SMC

static void tb_gen_code(CPUState *env,
                        target_ulong pc, target_ulong cs_base, int flags,
                        int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    int code_gen_size;
    target_ulong phys_pc, phys_page2, virt_page2;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
}
#endif
/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
                                   int is_cpu_write_access)
{
    int n, current_tb_modified, current_tb_not_found, current_flags;
    CPUState *env = cpu_single_env;
    PageDesc *p;
    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
    target_ulong tb_start, tb_end;
    target_ulong current_pc, current_cs_base;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    current_tb_not_found = is_cpu_write_access;
    current_tb_modified = 0;
    current_tb = NULL; /* avoid warning */
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_write_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_write_pc);
                }
            }
            if (current_tb == tb &&
                !(current_tb->cflags & CF_SINGLE_INSN)) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */
                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_write_pc, NULL);
#if defined(TARGET_I386)
                current_flags = env->hflags;
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
                current_cs_base = (target_ulong)env->segs[R_CS].base;
                current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = env->current_tb;
            env->current_tb = NULL;

            tb_phys_invalidate(tb, -1);

            env->current_tb = saved_tb;
            if (env->interrupt_request && env->current_tb)
                cpu_interrupt(env, env->interrupt_request);
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}
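
/* Illustrative sketch (not part of the original file): a device model or DMA
   path that rewrites guest RAM has to drop any TBs translated from the
   touched bytes.  The address and length are hypothetical; start and end
   must lie in the same physical page. */
#if 0
target_phys_addr_t dma_start = 0x1000;   /* hypothetical first byte written */
tb_invalidate_phys_page_range(dma_start, dma_start + 4, 0);
#endif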
/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;

#if 0
    if (loglevel) {
        fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                cpu_single_env->mem_write_vaddr, len,
                cpu_single_env->eip,
                cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_phys_addr_t addr,
                                    unsigned long pc, void *puc)
{
    int n, current_flags, current_tb_modified;
    target_ulong current_pc, current_cs_base;
    PageDesc *p;
    TranslationBlock *tb, *current_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *env = cpu_single_env;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
    current_tb_modified = 0;
    current_tb = NULL;
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            !(current_tb->cflags & CF_SINGLE_INSN)) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */
            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
#if defined(TARGET_I386)
            current_flags = env->hflags;
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
            current_cs_base = (target_ulong)env->segs[R_CS].base;
            current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif
/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, target_ulong page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
            page_get_flags(addr);
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}
/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
        (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
}
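
/* Illustrative sketch (not part of the original file): the usual sequence a
   translator front end follows to create and register a block, assuming
   pc/cs_base/flags/phys_pc/phys_page2 describe the code being translated.
   It mirrors tb_gen_code() above. */
#if 0
TranslationBlock *tb = tb_alloc(pc);     /* may be NULL right after a flush */
tb->tc_ptr = code_gen_ptr;
tb->cs_base = cs_base;
tb->flags = flags;
cpu_gen_code(env, tb, &code_gen_size);   /* emit host code for the block */
code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size +
                         CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
tb_link_phys(tb, phys_pc, phys_page2);   /* phys_page2 == -1 if single page */
#endif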
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}
static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}
#if defined(TARGET_HAS_ICE)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    target_ulong pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif
/* Add a watchpoint. */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr)
{
    int i;

    for (i = 0; i < env->nb_watchpoints; i++) {
        if (addr == env->watchpoint[i].vaddr)
            return 0;
    }
    if (env->nb_watchpoints >= MAX_WATCHPOINTS)
        return -1;

    i = env->nb_watchpoints++;
    env->watchpoint[i].vaddr = addr;
    tlb_flush_page(env, addr);
    /* FIXME: This flush is needed because of the hack to make memory ops
       terminate the TB. It can be removed once the proper IO trap and
       re-execute bits are in. */
    tb_flush(env);
    return 0;
}

/* Remove a watchpoint. */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
{
    int i;

    for (i = 0; i < env->nb_watchpoints; i++) {
        if (addr == env->watchpoint[i].vaddr) {
            env->nb_watchpoints--;
            env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
            tlb_flush_page(env, addr);
            return 0;
        }
    }
    return -1;
}

/* Remove all watchpoints. */
void cpu_watchpoint_remove_all(CPUState *env) {
    int i;

    for (i = 0; i < env->nb_watchpoints; i++) {
        tlb_flush_page(env, env->watchpoint[i].vaddr);
    }
    env->nb_watchpoints = 0;
}
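
/* Illustrative sketch (not part of the original file): a debugger stub
   setting and later clearing a data watchpoint on a guest virtual address.
   The address value is hypothetical. */
#if 0
cpu_watchpoint_insert(env, 0x00400100);   /* returns 0 on success */
/* ... run; the CPU loop reports EXCP_DEBUG when the address is written ... */
cpu_watchpoint_remove(env, 0x00400100);
#endif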
/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
   breakpoint is reached */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            return 0;
    }

    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
        return -1;
    env->breakpoints[env->nb_breakpoints++] = pc;

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* remove all breakpoints */
void cpu_breakpoint_remove_all(CPUState *env) {
#if defined(TARGET_HAS_ICE)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        breakpoint_invalidate(env, env->breakpoints[i]);
    }
    env->nb_breakpoints = 0;
#endif
}

/* remove a breakpoint */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            goto found;
    }
    return -1;
 found:
    env->nb_breakpoints--;
    if (i < env->nb_breakpoints)
        env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
        tb_flush(env);
    }
#endif
}
/* enable or disable low level logs */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
    }
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    cpu_set_log(loglevel);
}
/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    env->interrupt_request |= mask;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    tb = env->current_tb;
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
        resetlock(&interrupt_lock);
    }
}

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}
CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops "
      "before eflags optimization and "
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
    { 0, NULL, NULL },
};
static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if(cmp1(p,p1-p,"all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        found:
            mask |= item->mask;
        }
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}
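
/* Illustrative sketch (not part of the original file): how a "-d"-style
   option string is typically turned into log flags using the two helpers
   above. */
#if 0
int mask = cpu_str_to_log_mask("in_asm,exec");
if (mask == 0)
    fprintf(stderr, "unknown log item\n");
else
    cpu_set_log(mask);
#endif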
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (logfile) {
        fprintf(logfile, "qemu: fatal: ");
        vfprintf(logfile, fmt, ap2);
        fprintf(logfile, "\n");
#ifdef TARGET_I386
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        cpu_dump_state(env, logfile, fprintf, 0);
#endif
    }
    va_end(ap2);
    va_end(ap);
    abort();
}
CPUState *cpu_copy(CPUState *env)
{
    CPUState *new_env = cpu_init(env->cpu_model_str);
    /* preserve chaining and index */
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
    memcpy(new_env, env, sizeof(CPUState));
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;
    return new_env;
}
#if !defined(CONFIG_USER_ONLY)

static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page. */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}
/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_table[0][i].addr_read = -1;
        env->tlb_table[0][i].addr_write = -1;
        env->tlb_table[0][i].addr_code = -1;
        env->tlb_table[1][i].addr_read = -1;
        env->tlb_table[1][i].addr_write = -1;
        env->tlb_table[1][i].addr_code = -1;
#if (NB_MMU_MODES >= 3)
        env->tlb_table[2][i].addr_read = -1;
        env->tlb_table[2][i].addr_write = -1;
        env->tlb_table[2][i].addr_code = -1;
#if (NB_MMU_MODES == 4)
        env->tlb_table[3][i].addr_read = -1;
        env->tlb_table[3][i].addr_write = -1;
        env->tlb_table[3][i].addr_code = -1;
#endif
#endif
    }

    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

#if !defined(CONFIG_SOFTMMU)
    munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush(env, flush_global);
    }
#endif
    tlb_flush_count++;
}
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        tlb_entry->addr_read = -1;
        tlb_entry->addr_write = -1;
        tlb_entry->addr_code = -1;
    }
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_table[0][i], addr);
    tlb_flush_entry(&env->tlb_table[1][i], addr);
#if (NB_MMU_MODES >= 3)
    tlb_flush_entry(&env->tlb_table[2][i], addr);
#if (NB_MMU_MODES == 4)
    tlb_flush_entry(&env->tlb_table[3][i], addr);
#endif
#endif

    tlb_flush_jmp_cache(env, addr);

#if !defined(CONFIG_SOFTMMU)
    if (addr < MMAP_AREA_END)
        munmap((void *)addr, TARGET_PAGE_SIZE);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush_page(env, addr);
    }
#endif
}
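
/* Illustrative sketch (not part of the original file): after changing the
   mapping or protection of a single guest page, only that page's TLB entries
   and jump-cache lines need to be dropped; a full tlb_flush() is the heavier
   fallback.  The address is hypothetical. */
#if 0
tlb_flush_page(env, 0x00400000);   /* one page changed */
tlb_flush(env, 1);                 /* everything changed (e.g. CR3 reload) */
#endif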
/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
}
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
        }
    }
}
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i, j, mask, len;
    uint8_t *p;
    target_ulong addr;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    len = length >> TARGET_PAGE_BITS;
#ifdef USE_KQEMU
    /* XXX: should not depend on cpu context */
    env = first_cpu;
    if (env->kqemu_enabled) {
        addr = start;
        for(i = 0; i < len; i++) {
            kqemu_set_notdirty(env, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }
#endif
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    for(i = 0; i < len; i++)
        p[i] &= mask;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = start + (unsigned long)phys_ram_base;
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
#if (NB_MMU_MODES >= 3)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
#if (NB_MMU_MODES == 4)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
#endif
#endif
    }

#if !defined(CONFIG_SOFTMMU)
    /* XXX: this is expensive */
    for(i = 0; i < L1_SIZE; i++) {
        addr = i << (TARGET_PAGE_BITS + L2_BITS);
        for(j = 0; j < L2_SIZE; j++) {
            if (p->valid_tag == virt_valid_tag &&
                p->phys_addr >= start && p->phys_addr < end &&
                (p->prot & PROT_WRITE)) {
                if (addr < MMAP_AREA_END) {
                    mprotect((void *)addr, TARGET_PAGE_SIZE,
                             p->prot & ~PROT_WRITE);
                }
            }
            addr += TARGET_PAGE_SIZE;
        }
    }
#endif
}
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
            tlb_entry->addend - (unsigned long)phys_ram_base;
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
        }
    }
}
/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
{
    int i;

    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[0][i]);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[1][i]);
#if (NB_MMU_MODES >= 3)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[2][i]);
#if (NB_MMU_MODES == 4)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[3][i]);
#endif
#endif
}
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
                                  unsigned long start)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if (addr == start) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
        }
    }
}
/* update the TLB corresponding to virtual page vaddr and phys addr
   addr so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
    int i;

    addr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_set_dirty1(&env->tlb_table[0][i], addr);
    tlb_set_dirty1(&env->tlb_table[1][i], addr);
#if (NB_MMU_MODES >= 3)
    tlb_set_dirty1(&env->tlb_table[2][i], addr);
#if (NB_MMU_MODES == 4)
    tlb_set_dirty1(&env->tlb_table[3][i], addr);
#endif
#endif
}
/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_phys_addr_t addend;
    int ret, i;
    CPUTLBEntry *te;

    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
#endif

    ret = 0;
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
        /* IO memory case */
        address = vaddr | pd;
        addend = paddr;
    } else {
        /* standard memory */
        address = vaddr;
        addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines. */
    for (i = 0; i < env->nb_watchpoints; i++) {
        if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
            if (address & ~TARGET_PAGE_MASK) {
                env->watchpoint[i].addend = 0;
                address = vaddr | io_mem_watch;
            } else {
                env->watchpoint[i].addend = pd - paddr +
                    (unsigned long) phys_ram_base;
                /* TODO: Figure out how to make read watchpoints coexist
                   with code. */
                pd = (pd & TARGET_PAGE_MASK) | io_mem_watch | IO_MEM_ROMD;
            }
        }
    }

    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    te = &env->tlb_table[mmu_idx][index];
    te->addend = addend;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    } else {
        te->addr_read = -1;
    }
    if (prot & PAGE_EXEC) {
        te->addr_code = address;
    } else {
        te->addr_code = -1;
    }
    if (prot & PAGE_WRITE) {
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
            (pd & IO_MEM_ROMD)) {
            /* write access calls the I/O callback */
            te->addr_write = vaddr |
                (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                   !cpu_physical_memory_is_dirty(pd)) {
            te->addr_write = vaddr | IO_MEM_NOTDIRTY;
        } else {
            te->addr_write = address;
        }
    } else {
        te->addr_write = -1;
    }

#if !defined(CONFIG_SOFTMMU)
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
        /* IO access: no mapping is done as it will be handled by the
           soft MMU */
        if (!(env->hflags & HF_SOFTMMU_MASK))
            ret = 2;
    } else {
        void *map_addr;

        if (vaddr >= MMAP_AREA_END) {
            ret = 2;
        } else {
            if (prot & PROT_WRITE) {
                if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
#if defined(TARGET_HAS_SMC) || 1
                    /* (code-presence check elided in this listing) */
#endif
                    ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                     !cpu_physical_memory_is_dirty(pd))) {
                    /* ROM: we do as if code was inside */
                    /* if code is present, we only map as read only and save the
                       original mapping */
                    VirtPageDesc *vp;

                    vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
                    vp->valid_tag = virt_valid_tag;
                    prot &= ~PAGE_WRITE;
                }
            }
            map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
                            MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
            if (map_addr == MAP_FAILED) {
                cpu_abort(env, "mmap failed when mapped physical address 0x%08x to virtual address 0x%08x\n",
                          paddr, vaddr);
            }
        }
    }
#endif
    return ret;
}
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    VirtPageDesc *vp;

#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x\n", addr);
#endif
    addr &= TARGET_PAGE_MASK;

    /* if it is not mapped, no need to worry here */
    if (addr >= MMAP_AREA_END)
        return 0;
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (!vp)
        return 0;
    /* NOTE: in this case, validate_tag is _not_ tested as it
       validates only the code TLB */
    if (vp->valid_tag != virt_valid_tag)
        return 0;
    if (!(vp->prot & PAGE_WRITE))
        return 0;
#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
           addr, vp->phys_addr, vp->prot);
#endif
    if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
        cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
                  (unsigned long)addr, vp->prot);
    /* set the dirty bit */
    phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
    /* flush the code inside */
    tb_invalidate_phys_page(vp->phys_addr, pc, puc);
    return 1;
#else
    return 0;
#endif
}
void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
{
    return 0;
}
/* dump memory mappings */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    start = -1;
    prot = 0;
    for(i = 0; i <= L1_SIZE; i++) {
        p = (i < L1_SIZE) ? l1_map[i] : NULL;
        for(j = 0; j < L2_SIZE; j++) {
            prot1 = p ? p[j].flags : 0;
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                start = prot1 ? end : -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}
int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}
/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is positioned automatically
   depending on PAGE_WRITE */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    PageDesc *p;
    target_ulong addr;

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    spin_lock(&tb_lock);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* if the write protection is set, then we invalidate the code
           inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
    spin_unlock(&tb_lock);
}
int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
    start = start & TARGET_PAGE_MASK;

    if (end < start)
        /* we've wrapped around */
        return -1;
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p)
            return -1;
        if (!(p->flags & PAGE_VALID))
            return -1;

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
            return -1;
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG))
                return -1;
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL))
                    return -1;
            }
        }
    }
    return 0;
}
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    target_ulong host_start, host_end, addr;

    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1)
        return 0;
    host_end = host_start + qemu_host_page_size;
    p = p1;
    prot = 0;
    for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)g2h(host_start), qemu_host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            return 1;
        }
    }
    return 0;
}
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory);
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory);

#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
                      need_subpage)                                     \
    do {                                                                \
        if (addr > start_addr)                                          \
            start_addr2 = 0;                                            \
        else {                                                          \
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
            if (start_addr2 > 0)                                        \
                need_subpage = 1;                                       \
        }                                                               \
                                                                        \
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
        else {                                                          \
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
                need_subpage = 1;                                       \
        }                                                               \
    } while (0)
/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                  ram_addr_t size,
                                  ram_addr_t phys_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;
    ram_addr_t orig_size = size;
    void *subpage;

    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            ram_addr_t orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                          need_subpage);
            if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory);
                } else {
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                                            >> IO_MEM_SHIFT];
                }
                subpage_register(subpage, start_addr2, end_addr2, phys_offset);
            } else {
                p->phys_offset = phys_offset;
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                    (phys_offset & IO_MEM_ROMD))
                    phys_offset += TARGET_PAGE_SIZE;
            }
        } else {
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                (phys_offset & IO_MEM_ROMD))
                phys_offset += TARGET_PAGE_SIZE;
            else {
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);

                if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, IO_MEM_UNASSIGNED);
                    subpage_register(subpage, start_addr2, end_addr2,
                                     phys_offset);
                }
            }
        }
    }

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}
/* XXX: temporary until new memory mapping API */
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
{
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return IO_MEM_UNASSIGNED;
    return p->phys_offset;
}
/* XXX: better than nothing */
ram_addr_t qemu_ram_alloc(ram_addr_t size)
{
    ram_addr_t addr;

    if ((phys_ram_alloc_offset + size) > phys_ram_size) {
        fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 "\n",
                (uint64_t)size, (uint64_t)phys_ram_size);
        abort();
    }
    addr = phys_ram_alloc_offset;
    phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
    return addr;
}

void qemu_ram_free(ram_addr_t addr)
{
}
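
/* Illustrative sketch (not part of the original file): how a board model
   typically pairs qemu_ram_alloc() with cpu_register_physical_memory().
   The 1 MB size and the 0xa0000000 guest base are hypothetical. */
#if 0
ram_addr_t off = qemu_ram_alloc(0x100000);          /* backing RAM offset   */
cpu_register_physical_memory(0xa0000000, 0x100000,  /* map it at a guest    */
                             off | IO_MEM_RAM);     /* physical address     */
#endif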
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#ifdef TARGET_SPARC
    do_unassigned_access(addr, 0, 0, 0);
#elif TARGET_CRIS
    do_unassigned_access(addr, 0, 0, 0);
#endif
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#ifdef TARGET_SPARC
    do_unassigned_access(addr, 1, 0, 0);
#elif TARGET_CRIS
    do_unassigned_access(addr, 1, 0, 0);
#endif
}

static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readb,
    unassigned_mem_readb,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writeb,
    unassigned_mem_writeb,
};
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;

    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stb_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;

    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stw_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;

    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stl_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}
static CPUReadMemoryFunc *error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};
#if defined(CONFIG_SOFTMMU)
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    return ldub_phys(addr);
}

static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    return lduw_phys(addr);
}

static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    return ldl_phys(addr);
}

/* Generate a debug exception if a watchpoint has been hit.
   Returns the real physical address of the access.  addr will be a host
   address in case of a RAM location.  */
static target_ulong check_watchpoint(target_phys_addr_t addr)
{
    CPUState *env = cpu_single_env;
    target_ulong watch;
    target_ulong retaddr;
    int i;

    retaddr = addr;
    for (i = 0; i < env->nb_watchpoints; i++) {
        watch = env->watchpoint[i].vaddr;
        if (((env->mem_write_vaddr ^ watch) & TARGET_PAGE_MASK) == 0) {
            retaddr = addr - env->watchpoint[i].addend;
            if (((addr ^ watch) & ~TARGET_PAGE_MASK) == 0) {
                cpu_single_env->watchpoint_hit = i + 1;
                cpu_interrupt(cpu_single_env, CPU_INTERRUPT_DEBUG);
                break;
            }
        }
    }
    return retaddr;
}

static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    addr = check_watchpoint(addr);
    stb_phys(addr, val);
}

static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    addr = check_watchpoint(addr);
    stw_phys(addr, val);
}

static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    addr = check_watchpoint(addr);
    stl_phys(addr, val);
}

static CPUReadMemoryFunc *watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};

static CPUWriteMemoryFunc *watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};
#endif
#endif

static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
                                        unsigned int len)
{
    uint32_t ret;
    unsigned int idx;

    idx = SUBPAGE_IDX(addr - mmio->base);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif
    ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len], addr);

    return ret;
}

static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
                                     uint32_t value, unsigned int len)
{
    unsigned int idx;

    idx = SUBPAGE_IDX(addr - mmio->base);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
           mmio, len, addr, idx, value);
#endif
    (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len], addr, value);
}

static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif
    return subpage_readlen(opaque, addr, 0);
}

static void subpage_writeb (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 0);
}

static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif
    return subpage_readlen(opaque, addr, 1);
}

static void subpage_writew (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 1);
}

static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif
    return subpage_readlen(opaque, addr, 2);
}

static void subpage_writel (void *opaque,
                            target_phys_addr_t addr, uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 2);
}

static CPUReadMemoryFunc *subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};

static CPUWriteMemoryFunc *subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             int memory)
{
    int idx, eidx;
    unsigned int i;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    memory >>= IO_MEM_SHIFT;
    for (; idx <= eidx; idx++) {
        for (i = 0; i < 4; i++) {
            if (io_mem_read[memory][i]) {
                mmio->mem_read[idx][i] = &io_mem_read[memory][i];
                mmio->opaque[idx][0][i] = io_mem_opaque[memory];
            }
            if (io_mem_write[memory][i]) {
                mmio->mem_write[idx][i] = &io_mem_write[memory][i];
                mmio->opaque[idx][1][i] = io_mem_opaque[memory];
            }
        }
    }

    return 0;
}

static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = qemu_mallocz(sizeof(subpage_t));
    if (mmio != NULL) {
        mmio->base = base;
        subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
#if defined(DEBUG_SUBPAGE)
        printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
               mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
        *phys = subpage_memory | IO_MEM_SUBPAGE;
        subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
    }

    return mmio;
}

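/* Note: a subpage is used when cpu_register_physical_memory() is asked to
   map a region smaller than TARGET_PAGE_SIZE.  subpage_init() registers the
   page as its own I/O slot, tags the phys_offset with IO_MEM_SUBPAGE and
   seeds the whole page with the original handlers; the caller then overlays
   the finer-grained ranges with subpage_register(). */
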
static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
    io_mem_nb = 5;

#if defined(CONFIG_SOFTMMU)
    io_mem_watch = cpu_register_io_memory(-1, watch_mem_read,
                                          watch_mem_write, NULL);
#endif
    /* alloc dirty bits array */
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
}
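
/* Note: the dirty bitmap is initialised to all ones, i.e. every page starts
   out fully dirty; the individual consumers of the flags (for instance the
   code-invalidation logic above and the display code) clear the bits they
   track and rely on the notdirty handlers to set them again on guest
   writes. */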

/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). Functions can be omitted with a NULL function pointer. The
   registered functions may be modified dynamically later.
   If io_index is non-zero, the corresponding io zone is
   modified. If it is zero, a new io zone is allocated. The return
   value can be used with cpu_register_physical_memory(). -1 is
   returned on error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i, subwidth = 0;

    if (io_index <= 0) {
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for (i = 0; i < 3; i++) {
        if (!mem_read[i] || !mem_write[i])
            subwidth = IO_MEM_SUBWIDTH;
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return (io_index << IO_MEM_SHIFT) | subwidth;
}
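
/* Illustrative sketch of the registration pattern described above.  The
   device names (mydev_*, MYDEV_BASE, mydev_state) are hypothetical and not
   defined anywhere in this file:

   static CPUReadMemoryFunc *mydev_read[3] = {
       mydev_readb, mydev_readw, mydev_readl,
   };
   static CPUWriteMemoryFunc *mydev_write[3] = {
       mydev_writeb, mydev_writew, mydev_writel,
   };

   static void mydev_map(void)
   {
       int io;
       io = cpu_register_io_memory(0, mydev_read, mydev_write, mydev_state);
       cpu_register_physical_memory(MYDEV_BASE, TARGET_PAGE_SIZE, io);
   }
*/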

CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
{
    return io_mem_write[io_index >> IO_MEM_SHIFT];
}

CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
{
    return io_mem_read[io_index >> IO_MEM_SHIFT];
}

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
#else

void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
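
/* Illustrative sketch: the usual entry points are the
   cpu_physical_memory_read() and cpu_physical_memory_write() wrappers,
   which expand to this function.  The address and buffer below are made-up
   example values:

   uint8_t sector[512];
   memset(sector, 0, sizeof(sector));
   cpu_physical_memory_write(0x7c00, sector, sizeof(sector));
   cpu_physical_memory_read(0x7c00, sector, sizeof(sector));
*/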

/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = phys_ram_base + addr1;
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}

/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}

uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}

/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stl_p(ptr, val);
    }
}

void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}

/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}

void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}

#endif

/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
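
/* Illustrative sketch: this is the path the gdb stub takes to read guest
   virtual memory.  For example, fetching 16 bytes at a guest virtual
   address vaddr (an arbitrary example variable) for some CPUState *env:

   uint8_t insn[16];
   if (cpu_memory_rw_debug(env, vaddr, insn, sizeof(insn), 0) < 0) {
       ... page not mapped, report an error to the debugger ...
   }
*/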

void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for (i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "TB count %d\n", nb_tbs);
    cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}
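
/* Illustrative sketch: the function only needs a FILE pointer and a matching
   fprintf-style callback, e.g. for a quick dump to stderr:

   dump_exec_info(stderr, fprintf);
*/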

#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"
#define SHIFT 1
#include "softmmu_template.h"
#define SHIFT 2
#include "softmmu_template.h"
#define SHIFT 3
#include "softmmu_template.h"
#undef env

#endif