/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <sys/types.h>
//#define DEBUG_TB_INVALIDATE

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK
/* threshold to flush the translated code buffer */
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START 0x00000000
#define MMAP_AREA_END   0xa8000000
TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
TranslationBlock *tb_hash[CODE_GEN_HASH_SIZE];
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
uint8_t *code_gen_ptr;

int phys_ram_size;
int phys_ram_fd;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;
typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;
typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low 12 bits */
    unsigned long phys_offset;
} PhysPageDesc;
typedef struct VirtPageDesc {
    /* physical address of code page. It is valid only if 'valid_tag'
       matches 'virt_valid_tag' */
    target_ulong phys_addr;
    unsigned int valid_tag;
#if !defined(CONFIG_SOFTMMU)
    /* original page access rights. It is valid only if 'valid_tag'
       matches 'virt_valid_tag' */
    unsigned int prot;
#endif
} VirtPageDesc;
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)
static void io_mem_init(void);
unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;
/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
PhysPageDesc **l1_phys_map;
#if !defined(CONFIG_USER_ONLY)
static VirtPageDesc *l1_virt_map[L1_SIZE];
static unsigned int virt_valid_tag;
#endif
/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb;
/* log support */
char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;
/* statistics */
static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;
        DWORD old_protect;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;

        VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
                       PAGE_EXECUTE_READWRITE, &old_protect);
    }
#else
    qemu_real_host_page_size = getpagesize();
    {
        unsigned long start, end;

        start = (unsigned long)code_gen_buffer;
        start &= ~(qemu_real_host_page_size - 1);

        end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
        end += qemu_real_host_page_size - 1;
        end &= ~(qemu_real_host_page_size - 1);

        mprotect((void *)start, end - start,
                 PROT_READ | PROT_WRITE | PROT_EXEC);
    }
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
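    /* Example (added comment, not in the original source): with a 4 KiB
       host page, qemu_host_page_size = 0x1000, so the loop above yields
       qemu_host_page_bits = 12 and qemu_host_page_mask = 0xfffff000;
       'addr & qemu_host_page_mask' then rounds an address down to the
       start of its host page. */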
#if !defined(CONFIG_USER_ONLY)
    virt_valid_tag = 1;
#endif
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(PhysPageDesc *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(PhysPageDesc *));
}
static inline PageDesc *page_find_alloc(unsigned int index)
{
    PageDesc **lp, *p;

    lp = &l1_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}
static inline PageDesc *page_find(unsigned int index)
{
    PageDesc *p;

    p = l1_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}
static inline PhysPageDesc *phys_page_find_alloc(unsigned int index)
{
    PhysPageDesc **lp, *p;

    lp = &l1_phys_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PhysPageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}
static inline PhysPageDesc *phys_page_find(unsigned int index)
{
    PhysPageDesc *p;

    p = l1_phys_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}
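/* Illustration (added comment, not in the original): the page tables above
   are two-level radix trees over page indexes. For a page index
   'index = addr >> TARGET_PAGE_BITS', the top L1_BITS select a second-level
   table and the low L2_BITS select the entry within it:

       PageDesc *pd = l1_map[index >> L2_BITS];    // first-level lookup
       entry = &pd[index & (L2_SIZE - 1)];         // second-level lookup

   e.g. assuming 4 KiB target pages and L2_BITS = 10, a 32-bit address is
   split 10/10/12 between L1 index, L2 index and page offset. */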
#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(CPUState *env, target_ulong addr);
static void tlb_unprotect_code_phys(CPUState *env, unsigned long phys_addr, target_ulong vaddr);
static inline VirtPageDesc *virt_page_find_alloc(unsigned int index)
{
    VirtPageDesc **lp, *p;

    /* XXX: should not truncate for 64 bit addresses */
#if TARGET_LONG_BITS > 32
    index &= (L1_SIZE - 1);
#endif
    lp = &l1_virt_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(VirtPageDesc) * L2_SIZE);
        memset(p, 0, sizeof(VirtPageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}
static inline VirtPageDesc *virt_page_find(unsigned int index)
{
    VirtPageDesc *p;

    p = l1_virt_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}
static void virt_page_flush(void)
{
    int i, j;
    VirtPageDesc *p;

    virt_valid_tag++;

    /* when the tag wraps around, we must invalidate every page
       descriptor explicitly */
    if (virt_valid_tag == 0) {
        virt_valid_tag = 1;
        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                for(j = 0; j < L2_SIZE; j++)
                    p[j].valid_tag = 0;
            }
        }
    }
}
#else
static void virt_page_flush(void)
{
}
#endif
void cpu_exec_init(void)
{
    if (!code_gen_ptr) {
        code_gen_ptr = code_gen_buffer;
        page_init();
        io_mem_init();
    }
}
static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}
/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env)
{
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
           code_gen_ptr - code_gen_buffer,
           nb_tbs,
           nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
#endif
    nb_tbs = 0;
    memset (tb_hash, 0, CODE_GEN_HASH_SIZE * sizeof (void *));
    virt_page_flush();

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}
#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(unsigned long address)
{
    TranslationBlock *tb;
    int i;

    address &= TARGET_PAGE_MASK;
    for(i = 0; i < CODE_GEN_HASH_SIZE; i++) {
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, tb->pc, tb->size);
            }
        }
    }
}
/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0; i < CODE_GEN_HASH_SIZE; i++) {
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}
void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif
/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}
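/* Note on the encoding (added for clarity, not in the original): entries in
   the jmp lists are tagged pointers. The low 2 bits of each TranslationBlock
   pointer store which jump slot (0 or 1) of the pointed-to TB continues the
   list; the value 2 marks the list head stored in 'jmp_first'. Hence the
   recurring pattern:

       n1  = (long)tb1 & 3;                          // slot number, or 2
       tb1 = (TranslationBlock *)((long)tb1 & ~3);   // real pointer

   This relies on TranslationBlock structures being at least 4-byte
   aligned so the low bits are free. */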
/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}
static inline void tb_invalidate(TranslationBlock *tb)
{
    unsigned int h, n1;
    TranslationBlock *tb1, *tb2, **ptb;

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_hash_func(tb->pc);
    ptb = &tb_hash[h];
    for(;;) {
        tb1 = *ptb;
        /* NOTE: the TB is not necessarily linked in the hash. It
           indicates that it is not currently used */
        if (tb1 == NULL)
            return;
        if (tb1 == tb) {
            *ptb = tb1->hash_next;
            break;
        }
        ptb = &tb1->hash_next;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
}
static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
{
    PageDesc *p;
    unsigned int h;
    target_ulong phys_pc;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidate(tb);
    tb_phys_invalidate_count++;
}
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
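/* Worked example (added comment): set_bits(tab, 10, 7) must set bits
   10..16. The first partial byte gets mask 0xff << 2 = 0xfc (bits 10..15),
   there are no full bytes in between, and the trailing partial byte gets
   ~(0xff << (17 & 7)) = 0x01 (bit 16). Bit k of the page bitmap built
   below therefore means "byte k of this page is covered by translated
   code". */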
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
    if (!p->code_bitmap)
        return;
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
#ifdef TARGET_HAS_PRECISE_SMC

static void tb_gen_code(CPUState *env,
                        target_ulong pc, target_ulong cs_base, int flags,
                        int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc((unsigned long)pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc((unsigned long)pc);
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
}
#endif
/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
                                   int is_cpu_write_access)
{
    int n, current_tb_modified, current_tb_not_found, current_flags;
    CPUState *env = cpu_single_env;
    PageDesc *p;
    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
    target_ulong tb_start, tb_end;
    target_ulong current_pc, current_cs_base;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    current_tb_not_found = is_cpu_write_access;
    current_tb_modified = 0;
    current_tb = NULL; /* avoid warning */
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_write_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_write_pc);
                }
            }
            if (current_tb == tb &&
                !(current_tb->cflags & CF_SINGLE_INSN)) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */
                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_write_pc, NULL);
#if defined(TARGET_I386)
                current_flags = env->hflags;
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
                current_cs_base = (target_ulong)env->segs[R_CS].base;
                current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            saved_tb = env->current_tb;
            env->current_tb = NULL;
            tb_phys_invalidate(tb, -1);
            env->current_tb = saved_tb;
            if (env->interrupt_request && env->current_tb)
                cpu_interrupt(env, env->interrupt_request);
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}
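/* How the precise-SMC path above recovers (explanatory comment, added): if
   the write came from inside the TB being invalidated, cpu_restore_state()
   rewinds env to the faulting instruction, tb_gen_code() retranslates a
   block containing just that one instruction (CF_SINGLE_INSN), and
   cpu_resume_from_signal() restarts execution there, so the store can
   complete without re-entering the stale translated code. */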
/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
{
    PageDesc *p;
    int offset, b;

#if 0
    if (loglevel) {
        fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                cpu_single_env->mem_write_vaddr, len,
                cpu_single_env->eip,
                cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
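/* Fast-path sketch (added comment): once a page has taken
   SMC_BITMAP_USE_THRESHOLD code writes, build_page_bitmap() records which
   bytes hold translated code. A write of 'len' bytes at 'start' then only
   pays for full invalidation when

       b = p->code_bitmap[offset >> 3] >> (offset & 7);
       if (b & ((1 << len) - 1)) ...   // some written byte is code

   i.e. when the write actually overlaps translated code. This relies on
   the stated precondition that len <= 8 and start is a multiple of len,
   so the tested bits never span two bitmap bytes. */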
#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_ulong addr,
                                    unsigned long pc, void *puc)
{
    int n, current_flags, current_tb_modified;
    target_ulong current_pc, current_cs_base;
    PageDesc *p;
    TranslationBlock *tb, *current_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *env = cpu_single_env;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
    current_tb_modified = 0;
    current_tb = NULL;
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
= (TranslationBlock
*)((long)tb
& ~3);
808 #ifdef TARGET_HAS_PRECISE_SMC
809 if (current_tb
== tb
&&
810 !(current_tb
->cflags
& CF_SINGLE_INSN
)) {
811 /* If we are modifying the current TB, we must stop
812 its execution. We could be more precise by checking
813 that the modification is after the current PC, but it
814 would require a specialized function to partially
815 restore the CPU state */
817 current_tb_modified
= 1;
818 cpu_restore_state(current_tb
, env
, pc
, puc
);
819 #if defined(TARGET_I386)
820 current_flags
= env
->hflags
;
821 current_flags
|= (env
->eflags
& (IOPL_MASK
| TF_MASK
| VM_MASK
));
822 current_cs_base
= (target_ulong
)env
->segs
[R_CS
].base
;
823 current_pc
= current_cs_base
+ env
->eip
;
825 #error unsupported CPU
828 #endif /* TARGET_HAS_PRECISE_SMC */
829 tb_phys_invalidate(tb
, addr
);
830 tb
= tb
->page_next
[n
];
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif
/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, unsigned int page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);
#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        unsigned long host_start, host_end, addr;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        host_start = page_addr & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;
        prot = 0;
        for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE)
            prot |= page_get_flags(addr);
        mprotect((void *)host_start, qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x%08lx\n",
               host_start);
#endif
        p->flags &= ~PAGE_WRITE;
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        target_ulong virt_addr;

        virt_addr = (tb->pc & TARGET_PAGE_MASK) + (n << TARGET_PAGE_BITS);
        tlb_protect_code(cpu_single_env, virt_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}
/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
        (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}
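/* Allocation policy (added comment): TBs come from the fixed 'tbs' array
   and host code from the linear 'code_gen_buffer'; nothing is ever freed
   individually. When either resource runs out, the caller is expected to
   do a full tb_flush() and retry, as tb_gen_code() above does:

       tb = tb_alloc(pc);
       if (!tb) {
           tb_flush(env);
           tb = tb_alloc(pc);   // cannot fail right after a flush
       }
*/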
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;
#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
}
/* link the tb with the other TBs */
void tb_link(TranslationBlock *tb)
{
#if !defined(CONFIG_USER_ONLY)
    {
        VirtPageDesc *vp;
        target_ulong addr;

        /* save the code memory mappings (needed to invalidate the code) */
        addr = tb->pc & TARGET_PAGE_MASK;
        vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS);
#ifdef DEBUG_TLB_CHECK
        if (vp->valid_tag == virt_valid_tag &&
            vp->phys_addr != tb->page_addr[0]) {
            printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
                   addr, tb->page_addr[0], vp->phys_addr);
        }
#endif
        vp->phys_addr = tb->page_addr[0];
        if (vp->valid_tag != virt_valid_tag) {
            vp->valid_tag = virt_valid_tag;
#if !defined(CONFIG_SOFTMMU)
            vp->prot = 0;
#endif
        }
        if (tb->page_addr[1] != -1) {
            addr += TARGET_PAGE_SIZE;
            vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS);
#ifdef DEBUG_TLB_CHECK
            if (vp->valid_tag == virt_valid_tag &&
                vp->phys_addr != tb->page_addr[1]) {
                printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
                       addr, tb->page_addr[1], vp->phys_addr);
            }
#endif
            vp->phys_addr = tb->page_addr[1];
            if (vp->valid_tag != virt_valid_tag) {
                vp->valid_tag = virt_valid_tag;
#if !defined(CONFIG_SOFTMMU)
                vp->prot = 0;
#endif
            }
        }
    }
#endif
    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    tb->cflags &= ~CF_FP_USED;
    if (tb->cflags & CF_TB_FP_USED)
        tb->cflags |= CF_FP_USED;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);
}
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}
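/* Why binary search works here (added comment): TBs are carved out of
   code_gen_buffer in allocation order, so tbs[0..nb_tbs-1] have strictly
   increasing tc_ptr values and the array is implicitly sorted by host
   code address. tb_find_pc() can therefore map a host PC captured in a
   signal handler back to the TB whose generated code contains it. */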
static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}
#if defined(TARGET_HAS_ICE)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_ulong phys_addr;

    phys_addr = cpu_get_phys_page_debug(env, pc);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + 1, 0);
}
#endif
/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
   breakpoint is reached */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            return 0;
    }

    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
        return -1;
    env->breakpoints[env->nb_breakpoints++] = pc;

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}
/* remove a breakpoint */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            goto found;
    }
    return -1;
 found:
    env->nb_breakpoints--;
    if (i < env->nb_breakpoints)
        env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
        tb_flush(env);
    }
#endif
}
/* enable or disable low level logging */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
    }
}
void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
}
/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    TranslationBlock *tb;
    static int interrupt_lock;

    env->interrupt_request |= mask;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    tb = env->current_tb;
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
        interrupt_lock = 0;
    }
}
void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}
CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops after optimization for each compiled TB" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
    { 0, NULL, NULL },
};
static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}
/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if(cmp1(p,p1-p,"all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        found:
            mask |= item->mask;
        }
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    va_end(ap);
    abort();
}
#if !defined(CONFIG_USER_ONLY)

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_read[0][i].address = -1;
        env->tlb_write[0][i].address = -1;
        env->tlb_read[1][i].address = -1;
        env->tlb_write[1][i].address = -1;
    }

    virt_page_flush();
    memset (tb_hash, 0, CODE_GEN_HASH_SIZE * sizeof (void *));

#if !defined(CONFIG_SOFTMMU)
    munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush(env, flush_global);
    }
#endif
    tlb_flush_count++;
}
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->address &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)))
        tlb_entry->address = -1;
}
void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i, n;
    VirtPageDesc *vp;
    PageDesc *p;
    TranslationBlock *tb;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: 0x%08x\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_read[0][i], addr);
    tlb_flush_entry(&env->tlb_write[0][i], addr);
    tlb_flush_entry(&env->tlb_read[1][i], addr);
    tlb_flush_entry(&env->tlb_write[1][i], addr);

    /* remove from the virtual pc hash table all the TB at this
       virtual address */
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (vp && vp->valid_tag == virt_valid_tag) {
        p = page_find(vp->phys_addr >> TARGET_PAGE_BITS);
        if (p) {
            /* we remove all the links to the TBs in this virtual page */
            tb = p->first_tb;
            while (tb != NULL) {
                n = (long)tb & 3;
                tb = (TranslationBlock *)((long)tb & ~3);
                if ((tb->pc & TARGET_PAGE_MASK) == addr ||
                    ((tb->pc + tb->size - 1) & TARGET_PAGE_MASK) == addr) {
                    tb_invalidate(tb);
                }
                tb = tb->page_next[n];
            }
        }
        vp->valid_tag = 0;
    }

#if !defined(CONFIG_SOFTMMU)
    if (addr < MMAP_AREA_END)
        munmap((void *)addr, TARGET_PAGE_SIZE);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush_page(env, addr);
    }
#endif
}
static inline void tlb_protect_code1(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->address &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) &&
        (tlb_entry->address & ~TARGET_PAGE_MASK) != IO_MEM_CODE &&
        (tlb_entry->address & ~TARGET_PAGE_MASK) != IO_MEM_ROM) {
        tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_CODE;
    }
}
/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(CPUState *env, target_ulong addr)
{
    int i;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_protect_code1(&env->tlb_write[0][i], addr);
    tlb_protect_code1(&env->tlb_write[1][i], addr);
#if !defined(CONFIG_SOFTMMU)
    /* NOTE: as we generated the code for this page, it is already at
       least readable */
    if (addr < MMAP_AREA_END)
        mprotect((void *)addr, TARGET_PAGE_SIZE, PROT_READ);
#endif
}
static inline void tlb_unprotect_code2(CPUTLBEntry *tlb_entry,
                                       unsigned long phys_addr)
{
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_CODE &&
        ((tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend) == phys_addr) {
        tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
    }
}
/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, unsigned long phys_addr, target_ulong vaddr)
{
    int i;

    phys_addr &= TARGET_PAGE_MASK;
    phys_addr += (long)phys_ram_base;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_unprotect_code2(&env->tlb_write[0][i], phys_addr);
    tlb_unprotect_code2(&env->tlb_write[1][i], phys_addr);
}
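/* Lifecycle of a code page in softmmu mode (summary comment, added):
   tlb_protect_code() retargets the write TLB entries of a page holding
   translated code at IO_MEM_CODE, so guest stores go through the
   code_mem_write*() handlers below and can invalidate TBs. Once the last
   TB on the page is gone, tlb_unprotect_code_phys() downgrades the entry
   to IO_MEM_NOTDIRTY, so later stores only pay the dirty-tracking cost. */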
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
        }
    }
}
void cpu_physical_memory_reset_dirty(target_ulong start, target_ulong end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i, mask, len;
    uint8_t *p;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    len = length >> TARGET_PAGE_BITS;
    for(i = 0; i < len; i++)
        p[i] &= mask;

    env = cpu_single_env;
    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = start + (unsigned long)phys_ram_base;
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_reset_dirty_range(&env->tlb_write[0][i], start1, length);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_reset_dirty_range(&env->tlb_write[1][i], start1, length);

#if !defined(CONFIG_SOFTMMU)
    /* XXX: this is expensive */
    {
        VirtPageDesc *p;
        int j;
        target_ulong addr;

        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                addr = i << (TARGET_PAGE_BITS + L2_BITS);
                for(j = 0; j < L2_SIZE; j++) {
                    if (p->valid_tag == virt_valid_tag &&
                        p->phys_addr >= start && p->phys_addr < end &&
                        (p->prot & PROT_WRITE)) {
                        if (addr < MMAP_AREA_END) {
                            mprotect((void *)addr, TARGET_PAGE_SIZE,
                                     p->prot & ~PROT_WRITE);
                        }
                    }
                    addr += TARGET_PAGE_SIZE;
                    p++;
                }
            }
        }
    }
#endif
}
*tlb_entry
,
1500 unsigned long start
)
1503 if ((tlb_entry
->address
& ~TARGET_PAGE_MASK
) == IO_MEM_NOTDIRTY
) {
1504 addr
= (tlb_entry
->address
& TARGET_PAGE_MASK
) + tlb_entry
->addend
;
1505 if (addr
== start
) {
1506 tlb_entry
->address
= (tlb_entry
->address
& TARGET_PAGE_MASK
) | IO_MEM_RAM
;
/* update the TLB corresponding to virtual page vaddr and phys addr
   addr so that it is no longer dirty */
static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)
{
    CPUState *env = cpu_single_env;
    int i;

    phys_ram_dirty[(addr - (unsigned long)phys_ram_base) >> TARGET_PAGE_BITS] = 0xff;

    addr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_set_dirty1(&env->tlb_write[0][i], addr);
    tlb_set_dirty1(&env->tlb_write[1][i], addr);
}
/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page(CPUState *env, target_ulong vaddr,
                 target_phys_addr_t paddr, int prot,
                 int is_user, int is_softmmu)
{
    PhysPageDesc *p;
    unsigned long pd;
    TranslationBlock *first_tb;
    unsigned int index;
    target_ulong address;
    unsigned long addend;
    int ret;

    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    first_tb = NULL;
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        PageDesc *p1;
        pd = p->phys_offset;
        if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
            /* NOTE: we also allocate the page at this stage */
            p1 = page_find_alloc(pd >> TARGET_PAGE_BITS);
            first_tb = p1->first_tb;
        }
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=0x%08x paddr=0x%08x prot=%x u=%d c=%d smmu=%d pd=0x%08x\n",
           vaddr, paddr, prot, is_user, (first_tb != NULL), is_softmmu, pd);
#endif

    ret = 0;
#if !defined(CONFIG_SOFTMMU)
    if (is_softmmu)
#endif
    {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO memory case */
            address = vaddr | pd;
            addend = paddr;
        } else {
            /* standard memory */
            address = vaddr;
            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
        }

        index = (vaddr >> 12) & (CPU_TLB_SIZE - 1);
        addend -= vaddr;
        if (prot & PAGE_READ) {
            env->tlb_read[is_user][index].address = address;
            env->tlb_read[is_user][index].addend = addend;
        } else {
            env->tlb_read[is_user][index].address = -1;
            env->tlb_read[is_user][index].addend = -1;
        }
        if (prot & PAGE_WRITE) {
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM) {
                /* ROM: access is ignored (same as unassigned) */
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_ROM;
                env->tlb_write[is_user][index].addend = addend;
            } else
                /* XXX: the PowerPC code seems not ready to handle
                   self modifying code with DCBI */
#if defined(TARGET_HAS_SMC) || 1
            if (first_tb) {
                /* if code is present, we use a specific memory
                   handler. It works only for physical memory access */
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_CODE;
                env->tlb_write[is_user][index].addend = addend;
            } else
#endif
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                !cpu_physical_memory_is_dirty(pd)) {
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_NOTDIRTY;
                env->tlb_write[is_user][index].addend = addend;
            } else {
                env->tlb_write[is_user][index].address = address;
                env->tlb_write[is_user][index].addend = addend;
            }
        } else {
            env->tlb_write[is_user][index].address = -1;
            env->tlb_write[is_user][index].addend = -1;
        }
    }
#if !defined(CONFIG_SOFTMMU)
    else {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO access: no mapping is done as it will be handled by the
               soft MMU */
            if (!(env->hflags & HF_SOFTMMU_MASK))
                ret = 2;
        } else {
            void *map_addr;

            if (vaddr >= MMAP_AREA_END) {
                ret = 2;
            } else {
                if (prot & PROT_WRITE) {
                    if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
#if defined(TARGET_HAS_SMC) || 1
                        first_tb ||
#endif
                        ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                         !cpu_physical_memory_is_dirty(pd))) {
                        /* ROM: we do as if code was inside */
                        /* if code is present, we only map as read only and save the
                           original mapping */
                        VirtPageDesc *vp;

                        vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS);
                        vp->phys_addr = pd;
                        vp->prot = prot;
                        vp->valid_tag = virt_valid_tag;
                        prot &= ~PAGE_WRITE;
                    }
                }
                map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
                                MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
                if (map_addr == MAP_FAILED) {
                    cpu_abort(env, "mmap failed when mapped physical address 0x%08x to virtual address 0x%08x\n",
                              paddr, vaddr);
                }
            }
        }
    }
#endif
    return ret;
}
1659 page. Return TRUE if the fault was succesfully handled. */
1660 int page_unprotect(unsigned long addr
, unsigned long pc
, void *puc
)
1662 #if !defined(CONFIG_SOFTMMU)
1665 #if defined(DEBUG_TLB)
1666 printf("page_unprotect: addr=0x%08x\n", addr
);
1668 addr
&= TARGET_PAGE_MASK
;
1670 /* if it is not mapped, no need to worry here */
1671 if (addr
>= MMAP_AREA_END
)
1673 vp
= virt_page_find(addr
>> TARGET_PAGE_BITS
);
1676 /* NOTE: in this case, validate_tag is _not_ tested as it
1677 validates only the code TLB */
1678 if (vp
->valid_tag
!= virt_valid_tag
)
1680 if (!(vp
->prot
& PAGE_WRITE
))
1682 #if defined(DEBUG_TLB)
1683 printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
1684 addr
, vp
->phys_addr
, vp
->prot
);
1686 if (mprotect((void *)addr
, TARGET_PAGE_SIZE
, vp
->prot
) < 0)
1687 cpu_abort(cpu_single_env
, "error mprotect addr=0x%lx prot=%d\n",
1688 (unsigned long)addr
, vp
->prot
);
1689 /* set the dirty bit */
1690 phys_ram_dirty
[vp
->phys_addr
>> TARGET_PAGE_BITS
] = 0xff;
1691 /* flush the code inside */
1692 tb_invalidate_phys_page(vp
->phys_addr
, pc
, puc
);
#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

int tlb_set_page(CPUState *env, target_ulong vaddr,
                 target_phys_addr_t paddr, int prot,
                 int is_user, int is_softmmu)
{
    return 0;
}
/* dump memory mappings */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    start = -1;
    end = -1;
    prot = 0;
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0;j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}
int page_get_flags(unsigned long address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}
/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is positioned automatically
   depending on PAGE_WRITE */
void page_set_flags(unsigned long start, unsigned long end, int flags)
{
    PageDesc *p;
    unsigned long addr;

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    spin_lock(&tb_lock);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* if the write protection is set, then we invalidate the code
           inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
    spin_unlock(&tb_lock);
}
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(unsigned long address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    unsigned long host_start, host_end, addr;

    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1)
        return 0;
    host_end = host_start + qemu_host_page_size;
    p = p1;
    prot = 0;
    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)host_start, qemu_host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            return 1;
        }
    }
    return 0;
}
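/* User-mode SMC detection in one picture (added summary comment):
   tb_alloc_page() mprotect()s host pages containing translated code
   read-only; a guest store then faults, the signal handler calls
   page_unprotect(), which restores PAGE_WRITE, invalidates the TBs on the
   page via tb_invalidate_phys_page(), and returns 1 so the faulting store
   is replayed against the now-writable memory. */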
/* call this function when system calls directly modify a memory area */
void page_unprotect_range(uint8_t *data, unsigned long data_size)
{
    unsigned long start, end, addr;

    start = (unsigned long)data;
    end = start + data_size;
    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        page_unprotect(addr, 0, NULL);
    }
}
static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */
/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                  unsigned long size,
                                  unsigned long phys_offset)
{
    unsigned long addr, end_addr;
    PhysPageDesc *p;

    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS);
        p->phys_offset = phys_offset;
        if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM)
            phys_offset += TARGET_PAGE_SIZE;
    }
}
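/* Usage sketch (added comment; the addresses are illustrative): RAM is
   registered with a page-aligned offset into phys_ram_base, devices with
   an io index in the low bits of phys_offset, e.g.:

       cpu_register_physical_memory(0x00000000, ram_size, IO_MEM_RAM);
       cpu_register_physical_memory(0x000a0000, 0x20000, io_index);

   where 'io_index' is a value previously returned by
   cpu_register_io_memory(). A non-zero (phys_offset & ~TARGET_PAGE_MASK)
   is what later marks the page as I/O in tlb_set_page(). */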
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
}

static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readb,
    unassigned_mem_readb,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writeb,
    unassigned_mem_writeb,
};
/* self modifying code support in soft mmu mode : writing to a page
   containing code comes to these functions */

static void code_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long phys_addr;

    phys_addr = addr - (unsigned long)phys_ram_base;
#if !defined(CONFIG_USER_ONLY)
    tb_invalidate_phys_page_fast(phys_addr, 1);
#endif
    stb_p((uint8_t *)(long)addr, val);
    phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 0xff;
}

static void code_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long phys_addr;

    phys_addr = addr - (unsigned long)phys_ram_base;
#if !defined(CONFIG_USER_ONLY)
    tb_invalidate_phys_page_fast(phys_addr, 2);
#endif
    stw_p((uint8_t *)(long)addr, val);
    phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 0xff;
}

static void code_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long phys_addr;

    phys_addr = addr - (unsigned long)phys_ram_base;
#if !defined(CONFIG_USER_ONLY)
    tb_invalidate_phys_page_fast(phys_addr, 4);
#endif
    stl_p((uint8_t *)(long)addr, val);
    phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 0xff;
}
static CPUReadMemoryFunc *code_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc *code_mem_write[3] = {
    code_mem_writeb,
    code_mem_writew,
    code_mem_writel,
};
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    stb_p((uint8_t *)(long)addr, val);
    tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    stw_p((uint8_t *)(long)addr, val);
    tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    stl_p((uint8_t *)(long)addr, val);
    tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
}

static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};
static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, code_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_CODE >> IO_MEM_SHIFT, code_mem_read, code_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, code_mem_read, notdirty_mem_write, NULL);
    io_mem_nb = 5;

    /* alloc dirty bits array */
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
}
/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). All functions must be supplied. If io_index is non zero, the
   corresponding io zone is modified. If it is zero, a new io zone is
   allocated. The return value can be used with
   cpu_register_physical_memory(). (-1) is returned if error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i;

    if (io_index <= 0) {
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0;i < 3; i++) {
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return io_index << IO_MEM_SHIFT;
}
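/* Example registration (added comment; the device names are hypothetical):

       static CPUReadMemoryFunc *mydev_read[3] = {
           mydev_readb, mydev_readw, mydev_readl,
       };
       static CPUWriteMemoryFunc *mydev_write[3] = {
           mydev_writeb, mydev_writew, mydev_writel,
       };
       int io = cpu_register_io_memory(0, mydev_read, mydev_write, s);
       cpu_register_physical_memory(base, size, io);

   Passing 0 allocates a fresh io zone; the returned value already has
   IO_MEM_SHIFT applied, so it can be used directly as a phys_offset. */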
CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
{
    return io_mem_write[io_index >> IO_MEM_SHIFT];
}

CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
{
    return io_mem_read[io_index >> IO_MEM_SHIFT];
}
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            memcpy((uint8_t *)addr, buf, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            memcpy(buf, (uint8_t *)addr, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
)
2060 void stl_phys_notdirty(target_phys_addr_t addr
, uint32_t val
)
2064 void stl_phys(target_phys_addr_t addr
, uint32_t val
)
#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != 0) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                /* set dirty bit */
                phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] = 0xff;
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                (pd & ~TARGET_PAGE_MASK) != IO_MEM_CODE) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
2156 uint32_t ldl_phys(target_phys_addr_t addr
)
2164 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
2166 pd
= IO_MEM_UNASSIGNED
;
2168 pd
= p
->phys_offset
;
2171 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
&&
2172 (pd
& ~TARGET_PAGE_MASK
) != IO_MEM_CODE
) {
2174 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
2175 val
= io_mem_read
[io_index
][2](io_mem_opaque
[io_index
], addr
);
2178 ptr
= phys_ram_base
+ (pd
& TARGET_PAGE_MASK
) +
2179 (addr
& ~TARGET_PAGE_MASK
);
/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != 0) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stl_p(ptr, val);
    }
}
/* warning: addr must be aligned */
/* XXX: optimize code invalidation test */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != 0) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);
        /* invalidate code */
        tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
        /* set dirty bit */
        phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] = 0xff;
    }
}
#endif
/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_ulong page, phys_addr;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
,
2272 int (*cpu_fprintf
)(FILE *f
, const char *fmt
, ...))
2274 int i
, target_code_size
, max_target_code_size
;
2275 int direct_jmp_count
, direct_jmp2_count
, cross_page
;
2276 TranslationBlock
*tb
;
2278 target_code_size
= 0;
2279 max_target_code_size
= 0;
2281 direct_jmp_count
= 0;
2282 direct_jmp2_count
= 0;
2283 for(i
= 0; i
< nb_tbs
; i
++) {
2285 target_code_size
+= tb
->size
;
2286 if (tb
->size
> max_target_code_size
)
2287 max_target_code_size
= tb
->size
;
2288 if (tb
->page_addr
[1] != -1)
2290 if (tb
->tb_next_offset
[0] != 0xffff) {
2292 if (tb
->tb_next_offset
[1] != 0xffff) {
2293 direct_jmp2_count
++;
2297 /* XXX: avoid using doubles ? */
2298 cpu_fprintf(f
, "TB count %d\n", nb_tbs
);
2299 cpu_fprintf(f
, "TB avg target size %d max=%d bytes\n",
2300 nb_tbs
? target_code_size
/ nb_tbs
: 0,
2301 max_target_code_size
);
2302 cpu_fprintf(f
, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
2303 nb_tbs
? (code_gen_ptr
- code_gen_buffer
) / nb_tbs
: 0,
2304 target_code_size
? (double) (code_gen_ptr
- code_gen_buffer
) / target_code_size
: 0);
2305 cpu_fprintf(f
, "cross page TB count %d (%d%%)\n",
2307 nb_tbs
? (cross_page
* 100) / nb_tbs
: 0);
2308 cpu_fprintf(f
, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
2310 nb_tbs
? (direct_jmp_count
* 100) / nb_tbs
: 0,
2312 nb_tbs
? (direct_jmp2_count
* 100) / nb_tbs
: 0);
2313 cpu_fprintf(f
, "TB flush count %d\n", tb_flush_count
);
2314 cpu_fprintf(f
, "TB invalidate count %d\n", tb_phys_invalidate_count
);
2315 cpu_fprintf(f
, "TLB flush count %d\n", tlb_flush_count
);
#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif