/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <sys/types.h>
//#define DEBUG_TB_INVALIDATE

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK
/* threshold to flush the translated code buffer */
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START 0x00000000
#define MMAP_AREA_END   0xa8000000
#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#else
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif
TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
uint8_t *code_gen_ptr;
int phys_ram_size;
int phys_ram_fd;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;
CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;
typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;
typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low 12 bits */
    uint32_t phys_offset;
} PhysPageDesc;
#define L2_BITS 10
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)
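/* Illustration (added, not part of the original source): with the usual
   TARGET_PAGE_BITS = 12 and L2_BITS = 10, a 32-bit address splits into a
   10-bit L1 index, a 10-bit L2 index and a 12-bit page offset. The sketch
   below, kept under #if 0, mirrors what page_find_alloc() computes. */
#if 0
static inline void split_addr_example(unsigned long vaddr,
                                      unsigned long *l1, unsigned long *l2,
                                      unsigned long *off)
{
    unsigned long index = vaddr >> TARGET_PAGE_BITS; /* virtual page number */
    *l1  = index >> L2_BITS;          /* index into l1_map[] */
    *l2  = index & (L2_SIZE - 1);     /* index into the second level array */
    *off = vaddr & ~TARGET_PAGE_MASK; /* byte offset inside the page */
}
#endif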
static void io_mem_init(void);
unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;
/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
PhysPageDesc **l1_phys_map;
/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb;
/* log support */
char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;

/* statistics */
static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;
        DWORD old_protect;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;

        VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
                       PAGE_EXECUTE_READWRITE, &old_protect);
    }
#else
    qemu_real_host_page_size = getpagesize();
    {
        unsigned long start, end;

        start = (unsigned long)code_gen_buffer;
        start &= ~(qemu_real_host_page_size - 1);

        end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
        end += qemu_real_host_page_size - 1;
        end &= ~(qemu_real_host_page_size - 1);

        mprotect((void *)start, end - start,
                 PROT_READ | PROT_WRITE | PROT_EXEC);
    }
#endif

    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
}
static inline PageDesc *page_find_alloc(unsigned int index)
{
    PageDesc **lp, *p;

    lp = &l1_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}
static inline PageDesc *page_find(unsigned int index)
{
    PageDesc *p;

    p = l1_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PhysPageDesc) * L2_SIZE);
        *lp = p;
    }
    return ((PhysPageDesc *)p) + (index & (L2_SIZE - 1));
}
static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}
#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#endif
void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

    if (!code_gen_ptr) {
        code_gen_ptr = code_gen_buffer;
        page_init();
        io_mem_init();
    }
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = (CPUState **)&(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    *penv = env;
}
static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}
/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
           code_gen_ptr - code_gen_buffer,
           nb_tbs,
           nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
#endif
    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}
#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(unsigned long address)
{
    TranslationBlock *tb;
    int i;

    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_HASH_SIZE; i++) {
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, tb->pc, tb->size);
            }
        }
    }
}
/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_HASH_SIZE; i++) {
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}
void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif
/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}
/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}
static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    target_ulong phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
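/* Worked example (added, not part of the original source): set_bits(tab, 10, 7)
   marks bits 10..16. Since the run crosses a byte boundary, the first step ORs
   0xfc into tab[1] (bits 10..15) and the tail ORs 0x01 into tab[2] (bit 16).
   build_page_bitmap() below relies on this to mark the byte ranges of a page
   that are covered by translated code. */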
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
    if (!p->code_bitmap)
        return;
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
#ifdef TARGET_HAS_PRECISE_SMC

static void tb_gen_code(CPUState *env,
                        target_ulong pc, target_ulong cs_base, int flags,
                        int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
}
#endif
/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
                                   int is_cpu_write_access)
{
    int n, current_tb_modified, current_tb_not_found, current_flags;
    CPUState *env = cpu_single_env;
    PageDesc *p;
    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
    target_ulong tb_start, tb_end;
    target_ulong current_pc, current_cs_base;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    current_tb_not_found = is_cpu_write_access;
    current_tb_modified = 0;
    current_tb = NULL; /* avoid warning */
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_write_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_write_pc);
                }
            }
            if (current_tb == tb &&
                !(current_tb->cflags & CF_SINGLE_INSN)) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_write_pc, NULL);
#if defined(TARGET_I386)
                current_flags = env->hflags;
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
                current_cs_base = (target_ulong)env->segs[R_CS].base;
                current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = env->current_tb;
            env->current_tb = NULL;
            tb_phys_invalidate(tb, -1);
            env->current_tb = saved_tb;
            if (env->interrupt_request && env->current_tb)
                cpu_interrupt(env, env->interrupt_request);
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}
/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (loglevel) {
        fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                cpu_single_env->mem_write_vaddr, len,
                cpu_single_env->eip,
                cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
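/* Illustration (added, not part of the original source): with 4 KB target
   pages the code bitmap is 512 bytes. A 4-byte write at page offset 0x123
   shifts code_bitmap[0x24] right by 3 and tests the result against
   (1 << 4) - 1, i.e. bits 0x123..0x126; only when one of those bits is set
   does the slow tb_invalidate_phys_page_range() path run. */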
#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_ulong addr,
                                    unsigned long pc, void *puc)
{
    int n, current_flags, current_tb_modified;
    target_ulong current_pc, current_cs_base;
    PageDesc *p;
    TranslationBlock *tb, *current_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *env = cpu_single_env;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
    current_tb_modified = 0;
    current_tb = NULL;
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            !(current_tb->cflags & CF_SINGLE_INSN)) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
#if defined(TARGET_I386)
            current_flags = env->hflags;
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
            current_cs_base = (target_ulong)env->segs[R_CS].base;
            current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif
/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, unsigned int page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        unsigned long host_start, host_end, addr;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        host_start = page_addr & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;
        prot = 0;
        for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE)
            prot |= page_get_flags(addr);
        mprotect((void *)host_start, qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x%08lx\n",
               host_start);
#endif
        p->flags &= ~PAGE_WRITE;
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}
/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
        (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;
#ifdef USE_CODE_COPY
    tb->cflags &= ~CF_FP_USED;
    if (tb->cflags & CF_TB_FP_USED)
        tb->cflags |= CF_FP_USED;
#endif

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
}
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}
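/* Note (added, not part of the original source): the binary search is valid
   because TBs are carved out of code_gen_buffer in allocation order, so the
   tc_ptr values in tbs[] increase monotonically. When the loop exits without
   an exact match, m_max indexes the last TB whose tc_ptr lies below the
   searched address, i.e. the block that contains it. */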
static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}
#if defined(TARGET_HAS_ICE)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_ulong phys_addr;

    phys_addr = cpu_get_phys_page_debug(env, pc);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + 1, 0);
}
#endif
/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
   breakpoint is reached */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            return 0;
    }

    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
        return -1;
    env->breakpoints[env->nb_breakpoints++] = pc;

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}
/* remove a breakpoint */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            goto found;
    }
    return -1;
 found:
    env->nb_breakpoints--;
    if (i < env->nb_breakpoints)
        env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
        tb_flush(env);
    }
#endif
}
/* enable or disable low level logging */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
    }
}
void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
}
/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    TranslationBlock *tb;
    static int interrupt_lock;

    env->interrupt_request |= mask;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    tb = env->current_tb;
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
        interrupt_lock = 0;
    }
}
void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}
CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
#ifdef TARGET_I386
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops after optimization for each compiled TB" },
#endif
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all I/O port accesses" },
#endif
    { 0, NULL, NULL },
};
static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}
/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if(cmp1(p,p1-p,"all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}
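/* Usage sketch (added, hypothetical caller, not part of the original source):
   the mask string maps onto cpu_log_items[] entries, e.g.

       int mask = cpu_str_to_log_mask("in_asm,op");
       if (mask)
           cpu_set_log(mask);

   "all" ORs every item mask together; any unknown name makes the function
   return 0. */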
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    va_end(ap);
    abort();
}
#if !defined(CONFIG_USER_ONLY)

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_table[0][i].addr_read = -1;
        env->tlb_table[0][i].addr_write = -1;
        env->tlb_table[0][i].addr_code = -1;
        env->tlb_table[1][i].addr_read = -1;
        env->tlb_table[1][i].addr_write = -1;
        env->tlb_table[1][i].addr_code = -1;
    }

    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

#if !defined(CONFIG_SOFTMMU)
    munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush(env, flush_global);
    }
#endif
    tlb_flush_count++;
}
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        tlb_entry->addr_read = -1;
        tlb_entry->addr_write = -1;
        tlb_entry->addr_code = -1;
    }
}
void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;
    TranslationBlock *tb;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_table[0][i], addr);
    tlb_flush_entry(&env->tlb_table[1][i], addr);

    for(i = 0; i < TB_JMP_CACHE_SIZE; i++) {
        tb = env->tb_jmp_cache[i];
        if (tb &&
            ((tb->pc & TARGET_PAGE_MASK) == addr ||
             ((tb->pc + tb->size - 1) & TARGET_PAGE_MASK) == addr)) {
            env->tb_jmp_cache[i] = NULL;
        }
    }

#if !defined(CONFIG_SOFTMMU)
    if (addr < MMAP_AREA_END)
        munmap((void *)addr, TARGET_PAGE_SIZE);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush_page(env, addr);
    }
#endif
}
/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}
/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
}
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
        }
    }
}
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i, mask, len;
    uint8_t *p;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    len = length >> TARGET_PAGE_BITS;
#ifdef USE_KQEMU
    /* XXX: should not depend on cpu context */
    env = first_cpu;
    if (env->kqemu_enabled) {
        ram_addr_t addr;
        addr = start;
        for(i = 0; i < len; i++) {
            kqemu_set_notdirty(env, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }
#endif
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    for(i = 0; i < len; i++)
        p[i] &= mask;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = start + (unsigned long)phys_ram_base;
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
    }

#if !defined(CONFIG_SOFTMMU)
    /* XXX: this is expensive */
    {
        VirtPageDesc *p;
        int j;
        target_ulong addr;

        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                addr = i << (TARGET_PAGE_BITS + L2_BITS);
                for(j = 0; j < L2_SIZE; j++) {
                    if (p->valid_tag == virt_valid_tag &&
                        p->phys_addr >= start && p->phys_addr < end &&
                        (p->prot & PROT_WRITE)) {
                        if (addr < MMAP_AREA_END) {
                            mprotect((void *)addr, TARGET_PAGE_SIZE,
                                     p->prot & ~PROT_WRITE);
                        }
                    }
                    addr += TARGET_PAGE_SIZE;
                    p++;
                }
            }
        }
    }
#endif
}
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
            tlb_entry->addend - (unsigned long)phys_ram_base;
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
        }
    }
}
/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
{
    int i;
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[0][i]);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[1][i]);
}
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
                                  unsigned long start)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if (addr == start) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
        }
    }
}
/* update the TLB corresponding to virtual page vaddr and phys addr
   addr so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
    int i;

    addr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_set_dirty1(&env->tlb_table[0][i], addr);
    tlb_set_dirty1(&env->tlb_table[1][i], addr);
}
/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int is_user, int is_softmmu)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_phys_addr_t addend;
    int ret;
    CPUTLBEntry *te;

    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x u=%d smmu=%d pd=0x%08lx\n",
           vaddr, (int)paddr, prot, is_user, is_softmmu, pd);
#endif

    ret = 0;
#if !defined(CONFIG_SOFTMMU)
    if (is_softmmu)
#endif
    {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO memory case */
            address = vaddr | pd;
            addend = paddr;
        } else {
            /* standard memory */
            address = vaddr;
            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
        }

        index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        addend -= vaddr;
        te = &env->tlb_table[is_user][index];
        te->addend = addend;
        if (prot & PAGE_READ) {
            te->addr_read = address;
        } else {
            te->addr_read = -1;
        }
        if (prot & PAGE_EXEC) {
            te->addr_code = address;
        } else {
            te->addr_code = -1;
        }
        if (prot & PAGE_WRITE) {
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM) {
                /* ROM: access is ignored (same as unassigned) */
                te->addr_write = vaddr | IO_MEM_ROM;
            } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                       !cpu_physical_memory_is_dirty(pd)) {
                te->addr_write = vaddr | IO_MEM_NOTDIRTY;
            } else {
                te->addr_write = address;
            }
        } else {
            te->addr_write = -1;
        }
    }
#if !defined(CONFIG_SOFTMMU)
    else {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO access: no mapping is done as it will be handled by the
               soft MMU */
            if (!(env->hflags & HF_SOFTMMU_MASK))
                ret = 2;
        } else {
            void *map_addr;

            if (vaddr >= MMAP_AREA_END) {
                ret = 2;
            } else {
                if (prot & PROT_WRITE) {
                    if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
#if defined(TARGET_HAS_SMC) || 1
                        first_tb ||
#endif
                        ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                         !cpu_physical_memory_is_dirty(pd))) {
                        /* ROM: we do as if code was inside */
                        /* if code is present, we only map as read only and save the
                           original mapping */
                        VirtPageDesc *vp;

                        vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
                        vp->phys_addr = pd;
                        vp->prot = prot;
                        vp->valid_tag = virt_valid_tag;
                        prot &= ~PAGE_WRITE;
                    }
                }
                map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
                                MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
                if (map_addr == MAP_FAILED) {
                    cpu_abort(env, "mmap failed when mapped physical address 0x%08x to virtual address 0x%08x\n",
                              paddr, vaddr);
                }
            }
        }
    }
#endif
    return ret;
}
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(unsigned long addr, unsigned long pc, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    VirtPageDesc *vp;

#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x\n", addr);
#endif
    addr &= TARGET_PAGE_MASK;

    /* if it is not mapped, no need to worry here */
    if (addr >= MMAP_AREA_END)
        return 0;
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (!vp)
        return 0;
    /* NOTE: in this case, validate_tag is _not_ tested as it
       validates only the code TLB */
    if (vp->valid_tag != virt_valid_tag)
        return 0;
    if (!(vp->prot & PAGE_WRITE))
        return 0;
#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
           addr, vp->phys_addr, vp->prot);
#endif
    if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
        cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
                  (unsigned long)addr, vp->prot);
    /* set the dirty bit */
    phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
    /* flush the code inside */
    tb_invalidate_phys_page(vp->phys_addr, pc, puc);
#endif
    return 1;
}
#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int is_user, int is_softmmu)
{
    return 0;
}
/* dump memory mappings */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    start = -1;
    end = -1;
    prot = 0;
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0;j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}
int page_get_flags(unsigned long address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}
/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is positioned automatically
   depending on PAGE_WRITE */
void page_set_flags(unsigned long start, unsigned long end, int flags)
{
    PageDesc *p;
    unsigned long addr;

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    spin_lock(&tb_lock);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* if the write protection is set, then we invalidate the code
           inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
    spin_unlock(&tb_lock);
}
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(unsigned long address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    unsigned long host_start, host_end, addr;

    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1)
        return 0;
    host_end = host_start + qemu_host_page_size;
    p = p1;
    prot = 0;
    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)host_start, qemu_host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            return 1;
        }
    }
    return 0;
}
1726 void page_unprotect_range(uint8_t *data
, unsigned long data_size
)
1728 unsigned long start
, end
, addr
;
1730 start
= (unsigned long)data
;
1731 end
= start
+ data_size
;
1732 start
&= TARGET_PAGE_MASK
;
1733 end
= TARGET_PAGE_ALIGN(end
);
1734 for(addr
= start
; addr
< end
; addr
+= TARGET_PAGE_SIZE
) {
1735 page_unprotect(addr
, 0, NULL
);
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */
/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                  unsigned long size,
                                  unsigned long phys_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;

    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
        p->phys_offset = phys_offset;
        if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM)
            phys_offset += TARGET_PAGE_SIZE;
    }
}
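/* Usage sketch (added, hypothetical board code, not part of the original
   source): map 16 MB of RAM at physical address 0, then a ROM page above it:

       cpu_register_physical_memory(0x00000000, 16 * 1024 * 1024, IO_MEM_RAM);
       cpu_register_physical_memory(0x01000000, TARGET_PAGE_SIZE,
                                    rom_offset | IO_MEM_ROM);

   where rom_offset is a page-aligned offset into the RAM area chosen by the
   caller. Only RAM/ROM offsets advance page by page above; an io_index stays
   identical for every page of a device region. */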
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
}

static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readb,
    unassigned_mem_readb,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writeb,
    unassigned_mem_writeb,
};
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stb_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}
static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stw_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}
static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stl_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}
static CPUReadMemoryFunc *error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};
static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
    io_mem_nb = 5;

    /* alloc dirty bits array */
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
}
/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). All functions must be supplied. If io_index is non zero, the
   corresponding io zone is modified. If it is zero, a new io zone is
   allocated. The return value can be used with
   cpu_register_physical_memory(). (-1) is returned if error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i;

    if (io_index <= 0) {
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0;i < 3; i++) {
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return io_index << IO_MEM_SHIFT;
}
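/* Usage sketch (added, hypothetical device, not part of the original source):
   supply one handler per access width and hand the returned token to
   cpu_register_physical_memory():

       static CPUReadMemoryFunc *mydev_read[3] = {
           mydev_readb, mydev_readw, mydev_readl,
       };
       static CPUWriteMemoryFunc *mydev_write[3] = {
           mydev_writeb, mydev_writew, mydev_writel,
       };

       int io = cpu_register_io_memory(0, mydev_read, mydev_write, opaque);
       cpu_register_physical_memory(0xfe000000, 0x1000, io);

   The returned io_index lives in the low bits of phys_offset, which is why
   such pages take the I/O branch in cpu_physical_memory_rw() below. */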
CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
{
    return io_mem_write[io_index >> IO_MEM_SHIFT];
}

CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
{
    return io_mem_read[io_index >> IO_MEM_SHIFT];
}
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            memcpy((uint8_t *)addr, buf, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            memcpy(buf, (uint8_t *)addr, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}
/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}
/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* XXX: optimize */
uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}
/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stl_p(ptr, val);
    }
}
/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}
/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* XXX: optimize */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}

#endif
/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_ulong page, phys_addr;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "TB count %d\n", nb_tbs);
    cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
}
#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif