2 * virtual page mapping and translated block handling
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22 #define WIN32_LEAN_AND_MEAN
25 #include <sys/types.h>
40 #if defined(CONFIG_USER_ONLY)
44 //#define DEBUG_TB_INVALIDATE
47 //#define DEBUG_UNASSIGNED
49 /* make various TB consistency checks */
50 //#define DEBUG_TB_CHECK
51 //#define DEBUG_TLB_CHECK
53 //#define DEBUG_IOPORT
54 //#define DEBUG_SUBPAGE
56 #if !defined(CONFIG_USER_ONLY)
57 /* TB consistency checks only implemented for usermode emulation. */
61 /* threshold to flush the translated code buffer */
62 #define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - code_gen_max_block_size())
64 #define SMC_BITMAP_USE_THRESHOLD 10
66 #define MMAP_AREA_START 0x00000000
67 #define MMAP_AREA_END 0xa8000000
69 #if defined(TARGET_SPARC64)
70 #define TARGET_PHYS_ADDR_SPACE_BITS 41
71 #elif defined(TARGET_SPARC)
72 #define TARGET_PHYS_ADDR_SPACE_BITS 36
73 #elif defined(TARGET_ALPHA)
74 #define TARGET_PHYS_ADDR_SPACE_BITS 42
75 #define TARGET_VIRT_ADDR_SPACE_BITS 42
76 #elif defined(TARGET_PPC64)
77 #define TARGET_PHYS_ADDR_SPACE_BITS 42
79 /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
80 #define TARGET_PHYS_ADDR_SPACE_BITS 32
82 #define TARGET_PHYS_ADDR_SPACE_BITS 42
83 #elif defined(TARGET_IA64)
84 #define TARGET_PHYS_ADDR_SPACE_BITS 36
86 #define TARGET_PHYS_ADDR_SPACE_BITS 32
89 TranslationBlock tbs
[CODE_GEN_MAX_BLOCKS
];
90 TranslationBlock
*tb_phys_hash
[CODE_GEN_PHYS_HASH_SIZE
];
92 /* any access to the tbs or the page table must use this lock */
93 spinlock_t tb_lock
= SPIN_LOCK_UNLOCKED
;
95 uint8_t code_gen_buffer
[CODE_GEN_BUFFER_SIZE
] __attribute__((aligned (32)));
96 uint8_t *code_gen_ptr
;
98 ram_addr_t phys_ram_size
;
100 uint8_t *phys_ram_base
;
101 uint8_t *phys_ram_dirty
;
103 static int in_migration
;
104 static ram_addr_t phys_ram_alloc_offset
= 0;
107 /* current CPU in the current thread. It is only valid inside
109 CPUState
*cpu_single_env
;
111 typedef struct PageDesc
{
112 /* list of TBs intersecting this ram page */
113 TranslationBlock
*first_tb
;
114 /* in order to optimize self modifying code, we count the number
115 of lookups we do to a given page to use a bitmap */
116 unsigned int code_write_count
;
117 uint8_t *code_bitmap
;
118 #if defined(CONFIG_USER_ONLY)
123 typedef struct PhysPageDesc
{
124 /* offset in host memory of the page + io_index in the low 12 bits */
125 ram_addr_t phys_offset
;
129 #if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
130 /* XXX: this is a temporary hack for alpha target.
131 * In the future, this is to be replaced by a multi-level table
132 * to actually be able to handle the complete 64 bits address space.
134 #define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
136 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
139 #define L1_SIZE (1 << L1_BITS)
140 #define L2_SIZE (1 << L2_BITS)
142 static void io_mem_init(void);
144 unsigned long qemu_real_host_page_size
;
145 unsigned long qemu_host_page_bits
;
146 unsigned long qemu_host_page_size
;
147 unsigned long qemu_host_page_mask
;
149 /* XXX: for system emulation, it could just be an array */
150 static PageDesc
*l1_map
[L1_SIZE
];
151 PhysPageDesc
**l1_phys_map
;
153 /* io memory support */
154 CPUWriteMemoryFunc
*io_mem_write
[IO_MEM_NB_ENTRIES
][4];
155 CPUReadMemoryFunc
*io_mem_read
[IO_MEM_NB_ENTRIES
][4];
156 void *io_mem_opaque
[IO_MEM_NB_ENTRIES
];
157 static int io_mem_nb
;
158 #if defined(CONFIG_SOFTMMU)
159 static int io_mem_watch
;
163 char *logfilename
= "/tmp/qemu.log";
166 static int log_append
= 0;
169 static int tlb_flush_count
;
170 static int tb_flush_count
;
171 static int tb_phys_invalidate_count
;
173 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
174 typedef struct subpage_t
{
175 target_phys_addr_t base
;
176 CPUReadMemoryFunc
**mem_read
[TARGET_PAGE_SIZE
][4];
177 CPUWriteMemoryFunc
**mem_write
[TARGET_PAGE_SIZE
][4];
178 void *opaque
[TARGET_PAGE_SIZE
][2][4];
181 static void page_init(void)
183 /* NOTE: we can always suppose that qemu_host_page_size >=
187 SYSTEM_INFO system_info
;
190 GetSystemInfo(&system_info
);
191 qemu_real_host_page_size
= system_info
.dwPageSize
;
193 VirtualProtect(code_gen_buffer
, sizeof(code_gen_buffer
),
194 PAGE_EXECUTE_READWRITE
, &old_protect
);
197 qemu_real_host_page_size
= getpagesize();
199 unsigned long start
, end
;
201 start
= (unsigned long)code_gen_buffer
;
202 start
&= ~(qemu_real_host_page_size
- 1);
204 end
= (unsigned long)code_gen_buffer
+ sizeof(code_gen_buffer
);
205 end
+= qemu_real_host_page_size
- 1;
206 end
&= ~(qemu_real_host_page_size
- 1);
208 mprotect((void *)start
, end
- start
,
209 PROT_READ
| PROT_WRITE
| PROT_EXEC
);
213 if (qemu_host_page_size
== 0)
214 qemu_host_page_size
= qemu_real_host_page_size
;
215 if (qemu_host_page_size
< TARGET_PAGE_SIZE
)
216 qemu_host_page_size
= TARGET_PAGE_SIZE
;
217 qemu_host_page_bits
= 0;
218 while ((1 << qemu_host_page_bits
) < qemu_host_page_size
)
219 qemu_host_page_bits
++;
220 qemu_host_page_mask
= ~(qemu_host_page_size
- 1);
221 l1_phys_map
= qemu_vmalloc(L1_SIZE
* sizeof(void *));
222 memset(l1_phys_map
, 0, L1_SIZE
* sizeof(void *));
224 #if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
226 long long startaddr
, endaddr
;
230 f
= fopen("/proc/self/maps", "r");
233 n
= fscanf (f
, "%llx-%llx %*[^\n]\n", &startaddr
, &endaddr
);
235 page_set_flags(TARGET_PAGE_ALIGN(startaddr
),
236 TARGET_PAGE_ALIGN(endaddr
),
246 static inline PageDesc
*page_find_alloc(unsigned int index
)
250 lp
= &l1_map
[index
>> L2_BITS
];
253 /* allocate if not found */
254 p
= qemu_malloc(sizeof(PageDesc
) * L2_SIZE
);
255 memset(p
, 0, sizeof(PageDesc
) * L2_SIZE
);
258 return p
+ (index
& (L2_SIZE
- 1));
261 static inline PageDesc
*page_find(unsigned int index
)
265 p
= l1_map
[index
>> L2_BITS
];
268 return p
+ (index
& (L2_SIZE
- 1));
271 static PhysPageDesc
*phys_page_find_alloc(target_phys_addr_t index
, int alloc
)
276 p
= (void **)l1_phys_map
;
277 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
279 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
280 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
282 lp
= p
+ ((index
>> (L1_BITS
+ L2_BITS
)) & (L1_SIZE
- 1));
285 /* allocate if not found */
288 p
= qemu_vmalloc(sizeof(void *) * L1_SIZE
);
289 memset(p
, 0, sizeof(void *) * L1_SIZE
);
293 lp
= p
+ ((index
>> L2_BITS
) & (L1_SIZE
- 1));
297 /* allocate if not found */
300 pd
= qemu_vmalloc(sizeof(PhysPageDesc
) * L2_SIZE
);
302 for (i
= 0; i
< L2_SIZE
; i
++)
303 pd
[i
].phys_offset
= IO_MEM_UNASSIGNED
;
305 return ((PhysPageDesc
*)pd
) + (index
& (L2_SIZE
- 1));
308 static inline PhysPageDesc
*phys_page_find(target_phys_addr_t index
)
310 return phys_page_find_alloc(index
, 0);
313 #if !defined(CONFIG_USER_ONLY)
314 static void tlb_protect_code(ram_addr_t ram_addr
);
315 static void tlb_unprotect_code_phys(CPUState
*env
, ram_addr_t ram_addr
,
319 void cpu_exec_init(CPUState
*env
)
325 code_gen_ptr
= code_gen_buffer
;
329 env
->next_cpu
= NULL
;
332 while (*penv
!= NULL
) {
333 penv
= (CPUState
**)&(*penv
)->next_cpu
;
336 env
->cpu_index
= cpu_index
;
337 env
->nb_watchpoints
= 0;
341 static inline void invalidate_page_bitmap(PageDesc
*p
)
343 if (p
->code_bitmap
) {
344 qemu_free(p
->code_bitmap
);
345 p
->code_bitmap
= NULL
;
347 p
->code_write_count
= 0;
350 /* set to NULL all the 'first_tb' fields in all PageDescs */
351 static void page_flush_tb(void)
356 for(i
= 0; i
< L1_SIZE
; i
++) {
359 for(j
= 0; j
< L2_SIZE
; j
++) {
361 invalidate_page_bitmap(p
);
368 /* flush all the translation blocks */
369 /* XXX: tb_flush is currently not thread safe */
370 void tb_flush(CPUState
*env1
)
373 #if defined(DEBUG_FLUSH)
374 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
375 (unsigned long)(code_gen_ptr
- code_gen_buffer
),
377 ((unsigned long)(code_gen_ptr
- code_gen_buffer
)) / nb_tbs
: 0);
381 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
382 memset (env
->tb_jmp_cache
, 0, TB_JMP_CACHE_SIZE
* sizeof (void *));
385 memset (tb_phys_hash
, 0, CODE_GEN_PHYS_HASH_SIZE
* sizeof (void *));
388 code_gen_ptr
= code_gen_buffer
;
389 /* XXX: flush processor icache at this point if cache flush is
394 #ifdef DEBUG_TB_CHECK
396 static void tb_invalidate_check(target_ulong address
)
398 TranslationBlock
*tb
;
400 address
&= TARGET_PAGE_MASK
;
401 for(i
= 0;i
< CODE_GEN_PHYS_HASH_SIZE
; i
++) {
402 for(tb
= tb_phys_hash
[i
]; tb
!= NULL
; tb
= tb
->phys_hash_next
) {
403 if (!(address
+ TARGET_PAGE_SIZE
<= tb
->pc
||
404 address
>= tb
->pc
+ tb
->size
)) {
405 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
406 address
, (long)tb
->pc
, tb
->size
);
412 /* verify that all the pages have correct rights for code */
413 static void tb_page_check(void)
415 TranslationBlock
*tb
;
416 int i
, flags1
, flags2
;
418 for(i
= 0;i
< CODE_GEN_PHYS_HASH_SIZE
; i
++) {
419 for(tb
= tb_phys_hash
[i
]; tb
!= NULL
; tb
= tb
->phys_hash_next
) {
420 flags1
= page_get_flags(tb
->pc
);
421 flags2
= page_get_flags(tb
->pc
+ tb
->size
- 1);
422 if ((flags1
& PAGE_WRITE
) || (flags2
& PAGE_WRITE
)) {
423 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
424 (long)tb
->pc
, tb
->size
, flags1
, flags2
);
430 void tb_jmp_check(TranslationBlock
*tb
)
432 TranslationBlock
*tb1
;
435 /* suppress any remaining jumps to this TB */
439 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
442 tb1
= tb1
->jmp_next
[n1
];
444 /* check end of list */
446 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb
);
452 /* invalidate one TB */
453 static inline void tb_remove(TranslationBlock
**ptb
, TranslationBlock
*tb
,
456 TranslationBlock
*tb1
;
460 *ptb
= *(TranslationBlock
**)((char *)tb1
+ next_offset
);
463 ptb
= (TranslationBlock
**)((char *)tb1
+ next_offset
);
467 static inline void tb_page_remove(TranslationBlock
**ptb
, TranslationBlock
*tb
)
469 TranslationBlock
*tb1
;
475 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
477 *ptb
= tb1
->page_next
[n1
];
480 ptb
= &tb1
->page_next
[n1
];
484 static inline void tb_jmp_remove(TranslationBlock
*tb
, int n
)
486 TranslationBlock
*tb1
, **ptb
;
489 ptb
= &tb
->jmp_next
[n
];
492 /* find tb(n) in circular list */
496 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
497 if (n1
== n
&& tb1
== tb
)
500 ptb
= &tb1
->jmp_first
;
502 ptb
= &tb1
->jmp_next
[n1
];
505 /* now we can suppress tb(n) from the list */
506 *ptb
= tb
->jmp_next
[n
];
508 tb
->jmp_next
[n
] = NULL
;
512 /* reset the jump entry 'n' of a TB so that it is not chained to
514 static inline void tb_reset_jump(TranslationBlock
*tb
, int n
)
516 tb_set_jmp_target(tb
, n
, (unsigned long)(tb
->tc_ptr
+ tb
->tb_next_offset
[n
]));
519 static inline void tb_phys_invalidate(TranslationBlock
*tb
, unsigned int page_addr
)
524 target_ulong phys_pc
;
525 TranslationBlock
*tb1
, *tb2
;
527 /* remove the TB from the hash list */
528 phys_pc
= tb
->page_addr
[0] + (tb
->pc
& ~TARGET_PAGE_MASK
);
529 h
= tb_phys_hash_func(phys_pc
);
530 tb_remove(&tb_phys_hash
[h
], tb
,
531 offsetof(TranslationBlock
, phys_hash_next
));
533 /* remove the TB from the page list */
534 if (tb
->page_addr
[0] != page_addr
) {
535 p
= page_find(tb
->page_addr
[0] >> TARGET_PAGE_BITS
);
536 tb_page_remove(&p
->first_tb
, tb
);
537 invalidate_page_bitmap(p
);
539 if (tb
->page_addr
[1] != -1 && tb
->page_addr
[1] != page_addr
) {
540 p
= page_find(tb
->page_addr
[1] >> TARGET_PAGE_BITS
);
541 tb_page_remove(&p
->first_tb
, tb
);
542 invalidate_page_bitmap(p
);
545 tb_invalidated_flag
= 1;
547 /* remove the TB from the hash list */
548 h
= tb_jmp_cache_hash_func(tb
->pc
);
549 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
550 if (env
->tb_jmp_cache
[h
] == tb
)
551 env
->tb_jmp_cache
[h
] = NULL
;
554 /* suppress this TB from the two jump lists */
555 tb_jmp_remove(tb
, 0);
556 tb_jmp_remove(tb
, 1);
558 /* suppress any remaining jumps to this TB */
564 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
565 tb2
= tb1
->jmp_next
[n1
];
566 tb_reset_jump(tb1
, n1
);
567 tb1
->jmp_next
[n1
] = NULL
;
570 tb
->jmp_first
= (TranslationBlock
*)((long)tb
| 2); /* fail safe */
572 tb_phys_invalidate_count
++;
575 static inline void set_bits(uint8_t *tab
, int start
, int len
)
581 mask
= 0xff << (start
& 7);
582 if ((start
& ~7) == (end
& ~7)) {
584 mask
&= ~(0xff << (end
& 7));
589 start
= (start
+ 8) & ~7;
591 while (start
< end1
) {
596 mask
= ~(0xff << (end
& 7));
602 static void build_page_bitmap(PageDesc
*p
)
604 int n
, tb_start
, tb_end
;
605 TranslationBlock
*tb
;
607 p
->code_bitmap
= qemu_malloc(TARGET_PAGE_SIZE
/ 8);
610 memset(p
->code_bitmap
, 0, TARGET_PAGE_SIZE
/ 8);
615 tb
= (TranslationBlock
*)((long)tb
& ~3);
616 /* NOTE: this is subtle as a TB may span two physical pages */
618 /* NOTE: tb_end may be after the end of the page, but
619 it is not a problem */
620 tb_start
= tb
->pc
& ~TARGET_PAGE_MASK
;
621 tb_end
= tb_start
+ tb
->size
;
622 if (tb_end
> TARGET_PAGE_SIZE
)
623 tb_end
= TARGET_PAGE_SIZE
;
626 tb_end
= ((tb
->pc
+ tb
->size
) & ~TARGET_PAGE_MASK
);
628 set_bits(p
->code_bitmap
, tb_start
, tb_end
- tb_start
);
629 tb
= tb
->page_next
[n
];
633 #ifdef TARGET_HAS_PRECISE_SMC
635 static void tb_gen_code(CPUState
*env
,
636 target_ulong pc
, target_ulong cs_base
, int flags
,
639 TranslationBlock
*tb
;
641 target_ulong phys_pc
, phys_page2
, virt_page2
;
644 phys_pc
= get_phys_addr_code(env
, pc
);
647 /* flush must be done */
649 /* cannot fail at this point */
652 tc_ptr
= code_gen_ptr
;
654 tb
->cs_base
= cs_base
;
657 cpu_gen_code(env
, tb
, &code_gen_size
);
658 code_gen_ptr
= (void *)(((unsigned long)code_gen_ptr
+ code_gen_size
+ CODE_GEN_ALIGN
- 1) & ~(CODE_GEN_ALIGN
- 1));
660 /* check next page if needed */
661 virt_page2
= (pc
+ tb
->size
- 1) & TARGET_PAGE_MASK
;
663 if ((pc
& TARGET_PAGE_MASK
) != virt_page2
) {
664 phys_page2
= get_phys_addr_code(env
, virt_page2
);
666 tb_link_phys(tb
, phys_pc
, phys_page2
);
670 /* invalidate all TBs which intersect with the target physical page
671 starting in range [start;end[. NOTE: start and end must refer to
672 the same physical page. 'is_cpu_write_access' should be true if called
673 from a real cpu write access: the virtual CPU will exit the current
674 TB if code is modified inside this TB. */
675 void tb_invalidate_phys_page_range(target_ulong start
, target_ulong end
,
676 int is_cpu_write_access
)
678 int n
, current_tb_modified
, current_tb_not_found
, current_flags
;
679 CPUState
*env
= cpu_single_env
;
681 TranslationBlock
*tb
, *tb_next
, *current_tb
, *saved_tb
;
682 target_ulong tb_start
, tb_end
;
683 target_ulong current_pc
, current_cs_base
;
685 p
= page_find(start
>> TARGET_PAGE_BITS
);
688 if (!p
->code_bitmap
&&
689 ++p
->code_write_count
>= SMC_BITMAP_USE_THRESHOLD
&&
690 is_cpu_write_access
) {
691 /* build code bitmap */
692 build_page_bitmap(p
);
695 /* we remove all the TBs in the range [start, end[ */
696 /* XXX: see if in some cases it could be faster to invalidate all the code */
697 current_tb_not_found
= is_cpu_write_access
;
698 current_tb_modified
= 0;
699 current_tb
= NULL
; /* avoid warning */
700 current_pc
= 0; /* avoid warning */
701 current_cs_base
= 0; /* avoid warning */
702 current_flags
= 0; /* avoid warning */
706 tb
= (TranslationBlock
*)((long)tb
& ~3);
707 tb_next
= tb
->page_next
[n
];
708 /* NOTE: this is subtle as a TB may span two physical pages */
710 /* NOTE: tb_end may be after the end of the page, but
711 it is not a problem */
712 tb_start
= tb
->page_addr
[0] + (tb
->pc
& ~TARGET_PAGE_MASK
);
713 tb_end
= tb_start
+ tb
->size
;
715 tb_start
= tb
->page_addr
[1];
716 tb_end
= tb_start
+ ((tb
->pc
+ tb
->size
) & ~TARGET_PAGE_MASK
);
718 if (!(tb_end
<= start
|| tb_start
>= end
)) {
719 #ifdef TARGET_HAS_PRECISE_SMC
720 if (current_tb_not_found
) {
721 current_tb_not_found
= 0;
723 if (env
->mem_write_pc
) {
724 /* now we have a real cpu fault */
725 current_tb
= tb_find_pc(env
->mem_write_pc
);
728 if (current_tb
== tb
&&
729 !(current_tb
->cflags
& CF_SINGLE_INSN
)) {
730 /* If we are modifying the current TB, we must stop
731 its execution. We could be more precise by checking
732 that the modification is after the current PC, but it
733 would require a specialized function to partially
734 restore the CPU state */
736 current_tb_modified
= 1;
737 cpu_restore_state(current_tb
, env
,
738 env
->mem_write_pc
, NULL
);
739 #if defined(TARGET_I386)
740 current_flags
= env
->hflags
;
741 current_flags
|= (env
->eflags
& (IOPL_MASK
| TF_MASK
| VM_MASK
));
742 current_cs_base
= (target_ulong
)env
->segs
[R_CS
].base
;
743 current_pc
= current_cs_base
+ env
->eip
;
745 #error unsupported CPU
748 #endif /* TARGET_HAS_PRECISE_SMC */
749 /* we need to do that to handle the case where a signal
750 occurs while doing tb_phys_invalidate() */
753 saved_tb
= env
->current_tb
;
754 env
->current_tb
= NULL
;
756 tb_phys_invalidate(tb
, -1);
758 env
->current_tb
= saved_tb
;
759 if (env
->interrupt_request
&& env
->current_tb
)
760 cpu_interrupt(env
, env
->interrupt_request
);
765 #if !defined(CONFIG_USER_ONLY)
766 /* if no code remaining, no need to continue to use slow writes */
768 invalidate_page_bitmap(p
);
769 if (is_cpu_write_access
) {
770 tlb_unprotect_code_phys(env
, start
, env
->mem_write_vaddr
);
774 #ifdef TARGET_HAS_PRECISE_SMC
775 if (current_tb_modified
) {
776 /* we generate a block containing just the instruction
777 modifying the memory. It will ensure that it cannot modify
779 env
->current_tb
= NULL
;
780 tb_gen_code(env
, current_pc
, current_cs_base
, current_flags
,
782 cpu_resume_from_signal(env
, NULL
);
787 /* len must be <= 8 and start must be a multiple of len */
788 static inline void tb_invalidate_phys_page_fast(target_ulong start
, int len
)
795 fprintf(logfile
, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
796 cpu_single_env
->mem_write_vaddr
, len
,
798 cpu_single_env
->eip
+ (long)cpu_single_env
->segs
[R_CS
].base
);
802 p
= page_find(start
>> TARGET_PAGE_BITS
);
805 if (p
->code_bitmap
) {
806 offset
= start
& ~TARGET_PAGE_MASK
;
807 b
= p
->code_bitmap
[offset
>> 3] >> (offset
& 7);
808 if (b
& ((1 << len
) - 1))
812 tb_invalidate_phys_page_range(start
, start
+ len
, 1);
816 #if !defined(CONFIG_SOFTMMU)
817 static void tb_invalidate_phys_page(target_ulong addr
,
818 unsigned long pc
, void *puc
)
820 int n
, current_flags
, current_tb_modified
;
821 target_ulong current_pc
, current_cs_base
;
823 TranslationBlock
*tb
, *current_tb
;
824 #ifdef TARGET_HAS_PRECISE_SMC
825 CPUState
*env
= cpu_single_env
;
828 addr
&= TARGET_PAGE_MASK
;
829 p
= page_find(addr
>> TARGET_PAGE_BITS
);
833 current_tb_modified
= 0;
835 current_pc
= 0; /* avoid warning */
836 current_cs_base
= 0; /* avoid warning */
837 current_flags
= 0; /* avoid warning */
838 #ifdef TARGET_HAS_PRECISE_SMC
840 current_tb
= tb_find_pc(pc
);
845 tb
= (TranslationBlock
*)((long)tb
& ~3);
846 #ifdef TARGET_HAS_PRECISE_SMC
847 if (current_tb
== tb
&&
848 !(current_tb
->cflags
& CF_SINGLE_INSN
)) {
849 /* If we are modifying the current TB, we must stop
850 its execution. We could be more precise by checking
851 that the modification is after the current PC, but it
852 would require a specialized function to partially
853 restore the CPU state */
855 current_tb_modified
= 1;
856 cpu_restore_state(current_tb
, env
, pc
, puc
);
857 #if defined(TARGET_I386)
858 current_flags
= env
->hflags
;
859 current_flags
|= (env
->eflags
& (IOPL_MASK
| TF_MASK
| VM_MASK
));
860 current_cs_base
= (target_ulong
)env
->segs
[R_CS
].base
;
861 current_pc
= current_cs_base
+ env
->eip
;
863 #error unsupported CPU
866 #endif /* TARGET_HAS_PRECISE_SMC */
867 tb_phys_invalidate(tb
, addr
);
868 tb
= tb
->page_next
[n
];
871 #ifdef TARGET_HAS_PRECISE_SMC
872 if (current_tb_modified
) {
873 /* we generate a block containing just the instruction
874 modifying the memory. It will ensure that it cannot modify
876 env
->current_tb
= NULL
;
877 tb_gen_code(env
, current_pc
, current_cs_base
, current_flags
,
879 cpu_resume_from_signal(env
, puc
);
885 /* add the tb in the target page and protect it if necessary */
886 static inline void tb_alloc_page(TranslationBlock
*tb
,
887 unsigned int n
, target_ulong page_addr
)
890 TranslationBlock
*last_first_tb
;
892 tb
->page_addr
[n
] = page_addr
;
893 p
= page_find_alloc(page_addr
>> TARGET_PAGE_BITS
);
894 tb
->page_next
[n
] = p
->first_tb
;
895 last_first_tb
= p
->first_tb
;
896 p
->first_tb
= (TranslationBlock
*)((long)tb
| n
);
897 invalidate_page_bitmap(p
);
899 #if defined(TARGET_HAS_SMC) || 1
901 #if defined(CONFIG_USER_ONLY)
902 if (p
->flags
& PAGE_WRITE
) {
907 /* force the host page as non writable (writes will have a
908 page fault + mprotect overhead) */
909 page_addr
&= qemu_host_page_mask
;
911 for(addr
= page_addr
; addr
< page_addr
+ qemu_host_page_size
;
912 addr
+= TARGET_PAGE_SIZE
) {
914 p2
= page_find (addr
>> TARGET_PAGE_BITS
);
918 p2
->flags
&= ~PAGE_WRITE
;
919 page_get_flags(addr
);
921 mprotect(g2h(page_addr
), qemu_host_page_size
,
922 (prot
& PAGE_BITS
) & ~PAGE_WRITE
);
923 #ifdef DEBUG_TB_INVALIDATE
924 printf("protecting code page: 0x" TARGET_FMT_lx
"\n",
929 /* if some code is already present, then the pages are already
930 protected. So we handle the case where only the first TB is
931 allocated in a physical page */
932 if (!last_first_tb
) {
933 tlb_protect_code(page_addr
);
937 #endif /* TARGET_HAS_SMC */
940 /* Allocate a new translation block. Flush the translation buffer if
941 too many translation blocks or too much generated code. */
942 TranslationBlock
*tb_alloc(target_ulong pc
)
944 TranslationBlock
*tb
;
946 if (nb_tbs
>= CODE_GEN_MAX_BLOCKS
||
947 (code_gen_ptr
- code_gen_buffer
) >= CODE_GEN_BUFFER_MAX_SIZE
)
955 /* add a new TB and link it to the physical page tables. phys_page2 is
956 (-1) to indicate that only one page contains the TB. */
957 void tb_link_phys(TranslationBlock
*tb
,
958 target_ulong phys_pc
, target_ulong phys_page2
)
961 TranslationBlock
**ptb
;
963 /* add in the physical hash table */
964 h
= tb_phys_hash_func(phys_pc
);
965 ptb
= &tb_phys_hash
[h
];
966 tb
->phys_hash_next
= *ptb
;
969 /* add in the page list */
970 tb_alloc_page(tb
, 0, phys_pc
& TARGET_PAGE_MASK
);
971 if (phys_page2
!= -1)
972 tb_alloc_page(tb
, 1, phys_page2
);
974 tb
->page_addr
[1] = -1;
976 tb
->jmp_first
= (TranslationBlock
*)((long)tb
| 2);
977 tb
->jmp_next
[0] = NULL
;
978 tb
->jmp_next
[1] = NULL
;
980 /* init original jump addresses */
981 if (tb
->tb_next_offset
[0] != 0xffff)
982 tb_reset_jump(tb
, 0);
983 if (tb
->tb_next_offset
[1] != 0xffff)
984 tb_reset_jump(tb
, 1);
986 #ifdef DEBUG_TB_CHECK
991 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
992 tb[1].tc_ptr. Return NULL if not found */
993 TranslationBlock
*tb_find_pc(unsigned long tc_ptr
)
997 TranslationBlock
*tb
;
1001 if (tc_ptr
< (unsigned long)code_gen_buffer
||
1002 tc_ptr
>= (unsigned long)code_gen_ptr
)
1004 /* binary search (cf Knuth) */
1007 while (m_min
<= m_max
) {
1008 m
= (m_min
+ m_max
) >> 1;
1010 v
= (unsigned long)tb
->tc_ptr
;
1013 else if (tc_ptr
< v
) {
1022 static void tb_reset_jump_recursive(TranslationBlock
*tb
);
1024 static inline void tb_reset_jump_recursive2(TranslationBlock
*tb
, int n
)
1026 TranslationBlock
*tb1
, *tb_next
, **ptb
;
1029 tb1
= tb
->jmp_next
[n
];
1031 /* find head of list */
1034 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
1037 tb1
= tb1
->jmp_next
[n1
];
1039 /* we are now sure now that tb jumps to tb1 */
1042 /* remove tb from the jmp_first list */
1043 ptb
= &tb_next
->jmp_first
;
1047 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
1048 if (n1
== n
&& tb1
== tb
)
1050 ptb
= &tb1
->jmp_next
[n1
];
1052 *ptb
= tb
->jmp_next
[n
];
1053 tb
->jmp_next
[n
] = NULL
;
1055 /* suppress the jump to next tb in generated code */
1056 tb_reset_jump(tb
, n
);
1058 /* suppress jumps in the tb on which we could have jumped */
1059 tb_reset_jump_recursive(tb_next
);
1063 static void tb_reset_jump_recursive(TranslationBlock
*tb
)
1065 tb_reset_jump_recursive2(tb
, 0);
1066 tb_reset_jump_recursive2(tb
, 1);
1069 #if defined(TARGET_HAS_ICE)
1070 static void breakpoint_invalidate(CPUState
*env
, target_ulong pc
)
1072 target_phys_addr_t addr
;
1074 ram_addr_t ram_addr
;
1077 addr
= cpu_get_phys_page_debug(env
, pc
);
1078 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
1080 pd
= IO_MEM_UNASSIGNED
;
1082 pd
= p
->phys_offset
;
1084 ram_addr
= (pd
& TARGET_PAGE_MASK
) | (pc
& ~TARGET_PAGE_MASK
);
1085 tb_invalidate_phys_page_range(ram_addr
, ram_addr
+ 1, 0);
1089 /* Add a watchpoint. */
1090 int cpu_watchpoint_insert(CPUState
*env
, target_ulong addr
)
1094 for (i
= 0; i
< env
->nb_watchpoints
; i
++) {
1095 if (addr
== env
->watchpoint
[i
].vaddr
)
1098 if (env
->nb_watchpoints
>= MAX_WATCHPOINTS
)
1101 i
= env
->nb_watchpoints
++;
1102 env
->watchpoint
[i
].vaddr
= addr
;
1103 tlb_flush_page(env
, addr
);
1104 /* FIXME: This flush is needed because of the hack to make memory ops
1105 terminate the TB. It can be removed once the proper IO trap and
1106 re-execute bits are in. */
1111 /* Remove a watchpoint. */
1112 int cpu_watchpoint_remove(CPUState
*env
, target_ulong addr
)
1116 for (i
= 0; i
< env
->nb_watchpoints
; i
++) {
1117 if (addr
== env
->watchpoint
[i
].vaddr
) {
1118 env
->nb_watchpoints
--;
1119 env
->watchpoint
[i
] = env
->watchpoint
[env
->nb_watchpoints
];
1120 tlb_flush_page(env
, addr
);
1127 /* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1128 breakpoint is reached */
1129 int cpu_breakpoint_insert(CPUState
*env
, target_ulong pc
)
1131 #if defined(TARGET_HAS_ICE)
1134 for(i
= 0; i
< env
->nb_breakpoints
; i
++) {
1135 if (env
->breakpoints
[i
] == pc
)
1139 if (env
->nb_breakpoints
>= MAX_BREAKPOINTS
)
1141 env
->breakpoints
[env
->nb_breakpoints
++] = pc
;
1144 kvm_update_debugger(env
);
1146 breakpoint_invalidate(env
, pc
);
1153 /* remove a breakpoint */
1154 int cpu_breakpoint_remove(CPUState
*env
, target_ulong pc
)
1156 #if defined(TARGET_HAS_ICE)
1158 for(i
= 0; i
< env
->nb_breakpoints
; i
++) {
1159 if (env
->breakpoints
[i
] == pc
)
1164 env
->nb_breakpoints
--;
1165 if (i
< env
->nb_breakpoints
)
1166 env
->breakpoints
[i
] = env
->breakpoints
[env
->nb_breakpoints
];
1169 kvm_update_debugger(env
);
1171 breakpoint_invalidate(env
, pc
);
1178 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1179 CPU loop after each instruction */
1180 void cpu_single_step(CPUState
*env
, int enabled
)
1182 #if defined(TARGET_HAS_ICE)
1183 if (env
->singlestep_enabled
!= enabled
) {
1184 env
->singlestep_enabled
= enabled
;
1185 /* must flush all the translated code to avoid inconsistancies */
1186 /* XXX: only flush what is necessary */
1190 kvm_update_debugger(env
);
1194 /* enable or disable low levels log */
1195 void cpu_set_log(int log_flags
)
1197 loglevel
= log_flags
;
1198 if (loglevel
&& !logfile
) {
1199 logfile
= fopen(logfilename
, log_append
? "a" : "w");
1201 perror(logfilename
);
1204 #if !defined(CONFIG_SOFTMMU)
1205 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1207 static uint8_t logfile_buf
[4096];
1208 setvbuf(logfile
, logfile_buf
, _IOLBF
, sizeof(logfile_buf
));
1211 setvbuf(logfile
, NULL
, _IOLBF
, 0);
1215 if (!loglevel
&& logfile
) {
1221 void cpu_set_log_filename(const char *filename
)
1223 logfilename
= strdup(filename
);
1228 cpu_set_log(loglevel
);
1231 /* mask must never be zero, except for A20 change call */
1232 void cpu_interrupt(CPUState
*env
, int mask
)
1234 TranslationBlock
*tb
;
1235 static int interrupt_lock
;
1237 env
->interrupt_request
|= mask
;
1238 if (kvm_enabled() && !qemu_kvm_irqchip_in_kernel())
1239 kvm_update_interrupt_request(env
);
1241 /* if the cpu is currently executing code, we must unlink it and
1242 all the potentially executing TB */
1243 tb
= env
->current_tb
;
1244 if (tb
&& !testandset(&interrupt_lock
)) {
1245 env
->current_tb
= NULL
;
1246 tb_reset_jump_recursive(tb
);
1251 void cpu_reset_interrupt(CPUState
*env
, int mask
)
1253 env
->interrupt_request
&= ~mask
;
1256 CPULogItem cpu_log_items
[] = {
1257 { CPU_LOG_TB_OUT_ASM
, "out_asm",
1258 "show generated host assembly code for each compiled TB" },
1259 { CPU_LOG_TB_IN_ASM
, "in_asm",
1260 "show target assembly code for each compiled TB" },
1261 { CPU_LOG_TB_OP
, "op",
1262 "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
1264 { CPU_LOG_TB_OP_OPT
, "op_opt",
1265 "show micro ops after optimization for each compiled TB" },
1267 { CPU_LOG_INT
, "int",
1268 "show interrupts/exceptions in short format" },
1269 { CPU_LOG_EXEC
, "exec",
1270 "show trace before each executed TB (lots of logs)" },
1271 { CPU_LOG_TB_CPU
, "cpu",
1272 "show CPU state before block translation" },
1274 { CPU_LOG_PCALL
, "pcall",
1275 "show protected mode far calls/returns/exceptions" },
1278 { CPU_LOG_IOPORT
, "ioport",
1279 "show all i/o ports accesses" },
/* Return non-zero iff the first 'n' bytes of 's1' spell out exactly the
   whole of 's2' (same length and same bytes). Used to match one
   comma-separated token of a log-mask string against an item name. */
static int cmp1(const char *s1, int n, const char *s2)
{
    return strlen(s2) == (size_t)n && memcmp(s1, s2, n) == 0;
}
1291 /* takes a comma separated list of log masks. Return 0 if error. */
1292 int cpu_str_to_log_mask(const char *str
)
1301 p1
= strchr(p
, ',');
1304 if(cmp1(p
,p1
-p
,"all")) {
1305 for(item
= cpu_log_items
; item
->mask
!= 0; item
++) {
1309 for(item
= cpu_log_items
; item
->mask
!= 0; item
++) {
1310 if (cmp1(p
, p1
- p
, item
->name
))
1324 void cpu_abort(CPUState
*env
, const char *fmt
, ...)
1331 fprintf(stderr
, "qemu: fatal: ");
1332 vfprintf(stderr
, fmt
, ap
);
1333 fprintf(stderr
, "\n");
1335 if(env
->intercept
& INTERCEPT_SVM_MASK
) {
1336 /* most probably the virtual machine should not
1337 be shut down but rather caught by the VMM */
1338 vmexit(SVM_EXIT_SHUTDOWN
, 0);
1340 cpu_dump_state(env
, stderr
, fprintf
, X86_DUMP_FPU
| X86_DUMP_CCOP
);
1342 cpu_dump_state(env
, stderr
, fprintf
, 0);
1345 fprintf(logfile
, "qemu: fatal: ");
1346 vfprintf(logfile
, fmt
, ap2
);
1347 fprintf(logfile
, "\n");
1349 cpu_dump_state(env
, logfile
, fprintf
, X86_DUMP_FPU
| X86_DUMP_CCOP
);
1351 cpu_dump_state(env
, logfile
, fprintf
, 0);
1361 CPUState
*cpu_copy(CPUState
*env
)
1363 CPUState
*new_env
= cpu_init(env
->cpu_model_str
);
1364 /* preserve chaining and index */
1365 CPUState
*next_cpu
= new_env
->next_cpu
;
1366 int cpu_index
= new_env
->cpu_index
;
1367 memcpy(new_env
, env
, sizeof(CPUState
));
1368 new_env
->next_cpu
= next_cpu
;
1369 new_env
->cpu_index
= cpu_index
;
1373 #if !defined(CONFIG_USER_ONLY)
1375 /* NOTE: if flush_global is true, also flush global entries (not
1377 void tlb_flush(CPUState
*env
, int flush_global
)
1381 #if defined(DEBUG_TLB)
1382 printf("tlb_flush:\n");
1384 /* must reset current TB so that interrupts cannot modify the
1385 links while we are modifying them */
1386 env
->current_tb
= NULL
;
1388 for(i
= 0; i
< CPU_TLB_SIZE
; i
++) {
1389 env
->tlb_table
[0][i
].addr_read
= -1;
1390 env
->tlb_table
[0][i
].addr_write
= -1;
1391 env
->tlb_table
[0][i
].addr_code
= -1;
1392 env
->tlb_table
[1][i
].addr_read
= -1;
1393 env
->tlb_table
[1][i
].addr_write
= -1;
1394 env
->tlb_table
[1][i
].addr_code
= -1;
1395 #if (NB_MMU_MODES >= 3)
1396 env
->tlb_table
[2][i
].addr_read
= -1;
1397 env
->tlb_table
[2][i
].addr_write
= -1;
1398 env
->tlb_table
[2][i
].addr_code
= -1;
1399 #if (NB_MMU_MODES == 4)
1400 env
->tlb_table
[3][i
].addr_read
= -1;
1401 env
->tlb_table
[3][i
].addr_write
= -1;
1402 env
->tlb_table
[3][i
].addr_code
= -1;
1407 memset (env
->tb_jmp_cache
, 0, TB_JMP_CACHE_SIZE
* sizeof (void *));
1409 #if !defined(CONFIG_SOFTMMU)
1410 munmap((void *)MMAP_AREA_START
, MMAP_AREA_END
- MMAP_AREA_START
);
1413 if (env
->kqemu_enabled
) {
1414 kqemu_flush(env
, flush_global
);
1420 static inline void tlb_flush_entry(CPUTLBEntry
*tlb_entry
, target_ulong addr
)
1422 if (addr
== (tlb_entry
->addr_read
&
1423 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
)) ||
1424 addr
== (tlb_entry
->addr_write
&
1425 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
)) ||
1426 addr
== (tlb_entry
->addr_code
&
1427 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
))) {
1428 tlb_entry
->addr_read
= -1;
1429 tlb_entry
->addr_write
= -1;
1430 tlb_entry
->addr_code
= -1;
1434 void tlb_flush_page(CPUState
*env
, target_ulong addr
)
1437 TranslationBlock
*tb
;
1439 #if defined(DEBUG_TLB)
1440 printf("tlb_flush_page: " TARGET_FMT_lx
"\n", addr
);
1442 /* must reset current TB so that interrupts cannot modify the
1443 links while we are modifying them */
1444 env
->current_tb
= NULL
;
1446 addr
&= TARGET_PAGE_MASK
;
1447 i
= (addr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
1448 tlb_flush_entry(&env
->tlb_table
[0][i
], addr
);
1449 tlb_flush_entry(&env
->tlb_table
[1][i
], addr
);
1450 #if (NB_MMU_MODES >= 3)
1451 tlb_flush_entry(&env
->tlb_table
[2][i
], addr
);
1452 #if (NB_MMU_MODES == 4)
1453 tlb_flush_entry(&env
->tlb_table
[3][i
], addr
);
1457 /* Discard jump cache entries for any tb which might potentially
1458 overlap the flushed page. */
1459 i
= tb_jmp_cache_hash_page(addr
- TARGET_PAGE_SIZE
);
1460 memset (&env
->tb_jmp_cache
[i
], 0, TB_JMP_PAGE_SIZE
* sizeof(tb
));
1462 i
= tb_jmp_cache_hash_page(addr
);
1463 memset (&env
->tb_jmp_cache
[i
], 0, TB_JMP_PAGE_SIZE
* sizeof(tb
));
1465 #if !defined(CONFIG_SOFTMMU)
1466 if (addr
< MMAP_AREA_END
)
1467 munmap((void *)addr
, TARGET_PAGE_SIZE
);
1470 if (env
->kqemu_enabled
) {
1471 kqemu_flush_page(env
, addr
);
1476 /* update the TLBs so that writes to code in the virtual page 'addr'
1478 static void tlb_protect_code(ram_addr_t ram_addr
)
1480 cpu_physical_memory_reset_dirty(ram_addr
,
1481 ram_addr
+ TARGET_PAGE_SIZE
,
1485 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1486 tested for self modifying code */
1487 static void tlb_unprotect_code_phys(CPUState
*env
, ram_addr_t ram_addr
,
1490 phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
] |= CODE_DIRTY_FLAG
;
1493 static inline void tlb_reset_dirty_range(CPUTLBEntry
*tlb_entry
,
1494 unsigned long start
, unsigned long length
)
1497 if ((tlb_entry
->addr_write
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
) {
1498 addr
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) + tlb_entry
->addend
;
1499 if ((addr
- start
) < length
) {
1500 tlb_entry
->addr_write
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) | IO_MEM_NOTDIRTY
;
1505 void cpu_physical_memory_reset_dirty(ram_addr_t start
, ram_addr_t end
,
1509 unsigned long length
, start1
;
1513 start
&= TARGET_PAGE_MASK
;
1514 end
= TARGET_PAGE_ALIGN(end
);
1516 length
= end
- start
;
1519 len
= length
>> TARGET_PAGE_BITS
;
1521 /* XXX: should not depend on cpu context */
1523 if (env
->kqemu_enabled
) {
1526 for(i
= 0; i
< len
; i
++) {
1527 kqemu_set_notdirty(env
, addr
);
1528 addr
+= TARGET_PAGE_SIZE
;
1532 mask
= ~dirty_flags
;
1533 p
= phys_ram_dirty
+ (start
>> TARGET_PAGE_BITS
);
1534 for(i
= 0; i
< len
; i
++)
1537 /* we modify the TLB cache so that the dirty bit will be set again
1538 when accessing the range */
1539 start1
= start
+ (unsigned long)phys_ram_base
;
1540 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
1541 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1542 tlb_reset_dirty_range(&env
->tlb_table
[0][i
], start1
, length
);
1543 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1544 tlb_reset_dirty_range(&env
->tlb_table
[1][i
], start1
, length
);
1545 #if (NB_MMU_MODES >= 3)
1546 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1547 tlb_reset_dirty_range(&env
->tlb_table
[2][i
], start1
, length
);
1548 #if (NB_MMU_MODES == 4)
1549 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1550 tlb_reset_dirty_range(&env
->tlb_table
[3][i
], start1
, length
);
1555 #if !defined(CONFIG_SOFTMMU)
1556 /* XXX: this is expensive */
1562 for(i
= 0; i
< L1_SIZE
; i
++) {
1565 addr
= i
<< (TARGET_PAGE_BITS
+ L2_BITS
);
1566 for(j
= 0; j
< L2_SIZE
; j
++) {
1567 if (p
->valid_tag
== virt_valid_tag
&&
1568 p
->phys_addr
>= start
&& p
->phys_addr
< end
&&
1569 (p
->prot
& PROT_WRITE
)) {
1570 if (addr
< MMAP_AREA_END
) {
1571 mprotect((void *)addr
, TARGET_PAGE_SIZE
,
1572 p
->prot
& ~PROT_WRITE
);
1575 addr
+= TARGET_PAGE_SIZE
;
1584 int cpu_physical_memory_set_dirty_tracking(int enable
)
1589 r
= kvm_physical_memory_set_dirty_tracking(enable
);
1590 in_migration
= enable
;
1594 int cpu_physical_memory_get_dirty_tracking(void)
1596 return in_migration
;
1599 static inline void tlb_update_dirty(CPUTLBEntry
*tlb_entry
)
1601 ram_addr_t ram_addr
;
1603 if ((tlb_entry
->addr_write
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
) {
1604 ram_addr
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) +
1605 tlb_entry
->addend
- (unsigned long)phys_ram_base
;
1606 if (!cpu_physical_memory_is_dirty(ram_addr
)) {
1607 tlb_entry
->addr_write
|= IO_MEM_NOTDIRTY
;
1612 /* update the TLB according to the current state of the dirty bits */
1613 void cpu_tlb_update_dirty(CPUState
*env
)
1616 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1617 tlb_update_dirty(&env
->tlb_table
[0][i
]);
1618 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1619 tlb_update_dirty(&env
->tlb_table
[1][i
]);
1620 #if (NB_MMU_MODES >= 3)
1621 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1622 tlb_update_dirty(&env
->tlb_table
[2][i
]);
1623 #if (NB_MMU_MODES == 4)
1624 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1625 tlb_update_dirty(&env
->tlb_table
[3][i
]);
1630 static inline void tlb_set_dirty1(CPUTLBEntry
*tlb_entry
,
1631 unsigned long start
)
1634 if ((tlb_entry
->addr_write
& ~TARGET_PAGE_MASK
) == IO_MEM_NOTDIRTY
) {
1635 addr
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) + tlb_entry
->addend
;
1636 if (addr
== start
) {
1637 tlb_entry
->addr_write
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) | IO_MEM_RAM
;
1642 /* update the TLB corresponding to virtual page vaddr and phys addr
1643 addr so that it is no longer dirty */
1644 static inline void tlb_set_dirty(CPUState
*env
,
1645 unsigned long addr
, target_ulong vaddr
)
1649 addr
&= TARGET_PAGE_MASK
;
1650 i
= (vaddr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
1651 tlb_set_dirty1(&env
->tlb_table
[0][i
], addr
);
1652 tlb_set_dirty1(&env
->tlb_table
[1][i
], addr
);
1653 #if (NB_MMU_MODES >= 3)
1654 tlb_set_dirty1(&env
->tlb_table
[2][i
], addr
);
1655 #if (NB_MMU_MODES == 4)
1656 tlb_set_dirty1(&env
->tlb_table
[3][i
], addr
);
1661 /* add a new TLB entry. At most one entry for a given virtual address
1662 is permitted. Return 0 if OK or 2 if the page could not be mapped
1663 (can only happen in non SOFTMMU mode for I/O pages or pages
1664 conflicting with the host address space). */
1665 int tlb_set_page_exec(CPUState
*env
, target_ulong vaddr
,
1666 target_phys_addr_t paddr
, int prot
,
1667 int mmu_idx
, int is_softmmu
)
1672 target_ulong address
;
1673 target_phys_addr_t addend
;
1678 p
= phys_page_find(paddr
>> TARGET_PAGE_BITS
);
1680 pd
= IO_MEM_UNASSIGNED
;
1682 pd
= p
->phys_offset
;
1684 #if defined(DEBUG_TLB)
1685 printf("tlb_set_page: vaddr=" TARGET_FMT_lx
" paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1686 vaddr
, (int)paddr
, prot
, mmu_idx
, is_softmmu
, pd
);
1690 #if !defined(CONFIG_SOFTMMU)
1694 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
&& !(pd
& IO_MEM_ROMD
)) {
1695 /* IO memory case */
1696 address
= vaddr
| pd
;
1699 /* standard memory */
1701 addend
= (unsigned long)phys_ram_base
+ (pd
& TARGET_PAGE_MASK
);
1704 /* Make accesses to pages with watchpoints go via the
1705 watchpoint trap routines. */
1706 for (i
= 0; i
< env
->nb_watchpoints
; i
++) {
1707 if (vaddr
== (env
->watchpoint
[i
].vaddr
& TARGET_PAGE_MASK
)) {
1708 if (address
& ~TARGET_PAGE_MASK
) {
1709 env
->watchpoint
[i
].addend
= 0;
1710 address
= vaddr
| io_mem_watch
;
1712 env
->watchpoint
[i
].addend
= pd
- paddr
+
1713 (unsigned long) phys_ram_base
;
1714 /* TODO: Figure out how to make read watchpoints coexist
1716 pd
= (pd
& TARGET_PAGE_MASK
) | io_mem_watch
| IO_MEM_ROMD
;
1721 index
= (vaddr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
1723 te
= &env
->tlb_table
[mmu_idx
][index
];
1724 te
->addend
= addend
;
1725 if (prot
& PAGE_READ
) {
1726 te
->addr_read
= address
;
1730 if (prot
& PAGE_EXEC
) {
1731 te
->addr_code
= address
;
1735 if (prot
& PAGE_WRITE
) {
1736 if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_ROM
||
1737 (pd
& IO_MEM_ROMD
)) {
1738 /* write access calls the I/O callback */
1739 te
->addr_write
= vaddr
|
1740 (pd
& ~(TARGET_PAGE_MASK
| IO_MEM_ROMD
));
1741 } else if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
&&
1742 !cpu_physical_memory_is_dirty(pd
)) {
1743 te
->addr_write
= vaddr
| IO_MEM_NOTDIRTY
;
1745 te
->addr_write
= address
;
1748 te
->addr_write
= -1;
1751 #if !defined(CONFIG_SOFTMMU)
1753 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
) {
1754 /* IO access: no mapping is done as it will be handled by the
1756 if (!(env
->hflags
& HF_SOFTMMU_MASK
))
1761 if (vaddr
>= MMAP_AREA_END
) {
1764 if (prot
& PROT_WRITE
) {
1765 if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_ROM
||
1766 #if defined(TARGET_HAS_SMC) || 1
1769 ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
&&
1770 !cpu_physical_memory_is_dirty(pd
))) {
1771 /* ROM: we do as if code was inside */
1772 /* if code is present, we only map as read only and save the
1776 vp
= virt_page_find_alloc(vaddr
>> TARGET_PAGE_BITS
, 1);
1779 vp
->valid_tag
= virt_valid_tag
;
1780 prot
&= ~PAGE_WRITE
;
1783 map_addr
= mmap((void *)vaddr
, TARGET_PAGE_SIZE
, prot
,
1784 MAP_SHARED
| MAP_FIXED
, phys_ram_fd
, (pd
& TARGET_PAGE_MASK
));
1785 if (map_addr
== MAP_FAILED
) {
1786 cpu_abort(env
, "mmap failed when mapped physical address 0x%08x to virtual address 0x%08x\n",
1796 /* called from signal handler: invalidate the code and unprotect the
1797 page. Return TRUE if the fault was succesfully handled. */
1798 int page_unprotect(target_ulong addr
, unsigned long pc
, void *puc
)
1800 #if !defined(CONFIG_SOFTMMU)
1803 #if defined(DEBUG_TLB)
1804 printf("page_unprotect: addr=0x%08x\n", addr
);
1806 addr
&= TARGET_PAGE_MASK
;
1808 /* if it is not mapped, no need to worry here */
1809 if (addr
>= MMAP_AREA_END
)
1811 vp
= virt_page_find(addr
>> TARGET_PAGE_BITS
);
1814 /* NOTE: in this case, validate_tag is _not_ tested as it
1815 validates only the code TLB */
1816 if (vp
->valid_tag
!= virt_valid_tag
)
1818 if (!(vp
->prot
& PAGE_WRITE
))
1820 #if defined(DEBUG_TLB)
1821 printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
1822 addr
, vp
->phys_addr
, vp
->prot
);
1824 if (mprotect((void *)addr
, TARGET_PAGE_SIZE
, vp
->prot
) < 0)
1825 cpu_abort(cpu_single_env
, "error mprotect addr=0x%lx prot=%d\n",
1826 (unsigned long)addr
, vp
->prot
);
1827 /* set the dirty bit */
1828 phys_ram_dirty
[vp
->phys_addr
>> TARGET_PAGE_BITS
] = 0xff;
1829 /* flush the code inside */
1830 tb_invalidate_phys_page(vp
->phys_addr
, pc
, puc
);
1839 void tlb_flush(CPUState
*env
, int flush_global
)
1843 void tlb_flush_page(CPUState
*env
, target_ulong addr
)
1847 int tlb_set_page_exec(CPUState
*env
, target_ulong vaddr
,
1848 target_phys_addr_t paddr
, int prot
,
1849 int mmu_idx
, int is_softmmu
)
1854 /* dump memory mappings */
1855 void page_dump(FILE *f
)
1857 unsigned long start
, end
;
1858 int i
, j
, prot
, prot1
;
1861 fprintf(f
, "%-8s %-8s %-8s %s\n",
1862 "start", "end", "size", "prot");
1866 for(i
= 0; i
<= L1_SIZE
; i
++) {
1871 for(j
= 0;j
< L2_SIZE
; j
++) {
1876 if (prot1
!= prot
) {
1877 end
= (i
<< (32 - L1_BITS
)) | (j
<< TARGET_PAGE_BITS
);
1879 fprintf(f
, "%08lx-%08lx %08lx %c%c%c\n",
1880 start
, end
, end
- start
,
1881 prot
& PAGE_READ
? 'r' : '-',
1882 prot
& PAGE_WRITE
? 'w' : '-',
1883 prot
& PAGE_EXEC
? 'x' : '-');
1897 int page_get_flags(target_ulong address
)
1901 p
= page_find(address
>> TARGET_PAGE_BITS
);
1907 /* modify the flags of a page and invalidate the code if
1908 necessary. The flag PAGE_WRITE_ORG is positionned automatically
1909 depending on PAGE_WRITE */
1910 void page_set_flags(target_ulong start
, target_ulong end
, int flags
)
1915 start
= start
& TARGET_PAGE_MASK
;
1916 end
= TARGET_PAGE_ALIGN(end
);
1917 if (flags
& PAGE_WRITE
)
1918 flags
|= PAGE_WRITE_ORG
;
1919 spin_lock(&tb_lock
);
1920 for(addr
= start
; addr
< end
; addr
+= TARGET_PAGE_SIZE
) {
1921 p
= page_find_alloc(addr
>> TARGET_PAGE_BITS
);
1922 /* if the write protection is set, then we invalidate the code
1924 if (!(p
->flags
& PAGE_WRITE
) &&
1925 (flags
& PAGE_WRITE
) &&
1927 tb_invalidate_phys_page(addr
, 0, NULL
);
1931 spin_unlock(&tb_lock
);
1934 int page_check_range(target_ulong start
, target_ulong len
, int flags
)
1940 end
= TARGET_PAGE_ALIGN(start
+len
); /* must do before we loose bits in the next step */
1941 start
= start
& TARGET_PAGE_MASK
;
1944 /* we've wrapped around */
1946 for(addr
= start
; addr
< end
; addr
+= TARGET_PAGE_SIZE
) {
1947 p
= page_find(addr
>> TARGET_PAGE_BITS
);
1950 if( !(p
->flags
& PAGE_VALID
) )
1953 if ((flags
& PAGE_READ
) && !(p
->flags
& PAGE_READ
))
1955 if (flags
& PAGE_WRITE
) {
1956 if (!(p
->flags
& PAGE_WRITE_ORG
))
1958 /* unprotect the page if it was put read-only because it
1959 contains translated code */
1960 if (!(p
->flags
& PAGE_WRITE
)) {
1961 if (!page_unprotect(addr
, 0, NULL
))
1970 /* called from signal handler: invalidate the code and unprotect the
1971 page. Return TRUE if the fault was succesfully handled. */
1972 int page_unprotect(target_ulong address
, unsigned long pc
, void *puc
)
1974 unsigned int page_index
, prot
, pindex
;
1976 target_ulong host_start
, host_end
, addr
;
1978 host_start
= address
& qemu_host_page_mask
;
1979 page_index
= host_start
>> TARGET_PAGE_BITS
;
1980 p1
= page_find(page_index
);
1983 host_end
= host_start
+ qemu_host_page_size
;
1986 for(addr
= host_start
;addr
< host_end
; addr
+= TARGET_PAGE_SIZE
) {
1990 /* if the page was really writable, then we change its
1991 protection back to writable */
1992 if (prot
& PAGE_WRITE_ORG
) {
1993 pindex
= (address
- host_start
) >> TARGET_PAGE_BITS
;
1994 if (!(p1
[pindex
].flags
& PAGE_WRITE
)) {
1995 mprotect((void *)g2h(host_start
), qemu_host_page_size
,
1996 (prot
& PAGE_BITS
) | PAGE_WRITE
);
1997 p1
[pindex
].flags
|= PAGE_WRITE
;
1998 /* and since the content will be modified, we must invalidate
1999 the corresponding translated code. */
2000 tb_invalidate_phys_page(address
, pc
, puc
);
2001 #ifdef DEBUG_TB_CHECK
2002 tb_invalidate_check(address
);
2010 static inline void tlb_set_dirty(CPUState
*env
,
2011 unsigned long addr
, target_ulong vaddr
)
2014 #endif /* defined(CONFIG_USER_ONLY) */
2016 static int subpage_register (subpage_t
*mmio
, uint32_t start
, uint32_t end
,
2018 static void *subpage_init (target_phys_addr_t base
, uint32_t *phys
,
2020 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2023 if (addr > start_addr) \
2026 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2027 if (start_addr2 > 0) \
2031 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2032 end_addr2 = TARGET_PAGE_SIZE - 1; \
2034 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2035 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2040 /* register physical memory. 'size' must be a multiple of the target
2041 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2043 void cpu_register_physical_memory(target_phys_addr_t start_addr
,
2045 unsigned long phys_offset
)
2047 target_phys_addr_t addr
, end_addr
;
2050 unsigned long orig_size
= size
;
2053 size
= (size
+ TARGET_PAGE_SIZE
- 1) & TARGET_PAGE_MASK
;
2054 end_addr
= start_addr
+ (target_phys_addr_t
)size
;
2055 for(addr
= start_addr
; addr
!= end_addr
; addr
+= TARGET_PAGE_SIZE
) {
2056 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
2057 if (p
&& p
->phys_offset
!= IO_MEM_UNASSIGNED
) {
2058 unsigned long orig_memory
= p
->phys_offset
;
2059 target_phys_addr_t start_addr2
, end_addr2
;
2060 int need_subpage
= 0;
2062 CHECK_SUBPAGE(addr
, start_addr
, start_addr2
, end_addr
, end_addr2
,
2064 if (need_subpage
|| phys_offset
& IO_MEM_SUBWIDTH
) {
2065 if (!(orig_memory
& IO_MEM_SUBPAGE
)) {
2066 subpage
= subpage_init((addr
& TARGET_PAGE_MASK
),
2067 &p
->phys_offset
, orig_memory
);
2069 subpage
= io_mem_opaque
[(orig_memory
& ~TARGET_PAGE_MASK
)
2072 subpage_register(subpage
, start_addr2
, end_addr2
, phys_offset
);
2074 p
->phys_offset
= phys_offset
;
2075 if ((phys_offset
& ~TARGET_PAGE_MASK
) <= IO_MEM_ROM
||
2076 (phys_offset
& IO_MEM_ROMD
))
2077 phys_offset
+= TARGET_PAGE_SIZE
;
2080 p
= phys_page_find_alloc(addr
>> TARGET_PAGE_BITS
, 1);
2081 p
->phys_offset
= phys_offset
;
2082 if ((phys_offset
& ~TARGET_PAGE_MASK
) <= IO_MEM_ROM
||
2083 (phys_offset
& IO_MEM_ROMD
))
2084 phys_offset
+= TARGET_PAGE_SIZE
;
2086 target_phys_addr_t start_addr2
, end_addr2
;
2087 int need_subpage
= 0;
2089 CHECK_SUBPAGE(addr
, start_addr
, start_addr2
, end_addr
,
2090 end_addr2
, need_subpage
);
2092 if (need_subpage
|| phys_offset
& IO_MEM_SUBWIDTH
) {
2093 subpage
= subpage_init((addr
& TARGET_PAGE_MASK
),
2094 &p
->phys_offset
, IO_MEM_UNASSIGNED
);
2095 subpage_register(subpage
, start_addr2
, end_addr2
,
2102 /* since each CPU stores ram addresses in its TLB cache, we must
2103 reset the modified entries */
2105 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
2110 /* XXX: temporary until new memory mapping API */
2111 uint32_t cpu_get_physical_page_desc(target_phys_addr_t addr
)
2115 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
2117 return IO_MEM_UNASSIGNED
;
2118 return p
->phys_offset
;
2121 /* XXX: better than nothing */
2122 ram_addr_t
qemu_ram_alloc(unsigned long size
)
2125 if ((phys_ram_alloc_offset
+ size
) > phys_ram_size
) {
2126 fprintf(stderr
, "Not enough memory (requested_size = %lu, max memory = %d)\n",
2127 size
, phys_ram_size
);
2130 addr
= phys_ram_alloc_offset
;
2131 phys_ram_alloc_offset
= TARGET_PAGE_ALIGN(phys_ram_alloc_offset
+ size
);
2135 void qemu_ram_free(ram_addr_t addr
)
2139 static uint32_t unassigned_mem_readb(void *opaque
, target_phys_addr_t addr
)
2141 #ifdef DEBUG_UNASSIGNED
2142 printf("Unassigned mem read " TARGET_FMT_plx
"\n", addr
);
2145 do_unassigned_access(addr
, 0, 0, 0);
2147 do_unassigned_access(addr
, 0, 0, 0);
2152 static void unassigned_mem_writeb(void *opaque
, target_phys_addr_t addr
, uint32_t val
)
2154 #ifdef DEBUG_UNASSIGNED
2155 printf("Unassigned mem write " TARGET_FMT_plx
" = 0x%x\n", addr
, val
);
2158 do_unassigned_access(addr
, 1, 0, 0);
2160 do_unassigned_access(addr
, 1, 0, 0);
2164 static CPUReadMemoryFunc
*unassigned_mem_read
[3] = {
2165 unassigned_mem_readb
,
2166 unassigned_mem_readb
,
2167 unassigned_mem_readb
,
2170 static CPUWriteMemoryFunc
*unassigned_mem_write
[3] = {
2171 unassigned_mem_writeb
,
2172 unassigned_mem_writeb
,
2173 unassigned_mem_writeb
,
2176 static void notdirty_mem_writeb(void *opaque
, target_phys_addr_t addr
, uint32_t val
)
2178 unsigned long ram_addr
;
2180 ram_addr
= addr
- (unsigned long)phys_ram_base
;
2181 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2182 if (!(dirty_flags
& CODE_DIRTY_FLAG
)) {
2183 #if !defined(CONFIG_USER_ONLY)
2184 tb_invalidate_phys_page_fast(ram_addr
, 1);
2185 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2188 stb_p((uint8_t *)(long)addr
, val
);
2190 if (cpu_single_env
->kqemu_enabled
&&
2191 (dirty_flags
& KQEMU_MODIFY_PAGE_MASK
) != KQEMU_MODIFY_PAGE_MASK
)
2192 kqemu_modify_page(cpu_single_env
, ram_addr
);
2194 dirty_flags
|= (0xff & ~CODE_DIRTY_FLAG
);
2195 phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
] = dirty_flags
;
2196 /* we remove the notdirty callback only if the code has been
2198 if (dirty_flags
== 0xff)
2199 tlb_set_dirty(cpu_single_env
, addr
, cpu_single_env
->mem_write_vaddr
);
2202 static void notdirty_mem_writew(void *opaque
, target_phys_addr_t addr
, uint32_t val
)
2204 unsigned long ram_addr
;
2206 ram_addr
= addr
- (unsigned long)phys_ram_base
;
2207 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2208 if (!(dirty_flags
& CODE_DIRTY_FLAG
)) {
2209 #if !defined(CONFIG_USER_ONLY)
2210 tb_invalidate_phys_page_fast(ram_addr
, 2);
2211 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2214 stw_p((uint8_t *)(long)addr
, val
);
2216 if (cpu_single_env
->kqemu_enabled
&&
2217 (dirty_flags
& KQEMU_MODIFY_PAGE_MASK
) != KQEMU_MODIFY_PAGE_MASK
)
2218 kqemu_modify_page(cpu_single_env
, ram_addr
);
2220 dirty_flags
|= (0xff & ~CODE_DIRTY_FLAG
);
2221 phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
] = dirty_flags
;
2222 /* we remove the notdirty callback only if the code has been
2224 if (dirty_flags
== 0xff)
2225 tlb_set_dirty(cpu_single_env
, addr
, cpu_single_env
->mem_write_vaddr
);
2228 static void notdirty_mem_writel(void *opaque
, target_phys_addr_t addr
, uint32_t val
)
2230 unsigned long ram_addr
;
2232 ram_addr
= addr
- (unsigned long)phys_ram_base
;
2233 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2234 if (!(dirty_flags
& CODE_DIRTY_FLAG
)) {
2235 #if !defined(CONFIG_USER_ONLY)
2236 tb_invalidate_phys_page_fast(ram_addr
, 4);
2237 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2240 stl_p((uint8_t *)(long)addr
, val
);
2242 if (cpu_single_env
->kqemu_enabled
&&
2243 (dirty_flags
& KQEMU_MODIFY_PAGE_MASK
) != KQEMU_MODIFY_PAGE_MASK
)
2244 kqemu_modify_page(cpu_single_env
, ram_addr
);
2246 dirty_flags
|= (0xff & ~CODE_DIRTY_FLAG
);
2247 phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
] = dirty_flags
;
2248 /* we remove the notdirty callback only if the code has been
2250 if (dirty_flags
== 0xff)
2251 tlb_set_dirty(cpu_single_env
, addr
, cpu_single_env
->mem_write_vaddr
);
2254 static CPUReadMemoryFunc
*error_mem_read
[3] = {
2255 NULL
, /* never used */
2256 NULL
, /* never used */
2257 NULL
, /* never used */
2260 static CPUWriteMemoryFunc
*notdirty_mem_write
[3] = {
2261 notdirty_mem_writeb
,
2262 notdirty_mem_writew
,
2263 notdirty_mem_writel
,
2266 #if defined(CONFIG_SOFTMMU)
2267 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2268 so these check for a hit then pass through to the normal out-of-line
2270 static uint32_t watch_mem_readb(void *opaque
, target_phys_addr_t addr
)
2272 return ldub_phys(addr
);
2275 static uint32_t watch_mem_readw(void *opaque
, target_phys_addr_t addr
)
2277 return lduw_phys(addr
);
2280 static uint32_t watch_mem_readl(void *opaque
, target_phys_addr_t addr
)
2282 return ldl_phys(addr
);
2285 /* Generate a debug exception if a watchpoint has been hit.
2286 Returns the real physical address of the access. addr will be a host
2287 address in case of a RAM location. */
2288 static target_ulong
check_watchpoint(target_phys_addr_t addr
)
2290 CPUState
*env
= cpu_single_env
;
2292 target_ulong retaddr
;
2296 for (i
= 0; i
< env
->nb_watchpoints
; i
++) {
2297 watch
= env
->watchpoint
[i
].vaddr
;
2298 if (((env
->mem_write_vaddr
^ watch
) & TARGET_PAGE_MASK
) == 0) {
2299 retaddr
= addr
- env
->watchpoint
[i
].addend
;
2300 if (((addr
^ watch
) & ~TARGET_PAGE_MASK
) == 0) {
2301 cpu_single_env
->watchpoint_hit
= i
+ 1;
2302 cpu_interrupt(cpu_single_env
, CPU_INTERRUPT_DEBUG
);
2310 static void watch_mem_writeb(void *opaque
, target_phys_addr_t addr
,
2313 addr
= check_watchpoint(addr
);
2314 stb_phys(addr
, val
);
2317 static void watch_mem_writew(void *opaque
, target_phys_addr_t addr
,
2320 addr
= check_watchpoint(addr
);
2321 stw_phys(addr
, val
);
2324 static void watch_mem_writel(void *opaque
, target_phys_addr_t addr
,
2327 addr
= check_watchpoint(addr
);
2328 stl_phys(addr
, val
);
2331 static CPUReadMemoryFunc
*watch_mem_read
[3] = {
2337 static CPUWriteMemoryFunc
*watch_mem_write
[3] = {
2344 static inline uint32_t subpage_readlen (subpage_t
*mmio
, target_phys_addr_t addr
,
2350 idx
= SUBPAGE_IDX(addr
- mmio
->base
);
2351 #if defined(DEBUG_SUBPAGE)
2352 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
" idx %d\n", __func__
,
2353 mmio
, len
, addr
, idx
);
2355 ret
= (**mmio
->mem_read
[idx
][len
])(mmio
->opaque
[idx
][0][len
], addr
);
2360 static inline void subpage_writelen (subpage_t
*mmio
, target_phys_addr_t addr
,
2361 uint32_t value
, unsigned int len
)
2365 idx
= SUBPAGE_IDX(addr
- mmio
->base
);
2366 #if defined(DEBUG_SUBPAGE)
2367 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
" idx %d value %08x\n", __func__
,
2368 mmio
, len
, addr
, idx
, value
);
2370 (**mmio
->mem_write
[idx
][len
])(mmio
->opaque
[idx
][1][len
], addr
, value
);
2373 static uint32_t subpage_readb (void *opaque
, target_phys_addr_t addr
)
2375 #if defined(DEBUG_SUBPAGE)
2376 printf("%s: addr " TARGET_FMT_plx
"\n", __func__
, addr
);
2379 return subpage_readlen(opaque
, addr
, 0);
2382 static void subpage_writeb (void *opaque
, target_phys_addr_t addr
,
2385 #if defined(DEBUG_SUBPAGE)
2386 printf("%s: addr " TARGET_FMT_plx
" val %08x\n", __func__
, addr
, value
);
2388 subpage_writelen(opaque
, addr
, value
, 0);
2391 static uint32_t subpage_readw (void *opaque
, target_phys_addr_t addr
)
2393 #if defined(DEBUG_SUBPAGE)
2394 printf("%s: addr " TARGET_FMT_plx
"\n", __func__
, addr
);
2397 return subpage_readlen(opaque
, addr
, 1);
2400 static void subpage_writew (void *opaque
, target_phys_addr_t addr
,
2403 #if defined(DEBUG_SUBPAGE)
2404 printf("%s: addr " TARGET_FMT_plx
" val %08x\n", __func__
, addr
, value
);
2406 subpage_writelen(opaque
, addr
, value
, 1);
2409 static uint32_t subpage_readl (void *opaque
, target_phys_addr_t addr
)
2411 #if defined(DEBUG_SUBPAGE)
2412 printf("%s: addr " TARGET_FMT_plx
"\n", __func__
, addr
);
2415 return subpage_readlen(opaque
, addr
, 2);
2418 static void subpage_writel (void *opaque
,
2419 target_phys_addr_t addr
, uint32_t value
)
2421 #if defined(DEBUG_SUBPAGE)
2422 printf("%s: addr " TARGET_FMT_plx
" val %08x\n", __func__
, addr
, value
);
2424 subpage_writelen(opaque
, addr
, value
, 2);
2427 static CPUReadMemoryFunc
*subpage_read
[] = {
2433 static CPUWriteMemoryFunc
*subpage_write
[] = {
2439 static int subpage_register (subpage_t
*mmio
, uint32_t start
, uint32_t end
,
2445 if (start
>= TARGET_PAGE_SIZE
|| end
>= TARGET_PAGE_SIZE
)
2447 idx
= SUBPAGE_IDX(start
);
2448 eidx
= SUBPAGE_IDX(end
);
2449 #if defined(DEBUG_SUBPAGE)
2450 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__
,
2451 mmio
, start
, end
, idx
, eidx
, memory
);
2453 memory
>>= IO_MEM_SHIFT
;
2454 for (; idx
<= eidx
; idx
++) {
2455 for (i
= 0; i
< 4; i
++) {
2456 if (io_mem_read
[memory
][i
]) {
2457 mmio
->mem_read
[idx
][i
] = &io_mem_read
[memory
][i
];
2458 mmio
->opaque
[idx
][0][i
] = io_mem_opaque
[memory
];
2460 if (io_mem_write
[memory
][i
]) {
2461 mmio
->mem_write
[idx
][i
] = &io_mem_write
[memory
][i
];
2462 mmio
->opaque
[idx
][1][i
] = io_mem_opaque
[memory
];
2470 static void *subpage_init (target_phys_addr_t base
, uint32_t *phys
,
2476 mmio
= qemu_mallocz(sizeof(subpage_t
));
2479 subpage_memory
= cpu_register_io_memory(0, subpage_read
, subpage_write
, mmio
);
2480 #if defined(DEBUG_SUBPAGE)
2481 printf("%s: %p base " TARGET_FMT_plx
" len %08x %d\n", __func__
,
2482 mmio
, base
, TARGET_PAGE_SIZE
, subpage_memory
);
2484 *phys
= subpage_memory
| IO_MEM_SUBPAGE
;
2485 subpage_register(mmio
, 0, TARGET_PAGE_SIZE
- 1, orig_memory
);
2491 static void io_mem_init(void)
2493 cpu_register_io_memory(IO_MEM_ROM
>> IO_MEM_SHIFT
, error_mem_read
, unassigned_mem_write
, NULL
);
2494 cpu_register_io_memory(IO_MEM_UNASSIGNED
>> IO_MEM_SHIFT
, unassigned_mem_read
, unassigned_mem_write
, NULL
);
2495 cpu_register_io_memory(IO_MEM_NOTDIRTY
>> IO_MEM_SHIFT
, error_mem_read
, notdirty_mem_write
, NULL
);
2498 #if defined(CONFIG_SOFTMMU)
2499 io_mem_watch
= cpu_register_io_memory(-1, watch_mem_read
,
2500 watch_mem_write
, NULL
);
2502 /* alloc dirty bits array */
2503 phys_ram_dirty
= qemu_vmalloc(phys_ram_size
>> TARGET_PAGE_BITS
);
2504 memset(phys_ram_dirty
, 0xff, phys_ram_size
>> TARGET_PAGE_BITS
);
2507 /* mem_read and mem_write are arrays of functions containing the
2508 function to access byte (index 0), word (index 1) and dword (index
2509 2). Functions can be omitted with a NULL function pointer. The
2510 registered functions may be modified dynamically later.
2511 If io_index is non zero, the corresponding io zone is
2512 modified. If it is zero, a new io zone is allocated. The return
2513 value can be used with cpu_register_physical_memory(). (-1) is
2514 returned if error. */
2515 int cpu_register_io_memory(int io_index
,
2516 CPUReadMemoryFunc
**mem_read
,
2517 CPUWriteMemoryFunc
**mem_write
,
2520 int i
, subwidth
= 0;
2522 if (io_index
<= 0) {
2523 if (io_mem_nb
>= IO_MEM_NB_ENTRIES
)
2525 io_index
= io_mem_nb
++;
2527 if (io_index
>= IO_MEM_NB_ENTRIES
)
2531 for(i
= 0;i
< 3; i
++) {
2532 if (!mem_read
[i
] || !mem_write
[i
])
2533 subwidth
= IO_MEM_SUBWIDTH
;
2534 io_mem_read
[io_index
][i
] = mem_read
[i
];
2535 io_mem_write
[io_index
][i
] = mem_write
[i
];
2537 io_mem_opaque
[io_index
] = opaque
;
2538 return (io_index
<< IO_MEM_SHIFT
) | subwidth
;
2541 CPUWriteMemoryFunc
**cpu_get_io_memory_write(int io_index
)
2543 return io_mem_write
[io_index
>> IO_MEM_SHIFT
];
2546 CPUReadMemoryFunc
**cpu_get_io_memory_read(int io_index
)
2548 return io_mem_read
[io_index
>> IO_MEM_SHIFT
];
2551 /* physical memory access (slow version, mainly for debug) */
2552 #if defined(CONFIG_USER_ONLY)
2553 void cpu_physical_memory_rw(target_phys_addr_t addr
, uint8_t *buf
,
2554 int len
, int is_write
)
2561 page
= addr
& TARGET_PAGE_MASK
;
2562 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
2565 flags
= page_get_flags(page
);
2566 if (!(flags
& PAGE_VALID
))
2569 if (!(flags
& PAGE_WRITE
))
2571 /* XXX: this code should not depend on lock_user */
2572 if (!(p
= lock_user(VERIFY_WRITE
, addr
, len
, 0)))
2573 /* FIXME - should this return an error rather than just fail? */
2575 memcpy(p
, buf
, len
);
2576 unlock_user(p
, addr
, len
);
2578 if (!(flags
& PAGE_READ
))
2580 /* XXX: this code should not depend on lock_user */
2581 if (!(p
= lock_user(VERIFY_READ
, addr
, len
, 1)))
2582 /* FIXME - should this return an error rather than just fail? */
2584 memcpy(buf
, p
, len
);
2585 unlock_user(p
, addr
, 0);
2594 void cpu_physical_memory_rw(target_phys_addr_t addr
, uint8_t *buf
,
2595 int len
, int is_write
)
2600 target_phys_addr_t page
;
2605 page
= addr
& TARGET_PAGE_MASK
;
2606 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
2609 p
= phys_page_find(page
>> TARGET_PAGE_BITS
);
2611 pd
= IO_MEM_UNASSIGNED
;
2613 pd
= p
->phys_offset
;
2617 if ((pd
& ~TARGET_PAGE_MASK
) != IO_MEM_RAM
) {
2618 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
2619 /* XXX: could force cpu_single_env to NULL to avoid
2621 if (l
>= 4 && ((addr
& 3) == 0)) {
2622 /* 32 bit write access */
2624 io_mem_write
[io_index
][2](io_mem_opaque
[io_index
], addr
, val
);
2626 } else if (l
>= 2 && ((addr
& 1) == 0)) {
2627 /* 16 bit write access */
2629 io_mem_write
[io_index
][1](io_mem_opaque
[io_index
], addr
, val
);
2632 /* 8 bit write access */
2634 io_mem_write
[io_index
][0](io_mem_opaque
[io_index
], addr
, val
);
2638 unsigned long addr1
;
2639 addr1
= (pd
& TARGET_PAGE_MASK
) + (addr
& ~TARGET_PAGE_MASK
);
2641 ptr
= phys_ram_base
+ addr1
;
2642 memcpy(ptr
, buf
, l
);
2643 if (!cpu_physical_memory_is_dirty(addr1
)) {
2644 /* invalidate code */
2645 tb_invalidate_phys_page_range(addr1
, addr1
+ l
, 0);
2647 phys_ram_dirty
[addr1
>> TARGET_PAGE_BITS
] |=
2648 (0xff & ~CODE_DIRTY_FLAG
);
2650 /* qemu doesn't execute guest code directly, but kvm does
2651 therefore fluch instruction caches */
2653 flush_icache_range((unsigned long)ptr
,
2654 ((unsigned long)ptr
)+l
);
2657 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
&&
2658 !(pd
& IO_MEM_ROMD
)) {
2660 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
2661 if (l
>= 4 && ((addr
& 3) == 0)) {
2662 /* 32 bit read access */
2663 val
= io_mem_read
[io_index
][2](io_mem_opaque
[io_index
], addr
);
2666 } else if (l
>= 2 && ((addr
& 1) == 0)) {
2667 /* 16 bit read access */
2668 val
= io_mem_read
[io_index
][1](io_mem_opaque
[io_index
], addr
);
2672 /* 8 bit read access */
2673 val
= io_mem_read
[io_index
][0](io_mem_opaque
[io_index
], addr
);
2679 ptr
= phys_ram_base
+ (pd
& TARGET_PAGE_MASK
) +
2680 (addr
& ~TARGET_PAGE_MASK
);
2681 memcpy(buf
, ptr
, l
);
2690 /* used for ROM loading : can write in RAM and ROM */
2691 void cpu_physical_memory_write_rom(target_phys_addr_t addr
,
2692 const uint8_t *buf
, int len
)
2696 target_phys_addr_t page
;
2701 page
= addr
& TARGET_PAGE_MASK
;
2702 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
2705 p
= phys_page_find(page
>> TARGET_PAGE_BITS
);
2707 pd
= IO_MEM_UNASSIGNED
;
2709 pd
= p
->phys_offset
;
2712 if ((pd
& ~TARGET_PAGE_MASK
) != IO_MEM_RAM
&&
2713 (pd
& ~TARGET_PAGE_MASK
) != IO_MEM_ROM
&&
2714 !(pd
& IO_MEM_ROMD
)) {
2717 unsigned long addr1
;
2718 addr1
= (pd
& TARGET_PAGE_MASK
) + (addr
& ~TARGET_PAGE_MASK
);
2720 ptr
= phys_ram_base
+ addr1
;
2721 memcpy(ptr
, buf
, l
);
2730 /* warning: addr must be aligned */
2731 uint32_t ldl_phys(target_phys_addr_t addr
)
2739 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
2741 pd
= IO_MEM_UNASSIGNED
;
2743 pd
= p
->phys_offset
;
2746 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
&&
2747 !(pd
& IO_MEM_ROMD
)) {
2749 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
2750 val
= io_mem_read
[io_index
][2](io_mem_opaque
[io_index
], addr
);
2753 ptr
= phys_ram_base
+ (pd
& TARGET_PAGE_MASK
) +
2754 (addr
& ~TARGET_PAGE_MASK
);
2760 /* warning: addr must be aligned */
2761 uint64_t ldq_phys(target_phys_addr_t addr
)
2769 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
2771 pd
= IO_MEM_UNASSIGNED
;
2773 pd
= p
->phys_offset
;
2776 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
&&
2777 !(pd
& IO_MEM_ROMD
)) {
2779 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
2780 #ifdef TARGET_WORDS_BIGENDIAN
2781 val
= (uint64_t)io_mem_read
[io_index
][2](io_mem_opaque
[io_index
], addr
) << 32;
2782 val
|= io_mem_read
[io_index
][2](io_mem_opaque
[io_index
], addr
+ 4);
2784 val
= io_mem_read
[io_index
][2](io_mem_opaque
[io_index
], addr
);
2785 val
|= (uint64_t)io_mem_read
[io_index
][2](io_mem_opaque
[io_index
], addr
+ 4) << 32;
2789 ptr
= phys_ram_base
+ (pd
& TARGET_PAGE_MASK
) +
2790 (addr
& ~TARGET_PAGE_MASK
);
2797 uint32_t ldub_phys(target_phys_addr_t addr
)
2800 cpu_physical_memory_read(addr
, &val
, 1);
2805 uint32_t lduw_phys(target_phys_addr_t addr
)
2808 cpu_physical_memory_read(addr
, (uint8_t *)&val
, 2);
2809 return tswap16(val
);
/* Branch-prediction hints: use GCC's __builtin_expect when available,
   otherwise compile them away.  The visible fragment defined unlikely()
   twice with no conditional guard (a redefinition error); the #ifdef
   __GNUC__ / #else / #endif structure is restored here. */
#ifdef __GNUC__
#define likely(x)       __builtin_expect(!!(x), 1)
#define unlikely(x)     __builtin_expect(!!(x), 0)
#else
#define likely(x)       x
#define unlikely(x)     x
#endif
2820 /* warning: addr must be aligned. The ram page is not masked as dirty
2821 and the code inside is not invalidated. It is useful if the dirty
2822 bits are used to track modified PTEs */
2823 void stl_phys_notdirty(target_phys_addr_t addr
, uint32_t val
)
2830 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
2832 pd
= IO_MEM_UNASSIGNED
;
2834 pd
= p
->phys_offset
;
2837 if ((pd
& ~TARGET_PAGE_MASK
) != IO_MEM_RAM
) {
2838 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
2839 io_mem_write
[io_index
][2](io_mem_opaque
[io_index
], addr
, val
);
2841 unsigned long addr1
= (pd
& TARGET_PAGE_MASK
) + (addr
& ~TARGET_PAGE_MASK
);
2842 ptr
= phys_ram_base
+ addr1
;
2845 if (unlikely(in_migration
)) {
2846 if (!cpu_physical_memory_is_dirty(addr1
)) {
2847 /* invalidate code */
2848 tb_invalidate_phys_page_range(addr1
, addr1
+ 4, 0);
2850 phys_ram_dirty
[addr1
>> TARGET_PAGE_BITS
] |=
2851 (0xff & ~CODE_DIRTY_FLAG
);
2857 void stq_phys_notdirty(target_phys_addr_t addr
, uint64_t val
)
2864 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
2866 pd
= IO_MEM_UNASSIGNED
;
2868 pd
= p
->phys_offset
;
2871 if ((pd
& ~TARGET_PAGE_MASK
) != IO_MEM_RAM
) {
2872 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
2873 #ifdef TARGET_WORDS_BIGENDIAN
2874 io_mem_write
[io_index
][2](io_mem_opaque
[io_index
], addr
, val
>> 32);
2875 io_mem_write
[io_index
][2](io_mem_opaque
[io_index
], addr
+ 4, val
);
2877 io_mem_write
[io_index
][2](io_mem_opaque
[io_index
], addr
, val
);
2878 io_mem_write
[io_index
][2](io_mem_opaque
[io_index
], addr
+ 4, val
>> 32);
2881 ptr
= phys_ram_base
+ (pd
& TARGET_PAGE_MASK
) +
2882 (addr
& ~TARGET_PAGE_MASK
);
2887 /* warning: addr must be aligned */
2888 void stl_phys(target_phys_addr_t addr
, uint32_t val
)
2895 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
2897 pd
= IO_MEM_UNASSIGNED
;
2899 pd
= p
->phys_offset
;
2902 if ((pd
& ~TARGET_PAGE_MASK
) != IO_MEM_RAM
) {
2903 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
2904 io_mem_write
[io_index
][2](io_mem_opaque
[io_index
], addr
, val
);
2906 unsigned long addr1
;
2907 addr1
= (pd
& TARGET_PAGE_MASK
) + (addr
& ~TARGET_PAGE_MASK
);
2909 ptr
= phys_ram_base
+ addr1
;
2911 if (!cpu_physical_memory_is_dirty(addr1
)) {
2912 /* invalidate code */
2913 tb_invalidate_phys_page_range(addr1
, addr1
+ 4, 0);
2915 phys_ram_dirty
[addr1
>> TARGET_PAGE_BITS
] |=
2916 (0xff & ~CODE_DIRTY_FLAG
);
2922 void stb_phys(target_phys_addr_t addr
, uint32_t val
)
2925 cpu_physical_memory_write(addr
, &v
, 1);
2929 void stw_phys(target_phys_addr_t addr
, uint32_t val
)
2931 uint16_t v
= tswap16(val
);
2932 cpu_physical_memory_write(addr
, (const uint8_t *)&v
, 2);
2936 void stq_phys(target_phys_addr_t addr
, uint64_t val
)
2939 cpu_physical_memory_write(addr
, (const uint8_t *)&val
, 8);
2944 /* virtual memory access for debug */
2945 int cpu_memory_rw_debug(CPUState
*env
, target_ulong addr
,
2946 uint8_t *buf
, int len
, int is_write
)
2949 target_phys_addr_t phys_addr
;
2953 page
= addr
& TARGET_PAGE_MASK
;
2954 phys_addr
= cpu_get_phys_page_debug(env
, page
);
2955 /* if no physical page mapped, return an error */
2956 if (phys_addr
== -1)
2958 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
2961 cpu_physical_memory_rw(phys_addr
+ (addr
& ~TARGET_PAGE_MASK
),
2970 void dump_exec_info(FILE *f
,
2971 int (*cpu_fprintf
)(FILE *f
, const char *fmt
, ...))
2973 int i
, target_code_size
, max_target_code_size
;
2974 int direct_jmp_count
, direct_jmp2_count
, cross_page
;
2975 TranslationBlock
*tb
;
2977 target_code_size
= 0;
2978 max_target_code_size
= 0;
2980 direct_jmp_count
= 0;
2981 direct_jmp2_count
= 0;
2982 for(i
= 0; i
< nb_tbs
; i
++) {
2984 target_code_size
+= tb
->size
;
2985 if (tb
->size
> max_target_code_size
)
2986 max_target_code_size
= tb
->size
;
2987 if (tb
->page_addr
[1] != -1)
2989 if (tb
->tb_next_offset
[0] != 0xffff) {
2991 if (tb
->tb_next_offset
[1] != 0xffff) {
2992 direct_jmp2_count
++;
2996 /* XXX: avoid using doubles ? */
2997 cpu_fprintf(f
, "TB count %d\n", nb_tbs
);
2998 cpu_fprintf(f
, "TB avg target size %d max=%d bytes\n",
2999 nb_tbs
? target_code_size
/ nb_tbs
: 0,
3000 max_target_code_size
);
3001 cpu_fprintf(f
, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
3002 nb_tbs
? (code_gen_ptr
- code_gen_buffer
) / nb_tbs
: 0,
3003 target_code_size
? (double) (code_gen_ptr
- code_gen_buffer
) / target_code_size
: 0);
3004 cpu_fprintf(f
, "cross page TB count %d (%d%%)\n",
3006 nb_tbs
? (cross_page
* 100) / nb_tbs
: 0);
3007 cpu_fprintf(f
, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
3009 nb_tbs
? (direct_jmp_count
* 100) / nb_tbs
: 0,
3011 nb_tbs
? (direct_jmp2_count
* 100) / nb_tbs
: 0);
3012 cpu_fprintf(f
, "TB flush count %d\n", tb_flush_count
);
3013 cpu_fprintf(f
, "TB invalidate count %d\n", tb_phys_invalidate_count
);
3014 cpu_fprintf(f
, "TLB flush count %d\n", tlb_flush_count
);
3017 #if !defined(CONFIG_USER_ONLY)
3019 #define MMUSUFFIX _cmmu
3020 #define GETPC() NULL
3021 #define env cpu_single_env
3022 #define SOFTMMU_CODE_ACCESS
3025 #include "softmmu_template.h"
3028 #include "softmmu_template.h"
3031 #include "softmmu_template.h"
3034 #include "softmmu_template.h"