2 * virtual page mapping and translated block handling
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
24 #include <sys/types.h>
41 #if defined(CONFIG_USER_ONLY)
45 //#define DEBUG_TB_INVALIDATE
48 //#define DEBUG_UNASSIGNED
50 /* make various TB consistency checks */
51 //#define DEBUG_TB_CHECK
52 //#define DEBUG_TLB_CHECK
54 //#define DEBUG_IOPORT
55 //#define DEBUG_SUBPAGE
57 #if !defined(CONFIG_USER_ONLY)
58 /* TB consistency checks only implemented for usermode emulation. */
62 /* threshold to flush the translated code buffer */
63 #define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - code_gen_max_block_size())
65 #define SMC_BITMAP_USE_THRESHOLD 10
67 #define MMAP_AREA_START 0x00000000
68 #define MMAP_AREA_END 0xa8000000
70 #if defined(TARGET_SPARC64)
71 #define TARGET_PHYS_ADDR_SPACE_BITS 41
72 #elif defined(TARGET_SPARC)
73 #define TARGET_PHYS_ADDR_SPACE_BITS 36
74 #elif defined(TARGET_ALPHA)
75 #define TARGET_PHYS_ADDR_SPACE_BITS 42
76 #define TARGET_VIRT_ADDR_SPACE_BITS 42
77 #elif defined(TARGET_PPC64)
78 #define TARGET_PHYS_ADDR_SPACE_BITS 42
80 /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
81 #define TARGET_PHYS_ADDR_SPACE_BITS 32
83 #define TARGET_PHYS_ADDR_SPACE_BITS 42
85 #define TARGET_PHYS_ADDR_SPACE_BITS 32
89 extern int kvm_allowed
;
90 extern kvm_context_t kvm_context
;
93 TranslationBlock tbs
[CODE_GEN_MAX_BLOCKS
];
94 TranslationBlock
*tb_phys_hash
[CODE_GEN_PHYS_HASH_SIZE
];
96 /* any access to the tbs or the page table must use this lock */
97 spinlock_t tb_lock
= SPIN_LOCK_UNLOCKED
;
99 uint8_t code_gen_buffer
[CODE_GEN_BUFFER_SIZE
] __attribute__((aligned (32)));
100 uint8_t *code_gen_ptr
;
102 ram_addr_t phys_ram_size
;
104 uint8_t *phys_ram_base
;
105 uint8_t *phys_ram_dirty
;
107 static int in_migration
;
108 static ram_addr_t phys_ram_alloc_offset
= 0;
111 /* current CPU in the current thread. It is only valid inside
113 CPUState
*cpu_single_env
;
115 typedef struct PageDesc
{
116 /* list of TBs intersecting this ram page */
117 TranslationBlock
*first_tb
;
118 /* in order to optimize self modifying code, we count the number
119 of lookups we do to a given page to use a bitmap */
120 unsigned int code_write_count
;
121 uint8_t *code_bitmap
;
122 #if defined(CONFIG_USER_ONLY)
127 typedef struct PhysPageDesc
{
128 /* offset in host memory of the page + io_index in the low 12 bits */
129 ram_addr_t phys_offset
;
133 #if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
134 /* XXX: this is a temporary hack for alpha target.
135 * In the future, this is to be replaced by a multi-level table
136 * to actually be able to handle the complete 64 bits address space.
138 #define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
140 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
143 #define L1_SIZE (1 << L1_BITS)
144 #define L2_SIZE (1 << L2_BITS)
146 static void io_mem_init(void);
148 unsigned long qemu_real_host_page_size
;
149 unsigned long qemu_host_page_bits
;
150 unsigned long qemu_host_page_size
;
151 unsigned long qemu_host_page_mask
;
153 /* XXX: for system emulation, it could just be an array */
154 static PageDesc
*l1_map
[L1_SIZE
];
155 PhysPageDesc
**l1_phys_map
;
157 /* io memory support */
158 CPUWriteMemoryFunc
*io_mem_write
[IO_MEM_NB_ENTRIES
][4];
159 CPUReadMemoryFunc
*io_mem_read
[IO_MEM_NB_ENTRIES
][4];
160 void *io_mem_opaque
[IO_MEM_NB_ENTRIES
];
161 static int io_mem_nb
;
162 #if defined(CONFIG_SOFTMMU)
163 static int io_mem_watch
;
167 char *logfilename
= "/tmp/qemu.log";
170 static int log_append
= 0;
173 static int tlb_flush_count
;
174 static int tb_flush_count
;
175 static int tb_phys_invalidate_count
;
177 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
178 typedef struct subpage_t
{
179 target_phys_addr_t base
;
180 CPUReadMemoryFunc
**mem_read
[TARGET_PAGE_SIZE
];
181 CPUWriteMemoryFunc
**mem_write
[TARGET_PAGE_SIZE
];
182 void *opaque
[TARGET_PAGE_SIZE
];
185 static void page_init(void)
187 /* NOTE: we can always suppose that qemu_host_page_size >=
191 SYSTEM_INFO system_info
;
194 GetSystemInfo(&system_info
);
195 qemu_real_host_page_size
= system_info
.dwPageSize
;
197 VirtualProtect(code_gen_buffer
, sizeof(code_gen_buffer
),
198 PAGE_EXECUTE_READWRITE
, &old_protect
);
201 qemu_real_host_page_size
= getpagesize();
203 unsigned long start
, end
;
205 start
= (unsigned long)code_gen_buffer
;
206 start
&= ~(qemu_real_host_page_size
- 1);
208 end
= (unsigned long)code_gen_buffer
+ sizeof(code_gen_buffer
);
209 end
+= qemu_real_host_page_size
- 1;
210 end
&= ~(qemu_real_host_page_size
- 1);
212 mprotect((void *)start
, end
- start
,
213 PROT_READ
| PROT_WRITE
| PROT_EXEC
);
217 if (qemu_host_page_size
== 0)
218 qemu_host_page_size
= qemu_real_host_page_size
;
219 if (qemu_host_page_size
< TARGET_PAGE_SIZE
)
220 qemu_host_page_size
= TARGET_PAGE_SIZE
;
221 qemu_host_page_bits
= 0;
222 while ((1 << qemu_host_page_bits
) < qemu_host_page_size
)
223 qemu_host_page_bits
++;
224 qemu_host_page_mask
= ~(qemu_host_page_size
- 1);
225 l1_phys_map
= qemu_vmalloc(L1_SIZE
* sizeof(void *));
226 memset(l1_phys_map
, 0, L1_SIZE
* sizeof(void *));
228 #if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
230 long long startaddr
, endaddr
;
234 f
= fopen("/proc/self/maps", "r");
237 n
= fscanf (f
, "%llx-%llx %*[^\n]\n", &startaddr
, &endaddr
);
239 page_set_flags(TARGET_PAGE_ALIGN(startaddr
),
240 TARGET_PAGE_ALIGN(endaddr
),
250 static inline PageDesc
*page_find_alloc(unsigned int index
)
254 lp
= &l1_map
[index
>> L2_BITS
];
257 /* allocate if not found */
258 p
= qemu_malloc(sizeof(PageDesc
) * L2_SIZE
);
259 memset(p
, 0, sizeof(PageDesc
) * L2_SIZE
);
262 return p
+ (index
& (L2_SIZE
- 1));
265 static inline PageDesc
*page_find(unsigned int index
)
269 p
= l1_map
[index
>> L2_BITS
];
272 return p
+ (index
& (L2_SIZE
- 1));
275 static PhysPageDesc
*phys_page_find_alloc(target_phys_addr_t index
, int alloc
)
280 p
= (void **)l1_phys_map
;
281 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
283 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
284 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
286 lp
= p
+ ((index
>> (L1_BITS
+ L2_BITS
)) & (L1_SIZE
- 1));
289 /* allocate if not found */
292 p
= qemu_vmalloc(sizeof(void *) * L1_SIZE
);
293 memset(p
, 0, sizeof(void *) * L1_SIZE
);
297 lp
= p
+ ((index
>> L2_BITS
) & (L1_SIZE
- 1));
301 /* allocate if not found */
304 pd
= qemu_vmalloc(sizeof(PhysPageDesc
) * L2_SIZE
);
306 for (i
= 0; i
< L2_SIZE
; i
++)
307 pd
[i
].phys_offset
= IO_MEM_UNASSIGNED
;
309 return ((PhysPageDesc
*)pd
) + (index
& (L2_SIZE
- 1));
312 static inline PhysPageDesc
*phys_page_find(target_phys_addr_t index
)
314 return phys_page_find_alloc(index
, 0);
317 #if !defined(CONFIG_USER_ONLY)
318 static void tlb_protect_code(ram_addr_t ram_addr
);
319 static void tlb_unprotect_code_phys(CPUState
*env
, ram_addr_t ram_addr
,
323 void cpu_exec_init(CPUState
*env
)
329 code_gen_ptr
= code_gen_buffer
;
333 env
->next_cpu
= NULL
;
336 while (*penv
!= NULL
) {
337 penv
= (CPUState
**)&(*penv
)->next_cpu
;
340 env
->cpu_index
= cpu_index
;
341 env
->nb_watchpoints
= 0;
345 static inline void invalidate_page_bitmap(PageDesc
*p
)
347 if (p
->code_bitmap
) {
348 qemu_free(p
->code_bitmap
);
349 p
->code_bitmap
= NULL
;
351 p
->code_write_count
= 0;
354 /* set to NULL all the 'first_tb' fields in all PageDescs */
355 static void page_flush_tb(void)
360 for(i
= 0; i
< L1_SIZE
; i
++) {
363 for(j
= 0; j
< L2_SIZE
; j
++) {
365 invalidate_page_bitmap(p
);
372 /* flush all the translation blocks */
373 /* XXX: tb_flush is currently not thread safe */
374 void tb_flush(CPUState
*env1
)
377 #if defined(DEBUG_FLUSH)
378 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
379 (unsigned long)(code_gen_ptr
- code_gen_buffer
),
381 ((unsigned long)(code_gen_ptr
- code_gen_buffer
)) / nb_tbs
: 0);
385 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
386 memset (env
->tb_jmp_cache
, 0, TB_JMP_CACHE_SIZE
* sizeof (void *));
389 memset (tb_phys_hash
, 0, CODE_GEN_PHYS_HASH_SIZE
* sizeof (void *));
392 code_gen_ptr
= code_gen_buffer
;
393 /* XXX: flush processor icache at this point if cache flush is
398 #ifdef DEBUG_TB_CHECK
400 static void tb_invalidate_check(target_ulong address
)
402 TranslationBlock
*tb
;
404 address
&= TARGET_PAGE_MASK
;
405 for(i
= 0;i
< CODE_GEN_PHYS_HASH_SIZE
; i
++) {
406 for(tb
= tb_phys_hash
[i
]; tb
!= NULL
; tb
= tb
->phys_hash_next
) {
407 if (!(address
+ TARGET_PAGE_SIZE
<= tb
->pc
||
408 address
>= tb
->pc
+ tb
->size
)) {
409 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
410 address
, (long)tb
->pc
, tb
->size
);
416 /* verify that all the pages have correct rights for code */
417 static void tb_page_check(void)
419 TranslationBlock
*tb
;
420 int i
, flags1
, flags2
;
422 for(i
= 0;i
< CODE_GEN_PHYS_HASH_SIZE
; i
++) {
423 for(tb
= tb_phys_hash
[i
]; tb
!= NULL
; tb
= tb
->phys_hash_next
) {
424 flags1
= page_get_flags(tb
->pc
);
425 flags2
= page_get_flags(tb
->pc
+ tb
->size
- 1);
426 if ((flags1
& PAGE_WRITE
) || (flags2
& PAGE_WRITE
)) {
427 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
428 (long)tb
->pc
, tb
->size
, flags1
, flags2
);
434 void tb_jmp_check(TranslationBlock
*tb
)
436 TranslationBlock
*tb1
;
439 /* suppress any remaining jumps to this TB */
443 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
446 tb1
= tb1
->jmp_next
[n1
];
448 /* check end of list */
450 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb
);
456 /* invalidate one TB */
457 static inline void tb_remove(TranslationBlock
**ptb
, TranslationBlock
*tb
,
460 TranslationBlock
*tb1
;
464 *ptb
= *(TranslationBlock
**)((char *)tb1
+ next_offset
);
467 ptb
= (TranslationBlock
**)((char *)tb1
+ next_offset
);
471 static inline void tb_page_remove(TranslationBlock
**ptb
, TranslationBlock
*tb
)
473 TranslationBlock
*tb1
;
479 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
481 *ptb
= tb1
->page_next
[n1
];
484 ptb
= &tb1
->page_next
[n1
];
488 static inline void tb_jmp_remove(TranslationBlock
*tb
, int n
)
490 TranslationBlock
*tb1
, **ptb
;
493 ptb
= &tb
->jmp_next
[n
];
496 /* find tb(n) in circular list */
500 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
501 if (n1
== n
&& tb1
== tb
)
504 ptb
= &tb1
->jmp_first
;
506 ptb
= &tb1
->jmp_next
[n1
];
509 /* now we can suppress tb(n) from the list */
510 *ptb
= tb
->jmp_next
[n
];
512 tb
->jmp_next
[n
] = NULL
;
516 /* reset the jump entry 'n' of a TB so that it is not chained to
518 static inline void tb_reset_jump(TranslationBlock
*tb
, int n
)
520 tb_set_jmp_target(tb
, n
, (unsigned long)(tb
->tc_ptr
+ tb
->tb_next_offset
[n
]));
523 static inline void tb_phys_invalidate(TranslationBlock
*tb
, unsigned int page_addr
)
528 target_ulong phys_pc
;
529 TranslationBlock
*tb1
, *tb2
;
531 /* remove the TB from the hash list */
532 phys_pc
= tb
->page_addr
[0] + (tb
->pc
& ~TARGET_PAGE_MASK
);
533 h
= tb_phys_hash_func(phys_pc
);
534 tb_remove(&tb_phys_hash
[h
], tb
,
535 offsetof(TranslationBlock
, phys_hash_next
));
537 /* remove the TB from the page list */
538 if (tb
->page_addr
[0] != page_addr
) {
539 p
= page_find(tb
->page_addr
[0] >> TARGET_PAGE_BITS
);
540 tb_page_remove(&p
->first_tb
, tb
);
541 invalidate_page_bitmap(p
);
543 if (tb
->page_addr
[1] != -1 && tb
->page_addr
[1] != page_addr
) {
544 p
= page_find(tb
->page_addr
[1] >> TARGET_PAGE_BITS
);
545 tb_page_remove(&p
->first_tb
, tb
);
546 invalidate_page_bitmap(p
);
549 tb_invalidated_flag
= 1;
551 /* remove the TB from the hash list */
552 h
= tb_jmp_cache_hash_func(tb
->pc
);
553 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
554 if (env
->tb_jmp_cache
[h
] == tb
)
555 env
->tb_jmp_cache
[h
] = NULL
;
558 /* suppress this TB from the two jump lists */
559 tb_jmp_remove(tb
, 0);
560 tb_jmp_remove(tb
, 1);
562 /* suppress any remaining jumps to this TB */
568 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
569 tb2
= tb1
->jmp_next
[n1
];
570 tb_reset_jump(tb1
, n1
);
571 tb1
->jmp_next
[n1
] = NULL
;
574 tb
->jmp_first
= (TranslationBlock
*)((long)tb
| 2); /* fail safe */
576 tb_phys_invalidate_count
++;
579 static inline void set_bits(uint8_t *tab
, int start
, int len
)
585 mask
= 0xff << (start
& 7);
586 if ((start
& ~7) == (end
& ~7)) {
588 mask
&= ~(0xff << (end
& 7));
593 start
= (start
+ 8) & ~7;
595 while (start
< end1
) {
600 mask
= ~(0xff << (end
& 7));
606 static void build_page_bitmap(PageDesc
*p
)
608 int n
, tb_start
, tb_end
;
609 TranslationBlock
*tb
;
611 p
->code_bitmap
= qemu_malloc(TARGET_PAGE_SIZE
/ 8);
614 memset(p
->code_bitmap
, 0, TARGET_PAGE_SIZE
/ 8);
619 tb
= (TranslationBlock
*)((long)tb
& ~3);
620 /* NOTE: this is subtle as a TB may span two physical pages */
622 /* NOTE: tb_end may be after the end of the page, but
623 it is not a problem */
624 tb_start
= tb
->pc
& ~TARGET_PAGE_MASK
;
625 tb_end
= tb_start
+ tb
->size
;
626 if (tb_end
> TARGET_PAGE_SIZE
)
627 tb_end
= TARGET_PAGE_SIZE
;
630 tb_end
= ((tb
->pc
+ tb
->size
) & ~TARGET_PAGE_MASK
);
632 set_bits(p
->code_bitmap
, tb_start
, tb_end
- tb_start
);
633 tb
= tb
->page_next
[n
];
637 #ifdef TARGET_HAS_PRECISE_SMC
639 static void tb_gen_code(CPUState
*env
,
640 target_ulong pc
, target_ulong cs_base
, int flags
,
643 TranslationBlock
*tb
;
645 target_ulong phys_pc
, phys_page2
, virt_page2
;
648 phys_pc
= get_phys_addr_code(env
, pc
);
651 /* flush must be done */
653 /* cannot fail at this point */
656 tc_ptr
= code_gen_ptr
;
658 tb
->cs_base
= cs_base
;
661 cpu_gen_code(env
, tb
, &code_gen_size
);
662 code_gen_ptr
= (void *)(((unsigned long)code_gen_ptr
+ code_gen_size
+ CODE_GEN_ALIGN
- 1) & ~(CODE_GEN_ALIGN
- 1));
664 /* check next page if needed */
665 virt_page2
= (pc
+ tb
->size
- 1) & TARGET_PAGE_MASK
;
667 if ((pc
& TARGET_PAGE_MASK
) != virt_page2
) {
668 phys_page2
= get_phys_addr_code(env
, virt_page2
);
670 tb_link_phys(tb
, phys_pc
, phys_page2
);
674 /* invalidate all TBs which intersect with the target physical page
675 starting in range [start;end[. NOTE: start and end must refer to
676 the same physical page. 'is_cpu_write_access' should be true if called
677 from a real cpu write access: the virtual CPU will exit the current
678 TB if code is modified inside this TB. */
679 void tb_invalidate_phys_page_range(target_ulong start
, target_ulong end
,
680 int is_cpu_write_access
)
682 int n
, current_tb_modified
, current_tb_not_found
, current_flags
;
683 CPUState
*env
= cpu_single_env
;
685 TranslationBlock
*tb
, *tb_next
, *current_tb
, *saved_tb
;
686 target_ulong tb_start
, tb_end
;
687 target_ulong current_pc
, current_cs_base
;
689 p
= page_find(start
>> TARGET_PAGE_BITS
);
692 if (!p
->code_bitmap
&&
693 ++p
->code_write_count
>= SMC_BITMAP_USE_THRESHOLD
&&
694 is_cpu_write_access
) {
695 /* build code bitmap */
696 build_page_bitmap(p
);
699 /* we remove all the TBs in the range [start, end[ */
700 /* XXX: see if in some cases it could be faster to invalidate all the code */
701 current_tb_not_found
= is_cpu_write_access
;
702 current_tb_modified
= 0;
703 current_tb
= NULL
; /* avoid warning */
704 current_pc
= 0; /* avoid warning */
705 current_cs_base
= 0; /* avoid warning */
706 current_flags
= 0; /* avoid warning */
710 tb
= (TranslationBlock
*)((long)tb
& ~3);
711 tb_next
= tb
->page_next
[n
];
712 /* NOTE: this is subtle as a TB may span two physical pages */
714 /* NOTE: tb_end may be after the end of the page, but
715 it is not a problem */
716 tb_start
= tb
->page_addr
[0] + (tb
->pc
& ~TARGET_PAGE_MASK
);
717 tb_end
= tb_start
+ tb
->size
;
719 tb_start
= tb
->page_addr
[1];
720 tb_end
= tb_start
+ ((tb
->pc
+ tb
->size
) & ~TARGET_PAGE_MASK
);
722 if (!(tb_end
<= start
|| tb_start
>= end
)) {
723 #ifdef TARGET_HAS_PRECISE_SMC
724 if (current_tb_not_found
) {
725 current_tb_not_found
= 0;
727 if (env
->mem_write_pc
) {
728 /* now we have a real cpu fault */
729 current_tb
= tb_find_pc(env
->mem_write_pc
);
732 if (current_tb
== tb
&&
733 !(current_tb
->cflags
& CF_SINGLE_INSN
)) {
734 /* If we are modifying the current TB, we must stop
735 its execution. We could be more precise by checking
736 that the modification is after the current PC, but it
737 would require a specialized function to partially
738 restore the CPU state */
740 current_tb_modified
= 1;
741 cpu_restore_state(current_tb
, env
,
742 env
->mem_write_pc
, NULL
);
743 #if defined(TARGET_I386)
744 current_flags
= env
->hflags
;
745 current_flags
|= (env
->eflags
& (IOPL_MASK
| TF_MASK
| VM_MASK
));
746 current_cs_base
= (target_ulong
)env
->segs
[R_CS
].base
;
747 current_pc
= current_cs_base
+ env
->eip
;
749 #error unsupported CPU
752 #endif /* TARGET_HAS_PRECISE_SMC */
753 /* we need to do that to handle the case where a signal
754 occurs while doing tb_phys_invalidate() */
757 saved_tb
= env
->current_tb
;
758 env
->current_tb
= NULL
;
760 tb_phys_invalidate(tb
, -1);
762 env
->current_tb
= saved_tb
;
763 if (env
->interrupt_request
&& env
->current_tb
)
764 cpu_interrupt(env
, env
->interrupt_request
);
769 #if !defined(CONFIG_USER_ONLY)
770 /* if no code remaining, no need to continue to use slow writes */
772 invalidate_page_bitmap(p
);
773 if (is_cpu_write_access
) {
774 tlb_unprotect_code_phys(env
, start
, env
->mem_write_vaddr
);
778 #ifdef TARGET_HAS_PRECISE_SMC
779 if (current_tb_modified
) {
780 /* we generate a block containing just the instruction
781 modifying the memory. It will ensure that it cannot modify
783 env
->current_tb
= NULL
;
784 tb_gen_code(env
, current_pc
, current_cs_base
, current_flags
,
786 cpu_resume_from_signal(env
, NULL
);
791 /* len must be <= 8 and start must be a multiple of len */
792 static inline void tb_invalidate_phys_page_fast(target_ulong start
, int len
)
799 fprintf(logfile
, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
800 cpu_single_env
->mem_write_vaddr
, len
,
802 cpu_single_env
->eip
+ (long)cpu_single_env
->segs
[R_CS
].base
);
806 p
= page_find(start
>> TARGET_PAGE_BITS
);
809 if (p
->code_bitmap
) {
810 offset
= start
& ~TARGET_PAGE_MASK
;
811 b
= p
->code_bitmap
[offset
>> 3] >> (offset
& 7);
812 if (b
& ((1 << len
) - 1))
816 tb_invalidate_phys_page_range(start
, start
+ len
, 1);
820 #if !defined(CONFIG_SOFTMMU)
821 static void tb_invalidate_phys_page(target_ulong addr
,
822 unsigned long pc
, void *puc
)
824 int n
, current_flags
, current_tb_modified
;
825 target_ulong current_pc
, current_cs_base
;
827 TranslationBlock
*tb
, *current_tb
;
828 #ifdef TARGET_HAS_PRECISE_SMC
829 CPUState
*env
= cpu_single_env
;
832 addr
&= TARGET_PAGE_MASK
;
833 p
= page_find(addr
>> TARGET_PAGE_BITS
);
837 current_tb_modified
= 0;
839 current_pc
= 0; /* avoid warning */
840 current_cs_base
= 0; /* avoid warning */
841 current_flags
= 0; /* avoid warning */
842 #ifdef TARGET_HAS_PRECISE_SMC
844 current_tb
= tb_find_pc(pc
);
849 tb
= (TranslationBlock
*)((long)tb
& ~3);
850 #ifdef TARGET_HAS_PRECISE_SMC
851 if (current_tb
== tb
&&
852 !(current_tb
->cflags
& CF_SINGLE_INSN
)) {
853 /* If we are modifying the current TB, we must stop
854 its execution. We could be more precise by checking
855 that the modification is after the current PC, but it
856 would require a specialized function to partially
857 restore the CPU state */
859 current_tb_modified
= 1;
860 cpu_restore_state(current_tb
, env
, pc
, puc
);
861 #if defined(TARGET_I386)
862 current_flags
= env
->hflags
;
863 current_flags
|= (env
->eflags
& (IOPL_MASK
| TF_MASK
| VM_MASK
));
864 current_cs_base
= (target_ulong
)env
->segs
[R_CS
].base
;
865 current_pc
= current_cs_base
+ env
->eip
;
867 #error unsupported CPU
870 #endif /* TARGET_HAS_PRECISE_SMC */
871 tb_phys_invalidate(tb
, addr
);
872 tb
= tb
->page_next
[n
];
875 #ifdef TARGET_HAS_PRECISE_SMC
876 if (current_tb_modified
) {
877 /* we generate a block containing just the instruction
878 modifying the memory. It will ensure that it cannot modify
880 env
->current_tb
= NULL
;
881 tb_gen_code(env
, current_pc
, current_cs_base
, current_flags
,
883 cpu_resume_from_signal(env
, puc
);
889 /* add the tb in the target page and protect it if necessary */
890 static inline void tb_alloc_page(TranslationBlock
*tb
,
891 unsigned int n
, target_ulong page_addr
)
894 TranslationBlock
*last_first_tb
;
896 tb
->page_addr
[n
] = page_addr
;
897 p
= page_find_alloc(page_addr
>> TARGET_PAGE_BITS
);
898 tb
->page_next
[n
] = p
->first_tb
;
899 last_first_tb
= p
->first_tb
;
900 p
->first_tb
= (TranslationBlock
*)((long)tb
| n
);
901 invalidate_page_bitmap(p
);
903 #if defined(TARGET_HAS_SMC) || 1
905 #if defined(CONFIG_USER_ONLY)
906 if (p
->flags
& PAGE_WRITE
) {
911 /* force the host page as non writable (writes will have a
912 page fault + mprotect overhead) */
913 page_addr
&= qemu_host_page_mask
;
915 for(addr
= page_addr
; addr
< page_addr
+ qemu_host_page_size
;
916 addr
+= TARGET_PAGE_SIZE
) {
918 p2
= page_find (addr
>> TARGET_PAGE_BITS
);
922 p2
->flags
&= ~PAGE_WRITE
;
923 page_get_flags(addr
);
925 mprotect(g2h(page_addr
), qemu_host_page_size
,
926 (prot
& PAGE_BITS
) & ~PAGE_WRITE
);
927 #ifdef DEBUG_TB_INVALIDATE
928 printf("protecting code page: 0x" TARGET_FMT_lx
"\n",
933 /* if some code is already present, then the pages are already
934 protected. So we handle the case where only the first TB is
935 allocated in a physical page */
936 if (!last_first_tb
) {
937 tlb_protect_code(page_addr
);
941 #endif /* TARGET_HAS_SMC */
944 /* Allocate a new translation block. Flush the translation buffer if
945 too many translation blocks or too much generated code. */
946 TranslationBlock
*tb_alloc(target_ulong pc
)
948 TranslationBlock
*tb
;
950 if (nb_tbs
>= CODE_GEN_MAX_BLOCKS
||
951 (code_gen_ptr
- code_gen_buffer
) >= CODE_GEN_BUFFER_MAX_SIZE
)
959 /* add a new TB and link it to the physical page tables. phys_page2 is
960 (-1) to indicate that only one page contains the TB. */
961 void tb_link_phys(TranslationBlock
*tb
,
962 target_ulong phys_pc
, target_ulong phys_page2
)
965 TranslationBlock
**ptb
;
967 /* add in the physical hash table */
968 h
= tb_phys_hash_func(phys_pc
);
969 ptb
= &tb_phys_hash
[h
];
970 tb
->phys_hash_next
= *ptb
;
973 /* add in the page list */
974 tb_alloc_page(tb
, 0, phys_pc
& TARGET_PAGE_MASK
);
975 if (phys_page2
!= -1)
976 tb_alloc_page(tb
, 1, phys_page2
);
978 tb
->page_addr
[1] = -1;
980 tb
->jmp_first
= (TranslationBlock
*)((long)tb
| 2);
981 tb
->jmp_next
[0] = NULL
;
982 tb
->jmp_next
[1] = NULL
;
984 /* init original jump addresses */
985 if (tb
->tb_next_offset
[0] != 0xffff)
986 tb_reset_jump(tb
, 0);
987 if (tb
->tb_next_offset
[1] != 0xffff)
988 tb_reset_jump(tb
, 1);
990 #ifdef DEBUG_TB_CHECK
995 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
996 tb[1].tc_ptr. Return NULL if not found */
997 TranslationBlock
*tb_find_pc(unsigned long tc_ptr
)
1001 TranslationBlock
*tb
;
1005 if (tc_ptr
< (unsigned long)code_gen_buffer
||
1006 tc_ptr
>= (unsigned long)code_gen_ptr
)
1008 /* binary search (cf Knuth) */
1011 while (m_min
<= m_max
) {
1012 m
= (m_min
+ m_max
) >> 1;
1014 v
= (unsigned long)tb
->tc_ptr
;
1017 else if (tc_ptr
< v
) {
1026 static void tb_reset_jump_recursive(TranslationBlock
*tb
);
1028 static inline void tb_reset_jump_recursive2(TranslationBlock
*tb
, int n
)
1030 TranslationBlock
*tb1
, *tb_next
, **ptb
;
1033 tb1
= tb
->jmp_next
[n
];
1035 /* find head of list */
1038 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
1041 tb1
= tb1
->jmp_next
[n1
];
1043 /* we are now sure now that tb jumps to tb1 */
1046 /* remove tb from the jmp_first list */
1047 ptb
= &tb_next
->jmp_first
;
1051 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
1052 if (n1
== n
&& tb1
== tb
)
1054 ptb
= &tb1
->jmp_next
[n1
];
1056 *ptb
= tb
->jmp_next
[n
];
1057 tb
->jmp_next
[n
] = NULL
;
1059 /* suppress the jump to next tb in generated code */
1060 tb_reset_jump(tb
, n
);
1062 /* suppress jumps in the tb on which we could have jumped */
1063 tb_reset_jump_recursive(tb_next
);
1067 static void tb_reset_jump_recursive(TranslationBlock
*tb
)
1069 tb_reset_jump_recursive2(tb
, 0);
1070 tb_reset_jump_recursive2(tb
, 1);
1073 #if defined(TARGET_HAS_ICE)
1074 static void breakpoint_invalidate(CPUState
*env
, target_ulong pc
)
1076 target_phys_addr_t addr
;
1078 ram_addr_t ram_addr
;
1081 addr
= cpu_get_phys_page_debug(env
, pc
);
1082 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
1084 pd
= IO_MEM_UNASSIGNED
;
1086 pd
= p
->phys_offset
;
1088 ram_addr
= (pd
& TARGET_PAGE_MASK
) | (pc
& ~TARGET_PAGE_MASK
);
1089 tb_invalidate_phys_page_range(ram_addr
, ram_addr
+ 1, 0);
1093 /* Add a watchpoint. */
1094 int cpu_watchpoint_insert(CPUState
*env
, target_ulong addr
)
1098 for (i
= 0; i
< env
->nb_watchpoints
; i
++) {
1099 if (addr
== env
->watchpoint
[i
].vaddr
)
1102 if (env
->nb_watchpoints
>= MAX_WATCHPOINTS
)
1105 i
= env
->nb_watchpoints
++;
1106 env
->watchpoint
[i
].vaddr
= addr
;
1107 tlb_flush_page(env
, addr
);
1108 /* FIXME: This flush is needed because of the hack to make memory ops
1109 terminate the TB. It can be removed once the proper IO trap and
1110 re-execute bits are in. */
1115 /* Remove a watchpoint. */
1116 int cpu_watchpoint_remove(CPUState
*env
, target_ulong addr
)
1120 for (i
= 0; i
< env
->nb_watchpoints
; i
++) {
1121 if (addr
== env
->watchpoint
[i
].vaddr
) {
1122 env
->nb_watchpoints
--;
1123 env
->watchpoint
[i
] = env
->watchpoint
[env
->nb_watchpoints
];
1124 tlb_flush_page(env
, addr
);
1131 /* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1132 breakpoint is reached */
1133 int cpu_breakpoint_insert(CPUState
*env
, target_ulong pc
)
1135 #if defined(TARGET_HAS_ICE)
1138 for(i
= 0; i
< env
->nb_breakpoints
; i
++) {
1139 if (env
->breakpoints
[i
] == pc
)
1143 if (env
->nb_breakpoints
>= MAX_BREAKPOINTS
)
1145 env
->breakpoints
[env
->nb_breakpoints
++] = pc
;
1149 kvm_update_debugger(env
);
1152 breakpoint_invalidate(env
, pc
);
1159 /* remove a breakpoint */
1160 int cpu_breakpoint_remove(CPUState
*env
, target_ulong pc
)
1162 #if defined(TARGET_HAS_ICE)
1164 for(i
= 0; i
< env
->nb_breakpoints
; i
++) {
1165 if (env
->breakpoints
[i
] == pc
)
1170 env
->nb_breakpoints
--;
1171 if (i
< env
->nb_breakpoints
)
1172 env
->breakpoints
[i
] = env
->breakpoints
[env
->nb_breakpoints
];
1176 kvm_update_debugger(env
);
1179 breakpoint_invalidate(env
, pc
);
1186 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1187 CPU loop after each instruction */
1188 void cpu_single_step(CPUState
*env
, int enabled
)
1190 #if defined(TARGET_HAS_ICE)
1191 if (env
->singlestep_enabled
!= enabled
) {
1192 env
->singlestep_enabled
= enabled
;
1193 /* must flush all the translated code to avoid inconsistancies */
1194 /* XXX: only flush what is necessary */
1199 kvm_update_debugger(env
);
1204 /* enable or disable low levels log */
1205 void cpu_set_log(int log_flags
)
1207 loglevel
= log_flags
;
1208 if (loglevel
&& !logfile
) {
1209 logfile
= fopen(logfilename
, log_append
? "a" : "w");
1211 perror(logfilename
);
1214 #if !defined(CONFIG_SOFTMMU)
1215 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1217 static uint8_t logfile_buf
[4096];
1218 setvbuf(logfile
, logfile_buf
, _IOLBF
, sizeof(logfile_buf
));
1221 setvbuf(logfile
, NULL
, _IOLBF
, 0);
1225 if (!loglevel
&& logfile
) {
1231 void cpu_set_log_filename(const char *filename
)
1233 logfilename
= strdup(filename
);
1238 cpu_set_log(loglevel
);
1241 /* mask must never be zero, except for A20 change call */
1242 void cpu_interrupt(CPUState
*env
, int mask
)
1244 TranslationBlock
*tb
;
1245 static int interrupt_lock
;
1247 env
->interrupt_request
|= mask
;
1249 if (kvm_allowed
&& !kvm_irqchip_in_kernel(kvm_context
))
1250 kvm_update_interrupt_request(env
);
1252 /* if the cpu is currently executing code, we must unlink it and
1253 all the potentially executing TB */
1254 tb
= env
->current_tb
;
1255 if (tb
&& !testandset(&interrupt_lock
)) {
1256 env
->current_tb
= NULL
;
1257 tb_reset_jump_recursive(tb
);
1262 void cpu_reset_interrupt(CPUState
*env
, int mask
)
1264 env
->interrupt_request
&= ~mask
;
1267 CPULogItem cpu_log_items
[] = {
1268 { CPU_LOG_TB_OUT_ASM
, "out_asm",
1269 "show generated host assembly code for each compiled TB" },
1270 { CPU_LOG_TB_IN_ASM
, "in_asm",
1271 "show target assembly code for each compiled TB" },
1272 { CPU_LOG_TB_OP
, "op",
1273 "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
1275 { CPU_LOG_TB_OP_OPT
, "op_opt",
1276 "show micro ops after optimization for each compiled TB" },
1278 { CPU_LOG_INT
, "int",
1279 "show interrupts/exceptions in short format" },
1280 { CPU_LOG_EXEC
, "exec",
1281 "show trace before each executed TB (lots of logs)" },
1282 { CPU_LOG_TB_CPU
, "cpu",
1283 "show CPU state before block translation" },
1285 { CPU_LOG_PCALL
, "pcall",
1286 "show protected mode far calls/returns/exceptions" },
1289 { CPU_LOG_IOPORT
, "ioport",
1290 "show all i/o ports accesses" },
1295 static int cmp1(const char *s1
, int n
, const char *s2
)
1297 if (strlen(s2
) != n
)
1299 return memcmp(s1
, s2
, n
) == 0;
1302 /* takes a comma separated list of log masks. Return 0 if error. */
1303 int cpu_str_to_log_mask(const char *str
)
1312 p1
= strchr(p
, ',');
1315 if(cmp1(p
,p1
-p
,"all")) {
1316 for(item
= cpu_log_items
; item
->mask
!= 0; item
++) {
1320 for(item
= cpu_log_items
; item
->mask
!= 0; item
++) {
1321 if (cmp1(p
, p1
- p
, item
->name
))
1335 void cpu_abort(CPUState
*env
, const char *fmt
, ...)
1342 fprintf(stderr
, "qemu: fatal: ");
1343 vfprintf(stderr
, fmt
, ap
);
1344 fprintf(stderr
, "\n");
1346 if(env
->intercept
& INTERCEPT_SVM_MASK
) {
1347 /* most probably the virtual machine should not
1348 be shut down but rather caught by the VMM */
1349 vmexit(SVM_EXIT_SHUTDOWN
, 0);
1351 cpu_dump_state(env
, stderr
, fprintf
, X86_DUMP_FPU
| X86_DUMP_CCOP
);
1353 cpu_dump_state(env
, stderr
, fprintf
, 0);
1356 fprintf(logfile
, "qemu: fatal: ");
1357 vfprintf(logfile
, fmt
, ap2
);
1358 fprintf(logfile
, "\n");
1360 cpu_dump_state(env
, logfile
, fprintf
, X86_DUMP_FPU
| X86_DUMP_CCOP
);
1362 cpu_dump_state(env
, logfile
, fprintf
, 0);
1372 CPUState
*cpu_copy(CPUState
*env
)
1374 CPUState
*new_env
= cpu_init(env
->cpu_model_str
);
1375 /* preserve chaining and index */
1376 CPUState
*next_cpu
= new_env
->next_cpu
;
1377 int cpu_index
= new_env
->cpu_index
;
1378 memcpy(new_env
, env
, sizeof(CPUState
));
1379 new_env
->next_cpu
= next_cpu
;
1380 new_env
->cpu_index
= cpu_index
;
1384 #if !defined(CONFIG_USER_ONLY)
1386 /* NOTE: if flush_global is true, also flush global entries (not
1388 void tlb_flush(CPUState
*env
, int flush_global
)
1392 #if defined(DEBUG_TLB)
1393 printf("tlb_flush:\n");
1395 /* must reset current TB so that interrupts cannot modify the
1396 links while we are modifying them */
1397 env
->current_tb
= NULL
;
1399 for(i
= 0; i
< CPU_TLB_SIZE
; i
++) {
1400 env
->tlb_table
[0][i
].addr_read
= -1;
1401 env
->tlb_table
[0][i
].addr_write
= -1;
1402 env
->tlb_table
[0][i
].addr_code
= -1;
1403 env
->tlb_table
[1][i
].addr_read
= -1;
1404 env
->tlb_table
[1][i
].addr_write
= -1;
1405 env
->tlb_table
[1][i
].addr_code
= -1;
1406 #if (NB_MMU_MODES >= 3)
1407 env
->tlb_table
[2][i
].addr_read
= -1;
1408 env
->tlb_table
[2][i
].addr_write
= -1;
1409 env
->tlb_table
[2][i
].addr_code
= -1;
1410 #if (NB_MMU_MODES == 4)
1411 env
->tlb_table
[3][i
].addr_read
= -1;
1412 env
->tlb_table
[3][i
].addr_write
= -1;
1413 env
->tlb_table
[3][i
].addr_code
= -1;
1418 memset (env
->tb_jmp_cache
, 0, TB_JMP_CACHE_SIZE
* sizeof (void *));
1420 #if !defined(CONFIG_SOFTMMU)
1421 munmap((void *)MMAP_AREA_START
, MMAP_AREA_END
- MMAP_AREA_START
);
1424 if (env
->kqemu_enabled
) {
1425 kqemu_flush(env
, flush_global
);
1431 static inline void tlb_flush_entry(CPUTLBEntry
*tlb_entry
, target_ulong addr
)
1433 if (addr
== (tlb_entry
->addr_read
&
1434 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
)) ||
1435 addr
== (tlb_entry
->addr_write
&
1436 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
)) ||
1437 addr
== (tlb_entry
->addr_code
&
1438 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
))) {
1439 tlb_entry
->addr_read
= -1;
1440 tlb_entry
->addr_write
= -1;
1441 tlb_entry
->addr_code
= -1;
1445 void tlb_flush_page(CPUState
*env
, target_ulong addr
)
1448 TranslationBlock
*tb
;
1450 #if defined(DEBUG_TLB)
1451 printf("tlb_flush_page: " TARGET_FMT_lx
"\n", addr
);
1453 /* must reset current TB so that interrupts cannot modify the
1454 links while we are modifying them */
1455 env
->current_tb
= NULL
;
1457 addr
&= TARGET_PAGE_MASK
;
1458 i
= (addr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
1459 tlb_flush_entry(&env
->tlb_table
[0][i
], addr
);
1460 tlb_flush_entry(&env
->tlb_table
[1][i
], addr
);
1461 #if (NB_MMU_MODES >= 3)
1462 tlb_flush_entry(&env
->tlb_table
[2][i
], addr
);
1463 #if (NB_MMU_MODES == 4)
1464 tlb_flush_entry(&env
->tlb_table
[3][i
], addr
);
1468 /* Discard jump cache entries for any tb which might potentially
1469 overlap the flushed page. */
1470 i
= tb_jmp_cache_hash_page(addr
- TARGET_PAGE_SIZE
);
1471 memset (&env
->tb_jmp_cache
[i
], 0, TB_JMP_PAGE_SIZE
* sizeof(tb
));
1473 i
= tb_jmp_cache_hash_page(addr
);
1474 memset (&env
->tb_jmp_cache
[i
], 0, TB_JMP_PAGE_SIZE
* sizeof(tb
));
1476 #if !defined(CONFIG_SOFTMMU)
1477 if (addr
< MMAP_AREA_END
)
1478 munmap((void *)addr
, TARGET_PAGE_SIZE
);
1481 if (env
->kqemu_enabled
) {
1482 kqemu_flush_page(env
, addr
);
1487 /* update the TLBs so that writes to code in the virtual page 'addr'
1489 static void tlb_protect_code(ram_addr_t ram_addr
)
1491 cpu_physical_memory_reset_dirty(ram_addr
,
1492 ram_addr
+ TARGET_PAGE_SIZE
,
1496 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1497 tested for self modifying code */
1498 static void tlb_unprotect_code_phys(CPUState
*env
, ram_addr_t ram_addr
,
1501 phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
] |= CODE_DIRTY_FLAG
;
1504 static inline void tlb_reset_dirty_range(CPUTLBEntry
*tlb_entry
,
1505 unsigned long start
, unsigned long length
)
1508 if ((tlb_entry
->addr_write
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
) {
1509 addr
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) + tlb_entry
->addend
;
1510 if ((addr
- start
) < length
) {
1511 tlb_entry
->addr_write
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) | IO_MEM_NOTDIRTY
;
1516 void cpu_physical_memory_reset_dirty(ram_addr_t start
, ram_addr_t end
,
1520 unsigned long length
, start1
;
1524 start
&= TARGET_PAGE_MASK
;
1525 end
= TARGET_PAGE_ALIGN(end
);
1527 length
= end
- start
;
1530 len
= length
>> TARGET_PAGE_BITS
;
1532 /* XXX: should not depend on cpu context */
1534 if (env
->kqemu_enabled
) {
1537 for(i
= 0; i
< len
; i
++) {
1538 kqemu_set_notdirty(env
, addr
);
1539 addr
+= TARGET_PAGE_SIZE
;
1543 mask
= ~dirty_flags
;
1544 p
= phys_ram_dirty
+ (start
>> TARGET_PAGE_BITS
);
1545 for(i
= 0; i
< len
; i
++)
1548 /* we modify the TLB cache so that the dirty bit will be set again
1549 when accessing the range */
1550 start1
= start
+ (unsigned long)phys_ram_base
;
1551 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
1552 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1553 tlb_reset_dirty_range(&env
->tlb_table
[0][i
], start1
, length
);
1554 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1555 tlb_reset_dirty_range(&env
->tlb_table
[1][i
], start1
, length
);
1556 #if (NB_MMU_MODES >= 3)
1557 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1558 tlb_reset_dirty_range(&env
->tlb_table
[2][i
], start1
, length
);
1559 #if (NB_MMU_MODES == 4)
1560 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1561 tlb_reset_dirty_range(&env
->tlb_table
[3][i
], start1
, length
);
1566 #if !defined(CONFIG_SOFTMMU)
1567 /* XXX: this is expensive */
1573 for(i
= 0; i
< L1_SIZE
; i
++) {
1576 addr
= i
<< (TARGET_PAGE_BITS
+ L2_BITS
);
1577 for(j
= 0; j
< L2_SIZE
; j
++) {
1578 if (p
->valid_tag
== virt_valid_tag
&&
1579 p
->phys_addr
>= start
&& p
->phys_addr
< end
&&
1580 (p
->prot
& PROT_WRITE
)) {
1581 if (addr
< MMAP_AREA_END
) {
1582 mprotect((void *)addr
, TARGET_PAGE_SIZE
,
1583 p
->prot
& ~PROT_WRITE
);
1586 addr
+= TARGET_PAGE_SIZE
;
1595 int cpu_physical_memory_set_dirty_tracking(int enable
)
1600 r
= kvm_physical_memory_set_dirty_tracking(enable
);
1602 in_migration
= enable
;
1606 int cpu_physical_memory_get_dirty_tracking(void)
1608 return in_migration
;
1611 static inline void tlb_update_dirty(CPUTLBEntry
*tlb_entry
)
1613 ram_addr_t ram_addr
;
1615 if ((tlb_entry
->addr_write
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
) {
1616 ram_addr
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) +
1617 tlb_entry
->addend
- (unsigned long)phys_ram_base
;
1618 if (!cpu_physical_memory_is_dirty(ram_addr
)) {
1619 tlb_entry
->addr_write
|= IO_MEM_NOTDIRTY
;
1624 /* update the TLB according to the current state of the dirty bits */
1625 void cpu_tlb_update_dirty(CPUState
*env
)
1628 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1629 tlb_update_dirty(&env
->tlb_table
[0][i
]);
1630 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1631 tlb_update_dirty(&env
->tlb_table
[1][i
]);
1632 #if (NB_MMU_MODES >= 3)
1633 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1634 tlb_update_dirty(&env
->tlb_table
[2][i
]);
1635 #if (NB_MMU_MODES == 4)
1636 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1637 tlb_update_dirty(&env
->tlb_table
[3][i
]);
1642 static inline void tlb_set_dirty1(CPUTLBEntry
*tlb_entry
,
1643 unsigned long start
)
1646 if ((tlb_entry
->addr_write
& ~TARGET_PAGE_MASK
) == IO_MEM_NOTDIRTY
) {
1647 addr
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) + tlb_entry
->addend
;
1648 if (addr
== start
) {
1649 tlb_entry
->addr_write
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) | IO_MEM_RAM
;
1654 /* update the TLB corresponding to virtual page vaddr and phys addr
1655 addr so that it is no longer dirty */
1656 static inline void tlb_set_dirty(CPUState
*env
,
1657 unsigned long addr
, target_ulong vaddr
)
1661 addr
&= TARGET_PAGE_MASK
;
1662 i
= (vaddr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
1663 tlb_set_dirty1(&env
->tlb_table
[0][i
], addr
);
1664 tlb_set_dirty1(&env
->tlb_table
[1][i
], addr
);
1665 #if (NB_MMU_MODES >= 3)
1666 tlb_set_dirty1(&env
->tlb_table
[2][i
], addr
);
1667 #if (NB_MMU_MODES == 4)
1668 tlb_set_dirty1(&env
->tlb_table
[3][i
], addr
);
1673 /* add a new TLB entry. At most one entry for a given virtual address
1674 is permitted. Return 0 if OK or 2 if the page could not be mapped
1675 (can only happen in non SOFTMMU mode for I/O pages or pages
1676 conflicting with the host address space). */
1677 int tlb_set_page_exec(CPUState
*env
, target_ulong vaddr
,
1678 target_phys_addr_t paddr
, int prot
,
1679 int mmu_idx
, int is_softmmu
)
1684 target_ulong address
;
1685 target_phys_addr_t addend
;
1690 p
= phys_page_find(paddr
>> TARGET_PAGE_BITS
);
1692 pd
= IO_MEM_UNASSIGNED
;
1694 pd
= p
->phys_offset
;
1696 #if defined(DEBUG_TLB)
1697 printf("tlb_set_page: vaddr=" TARGET_FMT_lx
" paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1698 vaddr
, (int)paddr
, prot
, mmu_idx
, is_softmmu
, pd
);
1702 #if !defined(CONFIG_SOFTMMU)
1706 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
&& !(pd
& IO_MEM_ROMD
)) {
1707 /* IO memory case */
1708 address
= vaddr
| pd
;
1711 /* standard memory */
1713 addend
= (unsigned long)phys_ram_base
+ (pd
& TARGET_PAGE_MASK
);
1716 /* Make accesses to pages with watchpoints go via the
1717 watchpoint trap routines. */
1718 for (i
= 0; i
< env
->nb_watchpoints
; i
++) {
1719 if (vaddr
== (env
->watchpoint
[i
].vaddr
& TARGET_PAGE_MASK
)) {
1720 if (address
& ~TARGET_PAGE_MASK
) {
1721 env
->watchpoint
[i
].addend
= 0;
1722 address
= vaddr
| io_mem_watch
;
1724 env
->watchpoint
[i
].addend
= pd
- paddr
+
1725 (unsigned long) phys_ram_base
;
1726 /* TODO: Figure out how to make read watchpoints coexist
1728 pd
= (pd
& TARGET_PAGE_MASK
) | io_mem_watch
| IO_MEM_ROMD
;
1733 index
= (vaddr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
1735 te
= &env
->tlb_table
[mmu_idx
][index
];
1736 te
->addend
= addend
;
1737 if (prot
& PAGE_READ
) {
1738 te
->addr_read
= address
;
1742 if (prot
& PAGE_EXEC
) {
1743 te
->addr_code
= address
;
1747 if (prot
& PAGE_WRITE
) {
1748 if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_ROM
||
1749 (pd
& IO_MEM_ROMD
)) {
1750 /* write access calls the I/O callback */
1751 te
->addr_write
= vaddr
|
1752 (pd
& ~(TARGET_PAGE_MASK
| IO_MEM_ROMD
));
1753 } else if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
&&
1754 !cpu_physical_memory_is_dirty(pd
)) {
1755 te
->addr_write
= vaddr
| IO_MEM_NOTDIRTY
;
1757 te
->addr_write
= address
;
1760 te
->addr_write
= -1;
1763 #if !defined(CONFIG_SOFTMMU)
1765 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
) {
1766 /* IO access: no mapping is done as it will be handled by the
1768 if (!(env
->hflags
& HF_SOFTMMU_MASK
))
1773 if (vaddr
>= MMAP_AREA_END
) {
1776 if (prot
& PROT_WRITE
) {
1777 if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_ROM
||
1778 #if defined(TARGET_HAS_SMC) || 1
1781 ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
&&
1782 !cpu_physical_memory_is_dirty(pd
))) {
1783 /* ROM: we do as if code was inside */
1784 /* if code is present, we only map as read only and save the
1788 vp
= virt_page_find_alloc(vaddr
>> TARGET_PAGE_BITS
, 1);
1791 vp
->valid_tag
= virt_valid_tag
;
1792 prot
&= ~PAGE_WRITE
;
1795 map_addr
= mmap((void *)vaddr
, TARGET_PAGE_SIZE
, prot
,
1796 MAP_SHARED
| MAP_FIXED
, phys_ram_fd
, (pd
& TARGET_PAGE_MASK
));
1797 if (map_addr
== MAP_FAILED
) {
1798 cpu_abort(env
, "mmap failed when mapped physical address 0x%08x to virtual address 0x%08x\n",
1808 /* called from signal handler: invalidate the code and unprotect the
1809 page. Return TRUE if the fault was succesfully handled. */
1810 int page_unprotect(target_ulong addr
, unsigned long pc
, void *puc
)
1812 #if !defined(CONFIG_SOFTMMU)
1815 #if defined(DEBUG_TLB)
1816 printf("page_unprotect: addr=0x%08x\n", addr
);
1818 addr
&= TARGET_PAGE_MASK
;
1820 /* if it is not mapped, no need to worry here */
1821 if (addr
>= MMAP_AREA_END
)
1823 vp
= virt_page_find(addr
>> TARGET_PAGE_BITS
);
1826 /* NOTE: in this case, validate_tag is _not_ tested as it
1827 validates only the code TLB */
1828 if (vp
->valid_tag
!= virt_valid_tag
)
1830 if (!(vp
->prot
& PAGE_WRITE
))
1832 #if defined(DEBUG_TLB)
1833 printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
1834 addr
, vp
->phys_addr
, vp
->prot
);
1836 if (mprotect((void *)addr
, TARGET_PAGE_SIZE
, vp
->prot
) < 0)
1837 cpu_abort(cpu_single_env
, "error mprotect addr=0x%lx prot=%d\n",
1838 (unsigned long)addr
, vp
->prot
);
1839 /* set the dirty bit */
1840 phys_ram_dirty
[vp
->phys_addr
>> TARGET_PAGE_BITS
] = 0xff;
1841 /* flush the code inside */
1842 tb_invalidate_phys_page(vp
->phys_addr
, pc
, puc
);
1851 void tlb_flush(CPUState
*env
, int flush_global
)
1855 void tlb_flush_page(CPUState
*env
, target_ulong addr
)
1859 int tlb_set_page_exec(CPUState
*env
, target_ulong vaddr
,
1860 target_phys_addr_t paddr
, int prot
,
1861 int mmu_idx
, int is_softmmu
)
1866 /* dump memory mappings */
1867 void page_dump(FILE *f
)
1869 unsigned long start
, end
;
1870 int i
, j
, prot
, prot1
;
1873 fprintf(f
, "%-8s %-8s %-8s %s\n",
1874 "start", "end", "size", "prot");
1878 for(i
= 0; i
<= L1_SIZE
; i
++) {
1883 for(j
= 0;j
< L2_SIZE
; j
++) {
1888 if (prot1
!= prot
) {
1889 end
= (i
<< (32 - L1_BITS
)) | (j
<< TARGET_PAGE_BITS
);
1891 fprintf(f
, "%08lx-%08lx %08lx %c%c%c\n",
1892 start
, end
, end
- start
,
1893 prot
& PAGE_READ
? 'r' : '-',
1894 prot
& PAGE_WRITE
? 'w' : '-',
1895 prot
& PAGE_EXEC
? 'x' : '-');
1909 int page_get_flags(target_ulong address
)
1913 p
= page_find(address
>> TARGET_PAGE_BITS
);
1919 /* modify the flags of a page and invalidate the code if
1920 necessary. The flag PAGE_WRITE_ORG is positionned automatically
1921 depending on PAGE_WRITE */
1922 void page_set_flags(target_ulong start
, target_ulong end
, int flags
)
1927 start
= start
& TARGET_PAGE_MASK
;
1928 end
= TARGET_PAGE_ALIGN(end
);
1929 if (flags
& PAGE_WRITE
)
1930 flags
|= PAGE_WRITE_ORG
;
1931 spin_lock(&tb_lock
);
1932 for(addr
= start
; addr
< end
; addr
+= TARGET_PAGE_SIZE
) {
1933 p
= page_find_alloc(addr
>> TARGET_PAGE_BITS
);
1934 /* if the write protection is set, then we invalidate the code
1936 if (!(p
->flags
& PAGE_WRITE
) &&
1937 (flags
& PAGE_WRITE
) &&
1939 tb_invalidate_phys_page(addr
, 0, NULL
);
1943 spin_unlock(&tb_lock
);
1946 int page_check_range(target_ulong start
, target_ulong len
, int flags
)
1952 end
= TARGET_PAGE_ALIGN(start
+len
); /* must do before we loose bits in the next step */
1953 start
= start
& TARGET_PAGE_MASK
;
1956 /* we've wrapped around */
1958 for(addr
= start
; addr
< end
; addr
+= TARGET_PAGE_SIZE
) {
1959 p
= page_find(addr
>> TARGET_PAGE_BITS
);
1962 if( !(p
->flags
& PAGE_VALID
) )
1965 if ((flags
& PAGE_READ
) && !(p
->flags
& PAGE_READ
))
1967 if (flags
& PAGE_WRITE
) {
1968 if (!(p
->flags
& PAGE_WRITE_ORG
))
1970 /* unprotect the page if it was put read-only because it
1971 contains translated code */
1972 if (!(p
->flags
& PAGE_WRITE
)) {
1973 if (!page_unprotect(addr
, 0, NULL
))
1982 /* called from signal handler: invalidate the code and unprotect the
1983 page. Return TRUE if the fault was succesfully handled. */
1984 int page_unprotect(target_ulong address
, unsigned long pc
, void *puc
)
1986 unsigned int page_index
, prot
, pindex
;
1988 target_ulong host_start
, host_end
, addr
;
1990 host_start
= address
& qemu_host_page_mask
;
1991 page_index
= host_start
>> TARGET_PAGE_BITS
;
1992 p1
= page_find(page_index
);
1995 host_end
= host_start
+ qemu_host_page_size
;
1998 for(addr
= host_start
;addr
< host_end
; addr
+= TARGET_PAGE_SIZE
) {
2002 /* if the page was really writable, then we change its
2003 protection back to writable */
2004 if (prot
& PAGE_WRITE_ORG
) {
2005 pindex
= (address
- host_start
) >> TARGET_PAGE_BITS
;
2006 if (!(p1
[pindex
].flags
& PAGE_WRITE
)) {
2007 mprotect((void *)g2h(host_start
), qemu_host_page_size
,
2008 (prot
& PAGE_BITS
) | PAGE_WRITE
);
2009 p1
[pindex
].flags
|= PAGE_WRITE
;
2010 /* and since the content will be modified, we must invalidate
2011 the corresponding translated code. */
2012 tb_invalidate_phys_page(address
, pc
, puc
);
2013 #ifdef DEBUG_TB_CHECK
2014 tb_invalidate_check(address
);
2022 static inline void tlb_set_dirty(CPUState
*env
,
2023 unsigned long addr
, target_ulong vaddr
)
2026 #endif /* defined(CONFIG_USER_ONLY) */
2028 static int subpage_register (subpage_t
*mmio
, uint32_t start
, uint32_t end
,
2030 static void *subpage_init (target_phys_addr_t base
, uint32_t *phys
,
2032 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2035 if (addr > start_addr) \
2038 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2039 if (start_addr2 > 0) \
2043 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2044 end_addr2 = TARGET_PAGE_SIZE - 1; \
2046 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2047 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2052 /* register physical memory. 'size' must be a multiple of the target
2053 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2055 void cpu_register_physical_memory(target_phys_addr_t start_addr
,
2057 unsigned long phys_offset
)
2059 target_phys_addr_t addr
, end_addr
;
2062 unsigned long orig_size
= size
;
2065 size
= (size
+ TARGET_PAGE_SIZE
- 1) & TARGET_PAGE_MASK
;
2066 end_addr
= start_addr
+ (target_phys_addr_t
)size
;
2067 for(addr
= start_addr
; addr
!= end_addr
; addr
+= TARGET_PAGE_SIZE
) {
2068 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
2069 if (p
&& p
->phys_offset
!= IO_MEM_UNASSIGNED
) {
2070 unsigned long orig_memory
= p
->phys_offset
;
2071 target_phys_addr_t start_addr2
, end_addr2
;
2072 int need_subpage
= 0;
2074 CHECK_SUBPAGE(addr
, start_addr
, start_addr2
, end_addr
, end_addr2
,
2077 if (!(orig_memory
& IO_MEM_SUBPAGE
)) {
2078 subpage
= subpage_init((addr
& TARGET_PAGE_MASK
),
2079 &p
->phys_offset
, orig_memory
);
2081 subpage
= io_mem_opaque
[(orig_memory
& ~TARGET_PAGE_MASK
)
2084 subpage_register(subpage
, start_addr2
, end_addr2
, phys_offset
);
2086 p
->phys_offset
= phys_offset
;
2087 if ((phys_offset
& ~TARGET_PAGE_MASK
) <= IO_MEM_ROM
||
2088 (phys_offset
& IO_MEM_ROMD
))
2089 phys_offset
+= TARGET_PAGE_SIZE
;
2092 p
= phys_page_find_alloc(addr
>> TARGET_PAGE_BITS
, 1);
2093 p
->phys_offset
= phys_offset
;
2094 if ((phys_offset
& ~TARGET_PAGE_MASK
) <= IO_MEM_ROM
||
2095 (phys_offset
& IO_MEM_ROMD
))
2096 phys_offset
+= TARGET_PAGE_SIZE
;
2098 target_phys_addr_t start_addr2
, end_addr2
;
2099 int need_subpage
= 0;
2101 CHECK_SUBPAGE(addr
, start_addr
, start_addr2
, end_addr
,
2102 end_addr2
, need_subpage
);
2105 subpage
= subpage_init((addr
& TARGET_PAGE_MASK
),
2106 &p
->phys_offset
, IO_MEM_UNASSIGNED
);
2107 subpage_register(subpage
, start_addr2
, end_addr2
,
2114 /* since each CPU stores ram addresses in its TLB cache, we must
2115 reset the modified entries */
2117 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
2122 /* XXX: temporary until new memory mapping API */
2123 uint32_t cpu_get_physical_page_desc(target_phys_addr_t addr
)
2127 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
2129 return IO_MEM_UNASSIGNED
;
2130 return p
->phys_offset
;
2133 /* XXX: better than nothing */
2134 ram_addr_t
qemu_ram_alloc(unsigned long size
)
2137 if ((phys_ram_alloc_offset
+ size
) > phys_ram_size
) {
2138 fprintf(stderr
, "Not enough memory (requested_size = %lu, max memory = %d)\n",
2139 size
, phys_ram_size
);
2142 addr
= phys_ram_alloc_offset
;
2143 phys_ram_alloc_offset
= TARGET_PAGE_ALIGN(phys_ram_alloc_offset
+ size
);
2147 void qemu_ram_free(ram_addr_t addr
)
2151 static uint32_t unassigned_mem_readb(void *opaque
, target_phys_addr_t addr
)
2153 #ifdef DEBUG_UNASSIGNED
2154 printf("Unassigned mem read " TARGET_FMT_plx
"\n", addr
);
2157 do_unassigned_access(addr
, 0, 0, 0);
2159 do_unassigned_access(addr
, 0, 0, 0);
2164 static void unassigned_mem_writeb(void *opaque
, target_phys_addr_t addr
, uint32_t val
)
2166 #ifdef DEBUG_UNASSIGNED
2167 printf("Unassigned mem write " TARGET_FMT_plx
" = 0x%x\n", addr
, val
);
2170 do_unassigned_access(addr
, 1, 0, 0);
2172 do_unassigned_access(addr
, 1, 0, 0);
2176 static CPUReadMemoryFunc
*unassigned_mem_read
[3] = {
2177 unassigned_mem_readb
,
2178 unassigned_mem_readb
,
2179 unassigned_mem_readb
,
2182 static CPUWriteMemoryFunc
*unassigned_mem_write
[3] = {
2183 unassigned_mem_writeb
,
2184 unassigned_mem_writeb
,
2185 unassigned_mem_writeb
,
2188 static void notdirty_mem_writeb(void *opaque
, target_phys_addr_t addr
, uint32_t val
)
2190 unsigned long ram_addr
;
2192 ram_addr
= addr
- (unsigned long)phys_ram_base
;
2193 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2194 if (!(dirty_flags
& CODE_DIRTY_FLAG
)) {
2195 #if !defined(CONFIG_USER_ONLY)
2196 tb_invalidate_phys_page_fast(ram_addr
, 1);
2197 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2200 stb_p((uint8_t *)(long)addr
, val
);
2202 if (cpu_single_env
->kqemu_enabled
&&
2203 (dirty_flags
& KQEMU_MODIFY_PAGE_MASK
) != KQEMU_MODIFY_PAGE_MASK
)
2204 kqemu_modify_page(cpu_single_env
, ram_addr
);
2206 dirty_flags
|= (0xff & ~CODE_DIRTY_FLAG
);
2207 phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
] = dirty_flags
;
2208 /* we remove the notdirty callback only if the code has been
2210 if (dirty_flags
== 0xff)
2211 tlb_set_dirty(cpu_single_env
, addr
, cpu_single_env
->mem_write_vaddr
);
2214 static void notdirty_mem_writew(void *opaque
, target_phys_addr_t addr
, uint32_t val
)
2216 unsigned long ram_addr
;
2218 ram_addr
= addr
- (unsigned long)phys_ram_base
;
2219 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2220 if (!(dirty_flags
& CODE_DIRTY_FLAG
)) {
2221 #if !defined(CONFIG_USER_ONLY)
2222 tb_invalidate_phys_page_fast(ram_addr
, 2);
2223 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2226 stw_p((uint8_t *)(long)addr
, val
);
2228 if (cpu_single_env
->kqemu_enabled
&&
2229 (dirty_flags
& KQEMU_MODIFY_PAGE_MASK
) != KQEMU_MODIFY_PAGE_MASK
)
2230 kqemu_modify_page(cpu_single_env
, ram_addr
);
2232 dirty_flags
|= (0xff & ~CODE_DIRTY_FLAG
);
2233 phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
] = dirty_flags
;
2234 /* we remove the notdirty callback only if the code has been
2236 if (dirty_flags
== 0xff)
2237 tlb_set_dirty(cpu_single_env
, addr
, cpu_single_env
->mem_write_vaddr
);
2240 static void notdirty_mem_writel(void *opaque
, target_phys_addr_t addr
, uint32_t val
)
2242 unsigned long ram_addr
;
2244 ram_addr
= addr
- (unsigned long)phys_ram_base
;
2245 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2246 if (!(dirty_flags
& CODE_DIRTY_FLAG
)) {
2247 #if !defined(CONFIG_USER_ONLY)
2248 tb_invalidate_phys_page_fast(ram_addr
, 4);
2249 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2252 stl_p((uint8_t *)(long)addr
, val
);
2254 if (cpu_single_env
->kqemu_enabled
&&
2255 (dirty_flags
& KQEMU_MODIFY_PAGE_MASK
) != KQEMU_MODIFY_PAGE_MASK
)
2256 kqemu_modify_page(cpu_single_env
, ram_addr
);
2258 dirty_flags
|= (0xff & ~CODE_DIRTY_FLAG
);
2259 phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
] = dirty_flags
;
2260 /* we remove the notdirty callback only if the code has been
2262 if (dirty_flags
== 0xff)
2263 tlb_set_dirty(cpu_single_env
, addr
, cpu_single_env
->mem_write_vaddr
);
2266 static CPUReadMemoryFunc
*error_mem_read
[3] = {
2267 NULL
, /* never used */
2268 NULL
, /* never used */
2269 NULL
, /* never used */
2272 static CPUWriteMemoryFunc
*notdirty_mem_write
[3] = {
2273 notdirty_mem_writeb
,
2274 notdirty_mem_writew
,
2275 notdirty_mem_writel
,
2278 #if defined(CONFIG_SOFTMMU)
2279 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2280 so these check for a hit then pass through to the normal out-of-line
2282 static uint32_t watch_mem_readb(void *opaque
, target_phys_addr_t addr
)
2284 return ldub_phys(addr
);
2287 static uint32_t watch_mem_readw(void *opaque
, target_phys_addr_t addr
)
2289 return lduw_phys(addr
);
2292 static uint32_t watch_mem_readl(void *opaque
, target_phys_addr_t addr
)
2294 return ldl_phys(addr
);
2297 /* Generate a debug exception if a watchpoint has been hit.
2298 Returns the real physical address of the access. addr will be a host
2299 address in case of a RAM location. */
2300 static target_ulong
check_watchpoint(target_phys_addr_t addr
)
2302 CPUState
*env
= cpu_single_env
;
2304 target_ulong retaddr
;
2308 for (i
= 0; i
< env
->nb_watchpoints
; i
++) {
2309 watch
= env
->watchpoint
[i
].vaddr
;
2310 if (((env
->mem_write_vaddr
^ watch
) & TARGET_PAGE_MASK
) == 0) {
2311 retaddr
= addr
- env
->watchpoint
[i
].addend
;
2312 if (((addr
^ watch
) & ~TARGET_PAGE_MASK
) == 0) {
2313 cpu_single_env
->watchpoint_hit
= i
+ 1;
2314 cpu_interrupt(cpu_single_env
, CPU_INTERRUPT_DEBUG
);
2322 static void watch_mem_writeb(void *opaque
, target_phys_addr_t addr
,
2325 addr
= check_watchpoint(addr
);
2326 stb_phys(addr
, val
);
2329 static void watch_mem_writew(void *opaque
, target_phys_addr_t addr
,
2332 addr
= check_watchpoint(addr
);
2333 stw_phys(addr
, val
);
2336 static void watch_mem_writel(void *opaque
, target_phys_addr_t addr
,
2339 addr
= check_watchpoint(addr
);
2340 stl_phys(addr
, val
);
2343 static CPUReadMemoryFunc
*watch_mem_read
[3] = {
2349 static CPUWriteMemoryFunc
*watch_mem_write
[3] = {
2356 static inline uint32_t subpage_readlen (subpage_t
*mmio
, target_phys_addr_t addr
,
2359 CPUReadMemoryFunc
**mem_read
;
2363 idx
= SUBPAGE_IDX(addr
- mmio
->base
);
2364 #if defined(DEBUG_SUBPAGE)
2365 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
" idx %d\n", __func__
,
2366 mmio
, len
, addr
, idx
);
2368 mem_read
= mmio
->mem_read
[idx
];
2369 ret
= (*mem_read
[len
])(mmio
->opaque
[idx
], addr
);
2374 static inline void subpage_writelen (subpage_t
*mmio
, target_phys_addr_t addr
,
2375 uint32_t value
, unsigned int len
)
2377 CPUWriteMemoryFunc
**mem_write
;
2380 idx
= SUBPAGE_IDX(addr
- mmio
->base
);
2381 #if defined(DEBUG_SUBPAGE)
2382 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
" idx %d value %08x\n", __func__
,
2383 mmio
, len
, addr
, idx
, value
);
2385 mem_write
= mmio
->mem_write
[idx
];
2386 (*mem_write
[len
])(mmio
->opaque
[idx
], addr
, value
);
static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 0);
}

static void subpage_writeb (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 0);
}

static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 1);
}

static void subpage_writew (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 1);
}

static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 2);
}

static void subpage_writel (void *opaque,
                            target_phys_addr_t addr, uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 2);
}
static CPUReadMemoryFunc *subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};

static CPUWriteMemoryFunc *subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             int memory)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    memory >>= IO_MEM_SHIFT;
    for (; idx <= eidx; idx++) {
        mmio->mem_read[idx] = io_mem_read[memory];
        mmio->mem_write[idx] = io_mem_write[memory];
        mmio->opaque[idx] = io_mem_opaque[memory];
    }

    return 0;
}
static void *subpage_init (target_phys_addr_t base, uint32_t *phys,
                           int orig_memory)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = qemu_mallocz(sizeof(subpage_t));
    if (mmio != NULL) {
        mmio->base = base;
        subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
#if defined(DEBUG_SUBPAGE)
        printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
               mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
        *phys = subpage_memory | IO_MEM_SUBPAGE;
        subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
    }

    return mmio;
}
static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
    io_mem_nb = 5;

#if defined(CONFIG_SOFTMMU)
    io_mem_watch = cpu_register_io_memory(-1, watch_mem_read,
                                          watch_mem_write, NULL);
#endif
    /* alloc dirty bits array */
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
}
/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). All functions must be supplied. If io_index is non zero, the
   corresponding io zone is modified. If it is zero, a new io zone is
   allocated. The return value can be used with
   cpu_register_physical_memory(). (-1) is returned if error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i;

    if (io_index <= 0) {
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0;i < 3; i++) {
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return io_index << IO_MEM_SHIFT;
}
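
/* Example sketch: a device model might register a 3-entry callback table
   and map it at a guest physical address.  The mydev_* names and
   MYDEV_BASE below are hypothetical and used only for illustration;
   the block is not compiled. */
#if 0
static uint32_t mydev_readb(void *opaque, target_phys_addr_t addr)
{
    /* return a device register value for a 1 byte access */
    return 0;
}

static void mydev_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    /* handle a 1 byte store to the device */
}

static CPUReadMemoryFunc *mydev_read[3] = {
    mydev_readb, mydev_readb, mydev_readb,   /* byte, word, dword */
};
static CPUWriteMemoryFunc *mydev_write[3] = {
    mydev_writeb, mydev_writeb, mydev_writeb,
};

static void mydev_map(void)
{
    /* io_index 0 requests a new I/O zone; the shifted return value is
       what cpu_register_physical_memory() takes as phys_offset */
    int io = cpu_register_io_memory(0, mydev_read, mydev_write, NULL);
    cpu_register_physical_memory(MYDEV_BASE, TARGET_PAGE_SIZE, io);
}
#endif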
CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
{
    return io_mem_write[io_index >> IO_MEM_SHIFT];
}

CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
{
    return io_mem_read[io_index >> IO_MEM_SHIFT];
}
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, len, 0)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(p, buf, len);
            unlock_user(p, addr, len);
        } else {
            if (!(flags & PAGE_READ))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, len, 1)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(buf, p, len);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
                /* qemu doesn't execute guest code directly, but kvm does
                   therefore flush instruction caches */
                flush_icache_range((unsigned long)ptr, ((unsigned long)ptr)+l);
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = phys_ram_base + addr1;
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
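
/* Example sketch: a board init function might copy a firmware image into
   a ROM region with cpu_physical_memory_write_rom(), which also writes
   to pages mapped as ROM.  BIOS_BASE, bios_buf and bios_size are
   hypothetical names; the block is not compiled. */
#if 0
static void load_firmware_example(const uint8_t *bios_buf, int bios_size)
{
    /* the copy succeeds even though the target pages are read-only
       for the guest */
    cpu_physical_memory_write_rom(BIOS_BASE, bios_buf, bios_size);
}
#endif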
/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}
/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}

#ifdef __GNUC__
#define likely(x)   __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#else
#define likely(x)   x
#define unlikely(x) x
#endif
/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                    (0xff & ~CODE_DIRTY_FLAG);
            }
        }
    }
}
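
/* Example sketch: target MMU helpers typically use stl_phys_notdirty()
   to set accessed/dirty bits in a guest page table entry, so that the
   store itself does not dirty the RAM page or invalidate translated
   code on it.  pte_addr and PTE_DIRTY are hypothetical names; the block
   is not compiled. */
#if 0
static void set_pte_dirty_example(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);
    stl_phys_notdirty(pte_addr, pte | PTE_DIRTY);
}
#endif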
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}
/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}

#endif
/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
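
/* Example sketch: a debugger front end (such as a gdb stub) might read
   guest virtual memory through cpu_memory_rw_debug(), which translates
   each page with cpu_get_phys_page_debug() before falling back to
   cpu_physical_memory_rw().  read_guest_u32_example is a hypothetical
   helper; the block is not compiled. */
#if 0
static int read_guest_u32_example(CPUState *env, target_ulong vaddr,
                                  uint32_t *out)
{
    uint8_t buf[4];

    if (cpu_memory_rw_debug(env, vaddr, buf, sizeof(buf), 0) != 0)
        return -1;          /* no mapping for this virtual address */
    *out = ldl_p(buf);      /* host load honouring target byte order */
    return 0;
}
#endif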
void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "TB count %d\n", nb_tbs);
    cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
}
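
/* Example sketch: the statistics above can be sent to any FILE* by
   passing a printf-like callback, e.g. plain fprintf for stderr.
   dump_tb_stats_example is a hypothetical helper; the block is not
   compiled. */
#if 0
static void dump_tb_stats_example(void)
{
    dump_exec_info(stderr, fprintf);
}
#endif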
#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"
#define SHIFT 1
#include "softmmu_template.h"
#define SHIFT 2
#include "softmmu_template.h"
#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif