2 * virtual page mapping and translated block handling
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
24 #include <sys/types.h>
38 //#define DEBUG_TB_INVALIDATE
42 /* make various TB consistency checks */
43 //#define DEBUG_TB_CHECK
44 //#define DEBUG_TLB_CHECK
46 /* threshold to flush the translated code buffer */
47 #define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)
49 #define SMC_BITMAP_USE_THRESHOLD 10
51 #define MMAP_AREA_START 0x00000000
52 #define MMAP_AREA_END 0xa8000000
54 TranslationBlock tbs
[CODE_GEN_MAX_BLOCKS
];
55 TranslationBlock
*tb_hash
[CODE_GEN_HASH_SIZE
];
56 TranslationBlock
*tb_phys_hash
[CODE_GEN_PHYS_HASH_SIZE
];
58 /* any access to the tbs or the page table must use this lock */
59 spinlock_t tb_lock
= SPIN_LOCK_UNLOCKED
;
61 uint8_t code_gen_buffer
[CODE_GEN_BUFFER_SIZE
] __attribute__((aligned (32)));
62 uint8_t *code_gen_ptr
;
66 uint8_t *phys_ram_base
;
67 uint8_t *phys_ram_dirty
;
69 typedef struct PageDesc
{
70 /* list of TBs intersecting this ram page */
71 TranslationBlock
*first_tb
;
72 /* in order to optimize self modifying code, we count the number
73 of lookups we do to a given page to use a bitmap */
74 unsigned int code_write_count
;
76 #if defined(CONFIG_USER_ONLY)
81 typedef struct PhysPageDesc
{
82 /* offset in host memory of the page + io_index in the low 12 bits */
86 /* Note: the VirtPage handling is obsolete and will be suppressed
88 typedef struct VirtPageDesc
{
89 /* physical address of code page. It is valid only if 'valid_tag'
90 matches 'virt_valid_tag' */
91 target_ulong phys_addr
;
92 unsigned int valid_tag
;
93 #if !defined(CONFIG_SOFTMMU)
94 /* original page access rights. It is valid only if 'valid_tag'
95 matches 'virt_valid_tag' */
101 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
103 #define L1_SIZE (1 << L1_BITS)
104 #define L2_SIZE (1 << L2_BITS)
106 static void io_mem_init(void);
108 unsigned long qemu_real_host_page_size
;
109 unsigned long qemu_host_page_bits
;
110 unsigned long qemu_host_page_size
;
111 unsigned long qemu_host_page_mask
;
113 /* XXX: for system emulation, it could just be an array */
114 static PageDesc
*l1_map
[L1_SIZE
];
115 PhysPageDesc
**l1_phys_map
;
117 #if !defined(CONFIG_USER_ONLY)
118 #if TARGET_LONG_BITS > 32
119 #define VIRT_L_BITS 9
120 #define VIRT_L_SIZE (1 << VIRT_L_BITS)
121 static void *l1_virt_map
[VIRT_L_SIZE
];
123 static VirtPageDesc
*l1_virt_map
[L1_SIZE
];
125 static unsigned int virt_valid_tag
;
128 /* io memory support */
129 CPUWriteMemoryFunc
*io_mem_write
[IO_MEM_NB_ENTRIES
][4];
130 CPUReadMemoryFunc
*io_mem_read
[IO_MEM_NB_ENTRIES
][4];
131 void *io_mem_opaque
[IO_MEM_NB_ENTRIES
];
132 static int io_mem_nb
;
135 char *logfilename
= "/tmp/qemu.log";
140 static int tlb_flush_count
;
141 static int tb_flush_count
;
142 static int tb_phys_invalidate_count
;
144 static void page_init(void)
146 /* NOTE: we can always suppose that qemu_host_page_size >=
150 SYSTEM_INFO system_info
;
153 GetSystemInfo(&system_info
);
154 qemu_real_host_page_size
= system_info
.dwPageSize
;
156 VirtualProtect(code_gen_buffer
, sizeof(code_gen_buffer
),
157 PAGE_EXECUTE_READWRITE
, &old_protect
);
160 qemu_real_host_page_size
= getpagesize();
162 unsigned long start
, end
;
164 start
= (unsigned long)code_gen_buffer
;
165 start
&= ~(qemu_real_host_page_size
- 1);
167 end
= (unsigned long)code_gen_buffer
+ sizeof(code_gen_buffer
);
168 end
+= qemu_real_host_page_size
- 1;
169 end
&= ~(qemu_real_host_page_size
- 1);
171 mprotect((void *)start
, end
- start
,
172 PROT_READ
| PROT_WRITE
| PROT_EXEC
);
176 if (qemu_host_page_size
== 0)
177 qemu_host_page_size
= qemu_real_host_page_size
;
178 if (qemu_host_page_size
< TARGET_PAGE_SIZE
)
179 qemu_host_page_size
= TARGET_PAGE_SIZE
;
180 qemu_host_page_bits
= 0;
181 while ((1 << qemu_host_page_bits
) < qemu_host_page_size
)
182 qemu_host_page_bits
++;
183 qemu_host_page_mask
= ~(qemu_host_page_size
- 1);
184 #if !defined(CONFIG_USER_ONLY)
187 l1_phys_map
= qemu_vmalloc(L1_SIZE
* sizeof(PhysPageDesc
*));
188 memset(l1_phys_map
, 0, L1_SIZE
* sizeof(PhysPageDesc
*));
191 static inline PageDesc
*page_find_alloc(unsigned int index
)
195 lp
= &l1_map
[index
>> L2_BITS
];
198 /* allocate if not found */
199 p
= qemu_malloc(sizeof(PageDesc
) * L2_SIZE
);
200 memset(p
, 0, sizeof(PageDesc
) * L2_SIZE
);
203 return p
+ (index
& (L2_SIZE
- 1));
206 static inline PageDesc
*page_find(unsigned int index
)
210 p
= l1_map
[index
>> L2_BITS
];
213 return p
+ (index
& (L2_SIZE
- 1));
216 static inline PhysPageDesc
*phys_page_find_alloc(unsigned int index
)
218 PhysPageDesc
**lp
, *p
;
220 lp
= &l1_phys_map
[index
>> L2_BITS
];
223 /* allocate if not found */
224 p
= qemu_vmalloc(sizeof(PhysPageDesc
) * L2_SIZE
);
225 memset(p
, 0, sizeof(PhysPageDesc
) * L2_SIZE
);
228 return p
+ (index
& (L2_SIZE
- 1));
231 static inline PhysPageDesc
*phys_page_find(unsigned int index
)
235 p
= l1_phys_map
[index
>> L2_BITS
];
238 return p
+ (index
& (L2_SIZE
- 1));
241 #if !defined(CONFIG_USER_ONLY)
242 static void tlb_protect_code(CPUState
*env
, target_ulong addr
);
243 static void tlb_unprotect_code_phys(CPUState
*env
, unsigned long phys_addr
, target_ulong vaddr
);
245 static VirtPageDesc
*virt_page_find_alloc(target_ulong index
, int alloc
)
247 #if TARGET_LONG_BITS > 32
251 lp
= p
+ ((index
>> (5 * VIRT_L_BITS
)) & (VIRT_L_SIZE
- 1));
256 p
= qemu_mallocz(sizeof(void *) * VIRT_L_SIZE
);
259 lp
= p
+ ((index
>> (4 * VIRT_L_BITS
)) & (VIRT_L_SIZE
- 1));
264 p
= qemu_mallocz(sizeof(void *) * VIRT_L_SIZE
);
267 lp
= p
+ ((index
>> (3 * VIRT_L_BITS
)) & (VIRT_L_SIZE
- 1));
272 p
= qemu_mallocz(sizeof(void *) * VIRT_L_SIZE
);
275 lp
= p
+ ((index
>> (2 * VIRT_L_BITS
)) & (VIRT_L_SIZE
- 1));
280 p
= qemu_mallocz(sizeof(void *) * VIRT_L_SIZE
);
283 lp
= p
+ ((index
>> (1 * VIRT_L_BITS
)) & (VIRT_L_SIZE
- 1));
288 p
= qemu_mallocz(sizeof(VirtPageDesc
) * VIRT_L_SIZE
);
291 return ((VirtPageDesc
*)p
) + (index
& (VIRT_L_SIZE
- 1));
293 VirtPageDesc
*p
, **lp
;
295 lp
= &l1_virt_map
[index
>> L2_BITS
];
298 /* allocate if not found */
301 p
= qemu_mallocz(sizeof(VirtPageDesc
) * L2_SIZE
);
304 return p
+ (index
& (L2_SIZE
- 1));
308 static inline VirtPageDesc
*virt_page_find(target_ulong index
)
310 return virt_page_find_alloc(index
, 0);
313 #if TARGET_LONG_BITS > 32
314 static void virt_page_flush_internal(void **p
, int level
)
318 VirtPageDesc
*q
= (VirtPageDesc
*)p
;
319 for(i
= 0; i
< VIRT_L_SIZE
; i
++)
323 for(i
= 0; i
< VIRT_L_SIZE
; i
++) {
325 virt_page_flush_internal(p
[i
], level
);
331 static void virt_page_flush(void)
335 if (virt_valid_tag
== 0) {
337 #if TARGET_LONG_BITS > 32
338 virt_page_flush_internal(l1_virt_map
, 5);
343 for(i
= 0; i
< L1_SIZE
; i
++) {
346 for(j
= 0; j
< L2_SIZE
; j
++)
355 static void virt_page_flush(void)
360 void cpu_exec_init(void)
363 code_gen_ptr
= code_gen_buffer
;
369 static inline void invalidate_page_bitmap(PageDesc
*p
)
371 if (p
->code_bitmap
) {
372 qemu_free(p
->code_bitmap
);
373 p
->code_bitmap
= NULL
;
375 p
->code_write_count
= 0;
378 /* set to NULL all the 'first_tb' fields in all PageDescs */
379 static void page_flush_tb(void)
384 for(i
= 0; i
< L1_SIZE
; i
++) {
387 for(j
= 0; j
< L2_SIZE
; j
++) {
389 invalidate_page_bitmap(p
);
396 /* flush all the translation blocks */
397 /* XXX: tb_flush is currently not thread safe */
398 void tb_flush(CPUState
*env
)
400 #if defined(DEBUG_FLUSH)
401 printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
402 code_gen_ptr
- code_gen_buffer
,
404 nb_tbs
> 0 ? (code_gen_ptr
- code_gen_buffer
) / nb_tbs
: 0);
407 memset (tb_hash
, 0, CODE_GEN_HASH_SIZE
* sizeof (void *));
410 memset (tb_phys_hash
, 0, CODE_GEN_PHYS_HASH_SIZE
* sizeof (void *));
413 code_gen_ptr
= code_gen_buffer
;
414 /* XXX: flush processor icache at this point if cache flush is
419 #ifdef DEBUG_TB_CHECK
421 static void tb_invalidate_check(unsigned long address
)
423 TranslationBlock
*tb
;
425 address
&= TARGET_PAGE_MASK
;
426 for(i
= 0;i
< CODE_GEN_HASH_SIZE
; i
++) {
427 for(tb
= tb_hash
[i
]; tb
!= NULL
; tb
= tb
->hash_next
) {
428 if (!(address
+ TARGET_PAGE_SIZE
<= tb
->pc
||
429 address
>= tb
->pc
+ tb
->size
)) {
430 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
431 address
, tb
->pc
, tb
->size
);
437 /* verify that all the pages have correct rights for code */
438 static void tb_page_check(void)
440 TranslationBlock
*tb
;
441 int i
, flags1
, flags2
;
443 for(i
= 0;i
< CODE_GEN_HASH_SIZE
; i
++) {
444 for(tb
= tb_hash
[i
]; tb
!= NULL
; tb
= tb
->hash_next
) {
445 flags1
= page_get_flags(tb
->pc
);
446 flags2
= page_get_flags(tb
->pc
+ tb
->size
- 1);
447 if ((flags1
& PAGE_WRITE
) || (flags2
& PAGE_WRITE
)) {
448 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
449 tb
->pc
, tb
->size
, flags1
, flags2
);
455 void tb_jmp_check(TranslationBlock
*tb
)
457 TranslationBlock
*tb1
;
460 /* suppress any remaining jumps to this TB */
464 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
467 tb1
= tb1
->jmp_next
[n1
];
469 /* check end of list */
471 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb
);
477 /* invalidate one TB */
478 static inline void tb_remove(TranslationBlock
**ptb
, TranslationBlock
*tb
,
481 TranslationBlock
*tb1
;
485 *ptb
= *(TranslationBlock
**)((char *)tb1
+ next_offset
);
488 ptb
= (TranslationBlock
**)((char *)tb1
+ next_offset
);
492 static inline void tb_page_remove(TranslationBlock
**ptb
, TranslationBlock
*tb
)
494 TranslationBlock
*tb1
;
500 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
502 *ptb
= tb1
->page_next
[n1
];
505 ptb
= &tb1
->page_next
[n1
];
509 static inline void tb_jmp_remove(TranslationBlock
*tb
, int n
)
511 TranslationBlock
*tb1
, **ptb
;
514 ptb
= &tb
->jmp_next
[n
];
517 /* find tb(n) in circular list */
521 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
522 if (n1
== n
&& tb1
== tb
)
525 ptb
= &tb1
->jmp_first
;
527 ptb
= &tb1
->jmp_next
[n1
];
530 /* now we can suppress tb(n) from the list */
531 *ptb
= tb
->jmp_next
[n
];
533 tb
->jmp_next
[n
] = NULL
;
537 /* reset the jump entry 'n' of a TB so that it is not chained to
539 static inline void tb_reset_jump(TranslationBlock
*tb
, int n
)
541 tb_set_jmp_target(tb
, n
, (unsigned long)(tb
->tc_ptr
+ tb
->tb_next_offset
[n
]));
544 static inline void tb_invalidate(TranslationBlock
*tb
)
547 TranslationBlock
*tb1
, *tb2
, **ptb
;
549 tb_invalidated_flag
= 1;
551 /* remove the TB from the hash list */
552 h
= tb_hash_func(tb
->pc
);
556 /* NOTE: the TB is not necessarily linked in the hash. It
557 indicates that it is not currently used */
561 *ptb
= tb1
->hash_next
;
564 ptb
= &tb1
->hash_next
;
567 /* suppress this TB from the two jump lists */
568 tb_jmp_remove(tb
, 0);
569 tb_jmp_remove(tb
, 1);
571 /* suppress any remaining jumps to this TB */
577 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
578 tb2
= tb1
->jmp_next
[n1
];
579 tb_reset_jump(tb1
, n1
);
580 tb1
->jmp_next
[n1
] = NULL
;
583 tb
->jmp_first
= (TranslationBlock
*)((long)tb
| 2); /* fail safe */
586 static inline void tb_phys_invalidate(TranslationBlock
*tb
, unsigned int page_addr
)
590 target_ulong phys_pc
;
592 /* remove the TB from the hash list */
593 phys_pc
= tb
->page_addr
[0] + (tb
->pc
& ~TARGET_PAGE_MASK
);
594 h
= tb_phys_hash_func(phys_pc
);
595 tb_remove(&tb_phys_hash
[h
], tb
,
596 offsetof(TranslationBlock
, phys_hash_next
));
598 /* remove the TB from the page list */
599 if (tb
->page_addr
[0] != page_addr
) {
600 p
= page_find(tb
->page_addr
[0] >> TARGET_PAGE_BITS
);
601 tb_page_remove(&p
->first_tb
, tb
);
602 invalidate_page_bitmap(p
);
604 if (tb
->page_addr
[1] != -1 && tb
->page_addr
[1] != page_addr
) {
605 p
= page_find(tb
->page_addr
[1] >> TARGET_PAGE_BITS
);
606 tb_page_remove(&p
->first_tb
, tb
);
607 invalidate_page_bitmap(p
);
611 tb_phys_invalidate_count
++;
614 static inline void set_bits(uint8_t *tab
, int start
, int len
)
620 mask
= 0xff << (start
& 7);
621 if ((start
& ~7) == (end
& ~7)) {
623 mask
&= ~(0xff << (end
& 7));
628 start
= (start
+ 8) & ~7;
630 while (start
< end1
) {
635 mask
= ~(0xff << (end
& 7));
641 static void build_page_bitmap(PageDesc
*p
)
643 int n
, tb_start
, tb_end
;
644 TranslationBlock
*tb
;
646 p
->code_bitmap
= qemu_malloc(TARGET_PAGE_SIZE
/ 8);
649 memset(p
->code_bitmap
, 0, TARGET_PAGE_SIZE
/ 8);
654 tb
= (TranslationBlock
*)((long)tb
& ~3);
655 /* NOTE: this is subtle as a TB may span two physical pages */
657 /* NOTE: tb_end may be after the end of the page, but
658 it is not a problem */
659 tb_start
= tb
->pc
& ~TARGET_PAGE_MASK
;
660 tb_end
= tb_start
+ tb
->size
;
661 if (tb_end
> TARGET_PAGE_SIZE
)
662 tb_end
= TARGET_PAGE_SIZE
;
665 tb_end
= ((tb
->pc
+ tb
->size
) & ~TARGET_PAGE_MASK
);
667 set_bits(p
->code_bitmap
, tb_start
, tb_end
- tb_start
);
668 tb
= tb
->page_next
[n
];
672 #ifdef TARGET_HAS_PRECISE_SMC
674 static void tb_gen_code(CPUState
*env
,
675 target_ulong pc
, target_ulong cs_base
, int flags
,
678 TranslationBlock
*tb
;
680 target_ulong phys_pc
, phys_page2
, virt_page2
;
683 phys_pc
= get_phys_addr_code(env
, pc
);
686 /* flush must be done */
688 /* cannot fail at this point */
691 tc_ptr
= code_gen_ptr
;
693 tb
->cs_base
= cs_base
;
696 cpu_gen_code(env
, tb
, CODE_GEN_MAX_SIZE
, &code_gen_size
);
697 code_gen_ptr
= (void *)(((unsigned long)code_gen_ptr
+ code_gen_size
+ CODE_GEN_ALIGN
- 1) & ~(CODE_GEN_ALIGN
- 1));
699 /* check next page if needed */
700 virt_page2
= (pc
+ tb
->size
- 1) & TARGET_PAGE_MASK
;
702 if ((pc
& TARGET_PAGE_MASK
) != virt_page2
) {
703 phys_page2
= get_phys_addr_code(env
, virt_page2
);
705 tb_link_phys(tb
, phys_pc
, phys_page2
);
709 /* invalidate all TBs which intersect with the target physical page
710 starting in range [start;end[. NOTE: start and end must refer to
711 the same physical page. 'is_cpu_write_access' should be true if called
712 from a real cpu write access: the virtual CPU will exit the current
713 TB if code is modified inside this TB. */
714 void tb_invalidate_phys_page_range(target_ulong start
, target_ulong end
,
715 int is_cpu_write_access
)
717 int n
, current_tb_modified
, current_tb_not_found
, current_flags
;
718 CPUState
*env
= cpu_single_env
;
720 TranslationBlock
*tb
, *tb_next
, *current_tb
, *saved_tb
;
721 target_ulong tb_start
, tb_end
;
722 target_ulong current_pc
, current_cs_base
;
724 p
= page_find(start
>> TARGET_PAGE_BITS
);
727 if (!p
->code_bitmap
&&
728 ++p
->code_write_count
>= SMC_BITMAP_USE_THRESHOLD
&&
729 is_cpu_write_access
) {
730 /* build code bitmap */
731 build_page_bitmap(p
);
734 /* we remove all the TBs in the range [start, end[ */
735 /* XXX: see if in some cases it could be faster to invalidate all the code */
736 current_tb_not_found
= is_cpu_write_access
;
737 current_tb_modified
= 0;
738 current_tb
= NULL
; /* avoid warning */
739 current_pc
= 0; /* avoid warning */
740 current_cs_base
= 0; /* avoid warning */
741 current_flags
= 0; /* avoid warning */
745 tb
= (TranslationBlock
*)((long)tb
& ~3);
746 tb_next
= tb
->page_next
[n
];
747 /* NOTE: this is subtle as a TB may span two physical pages */
749 /* NOTE: tb_end may be after the end of the page, but
750 it is not a problem */
751 tb_start
= tb
->page_addr
[0] + (tb
->pc
& ~TARGET_PAGE_MASK
);
752 tb_end
= tb_start
+ tb
->size
;
754 tb_start
= tb
->page_addr
[1];
755 tb_end
= tb_start
+ ((tb
->pc
+ tb
->size
) & ~TARGET_PAGE_MASK
);
757 if (!(tb_end
<= start
|| tb_start
>= end
)) {
758 #ifdef TARGET_HAS_PRECISE_SMC
759 if (current_tb_not_found
) {
760 current_tb_not_found
= 0;
762 if (env
->mem_write_pc
) {
763 /* now we have a real cpu fault */
764 current_tb
= tb_find_pc(env
->mem_write_pc
);
767 if (current_tb
== tb
&&
768 !(current_tb
->cflags
& CF_SINGLE_INSN
)) {
769 /* If we are modifying the current TB, we must stop
770 its execution. We could be more precise by checking
771 that the modification is after the current PC, but it
772 would require a specialized function to partially
773 restore the CPU state */
775 current_tb_modified
= 1;
776 cpu_restore_state(current_tb
, env
,
777 env
->mem_write_pc
, NULL
);
778 #if defined(TARGET_I386)
779 current_flags
= env
->hflags
;
780 current_flags
|= (env
->eflags
& (IOPL_MASK
| TF_MASK
| VM_MASK
));
781 current_cs_base
= (target_ulong
)env
->segs
[R_CS
].base
;
782 current_pc
= current_cs_base
+ env
->eip
;
784 #error unsupported CPU
787 #endif /* TARGET_HAS_PRECISE_SMC */
788 saved_tb
= env
->current_tb
;
789 env
->current_tb
= NULL
;
790 tb_phys_invalidate(tb
, -1);
791 env
->current_tb
= saved_tb
;
792 if (env
->interrupt_request
&& env
->current_tb
)
793 cpu_interrupt(env
, env
->interrupt_request
);
797 #if !defined(CONFIG_USER_ONLY)
798 /* if no code remaining, no need to continue to use slow writes */
800 invalidate_page_bitmap(p
);
801 if (is_cpu_write_access
) {
802 tlb_unprotect_code_phys(env
, start
, env
->mem_write_vaddr
);
806 #ifdef TARGET_HAS_PRECISE_SMC
807 if (current_tb_modified
) {
808 /* we generate a block containing just the instruction
809 modifying the memory. It will ensure that it cannot modify
811 env
->current_tb
= NULL
;
812 tb_gen_code(env
, current_pc
, current_cs_base
, current_flags
,
814 cpu_resume_from_signal(env
, NULL
);
819 /* len must be <= 8 and start must be a multiple of len */
820 static inline void tb_invalidate_phys_page_fast(target_ulong start
, int len
)
827 fprintf(logfile
, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
828 cpu_single_env
->mem_write_vaddr
, len
,
830 cpu_single_env
->eip
+ (long)cpu_single_env
->segs
[R_CS
].base
);
834 p
= page_find(start
>> TARGET_PAGE_BITS
);
837 if (p
->code_bitmap
) {
838 offset
= start
& ~TARGET_PAGE_MASK
;
839 b
= p
->code_bitmap
[offset
>> 3] >> (offset
& 7);
840 if (b
& ((1 << len
) - 1))
844 tb_invalidate_phys_page_range(start
, start
+ len
, 1);
848 #if !defined(CONFIG_SOFTMMU)
849 static void tb_invalidate_phys_page(target_ulong addr
,
850 unsigned long pc
, void *puc
)
852 int n
, current_flags
, current_tb_modified
;
853 target_ulong current_pc
, current_cs_base
;
855 TranslationBlock
*tb
, *current_tb
;
856 #ifdef TARGET_HAS_PRECISE_SMC
857 CPUState
*env
= cpu_single_env
;
860 addr
&= TARGET_PAGE_MASK
;
861 p
= page_find(addr
>> TARGET_PAGE_BITS
);
865 current_tb_modified
= 0;
867 current_pc
= 0; /* avoid warning */
868 current_cs_base
= 0; /* avoid warning */
869 current_flags
= 0; /* avoid warning */
870 #ifdef TARGET_HAS_PRECISE_SMC
872 current_tb
= tb_find_pc(pc
);
877 tb
= (TranslationBlock
*)((long)tb
& ~3);
878 #ifdef TARGET_HAS_PRECISE_SMC
879 if (current_tb
== tb
&&
880 !(current_tb
->cflags
& CF_SINGLE_INSN
)) {
881 /* If we are modifying the current TB, we must stop
882 its execution. We could be more precise by checking
883 that the modification is after the current PC, but it
884 would require a specialized function to partially
885 restore the CPU state */
887 current_tb_modified
= 1;
888 cpu_restore_state(current_tb
, env
, pc
, puc
);
889 #if defined(TARGET_I386)
890 current_flags
= env
->hflags
;
891 current_flags
|= (env
->eflags
& (IOPL_MASK
| TF_MASK
| VM_MASK
));
892 current_cs_base
= (target_ulong
)env
->segs
[R_CS
].base
;
893 current_pc
= current_cs_base
+ env
->eip
;
895 #error unsupported CPU
898 #endif /* TARGET_HAS_PRECISE_SMC */
899 tb_phys_invalidate(tb
, addr
);
900 tb
= tb
->page_next
[n
];
903 #ifdef TARGET_HAS_PRECISE_SMC
904 if (current_tb_modified
) {
905 /* we generate a block containing just the instruction
906 modifying the memory. It will ensure that it cannot modify
908 env
->current_tb
= NULL
;
909 tb_gen_code(env
, current_pc
, current_cs_base
, current_flags
,
911 cpu_resume_from_signal(env
, puc
);
917 /* add the tb in the target page and protect it if necessary */
918 static inline void tb_alloc_page(TranslationBlock
*tb
,
919 unsigned int n
, unsigned int page_addr
)
922 TranslationBlock
*last_first_tb
;
924 tb
->page_addr
[n
] = page_addr
;
925 p
= page_find(page_addr
>> TARGET_PAGE_BITS
);
926 tb
->page_next
[n
] = p
->first_tb
;
927 last_first_tb
= p
->first_tb
;
928 p
->first_tb
= (TranslationBlock
*)((long)tb
| n
);
929 invalidate_page_bitmap(p
);
931 #if defined(TARGET_HAS_SMC) || 1
933 #if defined(CONFIG_USER_ONLY)
934 if (p
->flags
& PAGE_WRITE
) {
935 unsigned long host_start
, host_end
, addr
;
938 /* force the host page as non writable (writes will have a
939 page fault + mprotect overhead) */
940 host_start
= page_addr
& qemu_host_page_mask
;
941 host_end
= host_start
+ qemu_host_page_size
;
943 for(addr
= host_start
; addr
< host_end
; addr
+= TARGET_PAGE_SIZE
)
944 prot
|= page_get_flags(addr
);
945 mprotect((void *)host_start
, qemu_host_page_size
,
946 (prot
& PAGE_BITS
) & ~PAGE_WRITE
);
947 #ifdef DEBUG_TB_INVALIDATE
948 printf("protecting code page: 0x%08lx\n",
951 p
->flags
&= ~PAGE_WRITE
;
954 /* if some code is already present, then the pages are already
955 protected. So we handle the case where only the first TB is
956 allocated in a physical page */
957 if (!last_first_tb
) {
958 target_ulong virt_addr
;
960 virt_addr
= (tb
->pc
& TARGET_PAGE_MASK
) + (n
<< TARGET_PAGE_BITS
);
961 tlb_protect_code(cpu_single_env
, virt_addr
);
965 #endif /* TARGET_HAS_SMC */
968 /* Allocate a new translation block. Flush the translation buffer if
969 too many translation blocks or too much generated code. */
970 TranslationBlock
*tb_alloc(target_ulong pc
)
972 TranslationBlock
*tb
;
974 if (nb_tbs
>= CODE_GEN_MAX_BLOCKS
||
975 (code_gen_ptr
- code_gen_buffer
) >= CODE_GEN_BUFFER_MAX_SIZE
)
983 /* add a new TB and link it to the physical page tables. phys_page2 is
984 (-1) to indicate that only one page contains the TB. */
985 void tb_link_phys(TranslationBlock
*tb
,
986 target_ulong phys_pc
, target_ulong phys_page2
)
989 TranslationBlock
**ptb
;
991 /* add in the physical hash table */
992 h
= tb_phys_hash_func(phys_pc
);
993 ptb
= &tb_phys_hash
[h
];
994 tb
->phys_hash_next
= *ptb
;
997 /* add in the page list */
998 tb_alloc_page(tb
, 0, phys_pc
& TARGET_PAGE_MASK
);
999 if (phys_page2
!= -1)
1000 tb_alloc_page(tb
, 1, phys_page2
);
1002 tb
->page_addr
[1] = -1;
1003 #ifdef DEBUG_TB_CHECK
1008 /* link the tb with the other TBs */
1009 void tb_link(TranslationBlock
*tb
)
1011 #if !defined(CONFIG_USER_ONLY)
1016 /* save the code memory mappings (needed to invalidate the code) */
1017 addr
= tb
->pc
& TARGET_PAGE_MASK
;
1018 vp
= virt_page_find_alloc(addr
>> TARGET_PAGE_BITS
, 1);
1019 #ifdef DEBUG_TLB_CHECK
1020 if (vp
->valid_tag
== virt_valid_tag
&&
1021 vp
->phys_addr
!= tb
->page_addr
[0]) {
1022 printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
1023 addr
, tb
->page_addr
[0], vp
->phys_addr
);
1026 vp
->phys_addr
= tb
->page_addr
[0];
1027 if (vp
->valid_tag
!= virt_valid_tag
) {
1028 vp
->valid_tag
= virt_valid_tag
;
1029 #if !defined(CONFIG_SOFTMMU)
1034 if (tb
->page_addr
[1] != -1) {
1035 addr
+= TARGET_PAGE_SIZE
;
1036 vp
= virt_page_find_alloc(addr
>> TARGET_PAGE_BITS
, 1);
1037 #ifdef DEBUG_TLB_CHECK
1038 if (vp
->valid_tag
== virt_valid_tag
&&
1039 vp
->phys_addr
!= tb
->page_addr
[1]) {
1040 printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
1041 addr
, tb
->page_addr
[1], vp
->phys_addr
);
1044 vp
->phys_addr
= tb
->page_addr
[1];
1045 if (vp
->valid_tag
!= virt_valid_tag
) {
1046 vp
->valid_tag
= virt_valid_tag
;
1047 #if !defined(CONFIG_SOFTMMU)
1055 tb
->jmp_first
= (TranslationBlock
*)((long)tb
| 2);
1056 tb
->jmp_next
[0] = NULL
;
1057 tb
->jmp_next
[1] = NULL
;
1058 #ifdef USE_CODE_COPY
1059 tb
->cflags
&= ~CF_FP_USED
;
1060 if (tb
->cflags
& CF_TB_FP_USED
)
1061 tb
->cflags
|= CF_FP_USED
;
1064 /* init original jump addresses */
1065 if (tb
->tb_next_offset
[0] != 0xffff)
1066 tb_reset_jump(tb
, 0);
1067 if (tb
->tb_next_offset
[1] != 0xffff)
1068 tb_reset_jump(tb
, 1);
1071 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1072 tb[1].tc_ptr. Return NULL if not found */
1073 TranslationBlock
*tb_find_pc(unsigned long tc_ptr
)
1075 int m_min
, m_max
, m
;
1077 TranslationBlock
*tb
;
1081 if (tc_ptr
< (unsigned long)code_gen_buffer
||
1082 tc_ptr
>= (unsigned long)code_gen_ptr
)
1084 /* binary search (cf Knuth) */
1087 while (m_min
<= m_max
) {
1088 m
= (m_min
+ m_max
) >> 1;
1090 v
= (unsigned long)tb
->tc_ptr
;
1093 else if (tc_ptr
< v
) {
1102 static void tb_reset_jump_recursive(TranslationBlock
*tb
);
1104 static inline void tb_reset_jump_recursive2(TranslationBlock
*tb
, int n
)
1106 TranslationBlock
*tb1
, *tb_next
, **ptb
;
1109 tb1
= tb
->jmp_next
[n
];
1111 /* find head of list */
1114 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
1117 tb1
= tb1
->jmp_next
[n1
];
1119 /* we are now sure now that tb jumps to tb1 */
1122 /* remove tb from the jmp_first list */
1123 ptb
= &tb_next
->jmp_first
;
1127 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
1128 if (n1
== n
&& tb1
== tb
)
1130 ptb
= &tb1
->jmp_next
[n1
];
1132 *ptb
= tb
->jmp_next
[n
];
1133 tb
->jmp_next
[n
] = NULL
;
1135 /* suppress the jump to next tb in generated code */
1136 tb_reset_jump(tb
, n
);
1138 /* suppress jumps in the tb on which we could have jumped */
1139 tb_reset_jump_recursive(tb_next
);
1143 static void tb_reset_jump_recursive(TranslationBlock
*tb
)
1145 tb_reset_jump_recursive2(tb
, 0);
1146 tb_reset_jump_recursive2(tb
, 1);
1149 #if defined(TARGET_HAS_ICE)
1150 static void breakpoint_invalidate(CPUState
*env
, target_ulong pc
)
1152 target_ulong phys_addr
;
1154 phys_addr
= cpu_get_phys_page_debug(env
, pc
);
1155 tb_invalidate_phys_page_range(phys_addr
, phys_addr
+ 1, 0);
1159 /* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1160 breakpoint is reached */
1161 int cpu_breakpoint_insert(CPUState
*env
, target_ulong pc
)
1163 #if defined(TARGET_HAS_ICE)
1166 for(i
= 0; i
< env
->nb_breakpoints
; i
++) {
1167 if (env
->breakpoints
[i
] == pc
)
1171 if (env
->nb_breakpoints
>= MAX_BREAKPOINTS
)
1173 env
->breakpoints
[env
->nb_breakpoints
++] = pc
;
1175 breakpoint_invalidate(env
, pc
);
1182 /* remove a breakpoint */
1183 int cpu_breakpoint_remove(CPUState
*env
, target_ulong pc
)
1185 #if defined(TARGET_HAS_ICE)
1187 for(i
= 0; i
< env
->nb_breakpoints
; i
++) {
1188 if (env
->breakpoints
[i
] == pc
)
1193 env
->nb_breakpoints
--;
1194 if (i
< env
->nb_breakpoints
)
1195 env
->breakpoints
[i
] = env
->breakpoints
[env
->nb_breakpoints
];
1197 breakpoint_invalidate(env
, pc
);
1204 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1205 CPU loop after each instruction */
1206 void cpu_single_step(CPUState
*env
, int enabled
)
1208 #if defined(TARGET_HAS_ICE)
1209 if (env
->singlestep_enabled
!= enabled
) {
1210 env
->singlestep_enabled
= enabled
;
1211 /* must flush all the translated code to avoid inconsistencies */
1212 /* XXX: only flush what is necessary */
1218 /* enable or disable low levels log */
1219 void cpu_set_log(int log_flags
)
1221 loglevel
= log_flags
;
1222 if (loglevel
&& !logfile
) {
1223 logfile
= fopen(logfilename
, "w");
1225 perror(logfilename
);
1228 #if !defined(CONFIG_SOFTMMU)
1229 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1231 static uint8_t logfile_buf
[4096];
1232 setvbuf(logfile
, logfile_buf
, _IOLBF
, sizeof(logfile_buf
));
1235 setvbuf(logfile
, NULL
, _IOLBF
, 0);
1240 void cpu_set_log_filename(const char *filename
)
1242 logfilename
= strdup(filename
);
1245 /* mask must never be zero, except for A20 change call */
1246 void cpu_interrupt(CPUState
*env
, int mask
)
1248 TranslationBlock
*tb
;
1249 static int interrupt_lock
;
1251 env
->interrupt_request
|= mask
;
1252 /* if the cpu is currently executing code, we must unlink it and
1253 all the potentially executing TB */
1254 tb
= env
->current_tb
;
1255 if (tb
&& !testandset(&interrupt_lock
)) {
1256 env
->current_tb
= NULL
;
1257 tb_reset_jump_recursive(tb
);
1262 void cpu_reset_interrupt(CPUState
*env
, int mask
)
1264 env
->interrupt_request
&= ~mask
;
1267 CPULogItem cpu_log_items
[] = {
1268 { CPU_LOG_TB_OUT_ASM
, "out_asm",
1269 "show generated host assembly code for each compiled TB" },
1270 { CPU_LOG_TB_IN_ASM
, "in_asm",
1271 "show target assembly code for each compiled TB" },
1272 { CPU_LOG_TB_OP
, "op",
1273 "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
1275 { CPU_LOG_TB_OP_OPT
, "op_opt",
1276 "show micro ops after optimization for each compiled TB" },
1278 { CPU_LOG_INT
, "int",
1279 "show interrupts/exceptions in short format" },
1280 { CPU_LOG_EXEC
, "exec",
1281 "show trace before each executed TB (lots of logs)" },
1282 { CPU_LOG_TB_CPU
, "cpu",
1283 "show CPU state before bloc translation" },
1285 { CPU_LOG_PCALL
, "pcall",
1286 "show protected mode far calls/returns/exceptions" },
1289 { CPU_LOG_IOPORT
, "ioport",
1290 "show all i/o ports accesses" },
/* return 1 if the first 'n' characters of 's1' equal the
   NUL-terminated string 's2' (which must have length exactly 'n'),
   0 otherwise */
static int cmp1(const char *s1, int n, const char *s2)
{
    /* cast avoids a signed/unsigned comparison between int and size_t */
    if (strlen(s2) != (size_t)n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}
1302 /* takes a comma separated list of log masks. Return 0 if error. */
1303 int cpu_str_to_log_mask(const char *str
)
1312 p1
= strchr(p
, ',');
1315 if(cmp1(p
,p1
-p
,"all")) {
1316 for(item
= cpu_log_items
; item
->mask
!= 0; item
++) {
1320 for(item
= cpu_log_items
; item
->mask
!= 0; item
++) {
1321 if (cmp1(p
, p1
- p
, item
->name
))
1335 void cpu_abort(CPUState
*env
, const char *fmt
, ...)
1340 fprintf(stderr
, "qemu: fatal: ");
1341 vfprintf(stderr
, fmt
, ap
);
1342 fprintf(stderr
, "\n");
1344 cpu_dump_state(env
, stderr
, fprintf
, X86_DUMP_FPU
| X86_DUMP_CCOP
);
1346 cpu_dump_state(env
, stderr
, fprintf
, 0);
1352 #if !defined(CONFIG_USER_ONLY)
1354 /* NOTE: if flush_global is true, also flush global entries (not
1356 void tlb_flush(CPUState
*env
, int flush_global
)
1360 #if defined(DEBUG_TLB)
1361 printf("tlb_flush:\n");
1363 /* must reset current TB so that interrupts cannot modify the
1364 links while we are modifying them */
1365 env
->current_tb
= NULL
;
1367 for(i
= 0; i
< CPU_TLB_SIZE
; i
++) {
1368 env
->tlb_read
[0][i
].address
= -1;
1369 env
->tlb_write
[0][i
].address
= -1;
1370 env
->tlb_read
[1][i
].address
= -1;
1371 env
->tlb_write
[1][i
].address
= -1;
1375 memset (tb_hash
, 0, CODE_GEN_HASH_SIZE
* sizeof (void *));
1377 #if !defined(CONFIG_SOFTMMU)
1378 munmap((void *)MMAP_AREA_START
, MMAP_AREA_END
- MMAP_AREA_START
);
1381 if (env
->kqemu_enabled
) {
1382 kqemu_flush(env
, flush_global
);
1388 static inline void tlb_flush_entry(CPUTLBEntry
*tlb_entry
, target_ulong addr
)
1390 if (addr
== (tlb_entry
->address
&
1391 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
)))
1392 tlb_entry
->address
= -1;
1395 void tlb_flush_page(CPUState
*env
, target_ulong addr
)
1400 TranslationBlock
*tb
;
1402 #if defined(DEBUG_TLB)
1403 printf("tlb_flush_page: 0x%08x\n", addr
);
1405 /* must reset current TB so that interrupts cannot modify the
1406 links while we are modifying them */
1407 env
->current_tb
= NULL
;
1409 addr
&= TARGET_PAGE_MASK
;
1410 i
= (addr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
1411 tlb_flush_entry(&env
->tlb_read
[0][i
], addr
);
1412 tlb_flush_entry(&env
->tlb_write
[0][i
], addr
);
1413 tlb_flush_entry(&env
->tlb_read
[1][i
], addr
);
1414 tlb_flush_entry(&env
->tlb_write
[1][i
], addr
);
1416 /* remove from the virtual pc hash table all the TB at this
1419 vp
= virt_page_find(addr
>> TARGET_PAGE_BITS
);
1420 if (vp
&& vp
->valid_tag
== virt_valid_tag
) {
1421 p
= page_find(vp
->phys_addr
>> TARGET_PAGE_BITS
);
1423 /* we remove all the links to the TBs in this virtual page */
1425 while (tb
!= NULL
) {
1427 tb
= (TranslationBlock
*)((long)tb
& ~3);
1428 if ((tb
->pc
& TARGET_PAGE_MASK
) == addr
||
1429 ((tb
->pc
+ tb
->size
- 1) & TARGET_PAGE_MASK
) == addr
) {
1432 tb
= tb
->page_next
[n
];
1438 #if !defined(CONFIG_SOFTMMU)
1439 if (addr
< MMAP_AREA_END
)
1440 munmap((void *)addr
, TARGET_PAGE_SIZE
);
1443 if (env
->kqemu_enabled
) {
1444 kqemu_flush_page(env
, addr
);
1449 static inline void tlb_protect_code1(CPUTLBEntry
*tlb_entry
, target_ulong addr
)
1451 if (addr
== (tlb_entry
->address
&
1452 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
)) &&
1453 (tlb_entry
->address
& ~TARGET_PAGE_MASK
) != IO_MEM_CODE
&&
1454 (tlb_entry
->address
& ~TARGET_PAGE_MASK
) != IO_MEM_ROM
) {
1455 tlb_entry
->address
= (tlb_entry
->address
& TARGET_PAGE_MASK
) | IO_MEM_CODE
;
1459 /* update the TLBs so that writes to code in the virtual page 'addr'
1461 static void tlb_protect_code(CPUState
*env
, target_ulong addr
)
1465 addr
&= TARGET_PAGE_MASK
;
1466 i
= (addr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
1467 tlb_protect_code1(&env
->tlb_write
[0][i
], addr
);
1468 tlb_protect_code1(&env
->tlb_write
[1][i
], addr
);
1469 #if !defined(CONFIG_SOFTMMU)
1470 /* NOTE: as we generated the code for this page, it is already at
1472 if (addr
< MMAP_AREA_END
)
1473 mprotect((void *)addr
, TARGET_PAGE_SIZE
, PROT_READ
);
1477 static inline void tlb_unprotect_code2(CPUTLBEntry
*tlb_entry
,
1478 unsigned long phys_addr
)
1480 if ((tlb_entry
->address
& ~TARGET_PAGE_MASK
) == IO_MEM_CODE
&&
1481 ((tlb_entry
->address
& TARGET_PAGE_MASK
) + tlb_entry
->addend
) == phys_addr
) {
1482 tlb_entry
->address
= (tlb_entry
->address
& TARGET_PAGE_MASK
) | IO_MEM_NOTDIRTY
;
1486 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1487 tested self modifying code */
1488 static void tlb_unprotect_code_phys(CPUState
*env
, unsigned long phys_addr
, target_ulong vaddr
)
1492 phys_addr
&= TARGET_PAGE_MASK
;
1493 phys_addr
+= (long)phys_ram_base
;
1494 i
= (vaddr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
1495 tlb_unprotect_code2(&env
->tlb_write
[0][i
], phys_addr
);
1496 tlb_unprotect_code2(&env
->tlb_write
[1][i
], phys_addr
);
1499 static inline void tlb_reset_dirty_range(CPUTLBEntry
*tlb_entry
,
1500 unsigned long start
, unsigned long length
)
1503 if ((tlb_entry
->address
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
) {
1504 addr
= (tlb_entry
->address
& TARGET_PAGE_MASK
) + tlb_entry
->addend
;
1505 if ((addr
- start
) < length
) {
1506 tlb_entry
->address
= (tlb_entry
->address
& TARGET_PAGE_MASK
) | IO_MEM_NOTDIRTY
;
1511 void cpu_physical_memory_reset_dirty(target_ulong start
, target_ulong end
,
1515 unsigned long length
, start1
;
1519 start
&= TARGET_PAGE_MASK
;
1520 end
= TARGET_PAGE_ALIGN(end
);
1522 length
= end
- start
;
1525 mask
= ~dirty_flags
;
1526 p
= phys_ram_dirty
+ (start
>> TARGET_PAGE_BITS
);
1527 len
= length
>> TARGET_PAGE_BITS
;
1528 for(i
= 0; i
< len
; i
++)
1531 env
= cpu_single_env
;
1532 /* we modify the TLB cache so that the dirty bit will be set again
1533 when accessing the range */
1534 start1
= start
+ (unsigned long)phys_ram_base
;
1535 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1536 tlb_reset_dirty_range(&env
->tlb_write
[0][i
], start1
, length
);
1537 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1538 tlb_reset_dirty_range(&env
->tlb_write
[1][i
], start1
, length
);
1540 #if !defined(CONFIG_SOFTMMU)
1541 /* XXX: this is expensive */
1547 for(i
= 0; i
< L1_SIZE
; i
++) {
1550 addr
= i
<< (TARGET_PAGE_BITS
+ L2_BITS
);
1551 for(j
= 0; j
< L2_SIZE
; j
++) {
1552 if (p
->valid_tag
== virt_valid_tag
&&
1553 p
->phys_addr
>= start
&& p
->phys_addr
< end
&&
1554 (p
->prot
& PROT_WRITE
)) {
1555 if (addr
< MMAP_AREA_END
) {
1556 mprotect((void *)addr
, TARGET_PAGE_SIZE
,
1557 p
->prot
& ~PROT_WRITE
);
1560 addr
+= TARGET_PAGE_SIZE
;
1569 static inline void tlb_set_dirty1(CPUTLBEntry
*tlb_entry
,
1570 unsigned long start
)
1573 if ((tlb_entry
->address
& ~TARGET_PAGE_MASK
) == IO_MEM_NOTDIRTY
) {
1574 addr
= (tlb_entry
->address
& TARGET_PAGE_MASK
) + tlb_entry
->addend
;
1575 if (addr
== start
) {
1576 tlb_entry
->address
= (tlb_entry
->address
& TARGET_PAGE_MASK
) | IO_MEM_RAM
;
1581 /* update the TLB corresponding to virtual page vaddr and phys addr
1582 addr so that it is no longer dirty */
1583 static inline void tlb_set_dirty(unsigned long addr
, target_ulong vaddr
)
1585 CPUState
*env
= cpu_single_env
;
1588 phys_ram_dirty
[(addr
- (unsigned long)phys_ram_base
) >> TARGET_PAGE_BITS
] = 0xff;
1590 addr
&= TARGET_PAGE_MASK
;
1591 i
= (vaddr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
1592 tlb_set_dirty1(&env
->tlb_write
[0][i
], addr
);
1593 tlb_set_dirty1(&env
->tlb_write
[1][i
], addr
);
1596 /* add a new TLB entry. At most one entry for a given virtual address
1597 is permitted. Return 0 if OK or 2 if the page could not be mapped
1598 (can only happen in non SOFTMMU mode for I/O pages or pages
1599 conflicting with the host address space). */
1600 int tlb_set_page(CPUState
*env
, target_ulong vaddr
,
1601 target_phys_addr_t paddr
, int prot
,
1602 int is_user
, int is_softmmu
)
1606 TranslationBlock
*first_tb
;
1608 target_ulong address
;
1609 unsigned long addend
;
1612 p
= phys_page_find(paddr
>> TARGET_PAGE_BITS
);
1615 pd
= IO_MEM_UNASSIGNED
;
1618 pd
= p
->phys_offset
;
1619 if ((pd
& ~TARGET_PAGE_MASK
) <= IO_MEM_ROM
) {
1620 /* NOTE: we also allocate the page at this stage */
1621 p1
= page_find_alloc(pd
>> TARGET_PAGE_BITS
);
1622 first_tb
= p1
->first_tb
;
1625 #if defined(DEBUG_TLB)
1626 printf("tlb_set_page: vaddr=0x%08x paddr=0x%08x prot=%x u=%d c=%d smmu=%d pd=0x%08x\n",
1627 vaddr
, paddr
, prot
, is_user
, (first_tb
!= NULL
), is_softmmu
, pd
);
1631 #if !defined(CONFIG_SOFTMMU)
1635 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
) {
1636 /* IO memory case */
1637 address
= vaddr
| pd
;
1640 /* standard memory */
1642 addend
= (unsigned long)phys_ram_base
+ (pd
& TARGET_PAGE_MASK
);
1645 index
= (vaddr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
1647 if (prot
& PAGE_READ
) {
1648 env
->tlb_read
[is_user
][index
].address
= address
;
1649 env
->tlb_read
[is_user
][index
].addend
= addend
;
1651 env
->tlb_read
[is_user
][index
].address
= -1;
1652 env
->tlb_read
[is_user
][index
].addend
= -1;
1654 if (prot
& PAGE_WRITE
) {
1655 if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_ROM
) {
1656 /* ROM: access is ignored (same as unassigned) */
1657 env
->tlb_write
[is_user
][index
].address
= vaddr
| IO_MEM_ROM
;
1658 env
->tlb_write
[is_user
][index
].addend
= addend
;
1660 /* XXX: the PowerPC code seems not ready to handle
1661 self modifying code with DCBI */
1662 #if defined(TARGET_HAS_SMC) || 1
1664 /* if code is present, we use a specific memory
1665 handler. It works only for physical memory access */
1666 env
->tlb_write
[is_user
][index
].address
= vaddr
| IO_MEM_CODE
;
1667 env
->tlb_write
[is_user
][index
].addend
= addend
;
1670 if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
&&
1671 !cpu_physical_memory_is_dirty(pd
)) {
1672 env
->tlb_write
[is_user
][index
].address
= vaddr
| IO_MEM_NOTDIRTY
;
1673 env
->tlb_write
[is_user
][index
].addend
= addend
;
1675 env
->tlb_write
[is_user
][index
].address
= address
;
1676 env
->tlb_write
[is_user
][index
].addend
= addend
;
1679 env
->tlb_write
[is_user
][index
].address
= -1;
1680 env
->tlb_write
[is_user
][index
].addend
= -1;
1683 #if !defined(CONFIG_SOFTMMU)
1685 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
) {
1686 /* IO access: no mapping is done as it will be handled by the
1688 if (!(env
->hflags
& HF_SOFTMMU_MASK
))
1693 if (vaddr
>= MMAP_AREA_END
) {
1696 if (prot
& PROT_WRITE
) {
1697 if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_ROM
||
1698 #if defined(TARGET_HAS_SMC) || 1
1701 ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
&&
1702 !cpu_physical_memory_is_dirty(pd
))) {
1703 /* ROM: we do as if code was inside */
1704 /* if code is present, we only map as read only and save the
1708 vp
= virt_page_find_alloc(vaddr
>> TARGET_PAGE_BITS
, 1);
1711 vp
->valid_tag
= virt_valid_tag
;
1712 prot
&= ~PAGE_WRITE
;
1715 map_addr
= mmap((void *)vaddr
, TARGET_PAGE_SIZE
, prot
,
1716 MAP_SHARED
| MAP_FIXED
, phys_ram_fd
, (pd
& TARGET_PAGE_MASK
));
1717 if (map_addr
== MAP_FAILED
) {
1718 cpu_abort(env
, "mmap failed when mapped physical address 0x%08x to virtual address 0x%08x\n",
1728 /* called from signal handler: invalidate the code and unprotect the
1729 page. Return TRUE if the fault was succesfully handled. */
1730 int page_unprotect(unsigned long addr
, unsigned long pc
, void *puc
)
1732 #if !defined(CONFIG_SOFTMMU)
1735 #if defined(DEBUG_TLB)
1736 printf("page_unprotect: addr=0x%08x\n", addr
);
1738 addr
&= TARGET_PAGE_MASK
;
1740 /* if it is not mapped, no need to worry here */
1741 if (addr
>= MMAP_AREA_END
)
1743 vp
= virt_page_find(addr
>> TARGET_PAGE_BITS
);
1746 /* NOTE: in this case, validate_tag is _not_ tested as it
1747 validates only the code TLB */
1748 if (vp
->valid_tag
!= virt_valid_tag
)
1750 if (!(vp
->prot
& PAGE_WRITE
))
1752 #if defined(DEBUG_TLB)
1753 printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
1754 addr
, vp
->phys_addr
, vp
->prot
);
1756 if (mprotect((void *)addr
, TARGET_PAGE_SIZE
, vp
->prot
) < 0)
1757 cpu_abort(cpu_single_env
, "error mprotect addr=0x%lx prot=%d\n",
1758 (unsigned long)addr
, vp
->prot
);
1759 /* set the dirty bit */
1760 phys_ram_dirty
[vp
->phys_addr
>> TARGET_PAGE_BITS
] = 0xff;
1761 /* flush the code inside */
1762 tb_invalidate_phys_page(vp
->phys_addr
, pc
, puc
);
1771 void tlb_flush(CPUState
*env
, int flush_global
)
1775 void tlb_flush_page(CPUState
*env
, target_ulong addr
)
1779 int tlb_set_page(CPUState
*env
, target_ulong vaddr
,
1780 target_phys_addr_t paddr
, int prot
,
1781 int is_user
, int is_softmmu
)
1786 /* dump memory mappings */
1787 void page_dump(FILE *f
)
1789 unsigned long start
, end
;
1790 int i
, j
, prot
, prot1
;
1793 fprintf(f
, "%-8s %-8s %-8s %s\n",
1794 "start", "end", "size", "prot");
1798 for(i
= 0; i
<= L1_SIZE
; i
++) {
1803 for(j
= 0;j
< L2_SIZE
; j
++) {
1808 if (prot1
!= prot
) {
1809 end
= (i
<< (32 - L1_BITS
)) | (j
<< TARGET_PAGE_BITS
);
1811 fprintf(f
, "%08lx-%08lx %08lx %c%c%c\n",
1812 start
, end
, end
- start
,
1813 prot
& PAGE_READ
? 'r' : '-',
1814 prot
& PAGE_WRITE
? 'w' : '-',
1815 prot
& PAGE_EXEC
? 'x' : '-');
1829 int page_get_flags(unsigned long address
)
1833 p
= page_find(address
>> TARGET_PAGE_BITS
);
1839 /* modify the flags of a page and invalidate the code if
1840 necessary. The flag PAGE_WRITE_ORG is positionned automatically
1841 depending on PAGE_WRITE */
1842 void page_set_flags(unsigned long start
, unsigned long end
, int flags
)
1847 start
= start
& TARGET_PAGE_MASK
;
1848 end
= TARGET_PAGE_ALIGN(end
);
1849 if (flags
& PAGE_WRITE
)
1850 flags
|= PAGE_WRITE_ORG
;
1851 spin_lock(&tb_lock
);
1852 for(addr
= start
; addr
< end
; addr
+= TARGET_PAGE_SIZE
) {
1853 p
= page_find_alloc(addr
>> TARGET_PAGE_BITS
);
1854 /* if the write protection is set, then we invalidate the code
1856 if (!(p
->flags
& PAGE_WRITE
) &&
1857 (flags
& PAGE_WRITE
) &&
1859 tb_invalidate_phys_page(addr
, 0, NULL
);
1863 spin_unlock(&tb_lock
);
1866 /* called from signal handler: invalidate the code and unprotect the
1867 page. Return TRUE if the fault was succesfully handled. */
1868 int page_unprotect(unsigned long address
, unsigned long pc
, void *puc
)
1870 unsigned int page_index
, prot
, pindex
;
1872 unsigned long host_start
, host_end
, addr
;
1874 host_start
= address
& qemu_host_page_mask
;
1875 page_index
= host_start
>> TARGET_PAGE_BITS
;
1876 p1
= page_find(page_index
);
1879 host_end
= host_start
+ qemu_host_page_size
;
1882 for(addr
= host_start
;addr
< host_end
; addr
+= TARGET_PAGE_SIZE
) {
1886 /* if the page was really writable, then we change its
1887 protection back to writable */
1888 if (prot
& PAGE_WRITE_ORG
) {
1889 pindex
= (address
- host_start
) >> TARGET_PAGE_BITS
;
1890 if (!(p1
[pindex
].flags
& PAGE_WRITE
)) {
1891 mprotect((void *)host_start
, qemu_host_page_size
,
1892 (prot
& PAGE_BITS
) | PAGE_WRITE
);
1893 p1
[pindex
].flags
|= PAGE_WRITE
;
1894 /* and since the content will be modified, we must invalidate
1895 the corresponding translated code. */
1896 tb_invalidate_phys_page(address
, pc
, puc
);
1897 #ifdef DEBUG_TB_CHECK
1898 tb_invalidate_check(address
);
1906 /* call this function when system calls directly modify a memory area */
1907 void page_unprotect_range(uint8_t *data
, unsigned long data_size
)
1909 unsigned long start
, end
, addr
;
1911 start
= (unsigned long)data
;
1912 end
= start
+ data_size
;
1913 start
&= TARGET_PAGE_MASK
;
1914 end
= TARGET_PAGE_ALIGN(end
);
1915 for(addr
= start
; addr
< end
; addr
+= TARGET_PAGE_SIZE
) {
1916 page_unprotect(addr
, 0, NULL
);
1920 static inline void tlb_set_dirty(unsigned long addr
, target_ulong vaddr
)
1923 #endif /* defined(CONFIG_USER_ONLY) */
1925 /* register physical memory. 'size' must be a multiple of the target
1926 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
1928 void cpu_register_physical_memory(target_phys_addr_t start_addr
,
1930 unsigned long phys_offset
)
1932 unsigned long addr
, end_addr
;
1935 size
= (size
+ TARGET_PAGE_SIZE
- 1) & TARGET_PAGE_MASK
;
1936 end_addr
= start_addr
+ size
;
1937 for(addr
= start_addr
; addr
!= end_addr
; addr
+= TARGET_PAGE_SIZE
) {
1938 p
= phys_page_find_alloc(addr
>> TARGET_PAGE_BITS
);
1939 p
->phys_offset
= phys_offset
;
1940 if ((phys_offset
& ~TARGET_PAGE_MASK
) <= IO_MEM_ROM
)
1941 phys_offset
+= TARGET_PAGE_SIZE
;
1945 static uint32_t unassigned_mem_readb(void *opaque
, target_phys_addr_t addr
)
1950 static void unassigned_mem_writeb(void *opaque
, target_phys_addr_t addr
, uint32_t val
)
1954 static CPUReadMemoryFunc
*unassigned_mem_read
[3] = {
1955 unassigned_mem_readb
,
1956 unassigned_mem_readb
,
1957 unassigned_mem_readb
,
1960 static CPUWriteMemoryFunc
*unassigned_mem_write
[3] = {
1961 unassigned_mem_writeb
,
1962 unassigned_mem_writeb
,
1963 unassigned_mem_writeb
,
1966 /* self modifying code support in soft mmu mode : writing to a page
1967 containing code comes to these functions */
1969 static void code_mem_writeb(void *opaque
, target_phys_addr_t addr
, uint32_t val
)
1971 unsigned long phys_addr
;
1973 phys_addr
= addr
- (unsigned long)phys_ram_base
;
1974 #if !defined(CONFIG_USER_ONLY)
1975 tb_invalidate_phys_page_fast(phys_addr
, 1);
1977 stb_p((uint8_t *)(long)addr
, val
);
1978 phys_ram_dirty
[phys_addr
>> TARGET_PAGE_BITS
] = 0xff;
1981 static void code_mem_writew(void *opaque
, target_phys_addr_t addr
, uint32_t val
)
1983 unsigned long phys_addr
;
1985 phys_addr
= addr
- (unsigned long)phys_ram_base
;
1986 #if !defined(CONFIG_USER_ONLY)
1987 tb_invalidate_phys_page_fast(phys_addr
, 2);
1989 stw_p((uint8_t *)(long)addr
, val
);
1990 phys_ram_dirty
[phys_addr
>> TARGET_PAGE_BITS
] = 0xff;
1993 static void code_mem_writel(void *opaque
, target_phys_addr_t addr
, uint32_t val
)
1995 unsigned long phys_addr
;
1997 phys_addr
= addr
- (unsigned long)phys_ram_base
;
1998 #if !defined(CONFIG_USER_ONLY)
1999 tb_invalidate_phys_page_fast(phys_addr
, 4);
2001 stl_p((uint8_t *)(long)addr
, val
);
2002 phys_ram_dirty
[phys_addr
>> TARGET_PAGE_BITS
] = 0xff;
2005 static CPUReadMemoryFunc
*code_mem_read
[3] = {
2006 NULL
, /* never used */
2007 NULL
, /* never used */
2008 NULL
, /* never used */
2011 static CPUWriteMemoryFunc
*code_mem_write
[3] = {
2017 static void notdirty_mem_writeb(void *opaque
, target_phys_addr_t addr
, uint32_t val
)
2019 stb_p((uint8_t *)(long)addr
, val
);
2020 tlb_set_dirty(addr
, cpu_single_env
->mem_write_vaddr
);
2023 static void notdirty_mem_writew(void *opaque
, target_phys_addr_t addr
, uint32_t val
)
2025 stw_p((uint8_t *)(long)addr
, val
);
2026 tlb_set_dirty(addr
, cpu_single_env
->mem_write_vaddr
);
2029 static void notdirty_mem_writel(void *opaque
, target_phys_addr_t addr
, uint32_t val
)
2031 stl_p((uint8_t *)(long)addr
, val
);
2032 tlb_set_dirty(addr
, cpu_single_env
->mem_write_vaddr
);
2035 static CPUWriteMemoryFunc
*notdirty_mem_write
[3] = {
2036 notdirty_mem_writeb
,
2037 notdirty_mem_writew
,
2038 notdirty_mem_writel
,
2041 static void io_mem_init(void)
2043 cpu_register_io_memory(IO_MEM_ROM
>> IO_MEM_SHIFT
, code_mem_read
, unassigned_mem_write
, NULL
);
2044 cpu_register_io_memory(IO_MEM_UNASSIGNED
>> IO_MEM_SHIFT
, unassigned_mem_read
, unassigned_mem_write
, NULL
);
2045 cpu_register_io_memory(IO_MEM_CODE
>> IO_MEM_SHIFT
, code_mem_read
, code_mem_write
, NULL
);
2046 cpu_register_io_memory(IO_MEM_NOTDIRTY
>> IO_MEM_SHIFT
, code_mem_read
, notdirty_mem_write
, NULL
);
2049 /* alloc dirty bits array */
2050 phys_ram_dirty
= qemu_vmalloc(phys_ram_size
>> TARGET_PAGE_BITS
);
2053 /* mem_read and mem_write are arrays of functions containing the
2054 function to access byte (index 0), word (index 1) and dword (index
2055 2). All functions must be supplied. If io_index is non zero, the
2056 corresponding io zone is modified. If it is zero, a new io zone is
2057 allocated. The return value can be used with
2058 cpu_register_physical_memory(). (-1) is returned if error. */
2059 int cpu_register_io_memory(int io_index
,
2060 CPUReadMemoryFunc
**mem_read
,
2061 CPUWriteMemoryFunc
**mem_write
,
2066 if (io_index
<= 0) {
2067 if (io_index
>= IO_MEM_NB_ENTRIES
)
2069 io_index
= io_mem_nb
++;
2071 if (io_index
>= IO_MEM_NB_ENTRIES
)
2075 for(i
= 0;i
< 3; i
++) {
2076 io_mem_read
[io_index
][i
] = mem_read
[i
];
2077 io_mem_write
[io_index
][i
] = mem_write
[i
];
2079 io_mem_opaque
[io_index
] = opaque
;
2080 return io_index
<< IO_MEM_SHIFT
;
2083 CPUWriteMemoryFunc
**cpu_get_io_memory_write(int io_index
)
2085 return io_mem_write
[io_index
>> IO_MEM_SHIFT
];
2088 CPUReadMemoryFunc
**cpu_get_io_memory_read(int io_index
)
2090 return io_mem_read
[io_index
>> IO_MEM_SHIFT
];
2093 /* physical memory access (slow version, mainly for debug) */
2094 #if defined(CONFIG_USER_ONLY)
2095 void cpu_physical_memory_rw(target_phys_addr_t addr
, uint8_t *buf
,
2096 int len
, int is_write
)
2102 page
= addr
& TARGET_PAGE_MASK
;
2103 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
2106 flags
= page_get_flags(page
);
2107 if (!(flags
& PAGE_VALID
))
2110 if (!(flags
& PAGE_WRITE
))
2112 memcpy((uint8_t *)addr
, buf
, len
);
2114 if (!(flags
& PAGE_READ
))
2116 memcpy(buf
, (uint8_t *)addr
, len
);
2125 uint32_t ldl_phys(target_phys_addr_t addr
)
2130 void stl_phys_notdirty(target_phys_addr_t addr
, uint32_t val
)
2134 void stl_phys(target_phys_addr_t addr
, uint32_t val
)
2139 void cpu_physical_memory_rw(target_phys_addr_t addr
, uint8_t *buf
,
2140 int len
, int is_write
)
2145 target_phys_addr_t page
;
2150 page
= addr
& TARGET_PAGE_MASK
;
2151 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
2154 p
= phys_page_find(page
>> TARGET_PAGE_BITS
);
2156 pd
= IO_MEM_UNASSIGNED
;
2158 pd
= p
->phys_offset
;
2162 if ((pd
& ~TARGET_PAGE_MASK
) != 0) {
2163 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
2164 if (l
>= 4 && ((addr
& 3) == 0)) {
2165 /* 32 bit read access */
2167 io_mem_write
[io_index
][2](io_mem_opaque
[io_index
], addr
, val
);
2169 } else if (l
>= 2 && ((addr
& 1) == 0)) {
2170 /* 16 bit read access */
2172 io_mem_write
[io_index
][1](io_mem_opaque
[io_index
], addr
, val
);
2177 io_mem_write
[io_index
][0](io_mem_opaque
[io_index
], addr
, val
);
2181 unsigned long addr1
;
2182 addr1
= (pd
& TARGET_PAGE_MASK
) + (addr
& ~TARGET_PAGE_MASK
);
2184 ptr
= phys_ram_base
+ addr1
;
2185 memcpy(ptr
, buf
, l
);
2186 /* invalidate code */
2187 tb_invalidate_phys_page_range(addr1
, addr1
+ l
, 0);
2189 phys_ram_dirty
[addr1
>> TARGET_PAGE_BITS
] = 0xff;
2192 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
&&
2193 (pd
& ~TARGET_PAGE_MASK
) != IO_MEM_CODE
) {
2195 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
2196 if (l
>= 4 && ((addr
& 3) == 0)) {
2197 /* 32 bit read access */
2198 val
= io_mem_read
[io_index
][2](io_mem_opaque
[io_index
], addr
);
2201 } else if (l
>= 2 && ((addr
& 1) == 0)) {
2202 /* 16 bit read access */
2203 val
= io_mem_read
[io_index
][1](io_mem_opaque
[io_index
], addr
);
2208 val
= io_mem_read
[io_index
][0](io_mem_opaque
[io_index
], addr
);
2214 ptr
= phys_ram_base
+ (pd
& TARGET_PAGE_MASK
) +
2215 (addr
& ~TARGET_PAGE_MASK
);
2216 memcpy(buf
, ptr
, l
);
2225 /* warning: addr must be aligned */
2226 uint32_t ldl_phys(target_phys_addr_t addr
)
2234 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
2236 pd
= IO_MEM_UNASSIGNED
;
2238 pd
= p
->phys_offset
;
2241 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
&&
2242 (pd
& ~TARGET_PAGE_MASK
) != IO_MEM_CODE
) {
2244 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
2245 val
= io_mem_read
[io_index
][2](io_mem_opaque
[io_index
], addr
);
2248 ptr
= phys_ram_base
+ (pd
& TARGET_PAGE_MASK
) +
2249 (addr
& ~TARGET_PAGE_MASK
);
2255 /* warning: addr must be aligned. The ram page is not masked as dirty
2256 and the code inside is not invalidated. It is useful if the dirty
2257 bits are used to track modified PTEs */
2258 void stl_phys_notdirty(target_phys_addr_t addr
, uint32_t val
)
2265 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
2267 pd
= IO_MEM_UNASSIGNED
;
2269 pd
= p
->phys_offset
;
2272 if ((pd
& ~TARGET_PAGE_MASK
) != 0) {
2273 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
2274 io_mem_write
[io_index
][2](io_mem_opaque
[io_index
], addr
, val
);
2276 ptr
= phys_ram_base
+ (pd
& TARGET_PAGE_MASK
) +
2277 (addr
& ~TARGET_PAGE_MASK
);
2282 /* warning: addr must be aligned */
2283 /* XXX: optimize code invalidation test */
2284 void stl_phys(target_phys_addr_t addr
, uint32_t val
)
2291 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
2293 pd
= IO_MEM_UNASSIGNED
;
2295 pd
= p
->phys_offset
;
2298 if ((pd
& ~TARGET_PAGE_MASK
) != 0) {
2299 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
2300 io_mem_write
[io_index
][2](io_mem_opaque
[io_index
], addr
, val
);
2302 unsigned long addr1
;
2303 addr1
= (pd
& TARGET_PAGE_MASK
) + (addr
& ~TARGET_PAGE_MASK
);
2305 ptr
= phys_ram_base
+ addr1
;
2307 /* invalidate code */
2308 tb_invalidate_phys_page_range(addr1
, addr1
+ 4, 0);
2310 phys_ram_dirty
[addr1
>> TARGET_PAGE_BITS
] = 0xff;
2316 /* virtual memory access for debug */
2317 int cpu_memory_rw_debug(CPUState
*env
, target_ulong addr
,
2318 uint8_t *buf
, int len
, int is_write
)
2321 target_ulong page
, phys_addr
;
2324 page
= addr
& TARGET_PAGE_MASK
;
2325 phys_addr
= cpu_get_phys_page_debug(env
, page
);
2326 /* if no physical page mapped, return an error */
2327 if (phys_addr
== -1)
2329 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
2332 cpu_physical_memory_rw(phys_addr
+ (addr
& ~TARGET_PAGE_MASK
),
2341 void dump_exec_info(FILE *f
,
2342 int (*cpu_fprintf
)(FILE *f
, const char *fmt
, ...))
2344 int i
, target_code_size
, max_target_code_size
;
2345 int direct_jmp_count
, direct_jmp2_count
, cross_page
;
2346 TranslationBlock
*tb
;
2348 target_code_size
= 0;
2349 max_target_code_size
= 0;
2351 direct_jmp_count
= 0;
2352 direct_jmp2_count
= 0;
2353 for(i
= 0; i
< nb_tbs
; i
++) {
2355 target_code_size
+= tb
->size
;
2356 if (tb
->size
> max_target_code_size
)
2357 max_target_code_size
= tb
->size
;
2358 if (tb
->page_addr
[1] != -1)
2360 if (tb
->tb_next_offset
[0] != 0xffff) {
2362 if (tb
->tb_next_offset
[1] != 0xffff) {
2363 direct_jmp2_count
++;
2367 /* XXX: avoid using doubles ? */
2368 cpu_fprintf(f
, "TB count %d\n", nb_tbs
);
2369 cpu_fprintf(f
, "TB avg target size %d max=%d bytes\n",
2370 nb_tbs
? target_code_size
/ nb_tbs
: 0,
2371 max_target_code_size
);
2372 cpu_fprintf(f
, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
2373 nb_tbs
? (code_gen_ptr
- code_gen_buffer
) / nb_tbs
: 0,
2374 target_code_size
? (double) (code_gen_ptr
- code_gen_buffer
) / target_code_size
: 0);
2375 cpu_fprintf(f
, "cross page TB count %d (%d%%)\n",
2377 nb_tbs
? (cross_page
* 100) / nb_tbs
: 0);
2378 cpu_fprintf(f
, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
2380 nb_tbs
? (direct_jmp_count
* 100) / nb_tbs
: 0,
2382 nb_tbs
? (direct_jmp2_count
* 100) / nb_tbs
: 0);
2383 cpu_fprintf(f
, "TB flush count %d\n", tb_flush_count
);
2384 cpu_fprintf(f
, "TB invalidate count %d\n", tb_phys_invalidate_count
);
2385 cpu_fprintf(f
, "TLB flush count %d\n", tlb_flush_count
);
2388 #if !defined(CONFIG_USER_ONLY)
2390 #define MMUSUFFIX _cmmu
2391 #define GETPC() NULL
2392 #define env cpu_single_env
2393 #define SOFTMMU_CODE_ACCESS
2396 #include "softmmu_template.h"
2399 #include "softmmu_template.h"
2402 #include "softmmu_template.h"
2405 #include "softmmu_template.h"