/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#if !defined(CONFIG_SOFTMMU)

//#define DEBUG_TB_INVALIDATE

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

/* threshold to flush the translated code buffer */
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START 0x00000000
#define MMAP_AREA_END   0xa8000000
TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
TranslationBlock *tb_hash[CODE_GEN_HASH_SIZE];
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];

/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE];
uint8_t *code_gen_ptr;

uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
#if defined(CONFIG_USER_ONLY)

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low 12 bits */
    unsigned long phys_offset;

typedef struct VirtPageDesc {
    /* physical address of code page. It is valid only if 'valid_tag'
       matches 'virt_valid_tag' */
    target_ulong phys_addr;
    unsigned int valid_tag;
#if !defined(CONFIG_SOFTMMU)
    /* original page access rights. It is valid only if 'valid_tag'
       matches 'virt_valid_tag' */

#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)

static void io_mem_init(void);

unsigned long real_host_page_size;
unsigned long host_page_bits;
unsigned long host_page_size;
unsigned long host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
static PhysPageDesc *l1_phys_map[L1_SIZE];

#if !defined(CONFIG_USER_ONLY)
static VirtPageDesc *l1_virt_map[L1_SIZE];
static unsigned int virt_valid_tag;

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
static int io_mem_nb;

char *logfilename = "/tmp/qemu.log";
static void page_init(void)
{
    /* NOTE: we can always suppose that host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    real_host_page_size = 4096;
#else
    real_host_page_size = getpagesize();
#endif
    if (host_page_size == 0)
        host_page_size = real_host_page_size;
    if (host_page_size < TARGET_PAGE_SIZE)
        host_page_size = TARGET_PAGE_SIZE;
    while ((1 << host_page_bits) < host_page_size)
        host_page_bits++;
    host_page_mask = ~(host_page_size - 1);
#if !defined(CONFIG_USER_ONLY)
    virt_valid_tag = 1;
#endif
}
static inline PageDesc *page_find_alloc(unsigned int index)
{
    lp = &l1_map[index >> L2_BITS];
        /* allocate if not found */
        p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(unsigned int index)
{
    p = l1_map[index >> L2_BITS];
    return p + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find_alloc(unsigned int index)
{
    PhysPageDesc **lp, *p;

    lp = &l1_phys_map[index >> L2_BITS];
        /* allocate if not found */
        p = qemu_malloc(sizeof(PhysPageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PhysPageDesc) * L2_SIZE);
    return p + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(unsigned int index)
{
    p = l1_phys_map[index >> L2_BITS];
    return p + (index & (L2_SIZE - 1));
}
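/* Illustrative sketch (not part of the original file): how a target page
   index is split by the two-level page table above.  It simply mirrors the
   L2_BITS/L2_SIZE arithmetic of page_find(); the helper name is invented
   for the example. */
#if 0
static PageDesc *example_page_lookup(unsigned int page_index)
{
    /* the high bits select the level-1 slot, the low L2_BITS bits the
       entry inside the level-2 array that is allocated on demand */
    PageDesc *level2 = l1_map[page_index >> L2_BITS];
    if (!level2)
        return NULL;                         /* page never touched */
    return level2 + (page_index & (L2_SIZE - 1));
}
#endif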
#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(CPUState *env, target_ulong addr);
static void tlb_unprotect_code_phys(CPUState *env, unsigned long phys_addr, target_ulong vaddr);

static inline VirtPageDesc *virt_page_find_alloc(unsigned int index)
{
    VirtPageDesc **lp, *p;

    lp = &l1_virt_map[index >> L2_BITS];
        /* allocate if not found */
        p = qemu_malloc(sizeof(VirtPageDesc) * L2_SIZE);
        memset(p, 0, sizeof(VirtPageDesc) * L2_SIZE);
    return p + (index & (L2_SIZE - 1));
}

static inline VirtPageDesc *virt_page_find(unsigned int index)
{
    p = l1_virt_map[index >> L2_BITS];
    return p + (index & (L2_SIZE - 1));
}

static void virt_page_flush(void)
{
    if (virt_valid_tag == 0) {
        for(i = 0; i < L1_SIZE; i++) {
                for(j = 0; j < L2_SIZE; j++)

static void virt_page_flush(void)

void cpu_exec_init(void)
{
        code_gen_ptr = code_gen_buffer;
static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    for(i = 0; i < L1_SIZE; i++) {
            for(j = 0; j < L2_SIZE; j++) {
                invalidate_page_bitmap(p);

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env)
{
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
           code_gen_ptr - code_gen_buffer,
           nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
#endif
    for(i = 0;i < CODE_GEN_HASH_SIZE; i++)
        tb_hash[i] = NULL;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++)
        tb_phys_hash[i] = NULL;
    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(unsigned long address)
{
    TranslationBlock *tb;

    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_HASH_SIZE; i++) {
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, tb->pc, tb->size);

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_HASH_SIZE; i++) {
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       tb->pc, tb->size, flags1, flags2);

void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;

    /* suppress any remaining jumps to this TB */
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb1 = tb1->jmp_next[n1];
    /* check end of list */
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;

            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;

        tb1 = (TranslationBlock *)((long)tb1 & ~3);
            *ptb = tb1->page_next[n1];
        ptb = &tb1->page_next[n1];

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;

    ptb = &tb->jmp_next[n];
        /* find tb(n) in circular list */
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                ptb = &tb1->jmp_first;
                ptb = &tb1->jmp_next[n1];
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}
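/* Illustrative sketch (not part of the original file): the list walks in the
   functions above mask pointers with ~3 because the low two bits of a stored
   pointer carry the slot number of the referencing TB (value 2 marks the
   head of the circular jump list).  A minimal decode helper, with invented
   names, could look like this: */
#if 0
static inline TranslationBlock *example_list_decode(TranslationBlock *tagged, int *pn)
{
    *pn = (long)tagged & 3;                          /* 0 or 1: slot, 2: list head */
    return (TranslationBlock *)((long)tagged & ~3);  /* real pointer */
}
#endif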
static inline void tb_invalidate(TranslationBlock *tb)
{
    TranslationBlock *tb1, *tb2, **ptb;

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_hash_func(tb->pc);
        /* NOTE: the TB is not necessarily linked in the hash. It
           indicates that it is not currently used */
            *ptb = tb1->hash_next;
        ptb = &tb1->hash_next;

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
}
static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
{
    target_ulong phys_pc;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
}
static inline void set_bits(uint8_t *tab, int start, int len)
{
        mask = 0xff << (start & 7);
        if ((start & ~7) == (end & ~7)) {
            mask &= ~(0xff << (end & 7));
        start = (start + 8) & ~7;
        while (start < end1) {
            mask = ~(0xff << (end & 7));
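/* Illustrative sketch (not part of the original file): set_bits() is only
   partially reproduced above.  A complete bit-setter with the same
   interface, following the same first-byte / middle-bytes / last-byte
   structure, could look like this: */
#if 0
static inline void example_set_bits(uint8_t *tab, int start, int len)
{
    int end = start + len;
    int end1 = end & ~7;
    unsigned int mask;

    /* bits inside the first (possibly partial) byte */
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        /* start and end fall in the same byte */
        mask &= ~(0xff << (end & 7));
        tab[start >> 3] |= mask;
        return;
    }
    tab[start >> 3] |= mask;
    start = (start + 8) & ~7;
    /* whole bytes in the middle */
    while (start < end1) {
        tab[start >> 3] = 0xff;
        start += 8;
    }
    /* bits inside the last partial byte */
    if (start < end) {
        mask = ~(0xff << (end & 7));
        tab[start >> 3] |= mask;
    }
}
#endif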
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);

        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
#ifdef TARGET_HAS_PRECISE_SMC

static void tb_gen_code(CPUState *env,
                        target_ulong pc, target_ulong cs_base, int flags,
                        int cflags)
{
    TranslationBlock *tb;
    target_ulong phys_pc, phys_page2, virt_page2;

    phys_pc = get_phys_addr_code(env, (unsigned long)pc);
    tb = tb_alloc((unsigned long)pc);
        /* flush must be done */
        /* cannot fail at this point */
        tb = tb_alloc((unsigned long)pc);
    tc_ptr = code_gen_ptr;
    tb->cs_base = cs_base;
    cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = ((unsigned long)pc + tb->size - 1) & TARGET_PAGE_MASK;
    if (((unsigned long)pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
}
/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
                                   int is_cpu_write_access)
{
    int n, current_tb_modified, current_tb_not_found, current_flags;
#if defined(TARGET_HAS_PRECISE_SMC) || !defined(CONFIG_USER_ONLY)
    CPUState *env = cpu_single_env;
#endif
    TranslationBlock *tb, *tb_next, *current_tb;
    target_ulong tb_start, tb_end;
    target_ulong current_pc, current_cs_base;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    current_tb_not_found = is_cpu_write_access;
    current_tb_modified = 0;
    current_tb = NULL; /* avoid warning */
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                if (env->mem_write_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_write_pc);
                }
            }
            if (current_tb == tb &&
                !(current_tb->cflags & CF_SINGLE_INSN)) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */
                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_write_pc, NULL);
#if defined(TARGET_I386)
                current_flags = env->hflags;
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
                current_cs_base = (target_ulong)env->segs[R_CS].base;
                current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            tb_phys_invalidate(tb, -1);
        }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
        }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}
/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
{
    if (cpu_single_env->cr[0] & CR0_PE_MASK) {
        printf("modifying code at 0x%x size=%d EIP=%x\n",
               (vaddr & TARGET_PAGE_MASK) | (start & ~TARGET_PAGE_MASK), len,
               cpu_single_env->eip);
    }
    p = page_find(start >> TARGET_PAGE_BITS);
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
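/* Illustrative worked example (not part of the original file): for a 2-byte
   write at page offset 0x105 the test above inspects code_bitmap byte 0x20
   (0x105 >> 3) shifted right by 5 (0x105 & 7), i.e. bits 5..6 of that byte;
   the slow tb_invalidate_phys_page_range() path is taken only when one of
   those bits marks translated code. */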
#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_ulong addr,
                                    unsigned long pc, void *puc)
{
    int n, current_flags, current_tb_modified;
    target_ulong current_pc, current_cs_base;
    TranslationBlock *tb, *current_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *env = cpu_single_env;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    current_tb_modified = 0;
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
#ifdef TARGET_HAS_PRECISE_SMC
        current_tb = tb_find_pc(pc);
#endif
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            !(current_tb->cflags & CF_SINGLE_INSN)) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */
            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
#if defined(TARGET_I386)
            current_flags = env->hflags;
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
            current_cs_base = (target_ulong)env->segs[R_CS].base;
            current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, unsigned int page_addr)
{
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#ifdef TARGET_HAS_SMC

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        unsigned long host_start, host_end, addr;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        host_start = page_addr & host_page_mask;
        host_end = host_start + host_page_size;
        for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE)
            prot |= page_get_flags(addr);
        mprotect((void *)host_start, host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x%08lx\n",
#endif
        p->flags &= ~PAGE_WRITE;
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        target_ulong virt_addr;

        virt_addr = (tb->pc & TARGET_PAGE_MASK) + (n << TARGET_PAGE_BITS);
        tlb_protect_code(cpu_single_env, virt_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}
/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(unsigned long pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
        (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
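/* Illustrative sketch (not part of the original file): the body of tb_alloc()
   is only partially reproduced above.  A plausible completion, based on the
   globals declared earlier, refuses the allocation when the static pools are
   exhausted and otherwise hands out the next tbs[] slot: */
#if 0
TranslationBlock *example_tb_alloc(unsigned long pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
        (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
        return NULL;        /* caller is expected to tb_flush() and retry */
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}
#endif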
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    TranslationBlock **ptb;

    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;
#ifdef DEBUG_TB_CHECK
/* link the tb with the other TBs */
void tb_link(TranslationBlock *tb)
{
#if !defined(CONFIG_USER_ONLY)
    /* save the code memory mappings (needed to invalidate the code) */
    addr = tb->pc & TARGET_PAGE_MASK;
    vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS);
#ifdef DEBUG_TLB_CHECK
    if (vp->valid_tag == virt_valid_tag &&
        vp->phys_addr != tb->page_addr[0]) {
        printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
               addr, tb->page_addr[0], vp->phys_addr);
    }
#endif
    vp->phys_addr = tb->page_addr[0];
    if (vp->valid_tag != virt_valid_tag) {
        vp->valid_tag = virt_valid_tag;
#if !defined(CONFIG_SOFTMMU)

    if (tb->page_addr[1] != -1) {
        addr += TARGET_PAGE_SIZE;
        vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS);
#ifdef DEBUG_TLB_CHECK
        if (vp->valid_tag == virt_valid_tag &&
            vp->phys_addr != tb->page_addr[1]) {
            printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
                   addr, tb->page_addr[1], vp->phys_addr);
        }
#endif
        vp->phys_addr = tb->page_addr[1];
        if (vp->valid_tag != virt_valid_tag) {
            vp->valid_tag = virt_valid_tag;
#if !defined(CONFIG_SOFTMMU)

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;
    tb->cflags &= ~CF_FP_USED;
    if (tb->cflags & CF_TB_FP_USED)
        tb->cflags |= CF_FP_USED;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);
}
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    TranslationBlock *tb;

    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
    /* binary search (cf Knuth) */
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        v = (unsigned long)tb->tc_ptr;
        else if (tc_ptr < v) {
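/* Illustrative sketch (not part of the original file): tb_find_pc() is only
   partially reproduced above.  The binary search over the tbs[] array, which
   is ordered by tc_ptr because blocks are generated sequentially into
   code_gen_buffer, could be completed along these lines: */
#if 0
TranslationBlock *example_tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v)
            m_max = m - 1;
        else
            m_min = m + 1;
    }
    /* no exact hit: the last block starting below tc_ptr contains it */
    return &tbs[m_max];
}
#endif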
static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;

    tb1 = tb->jmp_next[n];
        /* find head of list */
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            tb1 = tb1->jmp_next[n1];
        /* we are now sure that tb jumps to tb1 */

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
            ptb = &tb1->jmp_next[n1];
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_ulong phys_addr;

    phys_addr = cpu_get_phys_page_debug(env, pc);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + 1, 0);
}

/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
   breakpoint is reached */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
{
#if defined(TARGET_I386) || defined(TARGET_PPC)
    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
    env->breakpoints[env->nb_breakpoints++] = pc;
    breakpoint_invalidate(env, pc);

/* remove a breakpoint */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
{
#if defined(TARGET_I386) || defined(TARGET_PPC)
    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
    memmove(&env->breakpoints[i], &env->breakpoints[i + 1],
            (env->nb_breakpoints - (i + 1)) * sizeof(env->breakpoints[0]));
    env->nb_breakpoints--;
    breakpoint_invalidate(env, pc);
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_I386) || defined(TARGET_PPC)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
/* enable or disable low levels log */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, "w");
            perror(logfilename);
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        static uint8_t logfile_buf[4096];
        setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
}
/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    TranslationBlock *tb;
    static int interrupt_lock;

    env->interrupt_request |= mask;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    tb = env->current_tb;
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}
CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops after optimization for each compiled TB" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
        p1 = strchr(p, ',');
        for(item = cpu_log_items; item->mask != 0; item++) {
            if (cmp1(p, p1 - p, item->name))
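/* Illustrative sketch (not part of the original file): the parsing loop of
   cpu_str_to_log_mask() is only partially reproduced above.  Walking the
   comma-separated list with strchr() and matching each item against
   cpu_log_items[] could look like this: */
#if 0
int example_cpu_str_to_log_mask(const char *str)
{
    CPULogItem *item;
    int mask = 0;
    const char *p, *p1;

    p = str;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        for(item = cpu_log_items; item->mask != 0; item++) {
            if (cmp1(p, p1 - p, item->name))
                goto found;
        }
        return 0;            /* unknown item name */
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}
#endif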
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_x86_dump_state(env, stderr, X86_DUMP_FPU | X86_DUMP_CCOP);
#if !defined(CONFIG_USER_ONLY)

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_read[0][i].address = -1;
        env->tlb_write[0][i].address = -1;
        env->tlb_read[1][i].address = -1;
        env->tlb_write[1][i].address = -1;
    }

    for(i = 0;i < CODE_GEN_HASH_SIZE; i++)

#if !defined(CONFIG_SOFTMMU)
    munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
#endif
}

static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->address &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)))
        tlb_entry->address = -1;
}
void tlb_flush_page(CPUState *env, target_ulong addr)
{
    TranslationBlock *tb;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: 0x%08x\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_read[0][i], addr);
    tlb_flush_entry(&env->tlb_write[0][i], addr);
    tlb_flush_entry(&env->tlb_read[1][i], addr);
    tlb_flush_entry(&env->tlb_write[1][i], addr);

    /* remove from the virtual pc hash table all the TB at this
       virtual address */
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (vp && vp->valid_tag == virt_valid_tag) {
        p = page_find(vp->phys_addr >> TARGET_PAGE_BITS);
            /* we remove all the links to the TBs in this virtual page */
            while (tb != NULL) {
                tb = (TranslationBlock *)((long)tb & ~3);
                if ((tb->pc & TARGET_PAGE_MASK) == addr ||
                    ((tb->pc + tb->size - 1) & TARGET_PAGE_MASK) == addr) {
                tb = tb->page_next[n];

#if !defined(CONFIG_SOFTMMU)
    if (addr < MMAP_AREA_END)
        munmap((void *)addr, TARGET_PAGE_SIZE);
#endif
}
static inline void tlb_protect_code1(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->address &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) &&
        (tlb_entry->address & ~TARGET_PAGE_MASK) != IO_MEM_CODE &&
        (tlb_entry->address & ~TARGET_PAGE_MASK) != IO_MEM_ROM) {
        tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_CODE;
    }
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(CPUState *env, target_ulong addr)
{
    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_protect_code1(&env->tlb_write[0][i], addr);
    tlb_protect_code1(&env->tlb_write[1][i], addr);
#if !defined(CONFIG_SOFTMMU)
    /* NOTE: as we generated the code for this page, it is already at
       least readable */
    if (addr < MMAP_AREA_END)
        mprotect((void *)addr, TARGET_PAGE_SIZE, PROT_READ);
#endif
}
static inline void tlb_unprotect_code2(CPUTLBEntry *tlb_entry,
                                       unsigned long phys_addr)
{
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_CODE &&
        ((tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend) == phys_addr) {
        tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
    }
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, unsigned long phys_addr, target_ulong vaddr)
{
    phys_addr &= TARGET_PAGE_MASK;
    phys_addr += (long)phys_ram_base;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_unprotect_code2(&env->tlb_write[0][i], phys_addr);
    tlb_unprotect_code2(&env->tlb_write[1][i], phys_addr);
}
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
        }
    }
}
void cpu_physical_memory_reset_dirty(target_ulong start, target_ulong end)
{
    unsigned long length, start1;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    memset(phys_ram_dirty + (start >> TARGET_PAGE_BITS), 0, length >> TARGET_PAGE_BITS);

    env = cpu_single_env;
    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = start + (unsigned long)phys_ram_base;
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_reset_dirty_range(&env->tlb_write[0][i], start1, length);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_reset_dirty_range(&env->tlb_write[1][i], start1, length);

#if !defined(CONFIG_SOFTMMU)
    /* XXX: this is expensive */
        for(i = 0; i < L1_SIZE; i++) {
                addr = i << (TARGET_PAGE_BITS + L2_BITS);
                for(j = 0; j < L2_SIZE; j++) {
                    if (p->valid_tag == virt_valid_tag &&
                        p->phys_addr >= start && p->phys_addr < end &&
                        (p->prot & PROT_WRITE)) {
                        if (addr < MMAP_AREA_END) {
                            mprotect((void *)addr, TARGET_PAGE_SIZE,
                                     p->prot & ~PROT_WRITE);
                        }
                    }
                    addr += TARGET_PAGE_SIZE;
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
                                  unsigned long start)
{
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
        addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
        if (addr == start) {
            tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_RAM;
        }
    }
}

/* update the TLB corresponding to virtual page vaddr and phys addr
   addr so that it is no longer dirty */
static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)
{
    CPUState *env = cpu_single_env;

    phys_ram_dirty[(addr - (unsigned long)phys_ram_base) >> TARGET_PAGE_BITS] = 1;

    addr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_set_dirty1(&env->tlb_write[0][i], addr);
    tlb_set_dirty1(&env->tlb_write[1][i], addr);
}
/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page(CPUState *env, target_ulong vaddr,
                 target_phys_addr_t paddr, int prot,
                 int is_user, int is_softmmu)
{
    TranslationBlock *first_tb;
    target_ulong address;
    unsigned long addend;

    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
        pd = IO_MEM_UNASSIGNED;
        pd = p->phys_offset;
        if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
            /* NOTE: we also allocate the page at this stage */
            p1 = page_find_alloc(pd >> TARGET_PAGE_BITS);
            first_tb = p1->first_tb;
        }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=0x%08x paddr=0x%08x prot=%x u=%d c=%d smmu=%d pd=0x%08x\n",
           vaddr, paddr, prot, is_user, (first_tb != NULL), is_softmmu, pd);
#endif

#if !defined(CONFIG_SOFTMMU)
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
        /* IO memory case */
        address = vaddr | pd;
        /* standard memory */
        addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);

    index = (vaddr >> 12) & (CPU_TLB_SIZE - 1);
        if (prot & PAGE_READ) {
            env->tlb_read[is_user][index].address = address;
            env->tlb_read[is_user][index].addend = addend;
        } else {
            env->tlb_read[is_user][index].address = -1;
            env->tlb_read[is_user][index].addend = -1;
        }
        if (prot & PAGE_WRITE) {
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM) {
                /* ROM: access is ignored (same as unassigned) */
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_ROM;
                env->tlb_write[is_user][index].addend = addend;
            /* XXX: the PowerPC code seems not ready to handle
               self modifying code with DCBI */
#if defined(TARGET_HAS_SMC) || 1
                /* if code is present, we use a specific memory
                   handler. It works only for physical memory access */
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_CODE;
                env->tlb_write[is_user][index].addend = addend;
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                !cpu_physical_memory_is_dirty(pd)) {
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_NOTDIRTY;
                env->tlb_write[is_user][index].addend = addend;
            } else {
                env->tlb_write[is_user][index].address = address;
                env->tlb_write[is_user][index].addend = addend;
            }
        } else {
            env->tlb_write[is_user][index].address = -1;
            env->tlb_write[is_user][index].addend = -1;
        }

#if !defined(CONFIG_SOFTMMU)
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO access: no mapping is done as it will be handled by the
               soft MMU */
            if (!(env->hflags & HF_SOFTMMU_MASK))
            if (vaddr >= MMAP_AREA_END) {
                if (prot & PROT_WRITE) {
                    if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
#if defined(TARGET_HAS_SMC) || 1
                        ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                         !cpu_physical_memory_is_dirty(pd))) {
                        /* ROM: we do as if code was inside */
                        /* if code is present, we only map as read only and save the
                           original mapping */
                        vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS);
                        vp->valid_tag = virt_valid_tag;
                        prot &= ~PAGE_WRITE;
                    }
                map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
                                MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
                if (map_addr == MAP_FAILED) {
                    cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(unsigned long addr, unsigned long pc, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x\n", addr);
#endif
    addr &= TARGET_PAGE_MASK;

    /* if it is not mapped, no need to worry here */
    if (addr >= MMAP_AREA_END)
        return 0;
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    /* NOTE: in this case, validate_tag is _not_ tested as it
       validates only the code TLB */
    if (vp->valid_tag != virt_valid_tag)
        return 0;
    if (!(vp->prot & PAGE_WRITE))
        return 0;
#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
           addr, vp->phys_addr, vp->prot);
#endif
    if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
        cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
                  (unsigned long)addr, vp->prot);
    /* set the dirty bit */
    phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 1;
    /* flush the code inside */
    tb_invalidate_phys_page(vp->phys_addr, pc, puc);
void tlb_flush(CPUState *env, int flush_global)

void tlb_flush_page(CPUState *env, target_ulong addr)

int tlb_set_page(CPUState *env, target_ulong vaddr,
                 target_phys_addr_t paddr, int prot,
                 int is_user, int is_softmmu)
/* dump memory mappings */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");

    for(i = 0; i <= L1_SIZE; i++) {
        for(j = 0;j < L2_SIZE; j++) {
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
int page_get_flags(unsigned long address)
{
    p = page_find(address >> TARGET_PAGE_BITS);
/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is positioned automatically
   depending on PAGE_WRITE */
void page_set_flags(unsigned long start, unsigned long end, int flags)
{
    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    spin_lock(&tb_lock);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* if the write protection is set, then we invalidate the code
           inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            tb_invalidate_phys_page(addr, 0, NULL);
    }
    spin_unlock(&tb_lock);
}
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(unsigned long address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    unsigned long host_start, host_end, addr;

    host_start = address & host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    host_end = host_start + host_page_size;
    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)host_start, host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
/* call this function when system calls directly modify a memory area */
void page_unprotect_range(uint8_t *data, unsigned long data_size)
{
    unsigned long start, end, addr;

    start = (unsigned long)data;
    end = start + data_size;
    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        page_unprotect(addr, 0, NULL);
    }
}

static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)

#endif /* defined(CONFIG_USER_ONLY) */
/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                  unsigned long size,
                                  unsigned long phys_offset)
{
    unsigned long addr, end_addr;

    end_addr = start_addr + size;
    for(addr = start_addr; addr < end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS);
        p->phys_offset = phys_offset;
        if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM)
            phys_offset += TARGET_PAGE_SIZE;
    }
}
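/* Illustrative usage sketch (not part of the original file): registering a
   RAM region and a ROM region.  The sizes and guest addresses are invented
   for the example; a plain RAM offset has zero low bits, while IO_MEM_ROM in
   the low bits marks the pages as ROM as described in the comment above. */
#if 0
static void example_register_memory(void)
{
    /* 8 MB of RAM starting at guest physical address 0 */
    cpu_register_physical_memory(0x00000000, 8 * 1024 * 1024, 0x00000000);
    /* a 64 KB ROM just below 1 MB, backed by RAM offset 8 MB */
    cpu_register_physical_memory(0x000f0000, 0x10000,
                                 (8 * 1024 * 1024) | IO_MEM_ROM);
}
#endif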
static uint32_t unassigned_mem_readb(target_phys_addr_t addr)

static void unassigned_mem_writeb(target_phys_addr_t addr, uint32_t val)

static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readb,
    unassigned_mem_readb,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writeb,
    unassigned_mem_writeb,
};
/* self modifying code support in soft mmu mode : writing to a page
   containing code comes to these functions */

static void code_mem_writeb(target_phys_addr_t addr, uint32_t val)
{
    unsigned long phys_addr;

    phys_addr = addr - (unsigned long)phys_ram_base;
#if !defined(CONFIG_USER_ONLY)
    tb_invalidate_phys_page_fast(phys_addr, 1);
#endif
    stb_raw((uint8_t *)addr, val);
    phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
}

static void code_mem_writew(target_phys_addr_t addr, uint32_t val)
{
    unsigned long phys_addr;

    phys_addr = addr - (unsigned long)phys_ram_base;
#if !defined(CONFIG_USER_ONLY)
    tb_invalidate_phys_page_fast(phys_addr, 2);
#endif
    stw_raw((uint8_t *)addr, val);
    phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
}

static void code_mem_writel(target_phys_addr_t addr, uint32_t val)
{
    unsigned long phys_addr;

    phys_addr = addr - (unsigned long)phys_ram_base;
#if !defined(CONFIG_USER_ONLY)
    tb_invalidate_phys_page_fast(phys_addr, 4);
#endif
    stl_raw((uint8_t *)addr, val);
    phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
}
static CPUReadMemoryFunc *code_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc *code_mem_write[3] = {
    code_mem_writeb,
    code_mem_writew,
    code_mem_writel,
};

static void notdirty_mem_writeb(target_phys_addr_t addr, uint32_t val)
{
    stb_raw((uint8_t *)addr, val);
    tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writew(target_phys_addr_t addr, uint32_t val)
{
    stw_raw((uint8_t *)addr, val);
    tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writel(target_phys_addr_t addr, uint32_t val)
{
    stl_raw((uint8_t *)addr, val);
    tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
}

static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};
static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, code_mem_read, unassigned_mem_write);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write);
    cpu_register_io_memory(IO_MEM_CODE >> IO_MEM_SHIFT, code_mem_read, code_mem_write);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, code_mem_read, notdirty_mem_write);

    /* alloc dirty bits array */
    phys_ram_dirty = qemu_malloc(phys_ram_size >> TARGET_PAGE_BITS);
}
/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). All functions must be supplied. If io_index is non zero, the
   corresponding io zone is modified. If it is zero, a new io zone is
   allocated. The return value can be used with
   cpu_register_physical_memory(). (-1) is returned if error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write)
{
    if (io_index <= 0) {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0;i < 3; i++) {
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    return io_index << IO_MEM_SHIFT;
}
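/* Illustrative usage sketch (not part of the original file): registering a
   small memory-mapped device with cpu_register_io_memory() and mapping it
   with cpu_register_physical_memory().  The device, addresses and behaviour
   are invented for the example. */
#if 0
static uint32_t example_io_readb(target_phys_addr_t addr)
{
    return 0xff;                       /* the dummy device reads as 0xff */
}

static void example_io_writeb(target_phys_addr_t addr, uint32_t val)
{
    /* a real device would latch 'val' here */
}

static CPUReadMemoryFunc *example_io_read[3] = {
    example_io_readb,                  /* byte */
    example_io_readb,                  /* word */
    example_io_readb,                  /* dword */
};

static CPUWriteMemoryFunc *example_io_write[3] = {
    example_io_writeb,
    example_io_writeb,
    example_io_writeb,
};

static void example_register_device(void)
{
    int io_index;

    /* io_index 0 asks for a newly allocated slot */
    io_index = cpu_register_io_memory(0, example_io_read, example_io_write);
    /* map one page of the device at guest physical address 0xd0000000 */
    cpu_register_physical_memory(0xd0000000, TARGET_PAGE_SIZE, io_index);
}
#endif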
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            memcpy((uint8_t *)addr, buf, len);
        } else {
            if (!(flags & PAGE_READ))
                return;
            memcpy(buf, (uint8_t *)addr, len);
        }
#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    target_phys_addr_t page;

        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
            pd = IO_MEM_UNASSIGNED;
            pd = p->phys_offset;

            if ((pd & ~TARGET_PAGE_MASK) != 0) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    io_mem_write[io_index][2](addr, val);
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_raw(buf);
                    io_mem_write[io_index][1](addr, val);
                    val = ldub_raw(buf);
                    io_mem_write[io_index][0](addr, val);
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                phys_ram_dirty[page >> TARGET_PAGE_BITS] = 1;
            }

            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                (pd & ~TARGET_PAGE_MASK) != IO_MEM_CODE) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](addr);
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](addr);
                    val = io_mem_read[io_index][0](addr);

                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
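/* Illustrative usage sketch (not part of the original file): copying a buffer
   into guest RAM and reading it back through the slow path above.  The guest
   physical address is arbitrary. */
#if 0
static void example_poke_guest_ram(void)
{
    uint8_t out[4] = { 0xde, 0xad, 0xbe, 0xef };
    uint8_t in[4];

    cpu_physical_memory_rw(0x00100000, out, sizeof(out), 1);   /* write */
    cpu_physical_memory_rw(0x00100000, in, sizeof(in), 0);     /* read */
}
#endif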
/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    target_ulong page, phys_addr;

        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"