/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#if !defined(CONFIG_SOFTMMU)

//#define DEBUG_TB_INVALIDATE

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

/* threshold to flush the translated code buffer */
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START 0x00000000
#define MMAP_AREA_END   0xa8000000
TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
TranslationBlock *tb_hash[CODE_GEN_HASH_SIZE];
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
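/* Note: two hash tables are kept. 'tb_hash' is indexed by the target
   virtual PC (tb_hash_func(tb->pc)) and is used for fast TB lookup from
   the execution loop, while 'tb_phys_hash' is indexed by the physical PC
   so that self-modifying code can invalidate a TB regardless of the
   virtual mapping it was translated under. */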
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE];
uint8_t *code_gen_ptr;

uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;
typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;
typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low 12 bits */
    unsigned long phys_offset;
} PhysPageDesc;
typedef struct VirtPageDesc {
    /* physical address of code page. It is valid only if 'valid_tag'
       matches 'virt_valid_tag' */
    target_ulong phys_addr;
    unsigned int valid_tag;
#if !defined(CONFIG_SOFTMMU)
    /* original page access rights. It is valid only if 'valid_tag'
       matches 'virt_valid_tag' */
    unsigned int prot;
#endif
} VirtPageDesc;
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)
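/* The page tables below are two-level: a target page index
   (address >> TARGET_PAGE_BITS) is split into an L1 index (its upper
   L1_BITS, obtained with 'index >> L2_BITS') selecting a lazily allocated
   second-level table, and an L2 index (its lower L2_BITS,
   'index & (L2_SIZE - 1)') selecting the entry inside that table. */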
static void io_mem_init(void);

unsigned long real_host_page_size;
unsigned long host_page_bits;
unsigned long host_page_size;
unsigned long host_page_mask;
/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
static PhysPageDesc *l1_phys_map[L1_SIZE];

#if !defined(CONFIG_USER_ONLY)
static VirtPageDesc *l1_virt_map[L1_SIZE];
static unsigned int virt_valid_tag;
#endif
/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb;

char *logfilename = "/tmp/qemu.log";
static void page_init(void)
{
    /* NOTE: we can always suppose that host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    real_host_page_size = 4096;
#else
    real_host_page_size = getpagesize();
#endif
    if (host_page_size == 0)
        host_page_size = real_host_page_size;
    if (host_page_size < TARGET_PAGE_SIZE)
        host_page_size = TARGET_PAGE_SIZE;
    host_page_bits = 0;
    while ((1 << host_page_bits) < host_page_size)
        host_page_bits++;
    host_page_mask = ~(host_page_size - 1);
#if !defined(CONFIG_USER_ONLY)
    virt_valid_tag = 1;
#endif
}
static inline PageDesc *page_find_alloc(unsigned int index)
{
    PageDesc **lp, *p;

    lp = &l1_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(unsigned int index)
{
    PageDesc *p;

    p = l1_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}
static inline PhysPageDesc *phys_page_find_alloc(unsigned int index)
{
    PhysPageDesc **lp, *p;

    lp = &l1_phys_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(PhysPageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PhysPageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(unsigned int index)
{
    PhysPageDesc *p;

    p = l1_phys_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}
#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(CPUState *env, target_ulong addr);
static void tlb_unprotect_code_phys(CPUState *env, unsigned long phys_addr,
                                    target_ulong vaddr);

static inline VirtPageDesc *virt_page_find_alloc(unsigned int index)
{
    VirtPageDesc **lp, *p;

    lp = &l1_virt_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(VirtPageDesc) * L2_SIZE);
        memset(p, 0, sizeof(VirtPageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline VirtPageDesc *virt_page_find(unsigned int index)
{
    VirtPageDesc *p;

    p = l1_virt_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}
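/* Illustrative lookup pattern (not part of the original file): callers
   below typically do
       p = page_find(addr >> TARGET_PAGE_BITS);
       if (!p)
           return;
   i.e. the non-allocating variants return NULL for pages that were never
   touched, while the *_alloc variants create the second-level table on
   demand. */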
static void virt_page_flush(void)
{
    int i, j;
    VirtPageDesc *p;

    virt_valid_tag++;
    if (virt_valid_tag == 0) {
        virt_valid_tag = 1;
        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                for(j = 0; j < L2_SIZE; j++)
                    p[j].valid_tag = 0;
            }
        }
    }
}
#else
static void virt_page_flush(void)
{
}
#endif
void cpu_exec_init(void)
{
    if (!code_gen_ptr) {
        code_gen_ptr = code_gen_buffer;
        page_init();
        io_mem_init();
    }
}
static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}
/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env)
{
    int i;

#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
           code_gen_ptr - code_gen_buffer,
           nb_tbs,
           nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
#endif
    nb_tbs = 0;
    for(i = 0; i < CODE_GEN_HASH_SIZE; i++)
        tb_hash[i] = NULL;
    virt_page_flush();

    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++)
        tb_phys_hash[i] = NULL;
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
}
#ifdef DEBUG_TB_CHECK
static void tb_invalidate_check(unsigned long address)
{
    TranslationBlock *tb;
    int i;

    address &= TARGET_PAGE_MASK;
    for(i = 0; i < CODE_GEN_HASH_SIZE; i++) {
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, tb->pc, tb->size);
            }
        }
    }
}
/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0; i < CODE_GEN_HASH_SIZE; i++) {
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}
void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}
#endif
/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;

    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}
/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}
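/* Resetting jump entry 'n' re-targets the patched direct jump at the code
   that immediately follows it inside this TB (tc_ptr + tb_next_offset[n]).
   This effectively unchains the TB: instead of branching straight into the
   next block, execution returns to the main loop to look the next TB up. */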
static inline void tb_invalidate(TranslationBlock *tb)
{
    unsigned int h, n1;
    TranslationBlock *tb1, *tb2, **ptb;

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_hash_func(tb->pc);
    ptb = &tb_hash[h];
    for(;;) {
        tb1 = *ptb;
        /* NOTE: the TB is not necessarily linked in the hash. It
           indicates that it is not currently used */
        if (tb1 == NULL)
            return;
        if (tb1 == tb) {
            *ptb = tb1->hash_next;
            break;
        }
        ptb = &tb1->hash_next;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
}
static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
{
    PageDesc *p;
    unsigned int h;
    target_ulong phys_pc;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidate(tb);
}
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
    if (!p->code_bitmap)
        return;
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
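/* The code bitmap holds one bit per byte of the target page
   (TARGET_PAGE_SIZE / 8 bytes in total); a set bit means the byte is
   covered by at least one translated block. It lets the fast write path
   below skip full invalidation when a store does not overlap any code. */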
#ifdef TARGET_HAS_PRECISE_SMC
static void tb_gen_code(CPUState *env,
                        target_ulong pc, target_ulong cs_base, int flags,
                        int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, (unsigned long)pc);
    tb = tb_alloc((unsigned long)pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc((unsigned long)pc);
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = ((unsigned long)pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if (((unsigned long)pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
}
#endif
/* invalidate all TBs which intersect with the target physical page
   starting in range [start, end). NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
                                   int is_cpu_write_access)
{
    int n, current_tb_modified, current_tb_not_found, current_flags;
#if defined(TARGET_HAS_PRECISE_SMC) || !defined(CONFIG_USER_ONLY)
    CPUState *env = cpu_single_env;
#endif
    PageDesc *p;
    TranslationBlock *tb, *tb_next, *current_tb;
    target_ulong tb_start, tb_end;
    target_ulong current_pc, current_cs_base;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }
    /* we remove all the TBs in the range [start, end) */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    current_tb_not_found = is_cpu_write_access;
    current_tb_modified = 0;
    current_tb = NULL; /* avoid warning */
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_write_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_write_pc);
                }
            }
            if (current_tb == tb &&
                !(current_tb->cflags & CF_SINGLE_INSN)) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */
                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_write_pc, NULL);
#if defined(TARGET_I386)
                current_flags = env->hflags;
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
                current_cs_base = (target_ulong)env->segs[R_CS].base;
                current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            tb_phys_invalidate(tb, -1);
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}
/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
{
    PageDesc *p;
    int offset, b;

    fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
            cpu_single_env->mem_write_vaddr, len,
            cpu_single_env->eip,
            cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_ulong addr,
                                    unsigned long pc, void *puc)
{
    int n, current_flags, current_tb_modified;
    target_ulong current_pc, current_cs_base;
    PageDesc *p;
    TranslationBlock *tb, *current_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *env = cpu_single_env;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
    current_tb_modified = 0;
    current_tb = NULL;
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            !(current_tb->cflags & CF_SINGLE_INSN)) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */
            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
#if defined(TARGET_I386)
            current_flags = env->hflags;
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
            current_cs_base = (target_ulong)env->segs[R_CS].base;
            current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif
/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, unsigned int page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#ifdef TARGET_HAS_SMC

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        unsigned long host_start, host_end, addr;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        host_start = page_addr & host_page_mask;
        host_end = host_start + host_page_size;
        prot = 0;
        for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE)
            prot |= page_get_flags(addr);
        mprotect((void *)host_start, host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x%08lx\n",
               host_start);
#endif
        p->flags &= ~PAGE_WRITE;
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        target_ulong virt_addr;

        virt_addr = (tb->pc & TARGET_PAGE_MASK) + (n << TARGET_PAGE_BITS);
        tlb_protect_code(cpu_single_env, virt_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}
/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(unsigned long pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
        (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    return tb;
}
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;
#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
}
/* link the tb with the other TBs */
void tb_link(TranslationBlock *tb)
{
#if !defined(CONFIG_USER_ONLY)
    {
        VirtPageDesc *vp;
        target_ulong addr;

        /* save the code memory mappings (needed to invalidate the code) */
        addr = tb->pc & TARGET_PAGE_MASK;
        vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS);
#ifdef DEBUG_TLB_CHECK
        if (vp->valid_tag == virt_valid_tag &&
            vp->phys_addr != tb->page_addr[0]) {
            printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
                   addr, tb->page_addr[0], vp->phys_addr);
        }
#endif
        vp->phys_addr = tb->page_addr[0];
        if (vp->valid_tag != virt_valid_tag) {
            vp->valid_tag = virt_valid_tag;
#if !defined(CONFIG_SOFTMMU)
            vp->prot = 0;
#endif
        }

        if (tb->page_addr[1] != -1) {
            addr += TARGET_PAGE_SIZE;
            vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS);
#ifdef DEBUG_TLB_CHECK
            if (vp->valid_tag == virt_valid_tag &&
                vp->phys_addr != tb->page_addr[1]) {
                printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
                       addr, tb->page_addr[1], vp->phys_addr);
            }
#endif
            vp->phys_addr = tb->page_addr[1];
            if (vp->valid_tag != virt_valid_tag) {
                vp->valid_tag = virt_valid_tag;
#if !defined(CONFIG_SOFTMMU)
                vp->prot = 0;
#endif
            }
        }
    }
#endif

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    tb->cflags &= ~CF_FP_USED;
    if (tb->cflags & CF_TB_FP_USED)
        tb->cflags |= CF_FP_USED;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);
}
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}
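/* The binary search works because TBs are carved sequentially out of
   code_gen_buffer, so the 'tbs' array is sorted by tc_ptr; given a host PC
   inside generated code (e.g. env->mem_write_pc from a fault handler),
   tb_find_pc recovers the TB that contains it. */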
static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_ulong phys_addr;

    phys_addr = cpu_get_phys_page_debug(env, pc);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + 1, 0);
}
/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
   breakpoint is reached */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
{
#if defined(TARGET_I386) || defined(TARGET_PPC)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            return 0;
    }

    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
        return -1;
    env->breakpoints[env->nb_breakpoints++] = pc;

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}
/* remove a breakpoint */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
{
#if defined(TARGET_I386) || defined(TARGET_PPC)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            goto found;
    }
    return -1;
 found:
    memmove(&env->breakpoints[i], &env->breakpoints[i + 1],
            (env->nb_breakpoints - (i + 1)) * sizeof(env->breakpoints[0]));
    env->nb_breakpoints--;

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_I386) || defined(TARGET_PPC)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
        tb_flush(env);
    }
#endif
}
/* enable or disable low levels log */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
    }
}
void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
}
/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    TranslationBlock *tb;
    static int interrupt_lock;

    env->interrupt_request |= mask;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    tb = env->current_tb;
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
        interrupt_lock = 0;
    }
}
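/* Unlinking the currently executing TB with tb_reset_jump_recursive()
   breaks any direct chaining out of it, so the CPU drops back to the main
   execution loop at the next block boundary and notices the pending
   interrupt_request. */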
void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}
CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops after optimization for each compiled TB" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
    { 0, NULL, NULL },
};
static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}
/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        for(item = cpu_log_items; item->mask != 0; item++) {
            if (cmp1(p, p1 - p, item->name))
                goto found;
        }
        return 0;
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_x86_dump_state(env, stderr, X86_DUMP_FPU | X86_DUMP_CCOP);
#endif
    va_end(ap);
    abort();
}
#if !defined(CONFIG_USER_ONLY)

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_read[0][i].address = -1;
        env->tlb_write[0][i].address = -1;
        env->tlb_read[1][i].address = -1;
        env->tlb_write[1][i].address = -1;
    }

    virt_page_flush();
    for(i = 0; i < CODE_GEN_HASH_SIZE; i++)
        tb_hash[i] = NULL;

#if !defined(CONFIG_SOFTMMU)
    munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
#endif
}
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->address &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)))
        tlb_entry->address = -1;
}
void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i, n;
    VirtPageDesc *vp;
    PageDesc *p;
    TranslationBlock *tb;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: 0x%08x\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_read[0][i], addr);
    tlb_flush_entry(&env->tlb_write[0][i], addr);
    tlb_flush_entry(&env->tlb_read[1][i], addr);
    tlb_flush_entry(&env->tlb_write[1][i], addr);

    /* remove from the virtual pc hash table all the TB at this
       virtual address */
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (vp && vp->valid_tag == virt_valid_tag) {
        p = page_find(vp->phys_addr >> TARGET_PAGE_BITS);
        if (p) {
            /* we remove all the links to the TBs in this virtual page */
            tb = p->first_tb;
            while (tb != NULL) {
                n = (long)tb & 3;
                tb = (TranslationBlock *)((long)tb & ~3);
                if ((tb->pc & TARGET_PAGE_MASK) == addr ||
                    ((tb->pc + tb->size - 1) & TARGET_PAGE_MASK) == addr) {
                    tb_invalidate(tb);
                }
                tb = tb->page_next[n];
            }
        }
        vp->valid_tag = 0;
    }

#if !defined(CONFIG_SOFTMMU)
    if (addr < MMAP_AREA_END)
        munmap((void *)addr, TARGET_PAGE_SIZE);
#endif
}
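/* The software TLB is direct-mapped: the entry for a virtual address sits
   at index (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1), which is why
   flushing a single page only needs to clear that one index in each of the
   read/write, kernel/user tables above. */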
static inline void tlb_protect_code1(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->address &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) &&
        (tlb_entry->address & ~TARGET_PAGE_MASK) != IO_MEM_CODE &&
        (tlb_entry->address & ~TARGET_PAGE_MASK) != IO_MEM_ROM) {
        tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_CODE;
    }
}
/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(CPUState *env, target_ulong addr)
{
    int i;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_protect_code1(&env->tlb_write[0][i], addr);
    tlb_protect_code1(&env->tlb_write[1][i], addr);
#if !defined(CONFIG_SOFTMMU)
    /* NOTE: as we generated the code for this page, it is already at
       least readable */
    if (addr < MMAP_AREA_END)
        mprotect((void *)addr, TARGET_PAGE_SIZE, PROT_READ);
#endif
}
static inline void tlb_unprotect_code2(CPUTLBEntry *tlb_entry,
                                       unsigned long phys_addr)
{
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_CODE &&
        ((tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend) == phys_addr) {
        tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
    }
}
/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, unsigned long phys_addr, target_ulong vaddr)
{
    int i;

    phys_addr &= TARGET_PAGE_MASK;
    phys_addr += (long)phys_ram_base;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_unprotect_code2(&env->tlb_write[0][i], phys_addr);
    tlb_unprotect_code2(&env->tlb_write[1][i], phys_addr);
}
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;

    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
        }
    }
}
void cpu_physical_memory_reset_dirty(target_ulong start, target_ulong end)
{
    CPUState *env;
    unsigned long length, start1;
    int i;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    memset(phys_ram_dirty + (start >> TARGET_PAGE_BITS), 0, length >> TARGET_PAGE_BITS);

    env = cpu_single_env;
    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = start + (unsigned long)phys_ram_base;
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_reset_dirty_range(&env->tlb_write[0][i], start1, length);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_reset_dirty_range(&env->tlb_write[1][i], start1, length);

#if !defined(CONFIG_SOFTMMU)
    /* XXX: this is expensive */
    {
        VirtPageDesc *p;
        int j;
        target_ulong addr;

        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                addr = i << (TARGET_PAGE_BITS + L2_BITS);
                for(j = 0; j < L2_SIZE; j++) {
                    if (p->valid_tag == virt_valid_tag &&
                        p->phys_addr >= start && p->phys_addr < end &&
                        (p->prot & PROT_WRITE)) {
                        if (addr < MMAP_AREA_END) {
                            mprotect((void *)addr, TARGET_PAGE_SIZE,
                                     p->prot & ~PROT_WRITE);
                        }
                    }
                    addr += TARGET_PAGE_SIZE;
                    p++;
                }
            }
        }
    }
#endif
}
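/* Dirty tracking sketch: phys_ram_dirty keeps one byte per target page.
   Resetting a range clears those bytes and downgrades the matching write
   TLB entries to IO_MEM_NOTDIRTY (and, in the mmap()-based user mode,
   write protects the host pages), so the next guest store takes the slow
   path, which calls tlb_set_dirty() and marks the page dirty again. */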
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
                                  unsigned long start)
{
    unsigned long addr;

    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
        addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
        if (addr == start) {
            tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_RAM;
        }
    }
}
/* update the TLB corresponding to virtual page vaddr and phys addr
   addr so that it is no longer dirty */
static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)
{
    CPUState *env = cpu_single_env;
    int i;

    phys_ram_dirty[(addr - (unsigned long)phys_ram_base) >> TARGET_PAGE_BITS] = 1;

    addr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_set_dirty1(&env->tlb_write[0][i], addr);
    tlb_set_dirty1(&env->tlb_write[1][i], addr);
}
/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page(CPUState *env, target_ulong vaddr,
                 target_phys_addr_t paddr, int prot,
                 int is_user, int is_softmmu)
{
    PhysPageDesc *p;
    unsigned long pd;
    TranslationBlock *first_tb;
    unsigned int index;
    target_ulong address;
    unsigned long addend;
    int ret;

    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    first_tb = NULL;
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        PageDesc *p1;
        pd = p->phys_offset;
        if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
            /* NOTE: we also allocate the page at this stage */
            p1 = page_find_alloc(pd >> TARGET_PAGE_BITS);
            first_tb = p1->first_tb;
        }
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=0x%08x paddr=0x%08x prot=%x u=%d c=%d smmu=%d pd=0x%08x\n",
           vaddr, paddr, prot, is_user, (first_tb != NULL), is_softmmu, pd);
#endif

    ret = 0;
#if !defined(CONFIG_SOFTMMU)
    if (is_softmmu)
#endif
    {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO memory case */
            address = vaddr | pd;
            addend = paddr;
        } else {
            /* standard memory */
            address = vaddr;
            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
        }

        index = (vaddr >> 12) & (CPU_TLB_SIZE - 1);
        addend -= vaddr;
        if (prot & PAGE_READ) {
            env->tlb_read[is_user][index].address = address;
            env->tlb_read[is_user][index].addend = addend;
        } else {
            env->tlb_read[is_user][index].address = -1;
            env->tlb_read[is_user][index].addend = -1;
        }
        if (prot & PAGE_WRITE) {
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM) {
                /* ROM: access is ignored (same as unassigned) */
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_ROM;
                env->tlb_write[is_user][index].addend = addend;
            } else
                /* XXX: the PowerPC code seems not ready to handle
                   self modifying code with DCBI */
#if defined(TARGET_HAS_SMC) || 1
            if (first_tb) {
                /* if code is present, we use a specific memory
                   handler. It works only for physical memory access */
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_CODE;
                env->tlb_write[is_user][index].addend = addend;
            } else
#endif
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                !cpu_physical_memory_is_dirty(pd)) {
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_NOTDIRTY;
                env->tlb_write[is_user][index].addend = addend;
            } else {
                env->tlb_write[is_user][index].address = address;
                env->tlb_write[is_user][index].addend = addend;
            }
        } else {
            env->tlb_write[is_user][index].address = -1;
            env->tlb_write[is_user][index].addend = -1;
        }
    }
#if !defined(CONFIG_SOFTMMU)
    else {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO access: no mapping is done as it will be handled by the
               soft MMU */
            if (!(env->hflags & HF_SOFTMMU_MASK))
                ret = 2;
        } else {
            void *map_addr;

            if (vaddr >= MMAP_AREA_END) {
                ret = 2;
            } else {
                if (prot & PROT_WRITE) {
                    if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
#if defined(TARGET_HAS_SMC) || 1
                        first_tb ||
#endif
                        ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                         !cpu_physical_memory_is_dirty(pd))) {
                        /* ROM: we do as if code was inside */
                        /* if code is present, we only map as read only and save the
                           original mapping */
                        VirtPageDesc *vp;

                        vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS);
                        vp->valid_tag = virt_valid_tag;
                        prot &= ~PAGE_WRITE;
                    }
                }
                map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
                                MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
                if (map_addr == MAP_FAILED) {
                    cpu_abort(env, "mmap failed when mapped physical address 0x%08x to virtual address 0x%08x\n",
                              paddr, vaddr);
                }
            }
        }
    }
#endif
    return ret;
}
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(unsigned long addr, unsigned long pc, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    VirtPageDesc *vp;

#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x\n", addr);
#endif
    addr &= TARGET_PAGE_MASK;

    /* if it is not mapped, no need to worry here */
    if (addr >= MMAP_AREA_END)
        return 0;
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (!vp)
        return 0;
    /* NOTE: in this case, validate_tag is _not_ tested as it
       validates only the code TLB */
    if (vp->valid_tag != virt_valid_tag)
        return 0;
    if (!(vp->prot & PAGE_WRITE))
        return 0;
#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
           addr, vp->phys_addr, vp->prot);
#endif
    if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
        cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
                  (unsigned long)addr, vp->prot);
    /* set the dirty bit */
    phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 1;
    /* flush the code inside */
    tb_invalidate_phys_page(vp->phys_addr, pc, puc);
    return 1;
#else
    return 0;
#endif
}
void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

int tlb_set_page(CPUState *env, target_ulong vaddr,
                 target_phys_addr_t paddr, int prot,
                 int is_user, int is_softmmu)
{
    return 0;
}
/* dump memory mappings */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    start = -1;
    end = -1;
    prot = 0;
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0; j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}
int page_get_flags(unsigned long address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}
/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is positioned automatically
   depending on PAGE_WRITE */
void page_set_flags(unsigned long start, unsigned long end, int flags)
{
    PageDesc *p;
    unsigned long addr;

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    spin_lock(&tb_lock);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* if the write protection is set, then we invalidate the code
           inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
    spin_unlock(&tb_lock);
}
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(unsigned long address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    unsigned long host_start, host_end, addr;

    host_start = address & host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1)
        return 0;
    host_end = host_start + host_page_size;
    p = p1;
    prot = 0;
    for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)host_start, host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            return 1;
        }
    }
    return 0;
}
/* call this function when system calls directly modify a memory area */
void page_unprotect_range(uint8_t *data, unsigned long data_size)
{
    unsigned long start, end, addr;

    start = (unsigned long)data;
    end = start + data_size;
    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        page_unprotect(addr, 0, NULL);
    }
}
static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */
/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                  unsigned long size,
                                  unsigned long phys_offset)
{
    unsigned long addr, end_addr;
    PhysPageDesc *p;

    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS);
        p->phys_offset = phys_offset;
        if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM)
            phys_offset += TARGET_PAGE_SIZE;
    }
}
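/* Illustrative use (not from the original file): a machine init function
   would typically register its RAM with
       cpu_register_physical_memory(0, ram_size, IO_MEM_RAM);
   where 'ram_size' is a hypothetical byte count, while MMIO regions pass
   an io_index value obtained from cpu_register_io_memory() below. */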
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
}

static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readb,
    unassigned_mem_readb,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writeb,
    unassigned_mem_writeb,
};
/* self modifying code support in soft mmu mode : writing to a page
   containing code comes to these functions */
static void code_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long phys_addr;

    phys_addr = addr - (unsigned long)phys_ram_base;
#if !defined(CONFIG_USER_ONLY)
    tb_invalidate_phys_page_fast(phys_addr, 1);
#endif
    stb_raw((uint8_t *)addr, val);
    phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
}

static void code_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long phys_addr;

    phys_addr = addr - (unsigned long)phys_ram_base;
#if !defined(CONFIG_USER_ONLY)
    tb_invalidate_phys_page_fast(phys_addr, 2);
#endif
    stw_raw((uint8_t *)addr, val);
    phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
}

static void code_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long phys_addr;

    phys_addr = addr - (unsigned long)phys_ram_base;
#if !defined(CONFIG_USER_ONLY)
    tb_invalidate_phys_page_fast(phys_addr, 4);
#endif
    stl_raw((uint8_t *)addr, val);
    phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
}
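/* When a RAM page contains translated code, tlb_set_page() points its
   write TLB entries at IO_MEM_CODE, so every guest store to that page goes
   through the handlers above: the affected TBs are invalidated first
   (using the per-page code bitmap when available), then the raw store is
   performed and the page is marked dirty. */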
static CPUReadMemoryFunc *code_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc *code_mem_write[3] = {
    code_mem_writeb,
    code_mem_writew,
    code_mem_writel,
};
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    stb_raw((uint8_t *)addr, val);
    tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    stw_raw((uint8_t *)addr, val);
    tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    stl_raw((uint8_t *)addr, val);
    tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
}

static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};
static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, code_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_CODE >> IO_MEM_SHIFT, code_mem_read, code_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, code_mem_read, notdirty_mem_write, NULL);
    io_mem_nb = 5;

    /* alloc dirty bits array */
    phys_ram_dirty = qemu_malloc(phys_ram_size >> TARGET_PAGE_BITS);
}
/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). All functions must be supplied. If io_index is non zero, the
   corresponding io zone is modified. If it is zero, a new io zone is
   allocated. The return value can be used with
   cpu_register_physical_memory(). (-1) is returned if error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i;

    if (io_index <= 0) {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0; i < 3; i++) {
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return io_index << IO_MEM_SHIFT;
}
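/* Illustrative use (not part of the original file): a device model passes
   three read and three write handlers (byte/word/long) plus an opaque
   state pointer, then maps the returned token at a physical address:
       io = cpu_register_io_memory(0, my_read, my_write, my_state);
       cpu_register_physical_memory(0xf0000000, 0x1000, io);
   'my_read', 'my_write', 'my_state' and the address/size are hypothetical. */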
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            memcpy((uint8_t *)addr, buf, len);
        } else {
            if (!(flags & PAGE_READ))
                return;
            memcpy(buf, (uint8_t *)addr, len);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != 0) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_raw(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_raw(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_raw(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                /* set dirty bit */
                phys_ram_dirty[page >> TARGET_PAGE_BITS] = 1;
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                (pd & ~TARGET_PAGE_MASK) != IO_MEM_CODE) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_raw(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_raw(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_raw(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
#endif
/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_ulong page, phys_addr;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif
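/* The repeated inclusions of "softmmu_template.h" above, one per access
   size, expand to the slow-path load/store helpers; with MMUSUFFIX set to
   _cmmu they generate the code-fetch variants used when translating guest
   instructions. */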