/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <sys/types.h>

//#define DEBUG_TB_INVALIDATE
/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

/* threshold to flush the translated code buffer */
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START 0x00000000
#define MMAP_AREA_END   0xa8000000

TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
TranslationBlock *tb_hash[CODE_GEN_HASH_SIZE];
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];

/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE];
uint8_t *code_gen_ptr;

uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
#if defined(CONFIG_USER_ONLY)

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low 12 bits */
    unsigned long phys_offset;

typedef struct VirtPageDesc {
    /* physical address of code page. It is valid only if 'valid_tag'
       matches 'virt_valid_tag' */
    target_ulong phys_addr;
    unsigned int valid_tag;
#if !defined(CONFIG_SOFTMMU)
    /* original page access rights. It is valid only if 'valid_tag'
       matches 'virt_valid_tag' */

#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)

static void io_mem_init(void);

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
static PhysPageDesc *l1_phys_map[L1_SIZE];

#if !defined(CONFIG_USER_ONLY)
static VirtPageDesc *l1_virt_map[L1_SIZE];
static unsigned int virt_valid_tag;

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb;

char *logfilename = "/tmp/qemu.log";
static void page_init(void)
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;

        VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
                       PAGE_EXECUTE_READWRITE, &old_protect);

    qemu_real_host_page_size = getpagesize();
        unsigned long start, end;

        start = (unsigned long)code_gen_buffer;
        start &= ~(qemu_real_host_page_size - 1);

        end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
        end += qemu_real_host_page_size - 1;
        end &= ~(qemu_real_host_page_size - 1);

        mprotect((void *)start, end - start,
                 PROT_READ | PROT_WRITE | PROT_EXEC);

    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
#if !defined(CONFIG_USER_ONLY)
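/* Illustrative example (not part of the original file): with a 4 KiB host
   page, the loop above yields qemu_host_page_bits = 12 and
   qemu_host_page_mask = ~(4096 - 1) = 0xfffff000, so
   (addr & qemu_host_page_mask) rounds an address down to its host page
   start, e.g. 0x12345678 & 0xfffff000 == 0x12345000. */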
static inline PageDesc *page_find_alloc(unsigned int index)
    lp = &l1_map[index >> L2_BITS];
        /* allocate if not found */
        p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
    return p + (index & (L2_SIZE - 1));

static inline PageDesc *page_find(unsigned int index)
    p = l1_map[index >> L2_BITS];
    return p + (index & (L2_SIZE - 1));
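/* Illustrative sketch (not in the original file): the page descriptor table
   is a two level trie indexed by the target page number.  Assuming the
   usual values L2_BITS = 10 and TARGET_PAGE_BITS = 12, a 32 bit address
   splits as follows:

       unsigned int index = vaddr >> TARGET_PAGE_BITS;   // page number
       PageDesc *l2 = l1_map[index >> L2_BITS];          // top L1_BITS bits
       PageDesc *pd = l2 + (index & (L2_SIZE - 1));      // low L2_BITS bits

   which is exactly what page_find() computes once the L2 block exists. */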
static inline PhysPageDesc *phys_page_find_alloc(unsigned int index)
    PhysPageDesc **lp, *p;

    lp = &l1_phys_map[index >> L2_BITS];
        /* allocate if not found */
        p = qemu_malloc(sizeof(PhysPageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PhysPageDesc) * L2_SIZE);
    return p + (index & (L2_SIZE - 1));

static inline PhysPageDesc *phys_page_find(unsigned int index)
    p = l1_phys_map[index >> L2_BITS];
    return p + (index & (L2_SIZE - 1));

#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(CPUState *env, target_ulong addr);
static void tlb_unprotect_code_phys(CPUState *env, unsigned long phys_addr, target_ulong vaddr);

static inline VirtPageDesc *virt_page_find_alloc(unsigned int index)
    VirtPageDesc **lp, *p;

    /* XXX: should not truncate for 64 bit addresses */
#if TARGET_LONG_BITS > 32
    index &= (L1_SIZE - 1);
    lp = &l1_virt_map[index >> L2_BITS];
        /* allocate if not found */
        p = qemu_malloc(sizeof(VirtPageDesc) * L2_SIZE);
        memset(p, 0, sizeof(VirtPageDesc) * L2_SIZE);
    return p + (index & (L2_SIZE - 1));

static inline VirtPageDesc *virt_page_find(unsigned int index)
    p = l1_virt_map[index >> L2_BITS];
    return p + (index & (L2_SIZE - 1));

static void virt_page_flush(void)
    if (virt_valid_tag == 0) {
        for(i = 0; i < L1_SIZE; i++) {
                for(j = 0; j < L2_SIZE; j++)

static void virt_page_flush(void)

void cpu_exec_init(void)
    code_gen_ptr = code_gen_buffer;
static inline void invalidate_page_bitmap(PageDesc *p)
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    p->code_write_count = 0;

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
    for(i = 0; i < L1_SIZE; i++) {
            for(j = 0; j < L2_SIZE; j++) {
                invalidate_page_bitmap(p);
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env)
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
           code_gen_ptr - code_gen_buffer, nb_tbs,
           nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);

    memset (tb_hash, 0, CODE_GEN_HASH_SIZE * sizeof (void *));

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(unsigned long address)
    TranslationBlock *tb;

    address &= TARGET_PAGE_MASK;
    for(i = 0; i < CODE_GEN_HASH_SIZE; i++) {
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, tb->pc, tb->size);

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0; i < CODE_GEN_HASH_SIZE; i++) {
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       tb->pc, tb->size, flags1, flags2);

void tb_jmp_check(TranslationBlock *tb)
    TranslationBlock *tb1;

    /* suppress any remaining jumps to this TB */
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb1 = tb1->jmp_next[n1];
    /* check end of list */
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
    TranslationBlock *tb1;

            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
    TranslationBlock *tb1;

        tb1 = (TranslationBlock *)((long)tb1 & ~3);
            *ptb = tb1->page_next[n1];
        ptb = &tb1->page_next[n1];

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
    TranslationBlock *tb1, **ptb;

    ptb = &tb->jmp_next[n];
        /* find tb(n) in circular list */
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                ptb = &tb1->jmp_first;
                ptb = &tb1->jmp_next[n1];
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
static inline void tb_invalidate(TranslationBlock *tb)
    TranslationBlock *tb1, *tb2, **ptb;

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_hash_func(tb->pc);
        /* NOTE: the TB is not necessarily linked in the hash. It
           indicates that it is not currently used */
            *ptb = tb1->hash_next;
        ptb = &tb1->hash_next;

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
    target_ulong phys_pc;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
static inline void set_bits(uint8_t *tab, int start, int len)
        mask = 0xff << (start & 7);
        if ((start & ~7) == (end & ~7)) {
                mask &= ~(0xff << (end & 7));
            start = (start + 8) & ~7;
        while (start < end1) {
            mask = ~(0xff << (end & 7));

static void build_page_bitmap(PageDesc *p)
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);

        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
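/* Illustrative example (not in the original file): set_bits() marks 'len'
   bits starting at bit 'start' in the page bitmap, one bit per byte of the
   page, so a TB whose code occupies page offsets [0x10, 0x30) would be
   recorded with

       set_bits(p->code_bitmap, 0x10, 0x20);

   and a later 1/2/4 byte write at offset 'o' can be tested against
   p->code_bitmap[o >> 3] before taking the slow invalidation path. */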
#ifdef TARGET_HAS_PRECISE_SMC

static void tb_gen_code(CPUState *env,
                        target_ulong pc, target_ulong cs_base, int flags,
                        int cflags)
    TranslationBlock *tb;

    target_ulong phys_pc, phys_page2, virt_page2;

    phys_pc = get_phys_addr_code(env, pc);
        /* flush must be done */
    /* cannot fail at this point */
    tc_ptr = code_gen_ptr;
    tb->cs_base = cs_base;
    cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    tb_link_phys(tb, phys_pc, phys_page2);
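/* Illustrative example (not in the original file): the code_gen_ptr update
   above is the usual round-up-to-alignment idiom.  Assuming
   CODE_GEN_ALIGN == 16 and code_gen_size == 0x3a, a pointer currently at
   0x1000 advances to

       (0x1000 + 0x3a + 16 - 1) & ~(16 - 1) == 0x1040

   so the next TB's generated code starts on a 16 byte boundary. */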
/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
                                   int is_cpu_write_access)
    int n, current_tb_modified, current_tb_not_found, current_flags;
    CPUState *env = cpu_single_env;

    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
    target_ulong tb_start, tb_end;
    target_ulong current_pc, current_cs_base;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    current_tb_not_found = is_cpu_write_access;
    current_tb_modified = 0;
    current_tb = NULL; /* avoid warning */
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                if (env->mem_write_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_write_pc);
            if (current_tb == tb &&
                !(current_tb->cflags & CF_SINGLE_INSN)) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */
                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_write_pc, NULL);
#if defined(TARGET_I386)
                current_flags = env->hflags;
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
                current_cs_base = (target_ulong)env->segs[R_CS].base;
                current_pc = current_cs_base + env->eip;
#error unsupported CPU
#endif /* TARGET_HAS_PRECISE_SMC */
            saved_tb = env->current_tb;
            env->current_tb = NULL;
            tb_phys_invalidate(tb, -1);
            env->current_tb = saved_tb;
            if (env->interrupt_request && env->current_tb)
                cpu_interrupt(env, env->interrupt_request);
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, NULL);
/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
        fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                cpu_single_env->mem_write_vaddr, len,
                cpu_single_env->eip,
                cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);

    p = page_find(start >> TARGET_PAGE_BITS);
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
        tb_invalidate_phys_page_range(start, start + len, 1);
#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_ulong addr,
                                    unsigned long pc, void *puc)
    int n, current_flags, current_tb_modified;
    target_ulong current_pc, current_cs_base;

    TranslationBlock *tb, *current_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *env = cpu_single_env;

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);

    current_tb_modified = 0;
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
#ifdef TARGET_HAS_PRECISE_SMC
        current_tb = tb_find_pc(pc);
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            !(current_tb->cflags & CF_SINGLE_INSN)) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */
            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
#if defined(TARGET_I386)
            current_flags = env->hflags;
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
            current_cs_base = (target_ulong)env->segs[R_CS].base;
            current_pc = current_cs_base + env->eip;
#error unsupported CPU
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, puc);
/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, unsigned int page_addr)
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1
#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        unsigned long host_start, host_end, addr;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        host_start = page_addr & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;
        for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE)
            prot |= page_get_flags(addr);
        mprotect((void *)host_start, qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x%08lx\n",
               host_start);
        p->flags &= ~PAGE_WRITE;

    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        target_ulong virt_addr;

        virt_addr = (tb->pc & TARGET_PAGE_MASK) + (n << TARGET_PAGE_BITS);
        tlb_protect_code(cpu_single_env, virt_addr);
#endif /* TARGET_HAS_SMC */
/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
    TranslationBlock *tb;

    if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
        (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
    TranslationBlock **ptb;

    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
        tb->page_addr[1] = -1;
#ifdef DEBUG_TB_CHECK
/* link the tb with the other TBs */
void tb_link(TranslationBlock *tb)
#if !defined(CONFIG_USER_ONLY)
        /* save the code memory mappings (needed to invalidate the code) */
        addr = tb->pc & TARGET_PAGE_MASK;
        vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS);
#ifdef DEBUG_TLB_CHECK
        if (vp->valid_tag == virt_valid_tag &&
            vp->phys_addr != tb->page_addr[0]) {
            printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
                   addr, tb->page_addr[0], vp->phys_addr);
        vp->phys_addr = tb->page_addr[0];
        if (vp->valid_tag != virt_valid_tag) {
            vp->valid_tag = virt_valid_tag;
#if !defined(CONFIG_SOFTMMU)

        if (tb->page_addr[1] != -1) {
            addr += TARGET_PAGE_SIZE;
            vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS);
#ifdef DEBUG_TLB_CHECK
            if (vp->valid_tag == virt_valid_tag &&
                vp->phys_addr != tb->page_addr[1]) {
                printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
                       addr, tb->page_addr[1], vp->phys_addr);
            vp->phys_addr = tb->page_addr[1];
            if (vp->valid_tag != virt_valid_tag) {
                vp->valid_tag = virt_valid_tag;
#if !defined(CONFIG_SOFTMMU)

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;
    tb->cflags &= ~CF_FP_USED;
    if (tb->cflags & CF_TB_FP_USED)
        tb->cflags |= CF_FP_USED;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
    TranslationBlock *tb;

    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
    /* binary search (cf Knuth) */
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        v = (unsigned long)tb->tc_ptr;
        else if (tc_ptr < v) {
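/* Illustrative sketch (not in the original file): tb_find_pc() maps a host
   PC inside the generated-code buffer back to the TB that produced it, which
   is how a fault in translated code is attributed to a guest instruction:

       TranslationBlock *tb = tb_find_pc((unsigned long)host_pc_from_signal);
       if (tb)
           cpu_restore_state(tb, env, (unsigned long)host_pc_from_signal, puc);

   'host_pc_from_signal' and 'puc' here stand for whatever the signal handler
   extracted from the machine context. */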
static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
    TranslationBlock *tb1, *tb_next, **ptb;

    tb1 = tb->jmp_next[n];
        /* find head of list */
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            tb1 = tb1->jmp_next[n1];
        /* we are now sure that tb jumps to tb1 */

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
            ptb = &tb1->jmp_next[n1];
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);

static void tb_reset_jump_recursive(TranslationBlock *tb)
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
#if defined(TARGET_I386) || defined(TARGET_PPC) || defined(TARGET_SPARC)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
    target_ulong phys_addr;

    phys_addr = cpu_get_phys_page_debug(env, pc);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + 1, 0);

/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
   breakpoint is reached */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
#if defined(TARGET_I386) || defined(TARGET_PPC) || defined(TARGET_SPARC)
    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)

    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
    env->breakpoints[env->nb_breakpoints++] = pc;

    breakpoint_invalidate(env, pc);

/* remove a breakpoint */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
#if defined(TARGET_I386) || defined(TARGET_PPC) || defined(TARGET_SPARC)
    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
    memmove(&env->breakpoints[i], &env->breakpoints[i + 1],
            (env->nb_breakpoints - (i + 1)) * sizeof(env->breakpoints[0]));
    env->nb_breakpoints--;

    breakpoint_invalidate(env, pc);
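/* Illustrative usage (not in the original file): a debugger stub is expected
   to drive this API roughly as follows:

       if (cpu_breakpoint_insert(env, guest_pc) < 0)
           return -1;            // table full (MAX_BREAKPOINTS reached)
       ...
       cpu_breakpoint_remove(env, guest_pc);

   Each call also invalidates the TBs covering 'pc' so the breakpoint check
   is retranslated into the affected code. */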
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
#if defined(TARGET_I386) || defined(TARGET_PPC) || defined(TARGET_SPARC)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */

/* enable or disable low level logging */
void cpu_set_log(int log_flags)
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, "w");
            perror(logfilename);
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        setvbuf(logfile, NULL, _IOLBF, 0);

void cpu_set_log_filename(const char *filename)
    logfilename = strdup(filename);
/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
    TranslationBlock *tb;
    static int interrupt_lock;

    env->interrupt_request |= mask;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    tb = env->current_tb;
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);

void cpu_reset_interrupt(CPUState *env, int mask)
    env->interrupt_request &= ~mask;
CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops after optimization for each compiled TB" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o port accesses" },
static int cmp1(const char *s1, int n, const char *s2)
    if (strlen(s2) != n)
    return memcmp(s1, s2, n) == 0;

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
        p1 = strchr(p, ',');
        if (cmp1(p, p1 - p, "all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
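/* Illustrative usage (not in the original file): a command line log option
   can feed its argument straight to this parser, e.g.

       int mask = cpu_str_to_log_mask("in_asm,op,cpu");
       if (!mask)
           ...   // unknown item: report the valid names from cpu_log_items
       cpu_set_log(mask);

   "all" selects every entry in cpu_log_items at once. */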
void cpu_abort(CPUState *env, const char *fmt, ...)
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
    cpu_dump_state(env, stderr, fprintf, 0);
#if !defined(CONFIG_USER_ONLY)

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_read[0][i].address = -1;
        env->tlb_write[0][i].address = -1;
        env->tlb_read[1][i].address = -1;
        env->tlb_write[1][i].address = -1;

    memset (tb_hash, 0, CODE_GEN_HASH_SIZE * sizeof (void *));

#if !defined(CONFIG_SOFTMMU)
    munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
    if (addr == (tlb_entry->address &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)))
        tlb_entry->address = -1;
void tlb_flush_page(CPUState *env, target_ulong addr)
    TranslationBlock *tb;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: 0x%08x\n", addr);
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_read[0][i], addr);
    tlb_flush_entry(&env->tlb_write[0][i], addr);
    tlb_flush_entry(&env->tlb_read[1][i], addr);
    tlb_flush_entry(&env->tlb_write[1][i], addr);

    /* remove from the virtual pc hash table all the TB at this
       virtual address */
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (vp && vp->valid_tag == virt_valid_tag) {
        p = page_find(vp->phys_addr >> TARGET_PAGE_BITS);
            /* we remove all the links to the TBs in this virtual page */
            while (tb != NULL) {
                tb = (TranslationBlock *)((long)tb & ~3);
                if ((tb->pc & TARGET_PAGE_MASK) == addr ||
                    ((tb->pc + tb->size - 1) & TARGET_PAGE_MASK) == addr) {
                tb = tb->page_next[n];

#if !defined(CONFIG_SOFTMMU)
    if (addr < MMAP_AREA_END)
        munmap((void *)addr, TARGET_PAGE_SIZE);
static inline void tlb_protect_code1(CPUTLBEntry *tlb_entry, target_ulong addr)
    if (addr == (tlb_entry->address &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) &&
        (tlb_entry->address & ~TARGET_PAGE_MASK) != IO_MEM_CODE &&
        (tlb_entry->address & ~TARGET_PAGE_MASK) != IO_MEM_ROM) {
        tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_CODE;

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(CPUState *env, target_ulong addr)
    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_protect_code1(&env->tlb_write[0][i], addr);
    tlb_protect_code1(&env->tlb_write[1][i], addr);
#if !defined(CONFIG_SOFTMMU)
    /* NOTE: as we generated the code for this page, it is already at
       least readable */
    if (addr < MMAP_AREA_END)
        mprotect((void *)addr, TARGET_PAGE_SIZE, PROT_READ);
static inline void tlb_unprotect_code2(CPUTLBEntry *tlb_entry,
                                       unsigned long phys_addr)
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_CODE &&
        ((tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend) == phys_addr) {
        tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, unsigned long phys_addr, target_ulong vaddr)
    phys_addr &= TARGET_PAGE_MASK;
    phys_addr += (long)phys_ram_base;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_unprotect_code2(&env->tlb_write[0][i], phys_addr);
    tlb_unprotect_code2(&env->tlb_write[1][i], phys_addr);
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;

void cpu_physical_memory_reset_dirty(target_ulong start, target_ulong end)
    unsigned long length, start1;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    memset(phys_ram_dirty + (start >> TARGET_PAGE_BITS), 0, length >> TARGET_PAGE_BITS);

    env = cpu_single_env;
    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = start + (unsigned long)phys_ram_base;
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_reset_dirty_range(&env->tlb_write[0][i], start1, length);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_reset_dirty_range(&env->tlb_write[1][i], start1, length);

#if !defined(CONFIG_SOFTMMU)
    /* XXX: this is expensive */
        for(i = 0; i < L1_SIZE; i++) {
                addr = i << (TARGET_PAGE_BITS + L2_BITS);
                for(j = 0; j < L2_SIZE; j++) {
                    if (p->valid_tag == virt_valid_tag &&
                        p->phys_addr >= start && p->phys_addr < end &&
                        (p->prot & PROT_WRITE)) {
                        if (addr < MMAP_AREA_END) {
                            mprotect((void *)addr, TARGET_PAGE_SIZE,
                                     p->prot & ~PROT_WRITE);
                    addr += TARGET_PAGE_SIZE;
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
                                  unsigned long start)
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
        addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
        if (addr == start) {
            tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_RAM;

/* update the TLB corresponding to virtual page vaddr and phys addr
   addr so that it is no longer dirty */
static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)
    CPUState *env = cpu_single_env;

    phys_ram_dirty[(addr - (unsigned long)phys_ram_base) >> TARGET_PAGE_BITS] = 1;

    addr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_set_dirty1(&env->tlb_write[0][i], addr);
    tlb_set_dirty1(&env->tlb_write[1][i], addr);
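/* Illustrative flow (not in the original file): dirty tracking ties the
   pieces above together.  cpu_physical_memory_reset_dirty() clears the dirty
   bytes and downgrades matching TLB write entries to IO_MEM_NOTDIRTY; the
   next guest store then goes through the notdirty_mem_write* handlers
   defined further down, e.g.

       notdirty_mem_writel(NULL, host_addr, val);   // stl_p + tlb_set_dirty

   which performs the store, sets phys_ram_dirty[] for the page and restores
   the fast IO_MEM_RAM mapping so later stores skip the slow path. */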
/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page(CPUState *env, target_ulong vaddr,
                 target_phys_addr_t paddr, int prot,
                 int is_user, int is_softmmu)
    TranslationBlock *first_tb;
    target_ulong address;
    unsigned long addend;

    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
        pd = IO_MEM_UNASSIGNED;
        pd = p->phys_offset;
        if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
            /* NOTE: we also allocate the page at this stage */
            p1 = page_find_alloc(pd >> TARGET_PAGE_BITS);
            first_tb = p1->first_tb;
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=0x%08x paddr=0x%08x prot=%x u=%d c=%d smmu=%d pd=0x%08x\n",
           vaddr, paddr, prot, is_user, (first_tb != NULL), is_softmmu, pd);
#if !defined(CONFIG_SOFTMMU)
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO memory case */
            address = vaddr | pd;
            /* standard memory */
            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);

        index = (vaddr >> 12) & (CPU_TLB_SIZE - 1);
        if (prot & PAGE_READ) {
            env->tlb_read[is_user][index].address = address;
            env->tlb_read[is_user][index].addend = addend;
            env->tlb_read[is_user][index].address = -1;
            env->tlb_read[is_user][index].addend = -1;
        if (prot & PAGE_WRITE) {
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM) {
                /* ROM: access is ignored (same as unassigned) */
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_ROM;
                env->tlb_write[is_user][index].addend = addend;
                /* XXX: the PowerPC code seems not ready to handle
                   self modifying code with DCBI */
#if defined(TARGET_HAS_SMC) || 1
                /* if code is present, we use a specific memory
                   handler. It works only for physical memory access */
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_CODE;
                env->tlb_write[is_user][index].addend = addend;
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                !cpu_physical_memory_is_dirty(pd)) {
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_NOTDIRTY;
                env->tlb_write[is_user][index].addend = addend;
                env->tlb_write[is_user][index].address = address;
                env->tlb_write[is_user][index].addend = addend;
            env->tlb_write[is_user][index].address = -1;
            env->tlb_write[is_user][index].addend = -1;
#if !defined(CONFIG_SOFTMMU)
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO access: no mapping is done as it will be handled by the
               soft MMU */
            if (!(env->hflags & HF_SOFTMMU_MASK))
            if (vaddr >= MMAP_AREA_END) {
                if (prot & PROT_WRITE) {
                    if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
#if defined(TARGET_HAS_SMC) || 1
                        ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                         !cpu_physical_memory_is_dirty(pd))) {
                        /* ROM: we do as if code was inside */
                        /* if code is present, we only map as read only and save the
                           original mapping */
                        vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS);
                        vp->valid_tag = virt_valid_tag;
                        prot &= ~PAGE_WRITE;
                map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
                                MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
                if (map_addr == MAP_FAILED) {
                    cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(unsigned long addr, unsigned long pc, void *puc)
#if !defined(CONFIG_SOFTMMU)
#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x\n", addr);
    addr &= TARGET_PAGE_MASK;

    /* if it is not mapped, no need to worry here */
    if (addr >= MMAP_AREA_END)
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    /* NOTE: in this case, validate_tag is _not_ tested as it
       validates only the code TLB */
    if (vp->valid_tag != virt_valid_tag)
    if (!(vp->prot & PAGE_WRITE))
#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
           addr, vp->phys_addr, vp->prot);
    if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
        cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
                  (unsigned long)addr, vp->prot);
    /* set the dirty bit */
    phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 1;
    /* flush the code inside */
    tb_invalidate_phys_page(vp->phys_addr, pc, puc);
void tlb_flush(CPUState *env, int flush_global)

void tlb_flush_page(CPUState *env, target_ulong addr)

int tlb_set_page(CPUState *env, target_ulong vaddr,
                 target_phys_addr_t paddr, int prot,
                 int is_user, int is_softmmu)

/* dump memory mappings */
void page_dump(FILE *f)
    unsigned long start, end;
    int i, j, prot, prot1;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    for(i = 0; i <= L1_SIZE; i++) {
        for(j = 0; j < L2_SIZE; j++) {
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
int page_get_flags(unsigned long address)
    p = page_find(address >> TARGET_PAGE_BITS);

/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is positioned automatically
   depending on PAGE_WRITE */
void page_set_flags(unsigned long start, unsigned long end, int flags)
    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    spin_lock(&tb_lock);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* if the write protection is set, then we invalidate the code
           inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
    spin_unlock(&tb_lock);
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(unsigned long address, unsigned long pc, void *puc)
    unsigned int page_index, prot, pindex;
    unsigned long host_start, host_end, addr;

    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    host_end = host_start + qemu_host_page_size;
    for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)host_start, qemu_host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
/* call this function when system calls directly modify a memory area */
void page_unprotect_range(uint8_t *data, unsigned long data_size)
    unsigned long start, end, addr;

    start = (unsigned long)data;
    end = start + data_size;
    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        page_unprotect(addr, 0, NULL);

static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)

#endif /* defined(CONFIG_USER_ONLY) */
/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                  unsigned long size,
                                  unsigned long phys_offset)
    unsigned long addr, end_addr;

    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS);
        p->phys_offset = phys_offset;
        if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM)
            phys_offset += TARGET_PAGE_SIZE;
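/* Illustrative usage (not in the original file): a machine model maps guest
   RAM, ROM and a device into the physical page table roughly like this,
   where ram_size, bios_offset and mmio_index are placeholders supplied by
   the caller:

       cpu_register_physical_memory(0, ram_size, IO_MEM_RAM);
       cpu_register_physical_memory(0xfffe0000, 0x20000,
                                    bios_offset | IO_MEM_ROM);
       cpu_register_physical_memory(0xfc000000, 0x1000,
                                    mmio_index);  // from cpu_register_io_memory()

   The low bits of phys_offset select the access handler, the high bits give
   the offset of the backing page inside phys_ram_base. */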
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)

static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readb,
    unassigned_mem_readb,

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writeb,
    unassigned_mem_writeb,

/* self modifying code support in soft mmu mode : writing to a page
   containing code comes to these functions */

static void code_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
    unsigned long phys_addr;

    phys_addr = addr - (unsigned long)phys_ram_base;
#if !defined(CONFIG_USER_ONLY)
    tb_invalidate_phys_page_fast(phys_addr, 1);
    stb_p((uint8_t *)(long)addr, val);
    phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;

static void code_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
    unsigned long phys_addr;

    phys_addr = addr - (unsigned long)phys_ram_base;
#if !defined(CONFIG_USER_ONLY)
    tb_invalidate_phys_page_fast(phys_addr, 2);
    stw_p((uint8_t *)(long)addr, val);
    phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;

static void code_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
    unsigned long phys_addr;

    phys_addr = addr - (unsigned long)phys_ram_base;
#if !defined(CONFIG_USER_ONLY)
    tb_invalidate_phys_page_fast(phys_addr, 4);
    stl_p((uint8_t *)(long)addr, val);
    phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;

static CPUReadMemoryFunc *code_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */

static CPUWriteMemoryFunc *code_mem_write[3] = {

static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
    stb_p((uint8_t *)(long)addr, val);
    tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);

static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
    stw_p((uint8_t *)(long)addr, val);
    tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);

static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
    stl_p((uint8_t *)(long)addr, val);
    tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);

static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
static void io_mem_init(void)
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, code_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_CODE >> IO_MEM_SHIFT, code_mem_read, code_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, code_mem_read, notdirty_mem_write, NULL);

    /* alloc dirty bits array */
    phys_ram_dirty = qemu_malloc(phys_ram_size >> TARGET_PAGE_BITS);
/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). All functions must be supplied. If io_index is non zero, the
   corresponding io zone is modified. If it is zero, a new io zone is
   allocated. The return value can be used with
   cpu_register_physical_memory(). (-1) is returned if error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
    if (io_index <= 0) {
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
        io_index = io_mem_nb++;
        if (io_index >= IO_MEM_NB_ENTRIES)

    for(i = 0; i < 3; i++) {
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    io_mem_opaque[io_index] = opaque;
    return io_index << IO_MEM_SHIFT;
CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
    return io_mem_write[io_index >> IO_MEM_SHIFT];

CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
    return io_mem_read[io_index >> IO_MEM_SHIFT];
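/* Illustrative usage (not in the original file): a device supplies one
   callback per access size and registers them; the names below are
   placeholders, not functions defined elsewhere:

       static CPUReadMemoryFunc *my_dev_read[3] = {
           my_dev_readb, my_dev_readw, my_dev_readl,
       };
       static CPUWriteMemoryFunc *my_dev_write[3] = {
           my_dev_writeb, my_dev_writew, my_dev_writel,
       };

       int mmio = cpu_register_io_memory(0, my_dev_read, my_dev_write, s);
       cpu_register_physical_memory(base_addr, 0x1000, mmio);

   Passing 0 allocates a fresh io zone; the returned value already has
   IO_MEM_SHIFT applied, so it can be stored directly as a phys_offset. */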
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;

        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            if (!(flags & PAGE_WRITE))
            memcpy((uint8_t *)addr, buf, len);
            if (!(flags & PAGE_READ))
            memcpy(buf, (uint8_t *)addr, len);
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
    target_phys_addr_t page;

        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;

        p = phys_page_find(page >> TARGET_PAGE_BITS);
            pd = IO_MEM_UNASSIGNED;
            pd = p->phys_offset;

            if ((pd & ~TARGET_PAGE_MASK) != 0) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                phys_ram_dirty[page >> TARGET_PAGE_BITS] = 1;
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                (pd & ~TARGET_PAGE_MASK) != IO_MEM_CODE) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
    target_ulong page, phys_addr;

        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
        l = (page + TARGET_PAGE_SIZE) - addr;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"