2 * virtual page mapping and translated block handling
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
28 #if !defined(CONFIG_SOFTMMU)
35 //#define DEBUG_TB_INVALIDATE
39 /* make various TB consistency checks */
40 //#define DEBUG_TB_CHECK
41 //#define DEBUG_TLB_CHECK
43 /* threshold to flush the translated code buffer */
44 #define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)
46 #define SMC_BITMAP_USE_THRESHOLD 10
48 #define MMAP_AREA_START 0x00000000
49 #define MMAP_AREA_END 0xa8000000
51 TranslationBlock tbs
[CODE_GEN_MAX_BLOCKS
];
52 TranslationBlock
*tb_hash
[CODE_GEN_HASH_SIZE
];
53 TranslationBlock
*tb_phys_hash
[CODE_GEN_PHYS_HASH_SIZE
];
55 /* any access to the tbs or the page table must use this lock */
56 spinlock_t tb_lock
= SPIN_LOCK_UNLOCKED
;
58 uint8_t code_gen_buffer
[CODE_GEN_BUFFER_SIZE
];
59 uint8_t *code_gen_ptr
;
63 uint8_t *phys_ram_base
;
64 uint8_t *phys_ram_dirty
;
66 typedef struct PageDesc
{
67 /* offset in host memory of the page + io_index in the low 12 bits */
68 unsigned long phys_offset
;
69 /* list of TBs intersecting this physical page */
70 TranslationBlock
*first_tb
;
71 /* in order to optimize self modifying code, we count the number
72 of lookups we do to a given page to use a bitmap */
73 unsigned int code_write_count
;
75 #if defined(CONFIG_USER_ONLY)
80 typedef struct VirtPageDesc
{
81 /* physical address of code page. It is valid only if 'valid_tag'
82 matches 'virt_valid_tag' */
83 target_ulong phys_addr
;
84 unsigned int valid_tag
;
85 #if !defined(CONFIG_SOFTMMU)
86 /* original page access rights. It is valid only if 'valid_tag'
87 matches 'virt_valid_tag' */
93 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
95 #define L1_SIZE (1 << L1_BITS)
96 #define L2_SIZE (1 << L2_BITS)
98 static void io_mem_init(void);
100 unsigned long real_host_page_size
;
101 unsigned long host_page_bits
;
102 unsigned long host_page_size
;
103 unsigned long host_page_mask
;
105 static PageDesc
*l1_map
[L1_SIZE
];
107 #if !defined(CONFIG_USER_ONLY)
108 static VirtPageDesc
*l1_virt_map
[L1_SIZE
];
109 static unsigned int virt_valid_tag
;
112 /* io memory support */
113 CPUWriteMemoryFunc
*io_mem_write
[IO_MEM_NB_ENTRIES
][4];
114 CPUReadMemoryFunc
*io_mem_read
[IO_MEM_NB_ENTRIES
][4];
115 static int io_mem_nb
;
118 char *logfilename
= "/tmp/qemu.log";
122 static void page_init(void)
124 /* NOTE: we can always suppose that host_page_size >=
127 real_host_page_size
= 4096;
129 real_host_page_size
= getpagesize();
131 if (host_page_size
== 0)
132 host_page_size
= real_host_page_size
;
133 if (host_page_size
< TARGET_PAGE_SIZE
)
134 host_page_size
= TARGET_PAGE_SIZE
;
136 while ((1 << host_page_bits
) < host_page_size
)
138 host_page_mask
= ~(host_page_size
- 1);
139 #if !defined(CONFIG_USER_ONLY)
144 static inline PageDesc
*page_find_alloc(unsigned int index
)
148 lp
= &l1_map
[index
>> L2_BITS
];
151 /* allocate if not found */
152 p
= qemu_malloc(sizeof(PageDesc
) * L2_SIZE
);
153 memset(p
, 0, sizeof(PageDesc
) * L2_SIZE
);
156 return p
+ (index
& (L2_SIZE
- 1));
159 static inline PageDesc
*page_find(unsigned int index
)
163 p
= l1_map
[index
>> L2_BITS
];
166 return p
+ (index
& (L2_SIZE
- 1));
169 #if !defined(CONFIG_USER_ONLY)
170 static void tlb_protect_code(CPUState
*env
, target_ulong addr
);
171 static void tlb_unprotect_code_phys(CPUState
*env
, unsigned long phys_addr
, target_ulong vaddr
);
173 static inline VirtPageDesc
*virt_page_find_alloc(unsigned int index
)
175 VirtPageDesc
**lp
, *p
;
177 lp
= &l1_virt_map
[index
>> L2_BITS
];
180 /* allocate if not found */
181 p
= qemu_malloc(sizeof(VirtPageDesc
) * L2_SIZE
);
182 memset(p
, 0, sizeof(VirtPageDesc
) * L2_SIZE
);
185 return p
+ (index
& (L2_SIZE
- 1));
188 static inline VirtPageDesc
*virt_page_find(unsigned int index
)
192 p
= l1_virt_map
[index
>> L2_BITS
];
195 return p
+ (index
& (L2_SIZE
- 1));
198 static void virt_page_flush(void)
205 if (virt_valid_tag
== 0) {
207 for(i
= 0; i
< L1_SIZE
; i
++) {
210 for(j
= 0; j
< L2_SIZE
; j
++)
217 static void virt_page_flush(void)
222 void cpu_exec_init(void)
225 code_gen_ptr
= code_gen_buffer
;
231 static inline void invalidate_page_bitmap(PageDesc
*p
)
233 if (p
->code_bitmap
) {
234 qemu_free(p
->code_bitmap
);
235 p
->code_bitmap
= NULL
;
237 p
->code_write_count
= 0;
240 /* set to NULL all the 'first_tb' fields in all PageDescs */
241 static void page_flush_tb(void)
246 for(i
= 0; i
< L1_SIZE
; i
++) {
249 for(j
= 0; j
< L2_SIZE
; j
++) {
251 invalidate_page_bitmap(p
);
258 /* flush all the translation blocks */
259 /* XXX: tb_flush is currently not thread safe */
260 void tb_flush(CPUState
*env
)
263 #if defined(DEBUG_FLUSH)
264 printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
265 code_gen_ptr
- code_gen_buffer
,
267 nb_tbs
> 0 ? (code_gen_ptr
- code_gen_buffer
) / nb_tbs
: 0);
270 for(i
= 0;i
< CODE_GEN_HASH_SIZE
; i
++)
274 for(i
= 0;i
< CODE_GEN_PHYS_HASH_SIZE
; i
++)
275 tb_phys_hash
[i
] = NULL
;
278 code_gen_ptr
= code_gen_buffer
;
279 /* XXX: flush processor icache at this point if cache flush is
283 #ifdef DEBUG_TB_CHECK
285 static void tb_invalidate_check(unsigned long address
)
287 TranslationBlock
*tb
;
289 address
&= TARGET_PAGE_MASK
;
290 for(i
= 0;i
< CODE_GEN_HASH_SIZE
; i
++) {
291 for(tb
= tb_hash
[i
]; tb
!= NULL
; tb
= tb
->hash_next
) {
292 if (!(address
+ TARGET_PAGE_SIZE
<= tb
->pc
||
293 address
>= tb
->pc
+ tb
->size
)) {
294 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
295 address
, tb
->pc
, tb
->size
);
301 /* verify that all the pages have correct rights for code */
302 static void tb_page_check(void)
304 TranslationBlock
*tb
;
305 int i
, flags1
, flags2
;
307 for(i
= 0;i
< CODE_GEN_HASH_SIZE
; i
++) {
308 for(tb
= tb_hash
[i
]; tb
!= NULL
; tb
= tb
->hash_next
) {
309 flags1
= page_get_flags(tb
->pc
);
310 flags2
= page_get_flags(tb
->pc
+ tb
->size
- 1);
311 if ((flags1
& PAGE_WRITE
) || (flags2
& PAGE_WRITE
)) {
312 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
313 tb
->pc
, tb
->size
, flags1
, flags2
);
319 void tb_jmp_check(TranslationBlock
*tb
)
321 TranslationBlock
*tb1
;
324 /* suppress any remaining jumps to this TB */
328 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
331 tb1
= tb1
->jmp_next
[n1
];
333 /* check end of list */
335 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb
);
341 /* invalidate one TB */
342 static inline void tb_remove(TranslationBlock
**ptb
, TranslationBlock
*tb
,
345 TranslationBlock
*tb1
;
349 *ptb
= *(TranslationBlock
**)((char *)tb1
+ next_offset
);
352 ptb
= (TranslationBlock
**)((char *)tb1
+ next_offset
);
356 static inline void tb_page_remove(TranslationBlock
**ptb
, TranslationBlock
*tb
)
358 TranslationBlock
*tb1
;
364 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
366 *ptb
= tb1
->page_next
[n1
];
369 ptb
= &tb1
->page_next
[n1
];
373 static inline void tb_jmp_remove(TranslationBlock
*tb
, int n
)
375 TranslationBlock
*tb1
, **ptb
;
378 ptb
= &tb
->jmp_next
[n
];
381 /* find tb(n) in circular list */
385 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
386 if (n1
== n
&& tb1
== tb
)
389 ptb
= &tb1
->jmp_first
;
391 ptb
= &tb1
->jmp_next
[n1
];
394 /* now we can suppress tb(n) from the list */
395 *ptb
= tb
->jmp_next
[n
];
397 tb
->jmp_next
[n
] = NULL
;
401 /* reset the jump entry 'n' of a TB so that it is not chained to
403 static inline void tb_reset_jump(TranslationBlock
*tb
, int n
)
405 tb_set_jmp_target(tb
, n
, (unsigned long)(tb
->tc_ptr
+ tb
->tb_next_offset
[n
]));
408 static inline void tb_invalidate(TranslationBlock
*tb
)
411 TranslationBlock
*tb1
, *tb2
, **ptb
;
413 tb_invalidated_flag
= 1;
415 /* remove the TB from the hash list */
416 h
= tb_hash_func(tb
->pc
);
420 /* NOTE: the TB is not necessarily linked in the hash. It
421 indicates that it is not currently used */
425 *ptb
= tb1
->hash_next
;
428 ptb
= &tb1
->hash_next
;
431 /* suppress this TB from the two jump lists */
432 tb_jmp_remove(tb
, 0);
433 tb_jmp_remove(tb
, 1);
435 /* suppress any remaining jumps to this TB */
441 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
442 tb2
= tb1
->jmp_next
[n1
];
443 tb_reset_jump(tb1
, n1
);
444 tb1
->jmp_next
[n1
] = NULL
;
447 tb
->jmp_first
= (TranslationBlock
*)((long)tb
| 2); /* fail safe */
450 static inline void tb_phys_invalidate(TranslationBlock
*tb
, unsigned int page_addr
)
454 target_ulong phys_pc
;
456 /* remove the TB from the hash list */
457 phys_pc
= tb
->page_addr
[0] + (tb
->pc
& ~TARGET_PAGE_MASK
);
458 h
= tb_phys_hash_func(phys_pc
);
459 tb_remove(&tb_phys_hash
[h
], tb
,
460 offsetof(TranslationBlock
, phys_hash_next
));
462 /* remove the TB from the page list */
463 if (tb
->page_addr
[0] != page_addr
) {
464 p
= page_find(tb
->page_addr
[0] >> TARGET_PAGE_BITS
);
465 tb_page_remove(&p
->first_tb
, tb
);
466 invalidate_page_bitmap(p
);
468 if (tb
->page_addr
[1] != -1 && tb
->page_addr
[1] != page_addr
) {
469 p
= page_find(tb
->page_addr
[1] >> TARGET_PAGE_BITS
);
470 tb_page_remove(&p
->first_tb
, tb
);
471 invalidate_page_bitmap(p
);
477 static inline void set_bits(uint8_t *tab
, int start
, int len
)
483 mask
= 0xff << (start
& 7);
484 if ((start
& ~7) == (end
& ~7)) {
486 mask
&= ~(0xff << (end
& 7));
491 start
= (start
+ 8) & ~7;
493 while (start
< end1
) {
498 mask
= ~(0xff << (end
& 7));
504 static void build_page_bitmap(PageDesc
*p
)
506 int n
, tb_start
, tb_end
;
507 TranslationBlock
*tb
;
509 p
->code_bitmap
= qemu_malloc(TARGET_PAGE_SIZE
/ 8);
512 memset(p
->code_bitmap
, 0, TARGET_PAGE_SIZE
/ 8);
517 tb
= (TranslationBlock
*)((long)tb
& ~3);
518 /* NOTE: this is subtle as a TB may span two physical pages */
520 /* NOTE: tb_end may be after the end of the page, but
521 it is not a problem */
522 tb_start
= tb
->pc
& ~TARGET_PAGE_MASK
;
523 tb_end
= tb_start
+ tb
->size
;
524 if (tb_end
> TARGET_PAGE_SIZE
)
525 tb_end
= TARGET_PAGE_SIZE
;
528 tb_end
= ((tb
->pc
+ tb
->size
) & ~TARGET_PAGE_MASK
);
530 set_bits(p
->code_bitmap
, tb_start
, tb_end
- tb_start
);
531 tb
= tb
->page_next
[n
];
535 #ifdef TARGET_HAS_PRECISE_SMC
537 static void tb_gen_code(CPUState
*env
,
538 target_ulong pc
, target_ulong cs_base
, int flags
,
541 TranslationBlock
*tb
;
543 target_ulong phys_pc
, phys_page2
, virt_page2
;
546 phys_pc
= get_phys_addr_code(env
, (unsigned long)pc
);
547 tb
= tb_alloc((unsigned long)pc
);
549 /* flush must be done */
551 /* cannot fail at this point */
552 tb
= tb_alloc((unsigned long)pc
);
554 tc_ptr
= code_gen_ptr
;
556 tb
->cs_base
= cs_base
;
559 cpu_gen_code(env
, tb
, CODE_GEN_MAX_SIZE
, &code_gen_size
);
560 code_gen_ptr
= (void *)(((unsigned long)code_gen_ptr
+ code_gen_size
+ CODE_GEN_ALIGN
- 1) & ~(CODE_GEN_ALIGN
- 1));
562 /* check next page if needed */
563 virt_page2
= ((unsigned long)pc
+ tb
->size
- 1) & TARGET_PAGE_MASK
;
565 if (((unsigned long)pc
& TARGET_PAGE_MASK
) != virt_page2
) {
566 phys_page2
= get_phys_addr_code(env
, virt_page2
);
568 tb_link_phys(tb
, phys_pc
, phys_page2
);
572 /* invalidate all TBs which intersect with the target physical page
573 starting in range [start;end[. NOTE: start and end must refer to
574 the same physical page. 'is_cpu_write_access' should be true if called
575 from a real cpu write access: the virtual CPU will exit the current
576 TB if code is modified inside this TB. */
577 void tb_invalidate_phys_page_range(target_ulong start
, target_ulong end
,
578 int is_cpu_write_access
)
580 int n
, current_tb_modified
, current_tb_not_found
, current_flags
;
581 #if defined(TARGET_HAS_PRECISE_SMC) || !defined(CONFIG_USER_ONLY)
582 CPUState
*env
= cpu_single_env
;
585 TranslationBlock
*tb
, *tb_next
, *current_tb
;
586 target_ulong tb_start
, tb_end
;
587 target_ulong current_pc
, current_cs_base
;
589 p
= page_find(start
>> TARGET_PAGE_BITS
);
592 if (!p
->code_bitmap
&&
593 ++p
->code_write_count
>= SMC_BITMAP_USE_THRESHOLD
&&
594 is_cpu_write_access
) {
595 /* build code bitmap */
596 build_page_bitmap(p
);
599 /* we remove all the TBs in the range [start, end[ */
600 /* XXX: see if in some cases it could be faster to invalidate all the code */
601 current_tb_not_found
= is_cpu_write_access
;
602 current_tb_modified
= 0;
603 current_tb
= NULL
; /* avoid warning */
604 current_pc
= 0; /* avoid warning */
605 current_cs_base
= 0; /* avoid warning */
606 current_flags
= 0; /* avoid warning */
610 tb
= (TranslationBlock
*)((long)tb
& ~3);
611 tb_next
= tb
->page_next
[n
];
612 /* NOTE: this is subtle as a TB may span two physical pages */
614 /* NOTE: tb_end may be after the end of the page, but
615 it is not a problem */
616 tb_start
= tb
->page_addr
[0] + (tb
->pc
& ~TARGET_PAGE_MASK
);
617 tb_end
= tb_start
+ tb
->size
;
619 tb_start
= tb
->page_addr
[1];
620 tb_end
= tb_start
+ ((tb
->pc
+ tb
->size
) & ~TARGET_PAGE_MASK
);
622 if (!(tb_end
<= start
|| tb_start
>= end
)) {
623 #ifdef TARGET_HAS_PRECISE_SMC
624 if (current_tb_not_found
) {
625 current_tb_not_found
= 0;
627 if (env
->mem_write_pc
) {
628 /* now we have a real cpu fault */
629 current_tb
= tb_find_pc(env
->mem_write_pc
);
632 if (current_tb
== tb
&&
633 !(current_tb
->cflags
& CF_SINGLE_INSN
)) {
634 /* If we are modifying the current TB, we must stop
635 its execution. We could be more precise by checking
636 that the modification is after the current PC, but it
637 would require a specialized function to partially
638 restore the CPU state */
640 current_tb_modified
= 1;
641 cpu_restore_state(current_tb
, env
,
642 env
->mem_write_pc
, NULL
);
643 #if defined(TARGET_I386)
644 current_flags
= env
->hflags
;
645 current_flags
|= (env
->eflags
& (IOPL_MASK
| TF_MASK
| VM_MASK
));
646 current_cs_base
= (target_ulong
)env
->segs
[R_CS
].base
;
647 current_pc
= current_cs_base
+ env
->eip
;
649 #error unsupported CPU
652 #endif /* TARGET_HAS_PRECISE_SMC */
653 tb_phys_invalidate(tb
, -1);
657 #if !defined(CONFIG_USER_ONLY)
658 /* if no code remaining, no need to continue to use slow writes */
660 invalidate_page_bitmap(p
);
661 if (is_cpu_write_access
) {
662 tlb_unprotect_code_phys(env
, start
, env
->mem_write_vaddr
);
666 #ifdef TARGET_HAS_PRECISE_SMC
667 if (current_tb_modified
) {
668 /* we generate a block containing just the instruction
669 modifying the memory. It will ensure that it cannot modify
671 tb_gen_code(env
, current_pc
, current_cs_base
, current_flags
,
673 cpu_resume_from_signal(env
, NULL
);
678 /* len must be <= 8 and start must be a multiple of len */
679 static inline void tb_invalidate_phys_page_fast(target_ulong start
, int len
)
684 if (cpu_single_env
->cr
[0] & CR0_PE_MASK
) {
685 printf("modifying code at 0x%x size=%d EIP=%x\n",
686 (vaddr
& TARGET_PAGE_MASK
) | (start
& ~TARGET_PAGE_MASK
), len
,
687 cpu_single_env
->eip
);
690 p
= page_find(start
>> TARGET_PAGE_BITS
);
693 if (p
->code_bitmap
) {
694 offset
= start
& ~TARGET_PAGE_MASK
;
695 b
= p
->code_bitmap
[offset
>> 3] >> (offset
& 7);
696 if (b
& ((1 << len
) - 1))
700 tb_invalidate_phys_page_range(start
, start
+ len
, 1);
704 #if !defined(CONFIG_SOFTMMU)
705 static void tb_invalidate_phys_page(target_ulong addr
,
706 unsigned long pc
, void *puc
)
708 int n
, current_flags
, current_tb_modified
;
709 target_ulong current_pc
, current_cs_base
;
711 TranslationBlock
*tb
, *current_tb
;
712 #ifdef TARGET_HAS_PRECISE_SMC
713 CPUState
*env
= cpu_single_env
;
716 addr
&= TARGET_PAGE_MASK
;
717 p
= page_find(addr
>> TARGET_PAGE_BITS
);
721 current_tb_modified
= 0;
723 current_pc
= 0; /* avoid warning */
724 current_cs_base
= 0; /* avoid warning */
725 current_flags
= 0; /* avoid warning */
726 #ifdef TARGET_HAS_PRECISE_SMC
728 current_tb
= tb_find_pc(pc
);
733 tb
= (TranslationBlock
*)((long)tb
& ~3);
734 #ifdef TARGET_HAS_PRECISE_SMC
735 if (current_tb
== tb
&&
736 !(current_tb
->cflags
& CF_SINGLE_INSN
)) {
737 /* If we are modifying the current TB, we must stop
738 its execution. We could be more precise by checking
739 that the modification is after the current PC, but it
740 would require a specialized function to partially
741 restore the CPU state */
743 current_tb_modified
= 1;
744 cpu_restore_state(current_tb
, env
, pc
, puc
);
745 #if defined(TARGET_I386)
746 current_flags
= env
->hflags
;
747 current_flags
|= (env
->eflags
& (IOPL_MASK
| TF_MASK
| VM_MASK
));
748 current_cs_base
= (target_ulong
)env
->segs
[R_CS
].base
;
749 current_pc
= current_cs_base
+ env
->eip
;
751 #error unsupported CPU
754 #endif /* TARGET_HAS_PRECISE_SMC */
755 tb_phys_invalidate(tb
, addr
);
756 tb
= tb
->page_next
[n
];
759 #ifdef TARGET_HAS_PRECISE_SMC
760 if (current_tb_modified
) {
761 /* we generate a block containing just the instruction
762 modifying the memory. It will ensure that it cannot modify
764 tb_gen_code(env
, current_pc
, current_cs_base
, current_flags
,
766 cpu_resume_from_signal(env
, puc
);
772 /* add the tb in the target page and protect it if necessary */
773 static inline void tb_alloc_page(TranslationBlock
*tb
,
774 unsigned int n
, unsigned int page_addr
)
777 TranslationBlock
*last_first_tb
;
779 tb
->page_addr
[n
] = page_addr
;
780 p
= page_find(page_addr
>> TARGET_PAGE_BITS
);
781 tb
->page_next
[n
] = p
->first_tb
;
782 last_first_tb
= p
->first_tb
;
783 p
->first_tb
= (TranslationBlock
*)((long)tb
| n
);
784 invalidate_page_bitmap(p
);
786 #ifdef TARGET_HAS_SMC
788 #if defined(CONFIG_USER_ONLY)
789 if (p
->flags
& PAGE_WRITE
) {
790 unsigned long host_start
, host_end
, addr
;
793 /* force the host page as non writable (writes will have a
794 page fault + mprotect overhead) */
795 host_start
= page_addr
& host_page_mask
;
796 host_end
= host_start
+ host_page_size
;
798 for(addr
= host_start
; addr
< host_end
; addr
+= TARGET_PAGE_SIZE
)
799 prot
|= page_get_flags(addr
);
800 mprotect((void *)host_start
, host_page_size
,
801 (prot
& PAGE_BITS
) & ~PAGE_WRITE
);
802 #ifdef DEBUG_TB_INVALIDATE
803 printf("protecting code page: 0x%08lx\n",
806 p
->flags
&= ~PAGE_WRITE
;
809 /* if some code is already present, then the pages are already
810 protected. So we handle the case where only the first TB is
811 allocated in a physical page */
812 if (!last_first_tb
) {
813 target_ulong virt_addr
;
815 virt_addr
= (tb
->pc
& TARGET_PAGE_MASK
) + (n
<< TARGET_PAGE_BITS
);
816 tlb_protect_code(cpu_single_env
, virt_addr
);
820 #endif /* TARGET_HAS_SMC */
823 /* Allocate a new translation block. Flush the translation buffer if
824 too many translation blocks or too much generated code. */
825 TranslationBlock
*tb_alloc(unsigned long pc
)
827 TranslationBlock
*tb
;
829 if (nb_tbs
>= CODE_GEN_MAX_BLOCKS
||
830 (code_gen_ptr
- code_gen_buffer
) >= CODE_GEN_BUFFER_MAX_SIZE
)
838 /* add a new TB and link it to the physical page tables. phys_page2 is
839 (-1) to indicate that only one page contains the TB. */
840 void tb_link_phys(TranslationBlock
*tb
,
841 target_ulong phys_pc
, target_ulong phys_page2
)
844 TranslationBlock
**ptb
;
846 /* add in the physical hash table */
847 h
= tb_phys_hash_func(phys_pc
);
848 ptb
= &tb_phys_hash
[h
];
849 tb
->phys_hash_next
= *ptb
;
852 /* add in the page list */
853 tb_alloc_page(tb
, 0, phys_pc
& TARGET_PAGE_MASK
);
854 if (phys_page2
!= -1)
855 tb_alloc_page(tb
, 1, phys_page2
);
857 tb
->page_addr
[1] = -1;
858 #ifdef DEBUG_TB_CHECK
863 /* link the tb with the other TBs */
864 void tb_link(TranslationBlock
*tb
)
866 #if !defined(CONFIG_USER_ONLY)
871 /* save the code memory mappings (needed to invalidate the code) */
872 addr
= tb
->pc
& TARGET_PAGE_MASK
;
873 vp
= virt_page_find_alloc(addr
>> TARGET_PAGE_BITS
);
874 #ifdef DEBUG_TLB_CHECK
875 if (vp
->valid_tag
== virt_valid_tag
&&
876 vp
->phys_addr
!= tb
->page_addr
[0]) {
877 printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
878 addr
, tb
->page_addr
[0], vp
->phys_addr
);
881 vp
->phys_addr
= tb
->page_addr
[0];
882 if (vp
->valid_tag
!= virt_valid_tag
) {
883 vp
->valid_tag
= virt_valid_tag
;
884 #if !defined(CONFIG_SOFTMMU)
889 if (tb
->page_addr
[1] != -1) {
890 addr
+= TARGET_PAGE_SIZE
;
891 vp
= virt_page_find_alloc(addr
>> TARGET_PAGE_BITS
);
892 #ifdef DEBUG_TLB_CHECK
893 if (vp
->valid_tag
== virt_valid_tag
&&
894 vp
->phys_addr
!= tb
->page_addr
[1]) {
895 printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
896 addr
, tb
->page_addr
[1], vp
->phys_addr
);
899 vp
->phys_addr
= tb
->page_addr
[1];
900 if (vp
->valid_tag
!= virt_valid_tag
) {
901 vp
->valid_tag
= virt_valid_tag
;
902 #if !defined(CONFIG_SOFTMMU)
910 tb
->jmp_first
= (TranslationBlock
*)((long)tb
| 2);
911 tb
->jmp_next
[0] = NULL
;
912 tb
->jmp_next
[1] = NULL
;
914 tb
->cflags
&= ~CF_FP_USED
;
915 if (tb
->cflags
& CF_TB_FP_USED
)
916 tb
->cflags
|= CF_FP_USED
;
919 /* init original jump addresses */
920 if (tb
->tb_next_offset
[0] != 0xffff)
921 tb_reset_jump(tb
, 0);
922 if (tb
->tb_next_offset
[1] != 0xffff)
923 tb_reset_jump(tb
, 1);
926 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
927 tb[1].tc_ptr. Return NULL if not found */
928 TranslationBlock
*tb_find_pc(unsigned long tc_ptr
)
932 TranslationBlock
*tb
;
936 if (tc_ptr
< (unsigned long)code_gen_buffer
||
937 tc_ptr
>= (unsigned long)code_gen_ptr
)
939 /* binary search (cf Knuth) */
942 while (m_min
<= m_max
) {
943 m
= (m_min
+ m_max
) >> 1;
945 v
= (unsigned long)tb
->tc_ptr
;
948 else if (tc_ptr
< v
) {
957 static void tb_reset_jump_recursive(TranslationBlock
*tb
);
959 static inline void tb_reset_jump_recursive2(TranslationBlock
*tb
, int n
)
961 TranslationBlock
*tb1
, *tb_next
, **ptb
;
964 tb1
= tb
->jmp_next
[n
];
966 /* find head of list */
969 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
972 tb1
= tb1
->jmp_next
[n1
];
974 /* we are now sure now that tb jumps to tb1 */
977 /* remove tb from the jmp_first list */
978 ptb
= &tb_next
->jmp_first
;
982 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
983 if (n1
== n
&& tb1
== tb
)
985 ptb
= &tb1
->jmp_next
[n1
];
987 *ptb
= tb
->jmp_next
[n
];
988 tb
->jmp_next
[n
] = NULL
;
990 /* suppress the jump to next tb in generated code */
991 tb_reset_jump(tb
, n
);
993 /* suppress jumps in the tb on which we could have jumped */
994 tb_reset_jump_recursive(tb_next
);
998 static void tb_reset_jump_recursive(TranslationBlock
*tb
)
1000 tb_reset_jump_recursive2(tb
, 0);
1001 tb_reset_jump_recursive2(tb
, 1);
1004 static void breakpoint_invalidate(CPUState
*env
, target_ulong pc
)
1006 target_ulong phys_addr
;
1008 phys_addr
= cpu_get_phys_page_debug(env
, pc
);
1009 tb_invalidate_phys_page_range(phys_addr
, phys_addr
+ 1, 0);
1012 /* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1013 breakpoint is reached */
1014 int cpu_breakpoint_insert(CPUState
*env
, target_ulong pc
)
1016 #if defined(TARGET_I386) || defined(TARGET_PPC)
1019 for(i
= 0; i
< env
->nb_breakpoints
; i
++) {
1020 if (env
->breakpoints
[i
] == pc
)
1024 if (env
->nb_breakpoints
>= MAX_BREAKPOINTS
)
1026 env
->breakpoints
[env
->nb_breakpoints
++] = pc
;
1028 breakpoint_invalidate(env
, pc
);
1035 /* remove a breakpoint */
1036 int cpu_breakpoint_remove(CPUState
*env
, target_ulong pc
)
1038 #if defined(TARGET_I386) || defined(TARGET_PPC)
1040 for(i
= 0; i
< env
->nb_breakpoints
; i
++) {
1041 if (env
->breakpoints
[i
] == pc
)
1046 memmove(&env
->breakpoints
[i
], &env
->breakpoints
[i
+ 1],
1047 (env
->nb_breakpoints
- (i
+ 1)) * sizeof(env
->breakpoints
[0]));
1048 env
->nb_breakpoints
--;
1050 breakpoint_invalidate(env
, pc
);
1057 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1058 CPU loop after each instruction */
1059 void cpu_single_step(CPUState
*env
, int enabled
)
1061 #if defined(TARGET_I386) || defined(TARGET_PPC)
1062 if (env
->singlestep_enabled
!= enabled
) {
1063 env
->singlestep_enabled
= enabled
;
1064 /* must flush all the translated code to avoid inconsistencies */
1065 /* XXX: only flush what is necessary */
1071 /* enable or disable low levels log */
1072 void cpu_set_log(int log_flags
)
1074 loglevel
= log_flags
;
1075 if (loglevel
&& !logfile
) {
1076 logfile
= fopen(logfilename
, "w");
1078 perror(logfilename
);
1081 #if !defined(CONFIG_SOFTMMU)
1082 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1084 static uint8_t logfile_buf
[4096];
1085 setvbuf(logfile
, logfile_buf
, _IOLBF
, sizeof(logfile_buf
));
1088 setvbuf(logfile
, NULL
, _IOLBF
, 0);
1093 void cpu_set_log_filename(const char *filename
)
1095 logfilename
= strdup(filename
);
1098 /* mask must never be zero, except for A20 change call */
1099 void cpu_interrupt(CPUState
*env
, int mask
)
1101 TranslationBlock
*tb
;
1102 static int interrupt_lock
;
1104 env
->interrupt_request
|= mask
;
1105 /* if the cpu is currently executing code, we must unlink it and
1106 all the potentially executing TB */
1107 tb
= env
->current_tb
;
1108 if (tb
&& !testandset(&interrupt_lock
)) {
1109 env
->current_tb
= NULL
;
1110 tb_reset_jump_recursive(tb
);
1115 CPULogItem cpu_log_items
[] = {
1116 { CPU_LOG_TB_OUT_ASM
, "out_asm",
1117 "show generated host assembly code for each compiled TB" },
1118 { CPU_LOG_TB_IN_ASM
, "in_asm",
1119 "show target assembly code for each compiled TB" },
1120 { CPU_LOG_TB_OP
, "op",
1121 "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
1123 { CPU_LOG_TB_OP_OPT
, "op_opt",
1124 "show micro ops after optimization for each compiled TB" },
1126 { CPU_LOG_INT
, "int",
1127 "show interrupts/exceptions in short format" },
1128 { CPU_LOG_EXEC
, "exec",
1129 "show trace before each executed TB (lots of logs)" },
1131 { CPU_LOG_PCALL
, "pcall",
1132 "show protected mode far calls/returns/exceptions" },
/* Return 1 if the first 'n' characters of 's1' form exactly the string
   's2' (i.e. strlen(s2) == n and the first n bytes match), else 0.
   Used to match comma-separated log-mask tokens against item names. */
static int cmp1(const char *s1, int n, const char *s2)
{
    /* cast avoids a signed/unsigned comparison between int and size_t */
    if (strlen(s2) != (size_t)n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}
1144 /* takes a comma separated list of log masks. Return 0 if error. */
1145 int cpu_str_to_log_mask(const char *str
)
1154 p1
= strchr(p
, ',');
1157 for(item
= cpu_log_items
; item
->mask
!= 0; item
++) {
1158 if (cmp1(p
, p1
- p
, item
->name
))
1171 void cpu_abort(CPUState
*env
, const char *fmt
, ...)
1176 fprintf(stderr
, "qemu: fatal: ");
1177 vfprintf(stderr
, fmt
, ap
);
1178 fprintf(stderr
, "\n");
1180 cpu_x86_dump_state(env
, stderr
, X86_DUMP_FPU
| X86_DUMP_CCOP
);
1186 #if !defined(CONFIG_USER_ONLY)
1188 /* NOTE: if flush_global is true, also flush global entries (not
1190 void tlb_flush(CPUState
*env
, int flush_global
)
1194 #if defined(DEBUG_TLB)
1195 printf("tlb_flush:\n");
1197 /* must reset current TB so that interrupts cannot modify the
1198 links while we are modifying them */
1199 env
->current_tb
= NULL
;
1201 for(i
= 0; i
< CPU_TLB_SIZE
; i
++) {
1202 env
->tlb_read
[0][i
].address
= -1;
1203 env
->tlb_write
[0][i
].address
= -1;
1204 env
->tlb_read
[1][i
].address
= -1;
1205 env
->tlb_write
[1][i
].address
= -1;
1209 for(i
= 0;i
< CODE_GEN_HASH_SIZE
; i
++)
1212 #if !defined(CONFIG_SOFTMMU)
1213 munmap((void *)MMAP_AREA_START
, MMAP_AREA_END
- MMAP_AREA_START
);
1217 static inline void tlb_flush_entry(CPUTLBEntry
*tlb_entry
, uint32_t addr
)
1219 if (addr
== (tlb_entry
->address
&
1220 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
)))
1221 tlb_entry
->address
= -1;
1224 void tlb_flush_page(CPUState
*env
, target_ulong addr
)
1229 TranslationBlock
*tb
;
1231 #if defined(DEBUG_TLB)
1232 printf("tlb_flush_page: 0x%08x\n", addr
);
1234 /* must reset current TB so that interrupts cannot modify the
1235 links while we are modifying them */
1236 env
->current_tb
= NULL
;
1238 addr
&= TARGET_PAGE_MASK
;
1239 i
= (addr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
1240 tlb_flush_entry(&env
->tlb_read
[0][i
], addr
);
1241 tlb_flush_entry(&env
->tlb_write
[0][i
], addr
);
1242 tlb_flush_entry(&env
->tlb_read
[1][i
], addr
);
1243 tlb_flush_entry(&env
->tlb_write
[1][i
], addr
);
1245 /* remove from the virtual pc hash table all the TB at this
1248 vp
= virt_page_find(addr
>> TARGET_PAGE_BITS
);
1249 if (vp
&& vp
->valid_tag
== virt_valid_tag
) {
1250 p
= page_find(vp
->phys_addr
>> TARGET_PAGE_BITS
);
1252 /* we remove all the links to the TBs in this virtual page */
1254 while (tb
!= NULL
) {
1256 tb
= (TranslationBlock
*)((long)tb
& ~3);
1257 if ((tb
->pc
& TARGET_PAGE_MASK
) == addr
||
1258 ((tb
->pc
+ tb
->size
- 1) & TARGET_PAGE_MASK
) == addr
) {
1261 tb
= tb
->page_next
[n
];
1267 #if !defined(CONFIG_SOFTMMU)
1268 if (addr
< MMAP_AREA_END
)
1269 munmap((void *)addr
, TARGET_PAGE_SIZE
);
1273 static inline void tlb_protect_code1(CPUTLBEntry
*tlb_entry
, target_ulong addr
)
1275 if (addr
== (tlb_entry
->address
&
1276 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
)) &&
1277 (tlb_entry
->address
& ~TARGET_PAGE_MASK
) != IO_MEM_CODE
&&
1278 (tlb_entry
->address
& ~TARGET_PAGE_MASK
) != IO_MEM_ROM
) {
1279 tlb_entry
->address
= (tlb_entry
->address
& TARGET_PAGE_MASK
) | IO_MEM_CODE
;
1283 /* update the TLBs so that writes to code in the virtual page 'addr'
1285 static void tlb_protect_code(CPUState
*env
, target_ulong addr
)
1289 addr
&= TARGET_PAGE_MASK
;
1290 i
= (addr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
1291 tlb_protect_code1(&env
->tlb_write
[0][i
], addr
);
1292 tlb_protect_code1(&env
->tlb_write
[1][i
], addr
);
1293 #if !defined(CONFIG_SOFTMMU)
1294 /* NOTE: as we generated the code for this page, it is already at
1296 if (addr
< MMAP_AREA_END
)
1297 mprotect((void *)addr
, TARGET_PAGE_SIZE
, PROT_READ
);
1301 static inline void tlb_unprotect_code2(CPUTLBEntry
*tlb_entry
,
1302 unsigned long phys_addr
)
1304 if ((tlb_entry
->address
& ~TARGET_PAGE_MASK
) == IO_MEM_CODE
&&
1305 ((tlb_entry
->address
& TARGET_PAGE_MASK
) + tlb_entry
->addend
) == phys_addr
) {
1306 tlb_entry
->address
= (tlb_entry
->address
& TARGET_PAGE_MASK
) | IO_MEM_NOTDIRTY
;
1310 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1311 tested self modifying code */
1312 static void tlb_unprotect_code_phys(CPUState
*env
, unsigned long phys_addr
, target_ulong vaddr
)
1316 phys_addr
&= TARGET_PAGE_MASK
;
1317 phys_addr
+= (long)phys_ram_base
;
1318 i
= (vaddr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
1319 tlb_unprotect_code2(&env
->tlb_write
[0][i
], phys_addr
);
1320 tlb_unprotect_code2(&env
->tlb_write
[1][i
], phys_addr
);
1323 static inline void tlb_reset_dirty_range(CPUTLBEntry
*tlb_entry
,
1324 unsigned long start
, unsigned long length
)
1327 if ((tlb_entry
->address
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
) {
1328 addr
= (tlb_entry
->address
& TARGET_PAGE_MASK
) + tlb_entry
->addend
;
1329 if ((addr
- start
) < length
) {
1330 tlb_entry
->address
= (tlb_entry
->address
& TARGET_PAGE_MASK
) | IO_MEM_NOTDIRTY
;
1335 void cpu_physical_memory_reset_dirty(target_ulong start
, target_ulong end
)
1338 unsigned long length
, start1
;
1341 start
&= TARGET_PAGE_MASK
;
1342 end
= TARGET_PAGE_ALIGN(end
);
1344 length
= end
- start
;
1347 memset(phys_ram_dirty
+ (start
>> TARGET_PAGE_BITS
), 0, length
>> TARGET_PAGE_BITS
);
1349 env
= cpu_single_env
;
1350 /* we modify the TLB cache so that the dirty bit will be set again
1351 when accessing the range */
1352 start1
= start
+ (unsigned long)phys_ram_base
;
1353 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1354 tlb_reset_dirty_range(&env
->tlb_write
[0][i
], start1
, length
);
1355 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1356 tlb_reset_dirty_range(&env
->tlb_write
[1][i
], start1
, length
);
1358 #if !defined(CONFIG_SOFTMMU)
1359 /* XXX: this is expensive */
1365 for(i
= 0; i
< L1_SIZE
; i
++) {
1368 addr
= i
<< (TARGET_PAGE_BITS
+ L2_BITS
);
1369 for(j
= 0; j
< L2_SIZE
; j
++) {
1370 if (p
->valid_tag
== virt_valid_tag
&&
1371 p
->phys_addr
>= start
&& p
->phys_addr
< end
&&
1372 (p
->prot
& PROT_WRITE
)) {
1373 if (addr
< MMAP_AREA_END
) {
1374 mprotect((void *)addr
, TARGET_PAGE_SIZE
,
1375 p
->prot
& ~PROT_WRITE
);
1378 addr
+= TARGET_PAGE_SIZE
;
1387 static inline void tlb_set_dirty1(CPUTLBEntry
*tlb_entry
,
1388 unsigned long start
)
1391 if ((tlb_entry
->address
& ~TARGET_PAGE_MASK
) == IO_MEM_NOTDIRTY
) {
1392 addr
= (tlb_entry
->address
& TARGET_PAGE_MASK
) + tlb_entry
->addend
;
1393 if (addr
== start
) {
1394 tlb_entry
->address
= (tlb_entry
->address
& TARGET_PAGE_MASK
) | IO_MEM_RAM
;
1399 /* update the TLB corresponding to virtual page vaddr and phys addr
1400 addr so that it is no longer dirty */
1401 static inline void tlb_set_dirty(unsigned long addr
, target_ulong vaddr
)
1403 CPUState
*env
= cpu_single_env
;
1406 phys_ram_dirty
[(addr
- (unsigned long)phys_ram_base
) >> TARGET_PAGE_BITS
] = 1;
1408 addr
&= TARGET_PAGE_MASK
;
1409 i
= (vaddr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
1410 tlb_set_dirty1(&env
->tlb_write
[0][i
], addr
);
1411 tlb_set_dirty1(&env
->tlb_write
[1][i
], addr
);
1414 /* add a new TLB entry. At most one entry for a given virtual address
1415 is permitted. Return 0 if OK or 2 if the page could not be mapped
1416 (can only happen in non SOFTMMU mode for I/O pages or pages
1417 conflicting with the host address space). */
1418 int tlb_set_page(CPUState
*env
, target_ulong vaddr
,
1419 target_phys_addr_t paddr
, int prot
,
1420 int is_user
, int is_softmmu
)
1424 TranslationBlock
*first_tb
;
1426 target_ulong address
;
1427 unsigned long addend
;
1430 p
= page_find(paddr
>> TARGET_PAGE_BITS
);
1432 pd
= IO_MEM_UNASSIGNED
;
1435 pd
= p
->phys_offset
;
1436 first_tb
= p
->first_tb
;
1438 #if defined(DEBUG_TLB)
1439 printf("tlb_set_page: vaddr=0x%08x paddr=0x%08x prot=%x u=%d c=%d smmu=%d pd=0x%08x\n",
1440 vaddr
, paddr
, prot
, is_user
, (first_tb
!= NULL
), is_softmmu
, pd
);
1444 #if !defined(CONFIG_SOFTMMU)
1448 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
) {
1449 /* IO memory case */
1450 address
= vaddr
| pd
;
1453 /* standard memory */
1455 addend
= (unsigned long)phys_ram_base
+ (pd
& TARGET_PAGE_MASK
);
1458 index
= (vaddr
>> 12) & (CPU_TLB_SIZE
- 1);
1460 if (prot
& PAGE_READ
) {
1461 env
->tlb_read
[is_user
][index
].address
= address
;
1462 env
->tlb_read
[is_user
][index
].addend
= addend
;
1464 env
->tlb_read
[is_user
][index
].address
= -1;
1465 env
->tlb_read
[is_user
][index
].addend
= -1;
1467 if (prot
& PAGE_WRITE
) {
1468 if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_ROM
) {
1469 /* ROM: access is ignored (same as unassigned) */
1470 env
->tlb_write
[is_user
][index
].address
= vaddr
| IO_MEM_ROM
;
1471 env
->tlb_write
[is_user
][index
].addend
= addend
;
1473 /* XXX: the PowerPC code seems not ready to handle
1474 self modifying code with DCBI */
1475 #if defined(TARGET_HAS_SMC) || 1
1477 /* if code is present, we use a specific memory
1478 handler. It works only for physical memory access */
1479 env
->tlb_write
[is_user
][index
].address
= vaddr
| IO_MEM_CODE
;
1480 env
->tlb_write
[is_user
][index
].addend
= addend
;
1483 if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
&&
1484 !cpu_physical_memory_is_dirty(pd
)) {
1485 env
->tlb_write
[is_user
][index
].address
= vaddr
| IO_MEM_NOTDIRTY
;
1486 env
->tlb_write
[is_user
][index
].addend
= addend
;
1488 env
->tlb_write
[is_user
][index
].address
= address
;
1489 env
->tlb_write
[is_user
][index
].addend
= addend
;
1492 env
->tlb_write
[is_user
][index
].address
= -1;
1493 env
->tlb_write
[is_user
][index
].addend
= -1;
1496 #if !defined(CONFIG_SOFTMMU)
1498 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
) {
1499 /* IO access: no mapping is done as it will be handled by the
1501 if (!(env
->hflags
& HF_SOFTMMU_MASK
))
1506 if (vaddr
>= MMAP_AREA_END
) {
1509 if (prot
& PROT_WRITE
) {
1510 if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_ROM
||
1511 #if defined(TARGET_HAS_SMC) || 1
1514 ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
&&
1515 !cpu_physical_memory_is_dirty(pd
))) {
1516 /* ROM: we do as if code was inside */
1517 /* if code is present, we only map as read only and save the
1521 vp
= virt_page_find_alloc(vaddr
>> TARGET_PAGE_BITS
);
1524 vp
->valid_tag
= virt_valid_tag
;
1525 prot
&= ~PAGE_WRITE
;
1528 map_addr
= mmap((void *)vaddr
, TARGET_PAGE_SIZE
, prot
,
1529 MAP_SHARED
| MAP_FIXED
, phys_ram_fd
, (pd
& TARGET_PAGE_MASK
));
1530 if (map_addr
== MAP_FAILED
) {
1531 cpu_abort(env
, "mmap failed when mapped physical address 0x%08x to virtual address 0x%08x\n",
1541 /* called from signal handler: invalidate the code and unprotect the
1542 page. Return TRUE if the fault was succesfully handled. */
1543 int page_unprotect(unsigned long addr
, unsigned long pc
, void *puc
)
1545 #if !defined(CONFIG_SOFTMMU)
1548 #if defined(DEBUG_TLB)
1549 printf("page_unprotect: addr=0x%08x\n", addr
);
1551 addr
&= TARGET_PAGE_MASK
;
1553 /* if it is not mapped, no need to worry here */
1554 if (addr
>= MMAP_AREA_END
)
1556 vp
= virt_page_find(addr
>> TARGET_PAGE_BITS
);
1559 /* NOTE: in this case, validate_tag is _not_ tested as it
1560 validates only the code TLB */
1561 if (vp
->valid_tag
!= virt_valid_tag
)
1563 if (!(vp
->prot
& PAGE_WRITE
))
1565 #if defined(DEBUG_TLB)
1566 printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
1567 addr
, vp
->phys_addr
, vp
->prot
);
1569 if (mprotect((void *)addr
, TARGET_PAGE_SIZE
, vp
->prot
) < 0)
1570 cpu_abort(cpu_single_env
, "error mprotect addr=0x%lx prot=%d\n",
1571 (unsigned long)addr
, vp
->prot
);
1572 /* set the dirty bit */
1573 phys_ram_dirty
[vp
->phys_addr
>> TARGET_PAGE_BITS
] = 1;
1574 /* flush the code inside */
1575 tb_invalidate_phys_page(vp
->phys_addr
, pc
, puc
);
1584 void tlb_flush(CPUState
*env
, int flush_global
)
1588 void tlb_flush_page(CPUState
*env
, target_ulong addr
)
1592 int tlb_set_page(CPUState
*env
, target_ulong vaddr
,
1593 target_phys_addr_t paddr
, int prot
,
1594 int is_user
, int is_softmmu
)
1599 /* dump memory mappings */
1600 void page_dump(FILE *f
)
1602 unsigned long start
, end
;
1603 int i
, j
, prot
, prot1
;
1606 fprintf(f
, "%-8s %-8s %-8s %s\n",
1607 "start", "end", "size", "prot");
1611 for(i
= 0; i
<= L1_SIZE
; i
++) {
1616 for(j
= 0;j
< L2_SIZE
; j
++) {
1621 if (prot1
!= prot
) {
1622 end
= (i
<< (32 - L1_BITS
)) | (j
<< TARGET_PAGE_BITS
);
1624 fprintf(f
, "%08lx-%08lx %08lx %c%c%c\n",
1625 start
, end
, end
- start
,
1626 prot
& PAGE_READ
? 'r' : '-',
1627 prot
& PAGE_WRITE
? 'w' : '-',
1628 prot
& PAGE_EXEC
? 'x' : '-');
1642 int page_get_flags(unsigned long address
)
1646 p
= page_find(address
>> TARGET_PAGE_BITS
);
1652 /* modify the flags of a page and invalidate the code if
1653 necessary. The flag PAGE_WRITE_ORG is positionned automatically
1654 depending on PAGE_WRITE */
1655 void page_set_flags(unsigned long start
, unsigned long end
, int flags
)
1660 start
= start
& TARGET_PAGE_MASK
;
1661 end
= TARGET_PAGE_ALIGN(end
);
1662 if (flags
& PAGE_WRITE
)
1663 flags
|= PAGE_WRITE_ORG
;
1664 spin_lock(&tb_lock
);
1665 for(addr
= start
; addr
< end
; addr
+= TARGET_PAGE_SIZE
) {
1666 p
= page_find_alloc(addr
>> TARGET_PAGE_BITS
);
1667 /* if the write protection is set, then we invalidate the code
1669 if (!(p
->flags
& PAGE_WRITE
) &&
1670 (flags
& PAGE_WRITE
) &&
1672 tb_invalidate_phys_page(addr
, 0, NULL
);
1676 spin_unlock(&tb_lock
);
1679 /* called from signal handler: invalidate the code and unprotect the
1680 page. Return TRUE if the fault was succesfully handled. */
1681 int page_unprotect(unsigned long address
, unsigned long pc
, void *puc
)
1683 unsigned int page_index
, prot
, pindex
;
1685 unsigned long host_start
, host_end
, addr
;
1687 host_start
= address
& host_page_mask
;
1688 page_index
= host_start
>> TARGET_PAGE_BITS
;
1689 p1
= page_find(page_index
);
1692 host_end
= host_start
+ host_page_size
;
1695 for(addr
= host_start
;addr
< host_end
; addr
+= TARGET_PAGE_SIZE
) {
1699 /* if the page was really writable, then we change its
1700 protection back to writable */
1701 if (prot
& PAGE_WRITE_ORG
) {
1702 pindex
= (address
- host_start
) >> TARGET_PAGE_BITS
;
1703 if (!(p1
[pindex
].flags
& PAGE_WRITE
)) {
1704 mprotect((void *)host_start
, host_page_size
,
1705 (prot
& PAGE_BITS
) | PAGE_WRITE
);
1706 p1
[pindex
].flags
|= PAGE_WRITE
;
1707 /* and since the content will be modified, we must invalidate
1708 the corresponding translated code. */
1709 tb_invalidate_phys_page(address
, pc
, puc
);
1710 #ifdef DEBUG_TB_CHECK
1711 tb_invalidate_check(address
);
1719 /* call this function when system calls directly modify a memory area */
1720 void page_unprotect_range(uint8_t *data
, unsigned long data_size
)
1722 unsigned long start
, end
, addr
;
1724 start
= (unsigned long)data
;
1725 end
= start
+ data_size
;
1726 start
&= TARGET_PAGE_MASK
;
1727 end
= TARGET_PAGE_ALIGN(end
);
1728 for(addr
= start
; addr
< end
; addr
+= TARGET_PAGE_SIZE
) {
1729 page_unprotect(addr
, 0, NULL
);
1733 static inline void tlb_set_dirty(unsigned long addr
, target_ulong vaddr
)
1736 #endif /* defined(CONFIG_USER_ONLY) */
1738 /* register physical memory. 'size' must be a multiple of the target
1739 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
1741 void cpu_register_physical_memory(target_phys_addr_t start_addr
,
1743 unsigned long phys_offset
)
1745 unsigned long addr
, end_addr
;
1748 end_addr
= start_addr
+ size
;
1749 for(addr
= start_addr
; addr
< end_addr
; addr
+= TARGET_PAGE_SIZE
) {
1750 p
= page_find_alloc(addr
>> TARGET_PAGE_BITS
);
1751 p
->phys_offset
= phys_offset
;
1752 if ((phys_offset
& ~TARGET_PAGE_MASK
) <= IO_MEM_ROM
)
1753 phys_offset
+= TARGET_PAGE_SIZE
;
1757 static uint32_t unassigned_mem_readb(target_phys_addr_t addr
)
1762 static void unassigned_mem_writeb(target_phys_addr_t addr
, uint32_t val
)
1766 static CPUReadMemoryFunc
*unassigned_mem_read
[3] = {
1767 unassigned_mem_readb
,
1768 unassigned_mem_readb
,
1769 unassigned_mem_readb
,
1772 static CPUWriteMemoryFunc
*unassigned_mem_write
[3] = {
1773 unassigned_mem_writeb
,
1774 unassigned_mem_writeb
,
1775 unassigned_mem_writeb
,
1778 /* self modifying code support in soft mmu mode : writing to a page
1779 containing code comes to these functions */
1781 static void code_mem_writeb(target_phys_addr_t addr
, uint32_t val
)
1783 unsigned long phys_addr
;
1785 phys_addr
= addr
- (long)phys_ram_base
;
1786 #if !defined(CONFIG_USER_ONLY)
1787 tb_invalidate_phys_page_fast(phys_addr
, 1);
1789 stb_raw((uint8_t *)addr
, val
);
1790 phys_ram_dirty
[phys_addr
>> TARGET_PAGE_BITS
] = 1;
1793 static void code_mem_writew(target_phys_addr_t addr
, uint32_t val
)
1795 unsigned long phys_addr
;
1797 phys_addr
= addr
- (long)phys_ram_base
;
1798 #if !defined(CONFIG_USER_ONLY)
1799 tb_invalidate_phys_page_fast(phys_addr
, 2);
1801 stw_raw((uint8_t *)addr
, val
);
1802 phys_ram_dirty
[phys_addr
>> TARGET_PAGE_BITS
] = 1;
1805 static void code_mem_writel(target_phys_addr_t addr
, uint32_t val
)
1807 unsigned long phys_addr
;
1809 phys_addr
= addr
- (long)phys_ram_base
;
1810 #if !defined(CONFIG_USER_ONLY)
1811 tb_invalidate_phys_page_fast(phys_addr
, 4);
1813 stl_raw((uint8_t *)addr
, val
);
1814 phys_ram_dirty
[phys_addr
>> TARGET_PAGE_BITS
] = 1;
1817 static CPUReadMemoryFunc
*code_mem_read
[3] = {
1818 NULL
, /* never used */
1819 NULL
, /* never used */
1820 NULL
, /* never used */
1823 static CPUWriteMemoryFunc
*code_mem_write
[3] = {
1829 static void notdirty_mem_writeb(target_phys_addr_t addr
, uint32_t val
)
1831 stb_raw((uint8_t *)addr
, val
);
1832 tlb_set_dirty(addr
, cpu_single_env
->mem_write_vaddr
);
1835 static void notdirty_mem_writew(target_phys_addr_t addr
, uint32_t val
)
1837 stw_raw((uint8_t *)addr
, val
);
1838 tlb_set_dirty(addr
, cpu_single_env
->mem_write_vaddr
);
1841 static void notdirty_mem_writel(target_phys_addr_t addr
, uint32_t val
)
1843 stl_raw((uint8_t *)addr
, val
);
1844 tlb_set_dirty(addr
, cpu_single_env
->mem_write_vaddr
);
1847 static CPUWriteMemoryFunc
*notdirty_mem_write
[3] = {
1848 notdirty_mem_writeb
,
1849 notdirty_mem_writew
,
1850 notdirty_mem_writel
,
1853 static void io_mem_init(void)
1855 cpu_register_io_memory(IO_MEM_ROM
>> IO_MEM_SHIFT
, code_mem_read
, unassigned_mem_write
);
1856 cpu_register_io_memory(IO_MEM_UNASSIGNED
>> IO_MEM_SHIFT
, unassigned_mem_read
, unassigned_mem_write
);
1857 cpu_register_io_memory(IO_MEM_CODE
>> IO_MEM_SHIFT
, code_mem_read
, code_mem_write
);
1858 cpu_register_io_memory(IO_MEM_NOTDIRTY
>> IO_MEM_SHIFT
, code_mem_read
, notdirty_mem_write
);
1861 /* alloc dirty bits array */
1862 phys_ram_dirty
= qemu_malloc(phys_ram_size
>> TARGET_PAGE_BITS
);
1865 /* mem_read and mem_write are arrays of functions containing the
1866 function to access byte (index 0), word (index 1) and dword (index
1867 2). All functions must be supplied. If io_index is non zero, the
1868 corresponding io zone is modified. If it is zero, a new io zone is
1869 allocated. The return value can be used with
1870 cpu_register_physical_memory(). (-1) is returned if error. */
1871 int cpu_register_io_memory(int io_index
,
1872 CPUReadMemoryFunc
**mem_read
,
1873 CPUWriteMemoryFunc
**mem_write
)
1877 if (io_index
<= 0) {
1878 if (io_index
>= IO_MEM_NB_ENTRIES
)
1880 io_index
= io_mem_nb
++;
1882 if (io_index
>= IO_MEM_NB_ENTRIES
)
1886 for(i
= 0;i
< 3; i
++) {
1887 io_mem_read
[io_index
][i
] = mem_read
[i
];
1888 io_mem_write
[io_index
][i
] = mem_write
[i
];
1890 return io_index
<< IO_MEM_SHIFT
;
1893 /* physical memory access (slow version, mainly for debug) */
1894 #if defined(CONFIG_USER_ONLY)
1895 void cpu_physical_memory_rw(target_phys_addr_t addr
, uint8_t *buf
,
1896 int len
, int is_write
)
1902 page
= addr
& TARGET_PAGE_MASK
;
1903 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
1906 flags
= page_get_flags(page
);
1907 if (!(flags
& PAGE_VALID
))
1910 if (!(flags
& PAGE_WRITE
))
1912 memcpy((uint8_t *)addr
, buf
, len
);
1914 if (!(flags
& PAGE_READ
))
1916 memcpy(buf
, (uint8_t *)addr
, len
);
1924 void cpu_physical_memory_rw(target_phys_addr_t addr
, uint8_t *buf
,
1925 int len
, int is_write
)
1930 target_phys_addr_t page
;
1935 page
= addr
& TARGET_PAGE_MASK
;
1936 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
1939 p
= page_find(page
>> TARGET_PAGE_BITS
);
1941 pd
= IO_MEM_UNASSIGNED
;
1943 pd
= p
->phys_offset
;
1947 if ((pd
& ~TARGET_PAGE_MASK
) != 0) {
1948 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
1949 if (l
>= 4 && ((addr
& 3) == 0)) {
1950 /* 32 bit read access */
1952 io_mem_write
[io_index
][2](addr
, val
);
1954 } else if (l
>= 2 && ((addr
& 1) == 0)) {
1955 /* 16 bit read access */
1956 val
= lduw_raw(buf
);
1957 io_mem_write
[io_index
][1](addr
, val
);
1961 val
= ldub_raw(buf
);
1962 io_mem_write
[io_index
][0](addr
, val
);
1966 unsigned long addr1
;
1967 addr1
= (pd
& TARGET_PAGE_MASK
) + (addr
& ~TARGET_PAGE_MASK
);
1969 ptr
= phys_ram_base
+ addr1
;
1970 memcpy(ptr
, buf
, l
);
1971 /* invalidate code */
1972 tb_invalidate_phys_page_range(addr1
, addr1
+ l
, 0);
1974 phys_ram_dirty
[page
>> TARGET_PAGE_BITS
] = 1;
1977 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
&&
1978 (pd
& ~TARGET_PAGE_MASK
) != IO_MEM_CODE
) {
1980 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
1981 if (l
>= 4 && ((addr
& 3) == 0)) {
1982 /* 32 bit read access */
1983 val
= io_mem_read
[io_index
][2](addr
);
1986 } else if (l
>= 2 && ((addr
& 1) == 0)) {
1987 /* 16 bit read access */
1988 val
= io_mem_read
[io_index
][1](addr
);
1993 val
= io_mem_read
[io_index
][0](addr
);
1999 ptr
= phys_ram_base
+ (pd
& TARGET_PAGE_MASK
) +
2000 (addr
& ~TARGET_PAGE_MASK
);
2001 memcpy(buf
, ptr
, l
);
2011 /* virtual memory access for debug */
2012 int cpu_memory_rw_debug(CPUState
*env
, target_ulong addr
,
2013 uint8_t *buf
, int len
, int is_write
)
2016 target_ulong page
, phys_addr
;
2019 page
= addr
& TARGET_PAGE_MASK
;
2020 phys_addr
= cpu_get_phys_page_debug(env
, page
);
2021 /* if no physical page mapped, return an error */
2022 if (phys_addr
== -1)
2024 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
2027 cpu_physical_memory_rw(phys_addr
+ (addr
& ~TARGET_PAGE_MASK
),
2036 #if !defined(CONFIG_USER_ONLY)
2038 #define MMUSUFFIX _cmmu
2039 #define GETPC() NULL
2040 #define env cpu_single_env
2043 #include "softmmu_template.h"
2046 #include "softmmu_template.h"
2049 #include "softmmu_template.h"
2052 #include "softmmu_template.h"