2 * virtual page mapping and translated block handling
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
24 #include <sys/types.h>
37 #if defined(CONFIG_USER_ONLY)
41 //#define DEBUG_TB_INVALIDATE
44 //#define DEBUG_UNASSIGNED
46 /* make various TB consistency checks */
47 //#define DEBUG_TB_CHECK
48 //#define DEBUG_TLB_CHECK
50 #if !defined(CONFIG_USER_ONLY)
51 /* TB consistency checks only implemented for usermode emulation. */
55 /* threshold to flush the translated code buffer */
56 #define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)
58 #define SMC_BITMAP_USE_THRESHOLD 10
60 #define MMAP_AREA_START 0x00000000
61 #define MMAP_AREA_END 0xa8000000
63 #if defined(TARGET_SPARC64)
64 #define TARGET_PHYS_ADDR_SPACE_BITS 41
65 #elif defined(TARGET_PPC64)
66 #define TARGET_PHYS_ADDR_SPACE_BITS 42
68 /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
69 #define TARGET_PHYS_ADDR_SPACE_BITS 32
72 TranslationBlock tbs
[CODE_GEN_MAX_BLOCKS
];
73 TranslationBlock
*tb_phys_hash
[CODE_GEN_PHYS_HASH_SIZE
];
75 /* any access to the tbs or the page table must use this lock */
76 spinlock_t tb_lock
= SPIN_LOCK_UNLOCKED
;
78 uint8_t code_gen_buffer
[CODE_GEN_BUFFER_SIZE
] __attribute__((aligned (32)));
79 uint8_t *code_gen_ptr
;
83 uint8_t *phys_ram_base
;
84 uint8_t *phys_ram_dirty
;
87 /* current CPU in the current thread. It is only valid inside
89 CPUState
*cpu_single_env
;
91 typedef struct PageDesc
{
92 /* list of TBs intersecting this ram page */
93 TranslationBlock
*first_tb
;
94 /* in order to optimize self modifying code, we count the number
95 of lookups we do to a given page to use a bitmap */
96 unsigned int code_write_count
;
98 #if defined(CONFIG_USER_ONLY)
103 typedef struct PhysPageDesc
{
104 /* offset in host memory of the page + io_index in the low 12 bits */
105 uint32_t phys_offset
;
109 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
111 #define L1_SIZE (1 << L1_BITS)
112 #define L2_SIZE (1 << L2_BITS)
114 static void io_mem_init(void);
116 unsigned long qemu_real_host_page_size
;
117 unsigned long qemu_host_page_bits
;
118 unsigned long qemu_host_page_size
;
119 unsigned long qemu_host_page_mask
;
121 /* XXX: for system emulation, it could just be an array */
122 static PageDesc
*l1_map
[L1_SIZE
];
123 PhysPageDesc
**l1_phys_map
;
125 /* io memory support */
126 CPUWriteMemoryFunc
*io_mem_write
[IO_MEM_NB_ENTRIES
][4];
127 CPUReadMemoryFunc
*io_mem_read
[IO_MEM_NB_ENTRIES
][4];
128 void *io_mem_opaque
[IO_MEM_NB_ENTRIES
];
129 static int io_mem_nb
;
132 char *logfilename
= "/tmp/qemu.log";
137 static int tlb_flush_count
;
138 static int tb_flush_count
;
139 static int tb_phys_invalidate_count
;
141 static void page_init(void)
143 /* NOTE: we can always suppose that qemu_host_page_size >=
147 SYSTEM_INFO system_info
;
150 GetSystemInfo(&system_info
);
151 qemu_real_host_page_size
= system_info
.dwPageSize
;
153 VirtualProtect(code_gen_buffer
, sizeof(code_gen_buffer
),
154 PAGE_EXECUTE_READWRITE
, &old_protect
);
157 qemu_real_host_page_size
= getpagesize();
159 unsigned long start
, end
;
161 start
= (unsigned long)code_gen_buffer
;
162 start
&= ~(qemu_real_host_page_size
- 1);
164 end
= (unsigned long)code_gen_buffer
+ sizeof(code_gen_buffer
);
165 end
+= qemu_real_host_page_size
- 1;
166 end
&= ~(qemu_real_host_page_size
- 1);
168 mprotect((void *)start
, end
- start
,
169 PROT_READ
| PROT_WRITE
| PROT_EXEC
);
173 if (qemu_host_page_size
== 0)
174 qemu_host_page_size
= qemu_real_host_page_size
;
175 if (qemu_host_page_size
< TARGET_PAGE_SIZE
)
176 qemu_host_page_size
= TARGET_PAGE_SIZE
;
177 qemu_host_page_bits
= 0;
178 while ((1 << qemu_host_page_bits
) < qemu_host_page_size
)
179 qemu_host_page_bits
++;
180 qemu_host_page_mask
= ~(qemu_host_page_size
- 1);
181 l1_phys_map
= qemu_vmalloc(L1_SIZE
* sizeof(void *));
182 memset(l1_phys_map
, 0, L1_SIZE
* sizeof(void *));
185 static inline PageDesc
*page_find_alloc(unsigned int index
)
189 lp
= &l1_map
[index
>> L2_BITS
];
192 /* allocate if not found */
193 p
= qemu_malloc(sizeof(PageDesc
) * L2_SIZE
);
194 memset(p
, 0, sizeof(PageDesc
) * L2_SIZE
);
197 return p
+ (index
& (L2_SIZE
- 1));
200 static inline PageDesc
*page_find(unsigned int index
)
204 p
= l1_map
[index
>> L2_BITS
];
207 return p
+ (index
& (L2_SIZE
- 1));
210 static PhysPageDesc
*phys_page_find_alloc(target_phys_addr_t index
, int alloc
)
215 p
= (void **)l1_phys_map
;
216 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
218 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
219 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
221 lp
= p
+ ((index
>> (L1_BITS
+ L2_BITS
)) & (L1_SIZE
- 1));
224 /* allocate if not found */
227 p
= qemu_vmalloc(sizeof(void *) * L1_SIZE
);
228 memset(p
, 0, sizeof(void *) * L1_SIZE
);
232 lp
= p
+ ((index
>> L2_BITS
) & (L1_SIZE
- 1));
236 /* allocate if not found */
239 pd
= qemu_vmalloc(sizeof(PhysPageDesc
) * L2_SIZE
);
241 for (i
= 0; i
< L2_SIZE
; i
++)
242 pd
[i
].phys_offset
= IO_MEM_UNASSIGNED
;
244 return ((PhysPageDesc
*)pd
) + (index
& (L2_SIZE
- 1));
247 static inline PhysPageDesc
*phys_page_find(target_phys_addr_t index
)
249 return phys_page_find_alloc(index
, 0);
252 #if !defined(CONFIG_USER_ONLY)
253 static void tlb_protect_code(ram_addr_t ram_addr
);
254 static void tlb_unprotect_code_phys(CPUState
*env
, ram_addr_t ram_addr
,
258 void cpu_exec_init(CPUState
*env
)
264 code_gen_ptr
= code_gen_buffer
;
268 env
->next_cpu
= NULL
;
271 while (*penv
!= NULL
) {
272 penv
= (CPUState
**)&(*penv
)->next_cpu
;
275 env
->cpu_index
= cpu_index
;
279 static inline void invalidate_page_bitmap(PageDesc
*p
)
281 if (p
->code_bitmap
) {
282 qemu_free(p
->code_bitmap
);
283 p
->code_bitmap
= NULL
;
285 p
->code_write_count
= 0;
288 /* set to NULL all the 'first_tb' fields in all PageDescs */
289 static void page_flush_tb(void)
294 for(i
= 0; i
< L1_SIZE
; i
++) {
297 for(j
= 0; j
< L2_SIZE
; j
++) {
299 invalidate_page_bitmap(p
);
306 /* flush all the translation blocks */
307 /* XXX: tb_flush is currently not thread safe */
308 void tb_flush(CPUState
*env1
)
311 #if defined(DEBUG_FLUSH)
312 printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
313 code_gen_ptr
- code_gen_buffer
,
315 nb_tbs
> 0 ? (code_gen_ptr
- code_gen_buffer
) / nb_tbs
: 0);
319 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
320 memset (env
->tb_jmp_cache
, 0, TB_JMP_CACHE_SIZE
* sizeof (void *));
323 memset (tb_phys_hash
, 0, CODE_GEN_PHYS_HASH_SIZE
* sizeof (void *));
326 code_gen_ptr
= code_gen_buffer
;
327 /* XXX: flush processor icache at this point if cache flush is
332 #ifdef DEBUG_TB_CHECK
334 static void tb_invalidate_check(unsigned long address
)
336 TranslationBlock
*tb
;
338 address
&= TARGET_PAGE_MASK
;
339 for(i
= 0;i
< CODE_GEN_PHYS_HASH_SIZE
; i
++) {
340 for(tb
= tb_phys_hash
[i
]; tb
!= NULL
; tb
= tb
->phys_hash_next
) {
341 if (!(address
+ TARGET_PAGE_SIZE
<= tb
->pc
||
342 address
>= tb
->pc
+ tb
->size
)) {
343 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
344 address
, (long)tb
->pc
, tb
->size
);
350 /* verify that all the pages have correct rights for code */
351 static void tb_page_check(void)
353 TranslationBlock
*tb
;
354 int i
, flags1
, flags2
;
356 for(i
= 0;i
< CODE_GEN_PHYS_HASH_SIZE
; i
++) {
357 for(tb
= tb_phys_hash
[i
]; tb
!= NULL
; tb
= tb
->phys_hash_next
) {
358 flags1
= page_get_flags(tb
->pc
);
359 flags2
= page_get_flags(tb
->pc
+ tb
->size
- 1);
360 if ((flags1
& PAGE_WRITE
) || (flags2
& PAGE_WRITE
)) {
361 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
362 (long)tb
->pc
, tb
->size
, flags1
, flags2
);
368 void tb_jmp_check(TranslationBlock
*tb
)
370 TranslationBlock
*tb1
;
373 /* suppress any remaining jumps to this TB */
377 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
380 tb1
= tb1
->jmp_next
[n1
];
382 /* check end of list */
384 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb
);
390 /* invalidate one TB */
391 static inline void tb_remove(TranslationBlock
**ptb
, TranslationBlock
*tb
,
394 TranslationBlock
*tb1
;
398 *ptb
= *(TranslationBlock
**)((char *)tb1
+ next_offset
);
401 ptb
= (TranslationBlock
**)((char *)tb1
+ next_offset
);
405 static inline void tb_page_remove(TranslationBlock
**ptb
, TranslationBlock
*tb
)
407 TranslationBlock
*tb1
;
413 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
415 *ptb
= tb1
->page_next
[n1
];
418 ptb
= &tb1
->page_next
[n1
];
422 static inline void tb_jmp_remove(TranslationBlock
*tb
, int n
)
424 TranslationBlock
*tb1
, **ptb
;
427 ptb
= &tb
->jmp_next
[n
];
430 /* find tb(n) in circular list */
434 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
435 if (n1
== n
&& tb1
== tb
)
438 ptb
= &tb1
->jmp_first
;
440 ptb
= &tb1
->jmp_next
[n1
];
443 /* now we can suppress tb(n) from the list */
444 *ptb
= tb
->jmp_next
[n
];
446 tb
->jmp_next
[n
] = NULL
;
450 /* reset the jump entry 'n' of a TB so that it is not chained to
452 static inline void tb_reset_jump(TranslationBlock
*tb
, int n
)
454 tb_set_jmp_target(tb
, n
, (unsigned long)(tb
->tc_ptr
+ tb
->tb_next_offset
[n
]));
457 static inline void tb_phys_invalidate(TranslationBlock
*tb
, unsigned int page_addr
)
462 target_ulong phys_pc
;
463 TranslationBlock
*tb1
, *tb2
;
465 /* remove the TB from the hash list */
466 phys_pc
= tb
->page_addr
[0] + (tb
->pc
& ~TARGET_PAGE_MASK
);
467 h
= tb_phys_hash_func(phys_pc
);
468 tb_remove(&tb_phys_hash
[h
], tb
,
469 offsetof(TranslationBlock
, phys_hash_next
));
471 /* remove the TB from the page list */
472 if (tb
->page_addr
[0] != page_addr
) {
473 p
= page_find(tb
->page_addr
[0] >> TARGET_PAGE_BITS
);
474 tb_page_remove(&p
->first_tb
, tb
);
475 invalidate_page_bitmap(p
);
477 if (tb
->page_addr
[1] != -1 && tb
->page_addr
[1] != page_addr
) {
478 p
= page_find(tb
->page_addr
[1] >> TARGET_PAGE_BITS
);
479 tb_page_remove(&p
->first_tb
, tb
);
480 invalidate_page_bitmap(p
);
483 tb_invalidated_flag
= 1;
485 /* remove the TB from the hash list */
486 h
= tb_jmp_cache_hash_func(tb
->pc
);
487 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
488 if (env
->tb_jmp_cache
[h
] == tb
)
489 env
->tb_jmp_cache
[h
] = NULL
;
492 /* suppress this TB from the two jump lists */
493 tb_jmp_remove(tb
, 0);
494 tb_jmp_remove(tb
, 1);
496 /* suppress any remaining jumps to this TB */
502 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
503 tb2
= tb1
->jmp_next
[n1
];
504 tb_reset_jump(tb1
, n1
);
505 tb1
->jmp_next
[n1
] = NULL
;
508 tb
->jmp_first
= (TranslationBlock
*)((long)tb
| 2); /* fail safe */
510 tb_phys_invalidate_count
++;
513 static inline void set_bits(uint8_t *tab
, int start
, int len
)
519 mask
= 0xff << (start
& 7);
520 if ((start
& ~7) == (end
& ~7)) {
522 mask
&= ~(0xff << (end
& 7));
527 start
= (start
+ 8) & ~7;
529 while (start
< end1
) {
534 mask
= ~(0xff << (end
& 7));
540 static void build_page_bitmap(PageDesc
*p
)
542 int n
, tb_start
, tb_end
;
543 TranslationBlock
*tb
;
545 p
->code_bitmap
= qemu_malloc(TARGET_PAGE_SIZE
/ 8);
548 memset(p
->code_bitmap
, 0, TARGET_PAGE_SIZE
/ 8);
553 tb
= (TranslationBlock
*)((long)tb
& ~3);
554 /* NOTE: this is subtle as a TB may span two physical pages */
556 /* NOTE: tb_end may be after the end of the page, but
557 it is not a problem */
558 tb_start
= tb
->pc
& ~TARGET_PAGE_MASK
;
559 tb_end
= tb_start
+ tb
->size
;
560 if (tb_end
> TARGET_PAGE_SIZE
)
561 tb_end
= TARGET_PAGE_SIZE
;
564 tb_end
= ((tb
->pc
+ tb
->size
) & ~TARGET_PAGE_MASK
);
566 set_bits(p
->code_bitmap
, tb_start
, tb_end
- tb_start
);
567 tb
= tb
->page_next
[n
];
571 #ifdef TARGET_HAS_PRECISE_SMC
573 static void tb_gen_code(CPUState
*env
,
574 target_ulong pc
, target_ulong cs_base
, int flags
,
577 TranslationBlock
*tb
;
579 target_ulong phys_pc
, phys_page2
, virt_page2
;
582 phys_pc
= get_phys_addr_code(env
, pc
);
585 /* flush must be done */
587 /* cannot fail at this point */
590 tc_ptr
= code_gen_ptr
;
592 tb
->cs_base
= cs_base
;
595 cpu_gen_code(env
, tb
, CODE_GEN_MAX_SIZE
, &code_gen_size
);
596 code_gen_ptr
= (void *)(((unsigned long)code_gen_ptr
+ code_gen_size
+ CODE_GEN_ALIGN
- 1) & ~(CODE_GEN_ALIGN
- 1));
598 /* check next page if needed */
599 virt_page2
= (pc
+ tb
->size
- 1) & TARGET_PAGE_MASK
;
601 if ((pc
& TARGET_PAGE_MASK
) != virt_page2
) {
602 phys_page2
= get_phys_addr_code(env
, virt_page2
);
604 tb_link_phys(tb
, phys_pc
, phys_page2
);
608 /* invalidate all TBs which intersect with the target physical page
609 starting in range [start;end[. NOTE: start and end must refer to
610 the same physical page. 'is_cpu_write_access' should be true if called
611 from a real cpu write access: the virtual CPU will exit the current
612 TB if code is modified inside this TB. */
613 void tb_invalidate_phys_page_range(target_ulong start
, target_ulong end
,
614 int is_cpu_write_access
)
616 int n
, current_tb_modified
, current_tb_not_found
, current_flags
;
617 CPUState
*env
= cpu_single_env
;
619 TranslationBlock
*tb
, *tb_next
, *current_tb
, *saved_tb
;
620 target_ulong tb_start
, tb_end
;
621 target_ulong current_pc
, current_cs_base
;
623 p
= page_find(start
>> TARGET_PAGE_BITS
);
626 if (!p
->code_bitmap
&&
627 ++p
->code_write_count
>= SMC_BITMAP_USE_THRESHOLD
&&
628 is_cpu_write_access
) {
629 /* build code bitmap */
630 build_page_bitmap(p
);
633 /* we remove all the TBs in the range [start, end[ */
634 /* XXX: see if in some cases it could be faster to invalidate all the code */
635 current_tb_not_found
= is_cpu_write_access
;
636 current_tb_modified
= 0;
637 current_tb
= NULL
; /* avoid warning */
638 current_pc
= 0; /* avoid warning */
639 current_cs_base
= 0; /* avoid warning */
640 current_flags
= 0; /* avoid warning */
644 tb
= (TranslationBlock
*)((long)tb
& ~3);
645 tb_next
= tb
->page_next
[n
];
646 /* NOTE: this is subtle as a TB may span two physical pages */
648 /* NOTE: tb_end may be after the end of the page, but
649 it is not a problem */
650 tb_start
= tb
->page_addr
[0] + (tb
->pc
& ~TARGET_PAGE_MASK
);
651 tb_end
= tb_start
+ tb
->size
;
653 tb_start
= tb
->page_addr
[1];
654 tb_end
= tb_start
+ ((tb
->pc
+ tb
->size
) & ~TARGET_PAGE_MASK
);
656 if (!(tb_end
<= start
|| tb_start
>= end
)) {
657 #ifdef TARGET_HAS_PRECISE_SMC
658 if (current_tb_not_found
) {
659 current_tb_not_found
= 0;
661 if (env
->mem_write_pc
) {
662 /* now we have a real cpu fault */
663 current_tb
= tb_find_pc(env
->mem_write_pc
);
666 if (current_tb
== tb
&&
667 !(current_tb
->cflags
& CF_SINGLE_INSN
)) {
668 /* If we are modifying the current TB, we must stop
669 its execution. We could be more precise by checking
670 that the modification is after the current PC, but it
671 would require a specialized function to partially
672 restore the CPU state */
674 current_tb_modified
= 1;
675 cpu_restore_state(current_tb
, env
,
676 env
->mem_write_pc
, NULL
);
677 #if defined(TARGET_I386)
678 current_flags
= env
->hflags
;
679 current_flags
|= (env
->eflags
& (IOPL_MASK
| TF_MASK
| VM_MASK
));
680 current_cs_base
= (target_ulong
)env
->segs
[R_CS
].base
;
681 current_pc
= current_cs_base
+ env
->eip
;
683 #error unsupported CPU
686 #endif /* TARGET_HAS_PRECISE_SMC */
687 /* we need to do that to handle the case where a signal
688 occurs while doing tb_phys_invalidate() */
691 saved_tb
= env
->current_tb
;
692 env
->current_tb
= NULL
;
694 tb_phys_invalidate(tb
, -1);
696 env
->current_tb
= saved_tb
;
697 if (env
->interrupt_request
&& env
->current_tb
)
698 cpu_interrupt(env
, env
->interrupt_request
);
703 #if !defined(CONFIG_USER_ONLY)
704 /* if no code remaining, no need to continue to use slow writes */
706 invalidate_page_bitmap(p
);
707 if (is_cpu_write_access
) {
708 tlb_unprotect_code_phys(env
, start
, env
->mem_write_vaddr
);
712 #ifdef TARGET_HAS_PRECISE_SMC
713 if (current_tb_modified
) {
714 /* we generate a block containing just the instruction
715 modifying the memory. It will ensure that it cannot modify
717 env
->current_tb
= NULL
;
718 tb_gen_code(env
, current_pc
, current_cs_base
, current_flags
,
720 cpu_resume_from_signal(env
, NULL
);
725 /* len must be <= 8 and start must be a multiple of len */
726 static inline void tb_invalidate_phys_page_fast(target_ulong start
, int len
)
733 fprintf(logfile
, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
734 cpu_single_env
->mem_write_vaddr
, len
,
736 cpu_single_env
->eip
+ (long)cpu_single_env
->segs
[R_CS
].base
);
740 p
= page_find(start
>> TARGET_PAGE_BITS
);
743 if (p
->code_bitmap
) {
744 offset
= start
& ~TARGET_PAGE_MASK
;
745 b
= p
->code_bitmap
[offset
>> 3] >> (offset
& 7);
746 if (b
& ((1 << len
) - 1))
750 tb_invalidate_phys_page_range(start
, start
+ len
, 1);
754 #if !defined(CONFIG_SOFTMMU)
755 static void tb_invalidate_phys_page(target_ulong addr
,
756 unsigned long pc
, void *puc
)
758 int n
, current_flags
, current_tb_modified
;
759 target_ulong current_pc
, current_cs_base
;
761 TranslationBlock
*tb
, *current_tb
;
762 #ifdef TARGET_HAS_PRECISE_SMC
763 CPUState
*env
= cpu_single_env
;
766 addr
&= TARGET_PAGE_MASK
;
767 p
= page_find(addr
>> TARGET_PAGE_BITS
);
771 current_tb_modified
= 0;
773 current_pc
= 0; /* avoid warning */
774 current_cs_base
= 0; /* avoid warning */
775 current_flags
= 0; /* avoid warning */
776 #ifdef TARGET_HAS_PRECISE_SMC
778 current_tb
= tb_find_pc(pc
);
783 tb
= (TranslationBlock
*)((long)tb
& ~3);
784 #ifdef TARGET_HAS_PRECISE_SMC
785 if (current_tb
== tb
&&
786 !(current_tb
->cflags
& CF_SINGLE_INSN
)) {
787 /* If we are modifying the current TB, we must stop
788 its execution. We could be more precise by checking
789 that the modification is after the current PC, but it
790 would require a specialized function to partially
791 restore the CPU state */
793 current_tb_modified
= 1;
794 cpu_restore_state(current_tb
, env
, pc
, puc
);
795 #if defined(TARGET_I386)
796 current_flags
= env
->hflags
;
797 current_flags
|= (env
->eflags
& (IOPL_MASK
| TF_MASK
| VM_MASK
));
798 current_cs_base
= (target_ulong
)env
->segs
[R_CS
].base
;
799 current_pc
= current_cs_base
+ env
->eip
;
801 #error unsupported CPU
804 #endif /* TARGET_HAS_PRECISE_SMC */
805 tb_phys_invalidate(tb
, addr
);
806 tb
= tb
->page_next
[n
];
809 #ifdef TARGET_HAS_PRECISE_SMC
810 if (current_tb_modified
) {
811 /* we generate a block containing just the instruction
812 modifying the memory. It will ensure that it cannot modify
814 env
->current_tb
= NULL
;
815 tb_gen_code(env
, current_pc
, current_cs_base
, current_flags
,
817 cpu_resume_from_signal(env
, puc
);
823 /* add the tb in the target page and protect it if necessary */
824 static inline void tb_alloc_page(TranslationBlock
*tb
,
825 unsigned int n
, target_ulong page_addr
)
828 TranslationBlock
*last_first_tb
;
830 tb
->page_addr
[n
] = page_addr
;
831 p
= page_find_alloc(page_addr
>> TARGET_PAGE_BITS
);
832 tb
->page_next
[n
] = p
->first_tb
;
833 last_first_tb
= p
->first_tb
;
834 p
->first_tb
= (TranslationBlock
*)((long)tb
| n
);
835 invalidate_page_bitmap(p
);
837 #if defined(TARGET_HAS_SMC) || 1
839 #if defined(CONFIG_USER_ONLY)
840 if (p
->flags
& PAGE_WRITE
) {
845 /* force the host page as non writable (writes will have a
846 page fault + mprotect overhead) */
847 page_addr
&= qemu_host_page_mask
;
849 for(addr
= page_addr
; addr
< page_addr
+ qemu_host_page_size
;
850 addr
+= TARGET_PAGE_SIZE
) {
852 p2
= page_find (addr
>> TARGET_PAGE_BITS
);
856 p2
->flags
&= ~PAGE_WRITE
;
857 page_get_flags(addr
);
859 mprotect(g2h(page_addr
), qemu_host_page_size
,
860 (prot
& PAGE_BITS
) & ~PAGE_WRITE
);
861 #ifdef DEBUG_TB_INVALIDATE
862 printf("protecting code page: 0x%08lx\n",
867 /* if some code is already present, then the pages are already
868 protected. So we handle the case where only the first TB is
869 allocated in a physical page */
870 if (!last_first_tb
) {
871 tlb_protect_code(page_addr
);
875 #endif /* TARGET_HAS_SMC */
878 /* Allocate a new translation block. Flush the translation buffer if
879 too many translation blocks or too much generated code. */
880 TranslationBlock
*tb_alloc(target_ulong pc
)
882 TranslationBlock
*tb
;
884 if (nb_tbs
>= CODE_GEN_MAX_BLOCKS
||
885 (code_gen_ptr
- code_gen_buffer
) >= CODE_GEN_BUFFER_MAX_SIZE
)
893 /* add a new TB and link it to the physical page tables. phys_page2 is
894 (-1) to indicate that only one page contains the TB. */
895 void tb_link_phys(TranslationBlock
*tb
,
896 target_ulong phys_pc
, target_ulong phys_page2
)
899 TranslationBlock
**ptb
;
901 /* add in the physical hash table */
902 h
= tb_phys_hash_func(phys_pc
);
903 ptb
= &tb_phys_hash
[h
];
904 tb
->phys_hash_next
= *ptb
;
907 /* add in the page list */
908 tb_alloc_page(tb
, 0, phys_pc
& TARGET_PAGE_MASK
);
909 if (phys_page2
!= -1)
910 tb_alloc_page(tb
, 1, phys_page2
);
912 tb
->page_addr
[1] = -1;
914 tb
->jmp_first
= (TranslationBlock
*)((long)tb
| 2);
915 tb
->jmp_next
[0] = NULL
;
916 tb
->jmp_next
[1] = NULL
;
918 tb
->cflags
&= ~CF_FP_USED
;
919 if (tb
->cflags
& CF_TB_FP_USED
)
920 tb
->cflags
|= CF_FP_USED
;
923 /* init original jump addresses */
924 if (tb
->tb_next_offset
[0] != 0xffff)
925 tb_reset_jump(tb
, 0);
926 if (tb
->tb_next_offset
[1] != 0xffff)
927 tb_reset_jump(tb
, 1);
929 #ifdef DEBUG_TB_CHECK
934 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
935 tb[1].tc_ptr. Return NULL if not found */
936 TranslationBlock
*tb_find_pc(unsigned long tc_ptr
)
940 TranslationBlock
*tb
;
944 if (tc_ptr
< (unsigned long)code_gen_buffer
||
945 tc_ptr
>= (unsigned long)code_gen_ptr
)
947 /* binary search (cf Knuth) */
950 while (m_min
<= m_max
) {
951 m
= (m_min
+ m_max
) >> 1;
953 v
= (unsigned long)tb
->tc_ptr
;
956 else if (tc_ptr
< v
) {
965 static void tb_reset_jump_recursive(TranslationBlock
*tb
);
967 static inline void tb_reset_jump_recursive2(TranslationBlock
*tb
, int n
)
969 TranslationBlock
*tb1
, *tb_next
, **ptb
;
972 tb1
= tb
->jmp_next
[n
];
974 /* find head of list */
977 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
980 tb1
= tb1
->jmp_next
[n1
];
982 /* we are now sure now that tb jumps to tb1 */
985 /* remove tb from the jmp_first list */
986 ptb
= &tb_next
->jmp_first
;
990 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
991 if (n1
== n
&& tb1
== tb
)
993 ptb
= &tb1
->jmp_next
[n1
];
995 *ptb
= tb
->jmp_next
[n
];
996 tb
->jmp_next
[n
] = NULL
;
998 /* suppress the jump to next tb in generated code */
999 tb_reset_jump(tb
, n
);
1001 /* suppress jumps in the tb on which we could have jumped */
1002 tb_reset_jump_recursive(tb_next
);
1006 static void tb_reset_jump_recursive(TranslationBlock
*tb
)
1008 tb_reset_jump_recursive2(tb
, 0);
1009 tb_reset_jump_recursive2(tb
, 1);
1012 #if defined(TARGET_HAS_ICE)
1013 static void breakpoint_invalidate(CPUState
*env
, target_ulong pc
)
1015 target_ulong addr
, pd
;
1016 ram_addr_t ram_addr
;
1019 addr
= cpu_get_phys_page_debug(env
, pc
);
1020 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
1022 pd
= IO_MEM_UNASSIGNED
;
1024 pd
= p
->phys_offset
;
1026 ram_addr
= (pd
& TARGET_PAGE_MASK
) | (pc
& ~TARGET_PAGE_MASK
);
1027 tb_invalidate_phys_page_range(ram_addr
, ram_addr
+ 1, 0);
1031 /* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1032 breakpoint is reached */
1033 int cpu_breakpoint_insert(CPUState
*env
, target_ulong pc
)
1035 #if defined(TARGET_HAS_ICE)
1038 for(i
= 0; i
< env
->nb_breakpoints
; i
++) {
1039 if (env
->breakpoints
[i
] == pc
)
1043 if (env
->nb_breakpoints
>= MAX_BREAKPOINTS
)
1045 env
->breakpoints
[env
->nb_breakpoints
++] = pc
;
1047 breakpoint_invalidate(env
, pc
);
1054 /* remove a breakpoint */
1055 int cpu_breakpoint_remove(CPUState
*env
, target_ulong pc
)
1057 #if defined(TARGET_HAS_ICE)
1059 for(i
= 0; i
< env
->nb_breakpoints
; i
++) {
1060 if (env
->breakpoints
[i
] == pc
)
1065 env
->nb_breakpoints
--;
1066 if (i
< env
->nb_breakpoints
)
1067 env
->breakpoints
[i
] = env
->breakpoints
[env
->nb_breakpoints
];
1069 breakpoint_invalidate(env
, pc
);
1076 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1077 CPU loop after each instruction */
1078 void cpu_single_step(CPUState
*env
, int enabled
)
1080 #if defined(TARGET_HAS_ICE)
1081 if (env
->singlestep_enabled
!= enabled
) {
1082 env
->singlestep_enabled
= enabled
;
1083 /* must flush all the translated code to avoid inconsistancies */
1084 /* XXX: only flush what is necessary */
1090 /* enable or disable low levels log */
1091 void cpu_set_log(int log_flags
)
1093 loglevel
= log_flags
;
1094 if (loglevel
&& !logfile
) {
1095 logfile
= fopen(logfilename
, "w");
1097 perror(logfilename
);
1100 #if !defined(CONFIG_SOFTMMU)
1101 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1103 static uint8_t logfile_buf
[4096];
1104 setvbuf(logfile
, logfile_buf
, _IOLBF
, sizeof(logfile_buf
));
1107 setvbuf(logfile
, NULL
, _IOLBF
, 0);
1112 void cpu_set_log_filename(const char *filename
)
1114 logfilename
= strdup(filename
);
1117 /* mask must never be zero, except for A20 change call */
1118 void cpu_interrupt(CPUState
*env
, int mask
)
1120 TranslationBlock
*tb
;
1121 static int interrupt_lock
;
1123 env
->interrupt_request
|= mask
;
1124 /* if the cpu is currently executing code, we must unlink it and
1125 all the potentially executing TB */
1126 tb
= env
->current_tb
;
1127 if (tb
&& !testandset(&interrupt_lock
)) {
1128 env
->current_tb
= NULL
;
1129 tb_reset_jump_recursive(tb
);
1134 void cpu_reset_interrupt(CPUState
*env
, int mask
)
1136 env
->interrupt_request
&= ~mask
;
1139 CPULogItem cpu_log_items
[] = {
1140 { CPU_LOG_TB_OUT_ASM
, "out_asm",
1141 "show generated host assembly code for each compiled TB" },
1142 { CPU_LOG_TB_IN_ASM
, "in_asm",
1143 "show target assembly code for each compiled TB" },
1144 { CPU_LOG_TB_OP
, "op",
1145 "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
1147 { CPU_LOG_TB_OP_OPT
, "op_opt",
1148 "show micro ops after optimization for each compiled TB" },
1150 { CPU_LOG_INT
, "int",
1151 "show interrupts/exceptions in short format" },
1152 { CPU_LOG_EXEC
, "exec",
1153 "show trace before each executed TB (lots of logs)" },
1154 { CPU_LOG_TB_CPU
, "cpu",
1155 "show CPU state before bloc translation" },
1157 { CPU_LOG_PCALL
, "pcall",
1158 "show protected mode far calls/returns/exceptions" },
1161 { CPU_LOG_IOPORT
, "ioport",
1162 "show all i/o ports accesses" },
/* Compare the first 'n' characters of 's1' against the NUL-terminated
   string 's2'. Returns 1 only when 's2' is exactly 'n' characters long
   and matches; 0 otherwise.
   NOTE(review): the length guard must return 0 — without it a shorter
   's2' would be memcmp'ed past its terminator. */
static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}
1174 /* takes a comma separated list of log masks. Return 0 if error. */
1175 int cpu_str_to_log_mask(const char *str
)
1184 p1
= strchr(p
, ',');
1187 if(cmp1(p
,p1
-p
,"all")) {
1188 for(item
= cpu_log_items
; item
->mask
!= 0; item
++) {
1192 for(item
= cpu_log_items
; item
->mask
!= 0; item
++) {
1193 if (cmp1(p
, p1
- p
, item
->name
))
1207 void cpu_abort(CPUState
*env
, const char *fmt
, ...)
1212 fprintf(stderr
, "qemu: fatal: ");
1213 vfprintf(stderr
, fmt
, ap
);
1214 fprintf(stderr
, "\n");
1216 cpu_dump_state(env
, stderr
, fprintf
, X86_DUMP_FPU
| X86_DUMP_CCOP
);
1218 cpu_dump_state(env
, stderr
, fprintf
, 0);
1224 #if !defined(CONFIG_USER_ONLY)
1226 /* NOTE: if flush_global is true, also flush global entries (not
1228 void tlb_flush(CPUState
*env
, int flush_global
)
1232 #if defined(DEBUG_TLB)
1233 printf("tlb_flush:\n");
1235 /* must reset current TB so that interrupts cannot modify the
1236 links while we are modifying them */
1237 env
->current_tb
= NULL
;
1239 for(i
= 0; i
< CPU_TLB_SIZE
; i
++) {
1240 env
->tlb_table
[0][i
].addr_read
= -1;
1241 env
->tlb_table
[0][i
].addr_write
= -1;
1242 env
->tlb_table
[0][i
].addr_code
= -1;
1243 env
->tlb_table
[1][i
].addr_read
= -1;
1244 env
->tlb_table
[1][i
].addr_write
= -1;
1245 env
->tlb_table
[1][i
].addr_code
= -1;
1248 memset (env
->tb_jmp_cache
, 0, TB_JMP_CACHE_SIZE
* sizeof (void *));
1250 #if !defined(CONFIG_SOFTMMU)
1251 munmap((void *)MMAP_AREA_START
, MMAP_AREA_END
- MMAP_AREA_START
);
1254 if (env
->kqemu_enabled
) {
1255 kqemu_flush(env
, flush_global
);
1261 static inline void tlb_flush_entry(CPUTLBEntry
*tlb_entry
, target_ulong addr
)
1263 if (addr
== (tlb_entry
->addr_read
&
1264 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
)) ||
1265 addr
== (tlb_entry
->addr_write
&
1266 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
)) ||
1267 addr
== (tlb_entry
->addr_code
&
1268 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
))) {
1269 tlb_entry
->addr_read
= -1;
1270 tlb_entry
->addr_write
= -1;
1271 tlb_entry
->addr_code
= -1;
1275 void tlb_flush_page(CPUState
*env
, target_ulong addr
)
1278 TranslationBlock
*tb
;
1280 #if defined(DEBUG_TLB)
1281 printf("tlb_flush_page: " TARGET_FMT_lx
"\n", addr
);
1283 /* must reset current TB so that interrupts cannot modify the
1284 links while we are modifying them */
1285 env
->current_tb
= NULL
;
1287 addr
&= TARGET_PAGE_MASK
;
1288 i
= (addr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
1289 tlb_flush_entry(&env
->tlb_table
[0][i
], addr
);
1290 tlb_flush_entry(&env
->tlb_table
[1][i
], addr
);
1292 /* Discard jump cache entries for any tb which might potentially
1293 overlap the flushed page. */
1294 i
= tb_jmp_cache_hash_page(addr
- TARGET_PAGE_SIZE
);
1295 memset (&env
->tb_jmp_cache
[i
], 0, TB_JMP_PAGE_SIZE
* sizeof(tb
));
1297 i
= tb_jmp_cache_hash_page(addr
);
1298 memset (&env
->tb_jmp_cache
[i
], 0, TB_JMP_PAGE_SIZE
* sizeof(tb
));
1300 #if !defined(CONFIG_SOFTMMU)
1301 if (addr
< MMAP_AREA_END
)
1302 munmap((void *)addr
, TARGET_PAGE_SIZE
);
1305 if (env
->kqemu_enabled
) {
1306 kqemu_flush_page(env
, addr
);
1311 /* update the TLBs so that writes to code in the virtual page 'addr'
1313 static void tlb_protect_code(ram_addr_t ram_addr
)
1315 cpu_physical_memory_reset_dirty(ram_addr
,
1316 ram_addr
+ TARGET_PAGE_SIZE
,
1320 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1321 tested for self modifying code */
1322 static void tlb_unprotect_code_phys(CPUState
*env
, ram_addr_t ram_addr
,
1325 phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
] |= CODE_DIRTY_FLAG
;
1328 static inline void tlb_reset_dirty_range(CPUTLBEntry
*tlb_entry
,
1329 unsigned long start
, unsigned long length
)
1332 if ((tlb_entry
->addr_write
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
) {
1333 addr
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) + tlb_entry
->addend
;
1334 if ((addr
- start
) < length
) {
1335 tlb_entry
->addr_write
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) | IO_MEM_NOTDIRTY
;
1340 void cpu_physical_memory_reset_dirty(ram_addr_t start
, ram_addr_t end
,
1344 unsigned long length
, start1
;
1348 start
&= TARGET_PAGE_MASK
;
1349 end
= TARGET_PAGE_ALIGN(end
);
1351 length
= end
- start
;
1354 len
= length
>> TARGET_PAGE_BITS
;
1356 /* XXX: should not depend on cpu context */
1358 if (env
->kqemu_enabled
) {
1361 for(i
= 0; i
< len
; i
++) {
1362 kqemu_set_notdirty(env
, addr
);
1363 addr
+= TARGET_PAGE_SIZE
;
1367 mask
= ~dirty_flags
;
1368 p
= phys_ram_dirty
+ (start
>> TARGET_PAGE_BITS
);
1369 for(i
= 0; i
< len
; i
++)
1372 /* we modify the TLB cache so that the dirty bit will be set again
1373 when accessing the range */
1374 start1
= start
+ (unsigned long)phys_ram_base
;
1375 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
1376 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1377 tlb_reset_dirty_range(&env
->tlb_table
[0][i
], start1
, length
);
1378 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1379 tlb_reset_dirty_range(&env
->tlb_table
[1][i
], start1
, length
);
1382 #if !defined(CONFIG_SOFTMMU)
1383 /* XXX: this is expensive */
1389 for(i
= 0; i
< L1_SIZE
; i
++) {
1392 addr
= i
<< (TARGET_PAGE_BITS
+ L2_BITS
);
1393 for(j
= 0; j
< L2_SIZE
; j
++) {
1394 if (p
->valid_tag
== virt_valid_tag
&&
1395 p
->phys_addr
>= start
&& p
->phys_addr
< end
&&
1396 (p
->prot
& PROT_WRITE
)) {
1397 if (addr
< MMAP_AREA_END
) {
1398 mprotect((void *)addr
, TARGET_PAGE_SIZE
,
1399 p
->prot
& ~PROT_WRITE
);
1402 addr
+= TARGET_PAGE_SIZE
;
1411 static inline void tlb_update_dirty(CPUTLBEntry
*tlb_entry
)
1413 ram_addr_t ram_addr
;
1415 if ((tlb_entry
->addr_write
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
) {
1416 ram_addr
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) +
1417 tlb_entry
->addend
- (unsigned long)phys_ram_base
;
1418 if (!cpu_physical_memory_is_dirty(ram_addr
)) {
1419 tlb_entry
->addr_write
|= IO_MEM_NOTDIRTY
;
1424 /* update the TLB according to the current state of the dirty bits */
1425 void cpu_tlb_update_dirty(CPUState
*env
)
1428 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1429 tlb_update_dirty(&env
->tlb_table
[0][i
]);
1430 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1431 tlb_update_dirty(&env
->tlb_table
[1][i
]);
1434 static inline void tlb_set_dirty1(CPUTLBEntry
*tlb_entry
,
1435 unsigned long start
)
1438 if ((tlb_entry
->addr_write
& ~TARGET_PAGE_MASK
) == IO_MEM_NOTDIRTY
) {
1439 addr
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) + tlb_entry
->addend
;
1440 if (addr
== start
) {
1441 tlb_entry
->addr_write
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) | IO_MEM_RAM
;
1446 /* update the TLB corresponding to virtual page vaddr and phys addr
1447 addr so that it is no longer dirty */
1448 static inline void tlb_set_dirty(CPUState
*env
,
1449 unsigned long addr
, target_ulong vaddr
)
1453 addr
&= TARGET_PAGE_MASK
;
1454 i
= (vaddr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
1455 tlb_set_dirty1(&env
->tlb_table
[0][i
], addr
);
1456 tlb_set_dirty1(&env
->tlb_table
[1][i
], addr
);
1459 /* add a new TLB entry. At most one entry for a given virtual address
1460 is permitted. Return 0 if OK or 2 if the page could not be mapped
1461 (can only happen in non SOFTMMU mode for I/O pages or pages
1462 conflicting with the host address space). */
1463 int tlb_set_page_exec(CPUState
*env
, target_ulong vaddr
,
1464 target_phys_addr_t paddr
, int prot
,
1465 int is_user
, int is_softmmu
)
1470 target_ulong address
;
1471 target_phys_addr_t addend
;
1475 p
= phys_page_find(paddr
>> TARGET_PAGE_BITS
);
1477 pd
= IO_MEM_UNASSIGNED
;
1479 pd
= p
->phys_offset
;
1481 #if defined(DEBUG_TLB)
1482 printf("tlb_set_page: vaddr=" TARGET_FMT_lx
" paddr=0x%08x prot=%x u=%d smmu=%d pd=0x%08lx\n",
1483 vaddr
, (int)paddr
, prot
, is_user
, is_softmmu
, pd
);
1487 #if !defined(CONFIG_SOFTMMU)
1491 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
&& !(pd
& IO_MEM_ROMD
)) {
1492 /* IO memory case */
1493 address
= vaddr
| pd
;
1496 /* standard memory */
1498 addend
= (unsigned long)phys_ram_base
+ (pd
& TARGET_PAGE_MASK
);
1501 index
= (vaddr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
1503 te
= &env
->tlb_table
[is_user
][index
];
1504 te
->addend
= addend
;
1505 if (prot
& PAGE_READ
) {
1506 te
->addr_read
= address
;
1510 if (prot
& PAGE_EXEC
) {
1511 te
->addr_code
= address
;
1515 if (prot
& PAGE_WRITE
) {
1516 if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_ROM
||
1517 (pd
& IO_MEM_ROMD
)) {
1518 /* write access calls the I/O callback */
1519 te
->addr_write
= vaddr
|
1520 (pd
& ~(TARGET_PAGE_MASK
| IO_MEM_ROMD
));
1521 } else if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
&&
1522 !cpu_physical_memory_is_dirty(pd
)) {
1523 te
->addr_write
= vaddr
| IO_MEM_NOTDIRTY
;
1525 te
->addr_write
= address
;
1528 te
->addr_write
= -1;
1531 #if !defined(CONFIG_SOFTMMU)
1533 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
) {
1534 /* IO access: no mapping is done as it will be handled by the
1536 if (!(env
->hflags
& HF_SOFTMMU_MASK
))
1541 if (vaddr
>= MMAP_AREA_END
) {
1544 if (prot
& PROT_WRITE
) {
1545 if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_ROM
||
1546 #if defined(TARGET_HAS_SMC) || 1
1549 ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
&&
1550 !cpu_physical_memory_is_dirty(pd
))) {
1551 /* ROM: we do as if code was inside */
1552 /* if code is present, we only map as read only and save the
1556 vp
= virt_page_find_alloc(vaddr
>> TARGET_PAGE_BITS
, 1);
1559 vp
->valid_tag
= virt_valid_tag
;
1560 prot
&= ~PAGE_WRITE
;
1563 map_addr
= mmap((void *)vaddr
, TARGET_PAGE_SIZE
, prot
,
1564 MAP_SHARED
| MAP_FIXED
, phys_ram_fd
, (pd
& TARGET_PAGE_MASK
));
1565 if (map_addr
== MAP_FAILED
) {
1566 cpu_abort(env
, "mmap failed when mapped physical address 0x%08x to virtual address 0x%08x\n",
1576 /* called from signal handler: invalidate the code and unprotect the
1577 page. Return TRUE if the fault was succesfully handled. */
1578 int page_unprotect(target_ulong addr
, unsigned long pc
, void *puc
)
1580 #if !defined(CONFIG_SOFTMMU)
1583 #if defined(DEBUG_TLB)
1584 printf("page_unprotect: addr=0x%08x\n", addr
);
1586 addr
&= TARGET_PAGE_MASK
;
1588 /* if it is not mapped, no need to worry here */
1589 if (addr
>= MMAP_AREA_END
)
1591 vp
= virt_page_find(addr
>> TARGET_PAGE_BITS
);
1594 /* NOTE: in this case, validate_tag is _not_ tested as it
1595 validates only the code TLB */
1596 if (vp
->valid_tag
!= virt_valid_tag
)
1598 if (!(vp
->prot
& PAGE_WRITE
))
1600 #if defined(DEBUG_TLB)
1601 printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
1602 addr
, vp
->phys_addr
, vp
->prot
);
1604 if (mprotect((void *)addr
, TARGET_PAGE_SIZE
, vp
->prot
) < 0)
1605 cpu_abort(cpu_single_env
, "error mprotect addr=0x%lx prot=%d\n",
1606 (unsigned long)addr
, vp
->prot
);
1607 /* set the dirty bit */
1608 phys_ram_dirty
[vp
->phys_addr
>> TARGET_PAGE_BITS
] = 0xff;
1609 /* flush the code inside */
1610 tb_invalidate_phys_page(vp
->phys_addr
, pc
, puc
);
1619 void tlb_flush(CPUState
*env
, int flush_global
)
1623 void tlb_flush_page(CPUState
*env
, target_ulong addr
)
1627 int tlb_set_page_exec(CPUState
*env
, target_ulong vaddr
,
1628 target_phys_addr_t paddr
, int prot
,
1629 int is_user
, int is_softmmu
)
1634 /* dump memory mappings */
1635 void page_dump(FILE *f
)
1637 unsigned long start
, end
;
1638 int i
, j
, prot
, prot1
;
1641 fprintf(f
, "%-8s %-8s %-8s %s\n",
1642 "start", "end", "size", "prot");
1646 for(i
= 0; i
<= L1_SIZE
; i
++) {
1651 for(j
= 0;j
< L2_SIZE
; j
++) {
1656 if (prot1
!= prot
) {
1657 end
= (i
<< (32 - L1_BITS
)) | (j
<< TARGET_PAGE_BITS
);
1659 fprintf(f
, "%08lx-%08lx %08lx %c%c%c\n",
1660 start
, end
, end
- start
,
1661 prot
& PAGE_READ
? 'r' : '-',
1662 prot
& PAGE_WRITE
? 'w' : '-',
1663 prot
& PAGE_EXEC
? 'x' : '-');
1677 int page_get_flags(target_ulong address
)
1681 p
= page_find(address
>> TARGET_PAGE_BITS
);
1687 /* modify the flags of a page and invalidate the code if
1688 necessary. The flag PAGE_WRITE_ORG is positionned automatically
1689 depending on PAGE_WRITE */
1690 void page_set_flags(target_ulong start
, target_ulong end
, int flags
)
1695 start
= start
& TARGET_PAGE_MASK
;
1696 end
= TARGET_PAGE_ALIGN(end
);
1697 if (flags
& PAGE_WRITE
)
1698 flags
|= PAGE_WRITE_ORG
;
1699 spin_lock(&tb_lock
);
1700 for(addr
= start
; addr
< end
; addr
+= TARGET_PAGE_SIZE
) {
1701 p
= page_find_alloc(addr
>> TARGET_PAGE_BITS
);
1702 /* if the write protection is set, then we invalidate the code
1704 if (!(p
->flags
& PAGE_WRITE
) &&
1705 (flags
& PAGE_WRITE
) &&
1707 tb_invalidate_phys_page(addr
, 0, NULL
);
1711 spin_unlock(&tb_lock
);
1714 /* called from signal handler: invalidate the code and unprotect the
1715 page. Return TRUE if the fault was succesfully handled. */
1716 int page_unprotect(target_ulong address
, unsigned long pc
, void *puc
)
1718 unsigned int page_index
, prot
, pindex
;
1720 target_ulong host_start
, host_end
, addr
;
1722 host_start
= address
& qemu_host_page_mask
;
1723 page_index
= host_start
>> TARGET_PAGE_BITS
;
1724 p1
= page_find(page_index
);
1727 host_end
= host_start
+ qemu_host_page_size
;
1730 for(addr
= host_start
;addr
< host_end
; addr
+= TARGET_PAGE_SIZE
) {
1734 /* if the page was really writable, then we change its
1735 protection back to writable */
1736 if (prot
& PAGE_WRITE_ORG
) {
1737 pindex
= (address
- host_start
) >> TARGET_PAGE_BITS
;
1738 if (!(p1
[pindex
].flags
& PAGE_WRITE
)) {
1739 mprotect((void *)g2h(host_start
), qemu_host_page_size
,
1740 (prot
& PAGE_BITS
) | PAGE_WRITE
);
1741 p1
[pindex
].flags
|= PAGE_WRITE
;
1742 /* and since the content will be modified, we must invalidate
1743 the corresponding translated code. */
1744 tb_invalidate_phys_page(address
, pc
, puc
);
1745 #ifdef DEBUG_TB_CHECK
1746 tb_invalidate_check(address
);
1754 /* call this function when system calls directly modify a memory area */
1755 /* ??? This should be redundant now we have lock_user. */
1756 void page_unprotect_range(target_ulong data
, target_ulong data_size
)
1758 target_ulong start
, end
, addr
;
1761 end
= start
+ data_size
;
1762 start
&= TARGET_PAGE_MASK
;
1763 end
= TARGET_PAGE_ALIGN(end
);
1764 for(addr
= start
; addr
< end
; addr
+= TARGET_PAGE_SIZE
) {
1765 page_unprotect(addr
, 0, NULL
);
1769 static inline void tlb_set_dirty(CPUState
*env
,
1770 unsigned long addr
, target_ulong vaddr
)
1773 #endif /* defined(CONFIG_USER_ONLY) */
1775 /* register physical memory. 'size' must be a multiple of the target
1776 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
1778 void cpu_register_physical_memory(target_phys_addr_t start_addr
,
1780 unsigned long phys_offset
)
1782 target_phys_addr_t addr
, end_addr
;
1786 size
= (size
+ TARGET_PAGE_SIZE
- 1) & TARGET_PAGE_MASK
;
1787 end_addr
= start_addr
+ size
;
1788 for(addr
= start_addr
; addr
!= end_addr
; addr
+= TARGET_PAGE_SIZE
) {
1789 p
= phys_page_find_alloc(addr
>> TARGET_PAGE_BITS
, 1);
1790 p
->phys_offset
= phys_offset
;
1791 if ((phys_offset
& ~TARGET_PAGE_MASK
) <= IO_MEM_ROM
||
1792 (phys_offset
& IO_MEM_ROMD
))
1793 phys_offset
+= TARGET_PAGE_SIZE
;
1796 /* since each CPU stores ram addresses in its TLB cache, we must
1797 reset the modified entries */
1799 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
1804 /* XXX: temporary until new memory mapping API */
1805 uint32_t cpu_get_physical_page_desc(target_phys_addr_t addr
)
1809 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
1811 return IO_MEM_UNASSIGNED
;
1812 return p
->phys_offset
;
1815 static uint32_t unassigned_mem_readb(void *opaque
, target_phys_addr_t addr
)
1817 #ifdef DEBUG_UNASSIGNED
1818 printf("Unassigned mem read 0x%08x\n", (int)addr
);
1823 static void unassigned_mem_writeb(void *opaque
, target_phys_addr_t addr
, uint32_t val
)
1825 #ifdef DEBUG_UNASSIGNED
1826 printf("Unassigned mem write 0x%08x = 0x%x\n", (int)addr
, val
);
1830 static CPUReadMemoryFunc
*unassigned_mem_read
[3] = {
1831 unassigned_mem_readb
,
1832 unassigned_mem_readb
,
1833 unassigned_mem_readb
,
1836 static CPUWriteMemoryFunc
*unassigned_mem_write
[3] = {
1837 unassigned_mem_writeb
,
1838 unassigned_mem_writeb
,
1839 unassigned_mem_writeb
,
1842 static void notdirty_mem_writeb(void *opaque
, target_phys_addr_t addr
, uint32_t val
)
1844 unsigned long ram_addr
;
1846 ram_addr
= addr
- (unsigned long)phys_ram_base
;
1847 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
1848 if (!(dirty_flags
& CODE_DIRTY_FLAG
)) {
1849 #if !defined(CONFIG_USER_ONLY)
1850 tb_invalidate_phys_page_fast(ram_addr
, 1);
1851 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
1854 stb_p((uint8_t *)(long)addr
, val
);
1856 if (cpu_single_env
->kqemu_enabled
&&
1857 (dirty_flags
& KQEMU_MODIFY_PAGE_MASK
) != KQEMU_MODIFY_PAGE_MASK
)
1858 kqemu_modify_page(cpu_single_env
, ram_addr
);
1860 dirty_flags
|= (0xff & ~CODE_DIRTY_FLAG
);
1861 phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
] = dirty_flags
;
1862 /* we remove the notdirty callback only if the code has been
1864 if (dirty_flags
== 0xff)
1865 tlb_set_dirty(cpu_single_env
, addr
, cpu_single_env
->mem_write_vaddr
);
1868 static void notdirty_mem_writew(void *opaque
, target_phys_addr_t addr
, uint32_t val
)
1870 unsigned long ram_addr
;
1872 ram_addr
= addr
- (unsigned long)phys_ram_base
;
1873 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
1874 if (!(dirty_flags
& CODE_DIRTY_FLAG
)) {
1875 #if !defined(CONFIG_USER_ONLY)
1876 tb_invalidate_phys_page_fast(ram_addr
, 2);
1877 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
1880 stw_p((uint8_t *)(long)addr
, val
);
1882 if (cpu_single_env
->kqemu_enabled
&&
1883 (dirty_flags
& KQEMU_MODIFY_PAGE_MASK
) != KQEMU_MODIFY_PAGE_MASK
)
1884 kqemu_modify_page(cpu_single_env
, ram_addr
);
1886 dirty_flags
|= (0xff & ~CODE_DIRTY_FLAG
);
1887 phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
] = dirty_flags
;
1888 /* we remove the notdirty callback only if the code has been
1890 if (dirty_flags
== 0xff)
1891 tlb_set_dirty(cpu_single_env
, addr
, cpu_single_env
->mem_write_vaddr
);
1894 static void notdirty_mem_writel(void *opaque
, target_phys_addr_t addr
, uint32_t val
)
1896 unsigned long ram_addr
;
1898 ram_addr
= addr
- (unsigned long)phys_ram_base
;
1899 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
1900 if (!(dirty_flags
& CODE_DIRTY_FLAG
)) {
1901 #if !defined(CONFIG_USER_ONLY)
1902 tb_invalidate_phys_page_fast(ram_addr
, 4);
1903 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
1906 stl_p((uint8_t *)(long)addr
, val
);
1908 if (cpu_single_env
->kqemu_enabled
&&
1909 (dirty_flags
& KQEMU_MODIFY_PAGE_MASK
) != KQEMU_MODIFY_PAGE_MASK
)
1910 kqemu_modify_page(cpu_single_env
, ram_addr
);
1912 dirty_flags
|= (0xff & ~CODE_DIRTY_FLAG
);
1913 phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
] = dirty_flags
;
1914 /* we remove the notdirty callback only if the code has been
1916 if (dirty_flags
== 0xff)
1917 tlb_set_dirty(cpu_single_env
, addr
, cpu_single_env
->mem_write_vaddr
);
1920 static CPUReadMemoryFunc
*error_mem_read
[3] = {
1921 NULL
, /* never used */
1922 NULL
, /* never used */
1923 NULL
, /* never used */
1926 static CPUWriteMemoryFunc
*notdirty_mem_write
[3] = {
1927 notdirty_mem_writeb
,
1928 notdirty_mem_writew
,
1929 notdirty_mem_writel
,
1932 static void io_mem_init(void)
1934 cpu_register_io_memory(IO_MEM_ROM
>> IO_MEM_SHIFT
, error_mem_read
, unassigned_mem_write
, NULL
);
1935 cpu_register_io_memory(IO_MEM_UNASSIGNED
>> IO_MEM_SHIFT
, unassigned_mem_read
, unassigned_mem_write
, NULL
);
1936 cpu_register_io_memory(IO_MEM_NOTDIRTY
>> IO_MEM_SHIFT
, error_mem_read
, notdirty_mem_write
, NULL
);
1939 /* alloc dirty bits array */
1940 phys_ram_dirty
= qemu_vmalloc(phys_ram_size
>> TARGET_PAGE_BITS
);
1941 memset(phys_ram_dirty
, 0xff, phys_ram_size
>> TARGET_PAGE_BITS
);
1944 /* mem_read and mem_write are arrays of functions containing the
1945 function to access byte (index 0), word (index 1) and dword (index
1946 2). All functions must be supplied. If io_index is non zero, the
1947 corresponding io zone is modified. If it is zero, a new io zone is
1948 allocated. The return value can be used with
1949 cpu_register_physical_memory(). (-1) is returned if error. */
1950 int cpu_register_io_memory(int io_index
,
1951 CPUReadMemoryFunc
**mem_read
,
1952 CPUWriteMemoryFunc
**mem_write
,
1957 if (io_index
<= 0) {
1958 if (io_mem_nb
>= IO_MEM_NB_ENTRIES
)
1960 io_index
= io_mem_nb
++;
1962 if (io_index
>= IO_MEM_NB_ENTRIES
)
1966 for(i
= 0;i
< 3; i
++) {
1967 io_mem_read
[io_index
][i
] = mem_read
[i
];
1968 io_mem_write
[io_index
][i
] = mem_write
[i
];
1970 io_mem_opaque
[io_index
] = opaque
;
1971 return io_index
<< IO_MEM_SHIFT
;
1974 CPUWriteMemoryFunc
**cpu_get_io_memory_write(int io_index
)
1976 return io_mem_write
[io_index
>> IO_MEM_SHIFT
];
1979 CPUReadMemoryFunc
**cpu_get_io_memory_read(int io_index
)
1981 return io_mem_read
[io_index
>> IO_MEM_SHIFT
];
1984 /* physical memory access (slow version, mainly for debug) */
1985 #if defined(CONFIG_USER_ONLY)
1986 void cpu_physical_memory_rw(target_phys_addr_t addr
, uint8_t *buf
,
1987 int len
, int is_write
)
1994 page
= addr
& TARGET_PAGE_MASK
;
1995 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
1998 flags
= page_get_flags(page
);
1999 if (!(flags
& PAGE_VALID
))
2002 if (!(flags
& PAGE_WRITE
))
2004 p
= lock_user(addr
, len
, 0);
2005 memcpy(p
, buf
, len
);
2006 unlock_user(p
, addr
, len
);
2008 if (!(flags
& PAGE_READ
))
2010 p
= lock_user(addr
, len
, 1);
2011 memcpy(buf
, p
, len
);
2012 unlock_user(p
, addr
, 0);
2021 void cpu_physical_memory_rw(target_phys_addr_t addr
, uint8_t *buf
,
2022 int len
, int is_write
)
2027 target_phys_addr_t page
;
2032 page
= addr
& TARGET_PAGE_MASK
;
2033 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
2036 p
= phys_page_find(page
>> TARGET_PAGE_BITS
);
2038 pd
= IO_MEM_UNASSIGNED
;
2040 pd
= p
->phys_offset
;
2044 if ((pd
& ~TARGET_PAGE_MASK
) != IO_MEM_RAM
) {
2045 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
2046 /* XXX: could force cpu_single_env to NULL to avoid
2048 if (l
>= 4 && ((addr
& 3) == 0)) {
2049 /* 32 bit write access */
2051 io_mem_write
[io_index
][2](io_mem_opaque
[io_index
], addr
, val
);
2053 } else if (l
>= 2 && ((addr
& 1) == 0)) {
2054 /* 16 bit write access */
2056 io_mem_write
[io_index
][1](io_mem_opaque
[io_index
], addr
, val
);
2059 /* 8 bit write access */
2061 io_mem_write
[io_index
][0](io_mem_opaque
[io_index
], addr
, val
);
2065 unsigned long addr1
;
2066 addr1
= (pd
& TARGET_PAGE_MASK
) + (addr
& ~TARGET_PAGE_MASK
);
2068 ptr
= phys_ram_base
+ addr1
;
2069 memcpy(ptr
, buf
, l
);
2070 if (!cpu_physical_memory_is_dirty(addr1
)) {
2071 /* invalidate code */
2072 tb_invalidate_phys_page_range(addr1
, addr1
+ l
, 0);
2074 phys_ram_dirty
[addr1
>> TARGET_PAGE_BITS
] |=
2075 (0xff & ~CODE_DIRTY_FLAG
);
2079 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
&&
2080 !(pd
& IO_MEM_ROMD
)) {
2082 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
2083 if (l
>= 4 && ((addr
& 3) == 0)) {
2084 /* 32 bit read access */
2085 val
= io_mem_read
[io_index
][2](io_mem_opaque
[io_index
], addr
);
2088 } else if (l
>= 2 && ((addr
& 1) == 0)) {
2089 /* 16 bit read access */
2090 val
= io_mem_read
[io_index
][1](io_mem_opaque
[io_index
], addr
);
2094 /* 8 bit read access */
2095 val
= io_mem_read
[io_index
][0](io_mem_opaque
[io_index
], addr
);
2101 ptr
= phys_ram_base
+ (pd
& TARGET_PAGE_MASK
) +
2102 (addr
& ~TARGET_PAGE_MASK
);
2103 memcpy(buf
, ptr
, l
);
2112 /* used for ROM loading : can write in RAM and ROM */
2113 void cpu_physical_memory_write_rom(target_phys_addr_t addr
,
2114 const uint8_t *buf
, int len
)
2118 target_phys_addr_t page
;
2123 page
= addr
& TARGET_PAGE_MASK
;
2124 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
2127 p
= phys_page_find(page
>> TARGET_PAGE_BITS
);
2129 pd
= IO_MEM_UNASSIGNED
;
2131 pd
= p
->phys_offset
;
2134 if ((pd
& ~TARGET_PAGE_MASK
) != IO_MEM_RAM
&&
2135 (pd
& ~TARGET_PAGE_MASK
) != IO_MEM_ROM
&&
2136 !(pd
& IO_MEM_ROMD
)) {
2139 unsigned long addr1
;
2140 addr1
= (pd
& TARGET_PAGE_MASK
) + (addr
& ~TARGET_PAGE_MASK
);
2142 ptr
= phys_ram_base
+ addr1
;
2143 memcpy(ptr
, buf
, l
);
2152 /* warning: addr must be aligned */
2153 uint32_t ldl_phys(target_phys_addr_t addr
)
2161 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
2163 pd
= IO_MEM_UNASSIGNED
;
2165 pd
= p
->phys_offset
;
2168 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
&&
2169 !(pd
& IO_MEM_ROMD
)) {
2171 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
2172 val
= io_mem_read
[io_index
][2](io_mem_opaque
[io_index
], addr
);
2175 ptr
= phys_ram_base
+ (pd
& TARGET_PAGE_MASK
) +
2176 (addr
& ~TARGET_PAGE_MASK
);
2182 /* warning: addr must be aligned */
2183 uint64_t ldq_phys(target_phys_addr_t addr
)
2191 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
2193 pd
= IO_MEM_UNASSIGNED
;
2195 pd
= p
->phys_offset
;
2198 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
&&
2199 !(pd
& IO_MEM_ROMD
)) {
2201 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
2202 #ifdef TARGET_WORDS_BIGENDIAN
2203 val
= (uint64_t)io_mem_read
[io_index
][2](io_mem_opaque
[io_index
], addr
) << 32;
2204 val
|= io_mem_read
[io_index
][2](io_mem_opaque
[io_index
], addr
+ 4);
2206 val
= io_mem_read
[io_index
][2](io_mem_opaque
[io_index
], addr
);
2207 val
|= (uint64_t)io_mem_read
[io_index
][2](io_mem_opaque
[io_index
], addr
+ 4) << 32;
2211 ptr
= phys_ram_base
+ (pd
& TARGET_PAGE_MASK
) +
2212 (addr
& ~TARGET_PAGE_MASK
);
2219 uint32_t ldub_phys(target_phys_addr_t addr
)
2222 cpu_physical_memory_read(addr
, &val
, 1);
2227 uint32_t lduw_phys(target_phys_addr_t addr
)
2230 cpu_physical_memory_read(addr
, (uint8_t *)&val
, 2);
2231 return tswap16(val
);
2234 /* warning: addr must be aligned. The ram page is not masked as dirty
2235 and the code inside is not invalidated. It is useful if the dirty
2236 bits are used to track modified PTEs */
2237 void stl_phys_notdirty(target_phys_addr_t addr
, uint32_t val
)
2244 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
2246 pd
= IO_MEM_UNASSIGNED
;
2248 pd
= p
->phys_offset
;
2251 if ((pd
& ~TARGET_PAGE_MASK
) != IO_MEM_RAM
) {
2252 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
2253 io_mem_write
[io_index
][2](io_mem_opaque
[io_index
], addr
, val
);
2255 ptr
= phys_ram_base
+ (pd
& TARGET_PAGE_MASK
) +
2256 (addr
& ~TARGET_PAGE_MASK
);
2261 /* warning: addr must be aligned */
2262 void stl_phys(target_phys_addr_t addr
, uint32_t val
)
2269 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
2271 pd
= IO_MEM_UNASSIGNED
;
2273 pd
= p
->phys_offset
;
2276 if ((pd
& ~TARGET_PAGE_MASK
) != IO_MEM_RAM
) {
2277 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
2278 io_mem_write
[io_index
][2](io_mem_opaque
[io_index
], addr
, val
);
2280 unsigned long addr1
;
2281 addr1
= (pd
& TARGET_PAGE_MASK
) + (addr
& ~TARGET_PAGE_MASK
);
2283 ptr
= phys_ram_base
+ addr1
;
2285 if (!cpu_physical_memory_is_dirty(addr1
)) {
2286 /* invalidate code */
2287 tb_invalidate_phys_page_range(addr1
, addr1
+ 4, 0);
2289 phys_ram_dirty
[addr1
>> TARGET_PAGE_BITS
] |=
2290 (0xff & ~CODE_DIRTY_FLAG
);
2296 void stb_phys(target_phys_addr_t addr
, uint32_t val
)
2299 cpu_physical_memory_write(addr
, &v
, 1);
2303 void stw_phys(target_phys_addr_t addr
, uint32_t val
)
2305 uint16_t v
= tswap16(val
);
2306 cpu_physical_memory_write(addr
, (const uint8_t *)&v
, 2);
2310 void stq_phys(target_phys_addr_t addr
, uint64_t val
)
2313 cpu_physical_memory_write(addr
, (const uint8_t *)&val
, 8);
2318 /* virtual memory access for debug */
2319 int cpu_memory_rw_debug(CPUState
*env
, target_ulong addr
,
2320 uint8_t *buf
, int len
, int is_write
)
2323 target_ulong page
, phys_addr
;
2326 page
= addr
& TARGET_PAGE_MASK
;
2327 phys_addr
= cpu_get_phys_page_debug(env
, page
);
2328 /* if no physical page mapped, return an error */
2329 if (phys_addr
== -1)
2331 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
2334 cpu_physical_memory_rw(phys_addr
+ (addr
& ~TARGET_PAGE_MASK
),
2343 void dump_exec_info(FILE *f
,
2344 int (*cpu_fprintf
)(FILE *f
, const char *fmt
, ...))
2346 int i
, target_code_size
, max_target_code_size
;
2347 int direct_jmp_count
, direct_jmp2_count
, cross_page
;
2348 TranslationBlock
*tb
;
2350 target_code_size
= 0;
2351 max_target_code_size
= 0;
2353 direct_jmp_count
= 0;
2354 direct_jmp2_count
= 0;
2355 for(i
= 0; i
< nb_tbs
; i
++) {
2357 target_code_size
+= tb
->size
;
2358 if (tb
->size
> max_target_code_size
)
2359 max_target_code_size
= tb
->size
;
2360 if (tb
->page_addr
[1] != -1)
2362 if (tb
->tb_next_offset
[0] != 0xffff) {
2364 if (tb
->tb_next_offset
[1] != 0xffff) {
2365 direct_jmp2_count
++;
2369 /* XXX: avoid using doubles ? */
2370 cpu_fprintf(f
, "TB count %d\n", nb_tbs
);
2371 cpu_fprintf(f
, "TB avg target size %d max=%d bytes\n",
2372 nb_tbs
? target_code_size
/ nb_tbs
: 0,
2373 max_target_code_size
);
2374 cpu_fprintf(f
, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
2375 nb_tbs
? (code_gen_ptr
- code_gen_buffer
) / nb_tbs
: 0,
2376 target_code_size
? (double) (code_gen_ptr
- code_gen_buffer
) / target_code_size
: 0);
2377 cpu_fprintf(f
, "cross page TB count %d (%d%%)\n",
2379 nb_tbs
? (cross_page
* 100) / nb_tbs
: 0);
2380 cpu_fprintf(f
, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
2382 nb_tbs
? (direct_jmp_count
* 100) / nb_tbs
: 0,
2384 nb_tbs
? (direct_jmp2_count
* 100) / nb_tbs
: 0);
2385 cpu_fprintf(f
, "TB flush count %d\n", tb_flush_count
);
2386 cpu_fprintf(f
, "TB invalidate count %d\n", tb_phys_invalidate_count
);
2387 cpu_fprintf(f
, "TLB flush count %d\n", tlb_flush_count
);
2390 #if !defined(CONFIG_USER_ONLY)
2392 #define MMUSUFFIX _cmmu
2393 #define GETPC() NULL
2394 #define env cpu_single_env
2395 #define SOFTMMU_CODE_ACCESS
2398 #include "softmmu_template.h"
2401 #include "softmmu_template.h"
2404 #include "softmmu_template.h"
2407 #include "softmmu_template.h"