/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <sys/types.h>

#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#endif

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

/* threshold to flush the translated code buffer */
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START        0x00000000
#define MMAP_AREA_END          0xa8000000
#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(USE_KQEMU)
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#elif defined(TARGET_X86_64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#else
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif
extern int kvm_allowed;

TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];

/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
uint8_t *code_gen_ptr;

ram_addr_t phys_ram_size;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;

static int in_migration;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;
typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;
typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low 12 bits */
    ram_addr_t phys_offset;
} PhysPageDesc;

#define L2_BITS 10
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)
static void io_mem_init(void);

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;
/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
PhysPageDesc **l1_phys_map;

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb;
/* log support */
char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;

/* statistics */
static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;
        DWORD old_protect;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;

        VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
                       PAGE_EXECUTE_READWRITE, &old_protect);
    }
#else
    qemu_real_host_page_size = getpagesize();
    {
        unsigned long start, end;

        start = (unsigned long)code_gen_buffer;
        start &= ~(qemu_real_host_page_size - 1);

        end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
        end += qemu_real_host_page_size - 1;
        end &= ~(qemu_real_host_page_size - 1);

        mprotect((void *)start, end - start,
                 PROT_READ | PROT_WRITE | PROT_EXEC);
    }
#endif

    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
}
static inline PageDesc *page_find_alloc(unsigned int index)
{
    PageDesc **lp, *p;

    lp = &l1_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}
static inline PageDesc *page_find(unsigned int index)
{
    PageDesc *p;

    p = l1_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;
    PhysPageDesc *pd;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    pd = *lp;
    if (!pd) {
        int i;
        /* allocate if not found */
        if (!alloc)
            return NULL;
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        *lp = pd;
        for(i = 0; i < L2_SIZE; i++)
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
    }
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}
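/* Illustrative sketch (not part of the original file): how a physical
   page index is split across the two levels of l1_phys_map above. The
   concrete widths depend on L1_BITS/L2_BITS; the helper name is made
   up and the block is compiled out. */
#if 0
static void example_phys_index_split(target_phys_addr_t paddr)
{
    target_phys_addr_t index = paddr >> TARGET_PAGE_BITS;
    /* first level: the upper bits of the page index select an L2 table */
    unsigned int l1 = (index >> L2_BITS) & (L1_SIZE - 1);
    /* second level: the low L2_BITS select the PhysPageDesc in the leaf */
    unsigned int l2 = index & (L2_SIZE - 1);
    printf("page index %lx -> l1=%u l2=%u\n", (long)index, l1, l2);
}
#endif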
#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#endif
void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

    if (!code_gen_ptr) {
        code_gen_ptr = code_gen_buffer;
        page_init();
        io_mem_init();
    }
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = (CPUState **)&(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    *penv = env;
}
static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}
/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
           code_gen_ptr - code_gen_buffer,
           nb_tbs,
           nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
#endif
    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}
#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(unsigned long address)
{
    TranslationBlock *tb;
    int i;

    address &= TARGET_PAGE_MASK;
    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}
/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}
void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif
/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}
/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}
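/* Illustrative sketch (not part of the original file): the jump lists
   used above and below store the jump slot number in the low 2 bits of
   each TranslationBlock pointer (0 or 1 for the two outgoing jumps; 2
   marks the head of the circular jmp_first list). Compiled out; the
   helper name is made up. */
#if 0
static void example_tagged_tb_pointer(TranslationBlock *tb)
{
    /* encode: remember that we arrived via jump slot 1 of 'tb' */
    TranslationBlock *tagged = (TranslationBlock *)((long)tb | 1);
    /* decode: slot number and the real, untagged pointer */
    int n = (long)tagged & 3;
    TranslationBlock *plain = (TranslationBlock *)((long)tagged & ~3);
    /* n selects jmp_next[n]/page_next[n] when walking the lists */
    (void)n; (void)plain;
}
#endif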
static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    target_ulong phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
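/* Illustrative sketch (not part of the original file): set_bits() marks
   the bit range [start, start+len) in a byte-indexed bitmap, exactly as
   build_page_bitmap() below uses it for the per-page code bitmap.
   Compiled out; the values are arbitrary. */
#if 0
static void example_set_bits(void)
{
    uint8_t bitmap[TARGET_PAGE_SIZE / 8];
    memset(bitmap, 0, sizeof(bitmap));
    /* mark bytes 10..25 of the page as containing translated code */
    set_bits(bitmap, 10, 16);
}
#endif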
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
    if (!p->code_bitmap)
        return;
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
586 static void tb_gen_code(CPUState
*env
,
587 target_ulong pc
, target_ulong cs_base
, int flags
,
590 TranslationBlock
*tb
;
592 target_ulong phys_pc
, phys_page2
, virt_page2
;
595 phys_pc
= get_phys_addr_code(env
, pc
);
598 /* flush must be done */
600 /* cannot fail at this point */
603 tc_ptr
= code_gen_ptr
;
605 tb
->cs_base
= cs_base
;
608 cpu_gen_code(env
, tb
, CODE_GEN_MAX_SIZE
, &code_gen_size
);
609 code_gen_ptr
= (void *)(((unsigned long)code_gen_ptr
+ code_gen_size
+ CODE_GEN_ALIGN
- 1) & ~(CODE_GEN_ALIGN
- 1));
611 /* check next page if needed */
612 virt_page2
= (pc
+ tb
->size
- 1) & TARGET_PAGE_MASK
;
614 if ((pc
& TARGET_PAGE_MASK
) != virt_page2
) {
615 phys_page2
= get_phys_addr_code(env
, virt_page2
);
617 tb_link_phys(tb
, phys_pc
, phys_page2
);
/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
                                   int is_cpu_write_access)
{
    int n, current_tb_modified, current_tb_not_found, current_flags;
    CPUState *env = cpu_single_env;
    PageDesc *p;
    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
    target_ulong tb_start, tb_end;
    target_ulong current_pc, current_cs_base;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    current_tb_not_found = is_cpu_write_access;
    current_tb_modified = 0;
    current_tb = NULL; /* avoid warning */
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_write_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_write_pc);
                }
            }
            if (current_tb == tb &&
                !(current_tb->cflags & CF_SINGLE_INSN)) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_write_pc, NULL);
#if defined(TARGET_I386)
                current_flags = env->hflags;
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
                current_cs_base = (target_ulong)env->segs[R_CS].base;
                current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = env->current_tb;
            env->current_tb = NULL;

            tb_phys_invalidate(tb, -1);

            env->current_tb = saved_tb;
            if (env->interrupt_request && env->current_tb)
                cpu_interrupt(env, env->interrupt_request);
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}
/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        if (loglevel) {
            fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                    cpu_single_env->mem_write_vaddr, len,
                    cpu_single_env->eip,
                    cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
        }
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_ulong addr,
                                    unsigned long pc, void *puc)
{
    int n, current_flags, current_tb_modified;
    target_ulong current_pc, current_cs_base;
    PageDesc *p;
    TranslationBlock *tb, *current_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *env = cpu_single_env;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
    current_tb_modified = 0;
    current_tb = NULL;
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            !(current_tb->cflags & CF_SINGLE_INSN)) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
#if defined(TARGET_I386)
            current_flags = env->hflags;
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
            current_cs_base = (target_ulong)env->segs[R_CS].base;
            current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif
/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, target_ulong page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
            page_get_flags(addr);
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x%08lx\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}
/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
        (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}
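/* Illustrative sketch (not part of the original file): the usual caller
   pattern for tb_alloc() — on failure, flush the whole translation
   cache and retry, as tb_gen_code() above does. Compiled out; the
   helper name is made up. */
#if 0
static TranslationBlock *example_alloc_tb(CPUState *env, target_ulong pc)
{
    TranslationBlock *tb = tb_alloc(pc);
    if (!tb) {
        tb_flush(env);      /* reclaims tbs[] and code_gen_buffer */
        tb = tb_alloc(pc);  /* cannot fail right after a flush */
    }
    return tb;
}
#endif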
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;
#ifdef USE_CODE_COPY
    tb->cflags &= ~CF_FP_USED;
    if (tb->cflags & CF_TB_FP_USED)
        tb->cflags |= CF_FP_USED;
#endif

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
}
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}
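/* Illustrative sketch (not part of the original file): tb_find_pc()
   works because tbs[] is filled in the same order as code_gen_buffer,
   so mapping a host PC back to its TB is a binary search. Compiled
   out; the helper name is made up. */
#if 0
static void example_tb_find_pc(unsigned long host_pc)
{
    TranslationBlock *tb = tb_find_pc(host_pc);
    if (tb) {
        /* host_pc lies inside the generated code starting at tb->tc_ptr;
           cpu_restore_state() uses this to recover the guest PC. */
    }
}
#endif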
static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}
#if defined(TARGET_HAS_ICE)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_ulong addr, pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif
/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
   breakpoint is reached */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            return 0;
    }

    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
        return -1;
    env->breakpoints[env->nb_breakpoints++] = pc;

    if (kvm_allowed)
        kvm_update_debugger(env);

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}
/* remove a breakpoint */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            goto found;
    }
    return -1;
 found:
    env->nb_breakpoints--;
    if (i < env->nb_breakpoints)
        env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];

    if (kvm_allowed)
        kvm_update_debugger(env);

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
        tb_flush(env);
    }
    if (kvm_allowed)
        kvm_update_debugger(env);
#endif
}
/* enable or disable low level logging */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
}
/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    TranslationBlock *tb;
    static int interrupt_lock;

    env->interrupt_request |= mask;
    if (kvm_allowed)
        kvm_update_interrupt_request(env);

    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    tb = env->current_tb;
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
        interrupt_lock = 0;
    }
}

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}
CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
#ifdef TARGET_I386
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops after optimization for each compiled TB" },
#endif
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};
static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}
/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if(cmp1(p,p1-p,"all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}
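/* Illustrative sketch (not part of the original file): typical use of
   cpu_str_to_log_mask(), e.g. to handle a "-d in_asm,op" style command
   line option. Compiled out; the helper name is made up. */
#if 0
static void example_log_mask(void)
{
    int mask = cpu_str_to_log_mask("in_asm,op");
    if (mask == 0) {
        /* an unknown log item was in the list */
    } else {
        cpu_set_log(mask);
    }
}
#endif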
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    va_end(ap);
    abort();
}
#if !defined(CONFIG_USER_ONLY)

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_table[0][i].addr_read = -1;
        env->tlb_table[0][i].addr_write = -1;
        env->tlb_table[0][i].addr_code = -1;
        env->tlb_table[1][i].addr_read = -1;
        env->tlb_table[1][i].addr_write = -1;
        env->tlb_table[1][i].addr_code = -1;
    }

    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

#if !defined(CONFIG_SOFTMMU)
    munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush(env, flush_global);
    }
#endif
    tlb_flush_count++;
}
*tlb_entry
, target_ulong addr
)
1294 if (addr
== (tlb_entry
->addr_read
&
1295 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
)) ||
1296 addr
== (tlb_entry
->addr_write
&
1297 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
)) ||
1298 addr
== (tlb_entry
->addr_code
&
1299 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
))) {
1300 tlb_entry
->addr_read
= -1;
1301 tlb_entry
->addr_write
= -1;
1302 tlb_entry
->addr_code
= -1;
void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;
    TranslationBlock *tb;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_table[0][i], addr);
    tlb_flush_entry(&env->tlb_table[1][i], addr);

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page. */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));

    i = tb_jmp_cache_hash_page(addr);
    memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));

#if !defined(CONFIG_SOFTMMU)
    if (addr < MMAP_AREA_END)
        munmap((void *)addr, TARGET_PAGE_SIZE);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush_page(env, addr);
    }
#endif
}
/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
}
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
        }
    }
}
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i, mask, len;
    uint8_t *p;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    len = length >> TARGET_PAGE_BITS;
#ifdef USE_KQEMU
    /* XXX: should not depend on cpu context */
    env = first_cpu;
    if (env->kqemu_enabled) {
        ram_addr_t addr;
        addr = start;
        for(i = 0; i < len; i++) {
            kqemu_set_notdirty(env, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }
#endif
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    for(i = 0; i < len; i++)
        p[i] &= mask;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = start + (unsigned long)phys_ram_base;
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
    }

#if !defined(CONFIG_SOFTMMU)
    /* XXX: this is expensive */
    {
        VirtPageDesc *p;
        int j;
        target_ulong addr;

        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                addr = i << (TARGET_PAGE_BITS + L2_BITS);
                for(j = 0; j < L2_SIZE; j++) {
                    if (p->valid_tag == virt_valid_tag &&
                        p->phys_addr >= start && p->phys_addr < end &&
                        (p->prot & PROT_WRITE)) {
                        if (addr < MMAP_AREA_END) {
                            mprotect((void *)addr, TARGET_PAGE_SIZE,
                                     p->prot & ~PROT_WRITE);
                        }
                    }
                    addr += TARGET_PAGE_SIZE;
                    p++;
                }
            }
        }
    }
#endif
}
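/* Illustrative sketch (not part of the original file): phys_ram_dirty
   keeps one byte of dirty flags per target page; 0xff means fully
   dirty, and CODE_DIRTY_FLAG records whether translated code on the
   page still has to be invalidated on writes. Compiled out; the helper
   name is made up. */
#if 0
static void example_dirty_byte(ram_addr_t ram_addr)
{
    uint8_t flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (flags == 0xff) {
        /* fully dirty: writes may take the fast path */
    }
    if (!(flags & CODE_DIRTY_FLAG)) {
        /* page may contain translated code: writes are trapped through
           the notdirty handlers below */
    }
}
#endif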
int cpu_physical_memory_set_dirty_tracking(int enable)
{
    int r = 0;

    if (kvm_allowed)
        r = kvm_physical_memory_set_dirty_tracking(enable);
    in_migration = enable;
    return r;
}

int cpu_physical_memory_get_dirty_tracking(void)
{
    return in_migration;
}
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
            tlb_entry->addend - (unsigned long)phys_ram_base;
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
        }
    }
}
/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
{
    int i;

    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[0][i]);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[1][i]);
}
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
                                  unsigned long start)
{
    unsigned long addr;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if (addr == start) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
        }
    }
}
/* update the TLB corresponding to virtual page vaddr and phys addr
   addr so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
    int i;

    addr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_set_dirty1(&env->tlb_table[0][i], addr);
    tlb_set_dirty1(&env->tlb_table[1][i], addr);
}
/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int is_user, int is_softmmu)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_phys_addr_t addend;
    int ret;
    CPUTLBEntry *te;

    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x u=%d smmu=%d pd=0x%08lx\n",
           vaddr, (int)paddr, prot, is_user, is_softmmu, pd);
#endif

    ret = 0;
#if !defined(CONFIG_SOFTMMU)
    if (is_softmmu)
#endif
    {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
            /* IO memory case */
            address = vaddr | pd;
            addend = paddr;
        } else {
            /* standard memory */
            address = vaddr;
            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
        }

        index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        addend -= vaddr;
        te = &env->tlb_table[is_user][index];
        te->addend = addend;
        if (prot & PAGE_READ) {
            te->addr_read = address;
        } else {
            te->addr_read = -1;
        }
        if (prot & PAGE_EXEC) {
            te->addr_code = address;
        } else {
            te->addr_code = -1;
        }
        if (prot & PAGE_WRITE) {
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
                (pd & IO_MEM_ROMD)) {
                /* write access calls the I/O callback */
                te->addr_write = vaddr |
                    (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
            } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                       !cpu_physical_memory_is_dirty(pd)) {
                te->addr_write = vaddr | IO_MEM_NOTDIRTY;
            } else {
                te->addr_write = address;
            }
        } else {
            te->addr_write = -1;
        }
    }
#if !defined(CONFIG_SOFTMMU)
    else {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO access: no mapping is done as it will be handled by the
               soft MMU */
            if (!(env->hflags & HF_SOFTMMU_MASK))
                ret = 2;
        } else {
            void *map_addr;

            if (vaddr >= MMAP_AREA_END) {
                ret = 2;
            } else {
                if (prot & PROT_WRITE) {
                    if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
#if defined(TARGET_HAS_SMC) || 1
                        first_tb ||
#endif
                        ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                         !cpu_physical_memory_is_dirty(pd))) {
                        /* ROM: we do as if code was inside */
                        /* if code is present, we only map as read only and save the
                           original mapping */
                        VirtPageDesc *vp;

                        vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
                        vp->phys_addr = pd;
                        vp->prot = prot;
                        vp->valid_tag = virt_valid_tag;
                        prot &= ~PAGE_WRITE;
                    }
                }
                map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
                                MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
                if (map_addr == MAP_FAILED) {
                    cpu_abort(env, "mmap failed when mapped physical address 0x%08x to virtual address 0x%08x\n",
                              paddr, vaddr);
                }
            }
        }
    }
#endif
    return ret;
}
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    VirtPageDesc *vp;

#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x\n", addr);
#endif
    addr &= TARGET_PAGE_MASK;

    /* if it is not mapped, no need to worry here */
    if (addr >= MMAP_AREA_END)
        return 0;
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (!vp)
        return 0;
    /* NOTE: in this case, validate_tag is _not_ tested as it
       validates only the code TLB */
    if (vp->valid_tag != virt_valid_tag)
        return 0;
    if (!(vp->prot & PAGE_WRITE))
        return 0;
#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
           addr, vp->phys_addr, vp->prot);
#endif
    if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
        cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
                  (unsigned long)addr, vp->prot);
    /* set the dirty bit */
    phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
    /* flush the code inside */
    tb_invalidate_phys_page(vp->phys_addr, pc, puc);
    return 1;
#else
    return 0;
#endif
}
#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int is_user, int is_softmmu)
{
    return 0;
}
/* dump memory mappings */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    start = -1;
    end = -1;
    prot = 0;
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0; j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}
int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}
/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is positioned automatically
   depending on PAGE_WRITE */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    PageDesc *p;
    target_ulong addr;

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    spin_lock(&tb_lock);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* if the write protection is set, then we invalidate the code
           inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
    spin_unlock(&tb_lock);
}
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    target_ulong host_start, host_end, addr;

    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1)
        return 0;
    host_end = host_start + qemu_host_page_size;
    p = p1;
    prot = 0;
    for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)g2h(host_start), qemu_host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            return 1;
        }
    }
    return 0;
}
/* call this function when system calls directly modify a memory area */
/* ??? This should be redundant now we have lock_user. */
void page_unprotect_range(target_ulong data, target_ulong data_size)
{
    target_ulong start, end, addr;

    start = data;
    end = start + data_size;
    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        page_unprotect(addr, 0, NULL);
    }
}
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */
/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                  unsigned long size,
                                  unsigned long phys_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;

    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
        p->phys_offset = phys_offset;
        if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
            (phys_offset & IO_MEM_ROMD))
            phys_offset += TARGET_PAGE_SIZE;
    }

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}
/* XXX: temporary until new memory mapping API */
uint32_t cpu_get_physical_page_desc(target_phys_addr_t addr)
{
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return IO_MEM_UNASSIGNED;
    return p->phys_offset;
}
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read  0x%08x\n", (int)addr);
#endif
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write 0x%08x = 0x%x\n", (int)addr, val);
#endif
}

static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readb,
    unassigned_mem_readb,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writeb,
    unassigned_mem_writeb,
};
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;

    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stb_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;

    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stw_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;

    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stl_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}
static CPUReadMemoryFunc *error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};
static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
    io_mem_nb = 5;

    /* alloc dirty bits array */
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
}
/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). All functions must be supplied. If io_index is non zero, the
   corresponding io zone is modified. If it is zero, a new io zone is
   allocated. The return value can be used with
   cpu_register_physical_memory(). (-1) is returned if error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i;

    if (io_index <= 0) {
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0; i < 3; i++) {
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return io_index << IO_MEM_SHIFT;
}
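/* Illustrative sketch (not part of the original file): registering a
   trivial I/O region with cpu_register_io_memory() and mapping it at a
   guest physical address. The handler names and the address are made
   up; the block is compiled out. */
#if 0
static uint32_t example_io_readb(void *opaque, target_phys_addr_t addr)
{
    return 0x42; /* device register value */
}

static void example_io_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    /* react to the guest write */
}

static CPUReadMemoryFunc *example_io_read[3] = {
    example_io_readb, example_io_readb, example_io_readb,
};
static CPUWriteMemoryFunc *example_io_write[3] = {
    example_io_writeb, example_io_writeb, example_io_writeb,
};

static void example_register_io(void)
{
    int io_index = cpu_register_io_memory(0, example_io_read,
                                          example_io_write, NULL);
    /* map one page of this I/O region at guest physical 0x10000000 */
    cpu_register_physical_memory(0x10000000, TARGET_PAGE_SIZE, io_index);
}
#endif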
CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
{
    return io_mem_write[io_index >> IO_MEM_SHIFT];
}

CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
{
    return io_mem_read[io_index >> IO_MEM_SHIFT];
}
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            p = lock_user(addr, len, 0);
            memcpy(p, buf, len);
            unlock_user(p, addr, len);
        } else {
            if (!(flags & PAGE_READ))
                return;
            p = lock_user(addr, len, 1);
            memcpy(buf, p, len);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = phys_ram_base + addr1;
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}
/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}
/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* XXX: optimize */
uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}
#ifdef __GNUC__
#define likely(x)   __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#else
#define likely(x)   x
#define unlikely(x) x
#endif
/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);

        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                    (0xff & ~CODE_DIRTY_FLAG);
            }
        }
    }
}
/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}
/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* XXX: optimize */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}

#endif
/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_ulong page, phys_addr;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "TB count %d\n", nb_tbs);
    cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
}
#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif