/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */
#include <sys/types.h>

#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#endif

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation. */

/* threshold to flush the translated code buffer */
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)

#define SMC_BITMAP_USE_THRESHOLD 10
#define MMAP_AREA_START        0x00000000
#define MMAP_AREA_END          0xa8000000

#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#else
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif
extern int kvm_allowed;

TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];

/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
uint8_t *code_gen_ptr;

uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;
static int in_migration;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;
typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low 12 bits */
    uint32_t phys_offset;
} PhysPageDesc;
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)
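
/* Worked example of the two-level split (bit counts are illustrative,
   they depend on the target configuration): with 4 KB target pages
   (TARGET_PAGE_BITS = 12) and L2_BITS = 10, L1_BITS is 10, so a 32-bit
   address decomposes into a 10-bit L1 index, a 10-bit L2 index and a
   12-bit page offset; the lookup helpers below use "index >> L2_BITS"
   for the L1 slot and "index & (L2_SIZE - 1)" for the L2 slot. */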
static void io_mem_init(void);

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
PhysPageDesc **l1_phys_map;
/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb;

char *logfilename = "/tmp/qemu.log";

static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;
        DWORD old_protect;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;

        VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
                       PAGE_EXECUTE_READWRITE, &old_protect);
    }
#else
    qemu_real_host_page_size = getpagesize();
    {
        unsigned long start, end;

        start = (unsigned long)code_gen_buffer;
        start &= ~(qemu_real_host_page_size - 1);

        end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
        end += qemu_real_host_page_size - 1;
        end &= ~(qemu_real_host_page_size - 1);

        mprotect((void *)start, end - start,
                 PROT_READ | PROT_WRITE | PROT_EXEC);
    }
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
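    /* Example: on a host with 4096-byte pages the loop above leaves
       qemu_host_page_bits == 12, and qemu_host_page_mask ==
       0xfffff000 on a 32-bit host. */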
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
}
static inline PageDesc *page_find_alloc(unsigned int index)
{
    PageDesc **lp, *p;

    lp = &l1_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}
static inline PageDesc *page_find(unsigned int index)
{
    PageDesc *p;

    p = l1_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;
    PhysPageDesc *pd;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    pd = *lp;
    if (!pd) {
        int i;
        /* allocate if not found */
        if (!alloc)
            return NULL;
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        *lp = pd;
        for (i = 0; i < L2_SIZE; i++)
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
    }
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
}
static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}
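
/* Sketch of the typical caller pattern used throughout this file:
   read-side code calls phys_page_find() and treats a NULL result as
   unassigned memory, while registration code passes alloc=1 so the
   intermediate tables are created on demand. */
#if 0
PhysPageDesc *p = phys_page_find(paddr >> TARGET_PAGE_BITS);
uint32_t pd = p ? p->phys_offset : IO_MEM_UNASSIGNED;
#endif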
#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#endif
void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

    if (!code_gen_ptr) {
        code_gen_ptr = code_gen_buffer;
        page_init();
        io_mem_init();
    }
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = (CPUState **)&(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    *penv = env;
}
static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}
/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
           code_gen_ptr - code_gen_buffer,
           nb_tbs,
           nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
#endif
    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}
#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(unsigned long address)
{
    TranslationBlock *tb;
    int i;

    address &= TARGET_PAGE_MASK;
    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}
/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}
void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif
/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}
/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}
static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    target_ulong phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
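    /* Note on the encoding used above: the two low bits of the
       jmp_first/jmp_next pointers store which jump slot (0 or 1) of the
       pointed-to TB links back here, and the value 2 marks the end of
       the circular list; this is why list walks decode each entry with
           n1 = (long)tb1 & 3;
           tb1 = (TranslationBlock *)((long)tb1 & ~3);
       as done in the loop above. */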
    tb_phys_invalidate_count++;
}
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
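
/* Worked example: set_bits(tab, 3, 7) marks bits 3..9, i.e. it ORs
   0xf8 into tab[0] and 0x03 into tab[1].  In the code bitmap built
   below, bit i stands for byte i of the guest page. */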
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
    if (!p->code_bitmap)
        return;
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
#ifdef TARGET_HAS_PRECISE_SMC

static void tb_gen_code(CPUState *env,
                        target_ulong pc, target_ulong cs_base, int flags,
                        int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
}
#endif
/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
                                   int is_cpu_write_access)
{
    int n, current_tb_modified, current_tb_not_found, current_flags;
    CPUState *env = cpu_single_env;
    PageDesc *p;
    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
    target_ulong tb_start, tb_end;
    target_ulong current_pc, current_cs_base;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    current_tb_not_found = is_cpu_write_access;
    current_tb_modified = 0;
    current_tb = NULL; /* avoid warning */
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_write_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_write_pc);
                }
            }
            if (current_tb == tb &&
                !(current_tb->cflags & CF_SINGLE_INSN)) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_write_pc, NULL);
#if defined(TARGET_I386)
                current_flags = env->hflags;
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
                current_cs_base = (target_ulong)env->segs[R_CS].base;
                current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}
/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        if (loglevel) {
            fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                    cpu_single_env->mem_write_vaddr, len,
                    cpu_single_env->eip,
                    cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
        }
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_ulong addr,
                                    unsigned long pc, void *puc)
{
    int n, current_flags, current_tb_modified;
    target_ulong current_pc, current_cs_base;
    PageDesc *p;
    TranslationBlock *tb, *current_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *env = cpu_single_env;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
    current_tb_modified = 0;
    current_tb = NULL;
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            !(current_tb->cflags & CF_SINGLE_INSN)) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
#if defined(TARGET_I386)
            current_flags = env->hflags;
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
            current_cs_base = (target_ulong)env->segs[R_CS].base;
            current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif
/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, target_ulong page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
            page_get_flags(addr);
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x%08lx\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}
/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
        (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;
#ifdef USE_CODE_COPY
    tb->cflags &= ~CF_FP_USED;
    if (tb->cflags & CF_TB_FP_USED)
        tb->cflags |= CF_FP_USED;
#endif

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
}
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}
static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}
#if defined(TARGET_HAS_ICE)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_ulong addr, pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif
1041 breakpoint is reached */
1042 int cpu_breakpoint_insert(CPUState
*env
, target_ulong pc
)
1044 #if defined(TARGET_HAS_ICE)
1047 for(i
= 0; i
< env
->nb_breakpoints
; i
++) {
1048 if (env
->breakpoints
[i
] == pc
)
1052 if (env
->nb_breakpoints
>= MAX_BREAKPOINTS
)
1054 env
->breakpoints
[env
->nb_breakpoints
++] = pc
;
1058 kvm_update_debugger(env
);
1061 breakpoint_invalidate(env
, pc
);
1068 /* remove a breakpoint */
1069 int cpu_breakpoint_remove(CPUState
*env
, target_ulong pc
)
1071 #if defined(TARGET_HAS_ICE)
1073 for(i
= 0; i
< env
->nb_breakpoints
; i
++) {
1074 if (env
->breakpoints
[i
] == pc
)
1079 env
->nb_breakpoints
--;
1080 if (i
< env
->nb_breakpoints
)
1081 env
->breakpoints
[i
] = env
->breakpoints
[env
->nb_breakpoints
];
1085 kvm_update_debugger(env
);
1088 breakpoint_invalidate(env
, pc
);
1095 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1096 CPU loop after each instruction */
1097 void cpu_single_step(CPUState
*env
, int enabled
)
1099 #if defined(TARGET_HAS_ICE)
1100 if (env
->singlestep_enabled
!= enabled
) {
1101 env
->singlestep_enabled
= enabled
;
1102 /* must flush all the translated code to avoid inconsistancies */
1103 /* XXX: only flush what is necessary */
1108 kvm_update_debugger(env
);
/* enable or disable low levels log */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
    }
}
void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
}
/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    TranslationBlock *tb;
    static int interrupt_lock;

    env->interrupt_request |= mask;

    kvm_update_interrupt_request(env);

    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    tb = env->current_tb;
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
        interrupt_lock = 0;
    }
}
void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}
CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops after optimization for each compiled TB" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
    { 0, NULL, NULL },
};
static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}
/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if(cmp1(p,p1-p,"all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}
*env
, const char *fmt
, ...)
1239 fprintf(stderr
, "qemu: fatal: ");
1240 vfprintf(stderr
, fmt
, ap
);
1241 fprintf(stderr
, "\n");
1243 cpu_dump_state(env
, stderr
, fprintf
, X86_DUMP_FPU
| X86_DUMP_CCOP
);
1245 cpu_dump_state(env
, stderr
, fprintf
, 0);
1251 #if !defined(CONFIG_USER_ONLY)
1253 /* NOTE: if flush_global is true, also flush global entries (not
1255 void tlb_flush(CPUState
*env
, int flush_global
)
1259 #if defined(DEBUG_TLB)
1260 printf("tlb_flush:\n");
1262 /* must reset current TB so that interrupts cannot modify the
1263 links while we are modifying them */
1264 env
->current_tb
= NULL
;
1266 for(i
= 0; i
< CPU_TLB_SIZE
; i
++) {
1267 env
->tlb_table
[0][i
].addr_read
= -1;
1268 env
->tlb_table
[0][i
].addr_write
= -1;
1269 env
->tlb_table
[0][i
].addr_code
= -1;
1270 env
->tlb_table
[1][i
].addr_read
= -1;
1271 env
->tlb_table
[1][i
].addr_write
= -1;
1272 env
->tlb_table
[1][i
].addr_code
= -1;
1275 memset (env
->tb_jmp_cache
, 0, TB_JMP_CACHE_SIZE
* sizeof (void *));
1277 #if !defined(CONFIG_SOFTMMU)
1278 munmap((void *)MMAP_AREA_START
, MMAP_AREA_END
- MMAP_AREA_START
);
1281 if (env
->kqemu_enabled
) {
1282 kqemu_flush(env
, flush_global
);
1288 static inline void tlb_flush_entry(CPUTLBEntry
*tlb_entry
, target_ulong addr
)
1290 if (addr
== (tlb_entry
->addr_read
&
1291 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
)) ||
1292 addr
== (tlb_entry
->addr_write
&
1293 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
)) ||
1294 addr
== (tlb_entry
->addr_code
&
1295 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
))) {
1296 tlb_entry
->addr_read
= -1;
1297 tlb_entry
->addr_write
= -1;
1298 tlb_entry
->addr_code
= -1;
1302 void tlb_flush_page(CPUState
*env
, target_ulong addr
)
1305 TranslationBlock
*tb
;
1307 #if defined(DEBUG_TLB)
1308 printf("tlb_flush_page: " TARGET_FMT_lx
"\n", addr
);
1310 /* must reset current TB so that interrupts cannot modify the
1311 links while we are modifying them */
1312 env
->current_tb
= NULL
;
1314 addr
&= TARGET_PAGE_MASK
;
1315 i
= (addr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
1316 tlb_flush_entry(&env
->tlb_table
[0][i
], addr
);
1317 tlb_flush_entry(&env
->tlb_table
[1][i
], addr
);
1319 /* Discard jump cache entries for any tb which might potentially
1320 overlap the flushed page. */
1321 i
= tb_jmp_cache_hash_page(addr
- TARGET_PAGE_SIZE
);
1322 memset (&env
->tb_jmp_cache
[i
], 0, TB_JMP_PAGE_SIZE
* sizeof(tb
));
1324 i
= tb_jmp_cache_hash_page(addr
);
1325 memset (&env
->tb_jmp_cache
[i
], 0, TB_JMP_PAGE_SIZE
* sizeof(tb
));
1327 #if !defined(CONFIG_SOFTMMU)
1328 if (addr
< MMAP_AREA_END
)
1329 munmap((void *)addr
, TARGET_PAGE_SIZE
);
1332 if (env
->kqemu_enabled
) {
1333 kqemu_flush_page(env
, addr
);
/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
}
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
        }
    }
}
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i, mask, len;
    uint8_t *p;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    len = length >> TARGET_PAGE_BITS;
#ifdef USE_KQEMU
    /* XXX: should not depend on cpu context */
    env = first_cpu;
    if (env->kqemu_enabled) {
        ram_addr_t addr;
        addr = start;
        for(i = 0; i < len; i++) {
            kqemu_set_notdirty(env, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }
#endif
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    for(i = 0; i < len; i++)
        p[i] &= mask;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = start + (unsigned long)phys_ram_base;
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
    }
#if !defined(CONFIG_SOFTMMU)
    /* XXX: this is expensive */
    {
        VirtPageDesc *p;
        int j;
        target_ulong addr;

        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                addr = i << (TARGET_PAGE_BITS + L2_BITS);
                for(j = 0; j < L2_SIZE; j++) {
                    if (p->valid_tag == virt_valid_tag &&
                        p->phys_addr >= start && p->phys_addr < end &&
                        (p->prot & PROT_WRITE)) {
                        if (addr < MMAP_AREA_END) {
                            mprotect((void *)addr, TARGET_PAGE_SIZE,
                                     p->prot & ~PROT_WRITE);
                        }
                    }
                    addr += TARGET_PAGE_SIZE;
                    p++;
                }
            }
        }
    }
#endif
}
int cpu_physical_memory_set_dirty_tracking(int enable)
{
    int r = 0;

    if (kvm_allowed)
        r = kvm_physical_memory_set_dirty_tracking(enable);
    in_migration = enable;
    return r;
}

int cpu_physical_memory_get_dirty_tracking(void)
{
    return in_migration;
}
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
            tlb_entry->addend - (unsigned long)phys_ram_base;
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
        }
    }
}
/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
{
    int i;
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[0][i]);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[1][i]);
}
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
                                  unsigned long start)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if (addr == start) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
        }
    }
}
/* update the TLB corresponding to virtual page vaddr and phys addr
   addr so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
    int i;

    addr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_set_dirty1(&env->tlb_table[0][i], addr);
    tlb_set_dirty1(&env->tlb_table[1][i], addr);
}
/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int is_user, int is_softmmu)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_phys_addr_t addend;
    int ret;
    CPUTLBEntry *te;

    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x u=%d smmu=%d pd=0x%08lx\n",
           vaddr, (int)paddr, prot, is_user, is_softmmu, pd);
#endif
    ret = 0;
#if !defined(CONFIG_SOFTMMU)
    if (is_softmmu)
#endif
    {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
            /* IO memory case */
            address = vaddr | pd;
            addend = paddr;
        } else {
            /* standard memory */
            address = vaddr;
            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
        }

        index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        addend -= vaddr;
        te = &env->tlb_table[is_user][index];
        te->addend = addend;
        if (prot & PAGE_READ) {
            te->addr_read = address;
        } else {
            te->addr_read = -1;
        }
        if (prot & PAGE_EXEC) {
            te->addr_code = address;
        } else {
            te->addr_code = -1;
        }
        if (prot & PAGE_WRITE) {
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
                (pd & IO_MEM_ROMD)) {
                /* write access calls the I/O callback */
                te->addr_write = vaddr |
                    (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
            } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                       !cpu_physical_memory_is_dirty(pd)) {
                te->addr_write = vaddr | IO_MEM_NOTDIRTY;
            } else {
                te->addr_write = address;
            }
        } else {
            te->addr_write = -1;
        }
    }
#if !defined(CONFIG_SOFTMMU)
    else {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO access: no mapping is done as it will be handled by the
               soft MMU */
            if (!(env->hflags & HF_SOFTMMU_MASK))
                ret = 2;
        } else {
            void *map_addr;

            if (vaddr >= MMAP_AREA_END) {
                ret = 2;
            } else {
                if (prot & PROT_WRITE) {
                    if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
#if defined(TARGET_HAS_SMC) || 1
                        first_tb ||
#endif
                        ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                         !cpu_physical_memory_is_dirty(pd))) {
                        /* ROM: we do as if code was inside */
                        /* if code is present, we only map as read only and save the
                           original mapping */
                        VirtPageDesc *vp;

                        vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
                        vp->phys_addr = pd;
                        vp->prot = prot;
                        vp->valid_tag = virt_valid_tag;
                        prot &= ~PAGE_WRITE;
                    }
                }
                map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
                                MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
                if (map_addr == MAP_FAILED) {
                    cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
                              paddr, vaddr);
                }
            }
        }
    }
#endif
    return ret;
}
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    VirtPageDesc *vp;

#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x\n", addr);
#endif
    addr &= TARGET_PAGE_MASK;

    /* if it is not mapped, no need to worry here */
    if (addr >= MMAP_AREA_END)
        return 0;
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (!vp)
        return 0;
    /* NOTE: in this case, validate_tag is _not_ tested as it
       validates only the code TLB */
    if (vp->valid_tag != virt_valid_tag)
        return 0;
    if (!(vp->prot & PAGE_WRITE))
        return 0;
#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
           addr, vp->phys_addr, vp->prot);
#endif
    if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
        cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
                  (unsigned long)addr, vp->prot);
    /* set the dirty bit */
    phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
    /* flush the code inside */
    tb_invalidate_phys_page(vp->phys_addr, pc, puc);
    return 1;
#else
    return 0;
#endif
}
#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int is_user, int is_softmmu)
{
    return 0;
}
/* dump memory mappings */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    start = -1;
    end = -1;
    prot = 0;
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0; j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}
int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}
/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is positioned automatically
   depending on PAGE_WRITE */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    PageDesc *p;
    target_ulong addr;

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    spin_lock(&tb_lock);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* if the write protection is set, then we invalidate the code
           inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
    spin_unlock(&tb_lock);
}
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    target_ulong host_start, host_end, addr;

    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1)
        return 0;
    host_end = host_start + qemu_host_page_size;
    p = p1;
    prot = 0;
    for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)g2h(host_start), qemu_host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            return 1;
        }
    }
    return 0;
}
/* call this function when system calls directly modify a memory area */
/* ??? This should be redundant now we have lock_user. */
void page_unprotect_range(target_ulong data, target_ulong data_size)
{
    target_ulong start, end, addr;

    start = data;
    end = start + data_size;
    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        page_unprotect(addr, 0, NULL);
    }
}
*env
,
1813 unsigned long addr
, target_ulong vaddr
)
1816 #endif /* defined(CONFIG_USER_ONLY) */
/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                  unsigned long size,
                                  unsigned long phys_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;

    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
        p->phys_offset = phys_offset;
        if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
            (phys_offset & IO_MEM_ROMD))
            phys_offset += TARGET_PAGE_SIZE;
    }

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}
/* XXX: temporary until new memory mapping API */
uint32_t cpu_get_physical_page_desc(target_phys_addr_t addr)
{
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return IO_MEM_UNASSIGNED;
    return p->phys_offset;
}
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read 0x%08x\n", (int)addr);
#endif
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write 0x%08x = 0x%x\n", (int)addr, val);
#endif
}
static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readb,
    unassigned_mem_readb,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writeb,
    unassigned_mem_writeb,
};
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;

    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stb_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}
static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;

    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stw_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}
static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;

    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stl_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}
static CPUReadMemoryFunc *error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};
static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
    io_mem_nb = 5;

    /* alloc dirty bits array */
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
}
/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). All functions must be supplied. If io_index is non zero, the
   corresponding io zone is modified. If it is zero, a new io zone is
   allocated. The return value can be used with
   cpu_register_physical_memory(). (-1) is returned if error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i;

    if (io_index <= 0) {
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0; i < 3; i++) {
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return io_index << IO_MEM_SHIFT;
}
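
/* Illustrative registration sequence (the device callbacks, opaque
   state and addresses below are assumed for the example; they are not
   defined in this file): a device supplies one callback per access
   size, registers them, and maps the returned token with
   cpu_register_physical_memory(). */
#if 0
static CPUReadMemoryFunc *mydev_read[3] = {
    mydev_readb, mydev_readw, mydev_readl,
};
static CPUWriteMemoryFunc *mydev_write[3] = {
    mydev_writeb, mydev_writew, mydev_writel,
};

int io = cpu_register_io_memory(0, mydev_read, mydev_write, mydev_state);
cpu_register_physical_memory(0xf2000000, 0x1000, io);
#endif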
CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
{
    return io_mem_write[io_index >> IO_MEM_SHIFT];
}

CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
{
    return io_mem_read[io_index >> IO_MEM_SHIFT];
}
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            p = lock_user(addr, len, 0);
            memcpy(p, buf, len);
            unlock_user(p, addr, len);
        } else {
            if (!(flags & PAGE_READ))
                return;
            p = lock_user(addr, len, 1);
            memcpy(buf, p, len);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = phys_ram_base + addr1;
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}
/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}
#ifdef __GNUC__
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#else
#define likely(x) x
#define unlikely(x) x
#endif
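
/* Example: stl_phys_notdirty() below tests if (unlikely(in_migration))
   so that the dirty-logging work stays off the common fast path. */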
/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);
        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                    (0xff & ~CODE_DIRTY_FLAG);
            }
        }
    }
}
/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}
#endif

/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_ulong page, phys_addr;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "TB count %d\n", nb_tbs);
    cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
}
#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif