2 * virtual page mapping and translated block handling
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
24 #include <sys/types.h>
40 #if defined(CONFIG_USER_ONLY)
44 //#define DEBUG_TB_INVALIDATE
47 //#define DEBUG_UNASSIGNED
49 /* make various TB consistency checks */
50 //#define DEBUG_TB_CHECK
51 //#define DEBUG_TLB_CHECK
53 //#define DEBUG_IOPORT
54 //#define DEBUG_SUBPAGE
56 #if !defined(CONFIG_USER_ONLY)
57 /* TB consistency checks only implemented for usermode emulation. */
61 /* threshold to flush the translated code buffer */
62 #define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)
64 #define SMC_BITMAP_USE_THRESHOLD 10
66 #define MMAP_AREA_START 0x00000000
67 #define MMAP_AREA_END 0xa8000000
69 #if defined(TARGET_SPARC64)
70 #define TARGET_PHYS_ADDR_SPACE_BITS 41
71 #elif defined(TARGET_SPARC)
72 #define TARGET_PHYS_ADDR_SPACE_BITS 36
73 #elif defined(TARGET_ALPHA)
74 #define TARGET_PHYS_ADDR_SPACE_BITS 42
75 #define TARGET_VIRT_ADDR_SPACE_BITS 42
76 #elif defined(TARGET_PPC64)
77 #define TARGET_PHYS_ADDR_SPACE_BITS 42
79 /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
80 #define TARGET_PHYS_ADDR_SPACE_BITS 32
82 #define TARGET_PHYS_ADDR_SPACE_BITS 42
84 #define TARGET_PHYS_ADDR_SPACE_BITS 32
88 extern int kvm_allowed
;
89 extern kvm_context_t kvm_context
;
92 TranslationBlock tbs
[CODE_GEN_MAX_BLOCKS
];
93 TranslationBlock
*tb_phys_hash
[CODE_GEN_PHYS_HASH_SIZE
];
95 /* any access to the tbs or the page table must use this lock */
96 spinlock_t tb_lock
= SPIN_LOCK_UNLOCKED
;
98 uint8_t code_gen_buffer
[CODE_GEN_BUFFER_SIZE
] __attribute__((aligned (32)));
99 uint8_t *code_gen_ptr
;
101 ram_addr_t phys_ram_size
;
103 uint8_t *phys_ram_base
;
104 uint8_t *phys_ram_dirty
;
106 static int in_migration
;
107 static ram_addr_t phys_ram_alloc_offset
= 0;
110 /* current CPU in the current thread. It is only valid inside
112 CPUState
*cpu_single_env
;
114 typedef struct PageDesc
{
115 /* list of TBs intersecting this ram page */
116 TranslationBlock
*first_tb
;
117 /* in order to optimize self modifying code, we count the number
118 of lookups we do to a given page to use a bitmap */
119 unsigned int code_write_count
;
120 uint8_t *code_bitmap
;
121 #if defined(CONFIG_USER_ONLY)
126 typedef struct PhysPageDesc
{
127 /* offset in host memory of the page + io_index in the low 12 bits */
128 ram_addr_t phys_offset
;
132 #if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
133 /* XXX: this is a temporary hack for alpha target.
134 * In the future, this is to be replaced by a multi-level table
135 * to actually be able to handle the complete 64 bits address space.
137 #define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
139 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
142 #define L1_SIZE (1 << L1_BITS)
143 #define L2_SIZE (1 << L2_BITS)
145 static void io_mem_init(void);
147 unsigned long qemu_real_host_page_size
;
148 unsigned long qemu_host_page_bits
;
149 unsigned long qemu_host_page_size
;
150 unsigned long qemu_host_page_mask
;
152 /* XXX: for system emulation, it could just be an array */
153 static PageDesc
*l1_map
[L1_SIZE
];
154 PhysPageDesc
**l1_phys_map
;
156 /* io memory support */
157 CPUWriteMemoryFunc
*io_mem_write
[IO_MEM_NB_ENTRIES
][4];
158 CPUReadMemoryFunc
*io_mem_read
[IO_MEM_NB_ENTRIES
][4];
159 void *io_mem_opaque
[IO_MEM_NB_ENTRIES
];
160 static int io_mem_nb
;
161 #if defined(CONFIG_SOFTMMU)
162 static int io_mem_watch
;
166 char *logfilename
= "/tmp/qemu.log";
169 static int log_append
= 0;
172 static int tlb_flush_count
;
173 static int tb_flush_count
;
174 static int tb_phys_invalidate_count
;
176 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
177 typedef struct subpage_t
{
178 target_phys_addr_t base
;
179 CPUReadMemoryFunc
**mem_read
[TARGET_PAGE_SIZE
];
180 CPUWriteMemoryFunc
**mem_write
[TARGET_PAGE_SIZE
];
181 void *opaque
[TARGET_PAGE_SIZE
];
184 static void page_init(void)
186 /* NOTE: we can always suppose that qemu_host_page_size >=
190 SYSTEM_INFO system_info
;
193 GetSystemInfo(&system_info
);
194 qemu_real_host_page_size
= system_info
.dwPageSize
;
196 VirtualProtect(code_gen_buffer
, sizeof(code_gen_buffer
),
197 PAGE_EXECUTE_READWRITE
, &old_protect
);
200 qemu_real_host_page_size
= getpagesize();
202 unsigned long start
, end
;
204 start
= (unsigned long)code_gen_buffer
;
205 start
&= ~(qemu_real_host_page_size
- 1);
207 end
= (unsigned long)code_gen_buffer
+ sizeof(code_gen_buffer
);
208 end
+= qemu_real_host_page_size
- 1;
209 end
&= ~(qemu_real_host_page_size
- 1);
211 mprotect((void *)start
, end
- start
,
212 PROT_READ
| PROT_WRITE
| PROT_EXEC
);
216 if (qemu_host_page_size
== 0)
217 qemu_host_page_size
= qemu_real_host_page_size
;
218 if (qemu_host_page_size
< TARGET_PAGE_SIZE
)
219 qemu_host_page_size
= TARGET_PAGE_SIZE
;
220 qemu_host_page_bits
= 0;
221 while ((1 << qemu_host_page_bits
) < qemu_host_page_size
)
222 qemu_host_page_bits
++;
223 qemu_host_page_mask
= ~(qemu_host_page_size
- 1);
224 l1_phys_map
= qemu_vmalloc(L1_SIZE
* sizeof(void *));
225 memset(l1_phys_map
, 0, L1_SIZE
* sizeof(void *));
228 static inline PageDesc
*page_find_alloc(unsigned int index
)
232 lp
= &l1_map
[index
>> L2_BITS
];
235 /* allocate if not found */
236 p
= qemu_malloc(sizeof(PageDesc
) * L2_SIZE
);
237 memset(p
, 0, sizeof(PageDesc
) * L2_SIZE
);
240 return p
+ (index
& (L2_SIZE
- 1));
243 static inline PageDesc
*page_find(unsigned int index
)
247 p
= l1_map
[index
>> L2_BITS
];
250 return p
+ (index
& (L2_SIZE
- 1));
253 static PhysPageDesc
*phys_page_find_alloc(target_phys_addr_t index
, int alloc
)
258 p
= (void **)l1_phys_map
;
259 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
261 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
262 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
264 lp
= p
+ ((index
>> (L1_BITS
+ L2_BITS
)) & (L1_SIZE
- 1));
267 /* allocate if not found */
270 p
= qemu_vmalloc(sizeof(void *) * L1_SIZE
);
271 memset(p
, 0, sizeof(void *) * L1_SIZE
);
275 lp
= p
+ ((index
>> L2_BITS
) & (L1_SIZE
- 1));
279 /* allocate if not found */
282 pd
= qemu_vmalloc(sizeof(PhysPageDesc
) * L2_SIZE
);
284 for (i
= 0; i
< L2_SIZE
; i
++)
285 pd
[i
].phys_offset
= IO_MEM_UNASSIGNED
;
287 return ((PhysPageDesc
*)pd
) + (index
& (L2_SIZE
- 1));
290 static inline PhysPageDesc
*phys_page_find(target_phys_addr_t index
)
292 return phys_page_find_alloc(index
, 0);
295 #if !defined(CONFIG_USER_ONLY)
296 static void tlb_protect_code(ram_addr_t ram_addr
);
297 static void tlb_unprotect_code_phys(CPUState
*env
, ram_addr_t ram_addr
,
301 void cpu_exec_init(CPUState
*env
)
307 code_gen_ptr
= code_gen_buffer
;
311 env
->next_cpu
= NULL
;
314 while (*penv
!= NULL
) {
315 penv
= (CPUState
**)&(*penv
)->next_cpu
;
318 env
->cpu_index
= cpu_index
;
319 env
->nb_watchpoints
= 0;
323 static inline void invalidate_page_bitmap(PageDesc
*p
)
325 if (p
->code_bitmap
) {
326 qemu_free(p
->code_bitmap
);
327 p
->code_bitmap
= NULL
;
329 p
->code_write_count
= 0;
332 /* set to NULL all the 'first_tb' fields in all PageDescs */
333 static void page_flush_tb(void)
338 for(i
= 0; i
< L1_SIZE
; i
++) {
341 for(j
= 0; j
< L2_SIZE
; j
++) {
343 invalidate_page_bitmap(p
);
350 /* flush all the translation blocks */
351 /* XXX: tb_flush is currently not thread safe */
352 void tb_flush(CPUState
*env1
)
355 #if defined(DEBUG_FLUSH)
356 printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
357 code_gen_ptr
- code_gen_buffer
,
359 nb_tbs
> 0 ? (code_gen_ptr
- code_gen_buffer
) / nb_tbs
: 0);
363 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
364 memset (env
->tb_jmp_cache
, 0, TB_JMP_CACHE_SIZE
* sizeof (void *));
367 memset (tb_phys_hash
, 0, CODE_GEN_PHYS_HASH_SIZE
* sizeof (void *));
370 code_gen_ptr
= code_gen_buffer
;
371 /* XXX: flush processor icache at this point if cache flush is
376 #ifdef DEBUG_TB_CHECK
378 static void tb_invalidate_check(target_ulong address
)
380 TranslationBlock
*tb
;
382 address
&= TARGET_PAGE_MASK
;
383 for(i
= 0;i
< CODE_GEN_PHYS_HASH_SIZE
; i
++) {
384 for(tb
= tb_phys_hash
[i
]; tb
!= NULL
; tb
= tb
->phys_hash_next
) {
385 if (!(address
+ TARGET_PAGE_SIZE
<= tb
->pc
||
386 address
>= tb
->pc
+ tb
->size
)) {
387 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
388 address
, (long)tb
->pc
, tb
->size
);
394 /* verify that all the pages have correct rights for code */
395 static void tb_page_check(void)
397 TranslationBlock
*tb
;
398 int i
, flags1
, flags2
;
400 for(i
= 0;i
< CODE_GEN_PHYS_HASH_SIZE
; i
++) {
401 for(tb
= tb_phys_hash
[i
]; tb
!= NULL
; tb
= tb
->phys_hash_next
) {
402 flags1
= page_get_flags(tb
->pc
);
403 flags2
= page_get_flags(tb
->pc
+ tb
->size
- 1);
404 if ((flags1
& PAGE_WRITE
) || (flags2
& PAGE_WRITE
)) {
405 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
406 (long)tb
->pc
, tb
->size
, flags1
, flags2
);
412 void tb_jmp_check(TranslationBlock
*tb
)
414 TranslationBlock
*tb1
;
417 /* suppress any remaining jumps to this TB */
421 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
424 tb1
= tb1
->jmp_next
[n1
];
426 /* check end of list */
428 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb
);
434 /* invalidate one TB */
435 static inline void tb_remove(TranslationBlock
**ptb
, TranslationBlock
*tb
,
438 TranslationBlock
*tb1
;
442 *ptb
= *(TranslationBlock
**)((char *)tb1
+ next_offset
);
445 ptb
= (TranslationBlock
**)((char *)tb1
+ next_offset
);
449 static inline void tb_page_remove(TranslationBlock
**ptb
, TranslationBlock
*tb
)
451 TranslationBlock
*tb1
;
457 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
459 *ptb
= tb1
->page_next
[n1
];
462 ptb
= &tb1
->page_next
[n1
];
466 static inline void tb_jmp_remove(TranslationBlock
*tb
, int n
)
468 TranslationBlock
*tb1
, **ptb
;
471 ptb
= &tb
->jmp_next
[n
];
474 /* find tb(n) in circular list */
478 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
479 if (n1
== n
&& tb1
== tb
)
482 ptb
= &tb1
->jmp_first
;
484 ptb
= &tb1
->jmp_next
[n1
];
487 /* now we can suppress tb(n) from the list */
488 *ptb
= tb
->jmp_next
[n
];
490 tb
->jmp_next
[n
] = NULL
;
494 /* reset the jump entry 'n' of a TB so that it is not chained to
496 static inline void tb_reset_jump(TranslationBlock
*tb
, int n
)
498 tb_set_jmp_target(tb
, n
, (unsigned long)(tb
->tc_ptr
+ tb
->tb_next_offset
[n
]));
501 static inline void tb_phys_invalidate(TranslationBlock
*tb
, unsigned int page_addr
)
506 target_ulong phys_pc
;
507 TranslationBlock
*tb1
, *tb2
;
509 /* remove the TB from the hash list */
510 phys_pc
= tb
->page_addr
[0] + (tb
->pc
& ~TARGET_PAGE_MASK
);
511 h
= tb_phys_hash_func(phys_pc
);
512 tb_remove(&tb_phys_hash
[h
], tb
,
513 offsetof(TranslationBlock
, phys_hash_next
));
515 /* remove the TB from the page list */
516 if (tb
->page_addr
[0] != page_addr
) {
517 p
= page_find(tb
->page_addr
[0] >> TARGET_PAGE_BITS
);
518 tb_page_remove(&p
->first_tb
, tb
);
519 invalidate_page_bitmap(p
);
521 if (tb
->page_addr
[1] != -1 && tb
->page_addr
[1] != page_addr
) {
522 p
= page_find(tb
->page_addr
[1] >> TARGET_PAGE_BITS
);
523 tb_page_remove(&p
->first_tb
, tb
);
524 invalidate_page_bitmap(p
);
527 tb_invalidated_flag
= 1;
529 /* remove the TB from the hash list */
530 h
= tb_jmp_cache_hash_func(tb
->pc
);
531 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
532 if (env
->tb_jmp_cache
[h
] == tb
)
533 env
->tb_jmp_cache
[h
] = NULL
;
536 /* suppress this TB from the two jump lists */
537 tb_jmp_remove(tb
, 0);
538 tb_jmp_remove(tb
, 1);
540 /* suppress any remaining jumps to this TB */
546 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
547 tb2
= tb1
->jmp_next
[n1
];
548 tb_reset_jump(tb1
, n1
);
549 tb1
->jmp_next
[n1
] = NULL
;
552 tb
->jmp_first
= (TranslationBlock
*)((long)tb
| 2); /* fail safe */
554 tb_phys_invalidate_count
++;
/* Set 'len' consecutive bits starting at bit index 'start' in the
   bitmap 'tab' (LSB-first within each byte). */
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        /* range fits inside a single byte */
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        /* partial leading byte, run of full bytes, partial trailing byte */
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
584 static void build_page_bitmap(PageDesc
*p
)
586 int n
, tb_start
, tb_end
;
587 TranslationBlock
*tb
;
589 p
->code_bitmap
= qemu_malloc(TARGET_PAGE_SIZE
/ 8);
592 memset(p
->code_bitmap
, 0, TARGET_PAGE_SIZE
/ 8);
597 tb
= (TranslationBlock
*)((long)tb
& ~3);
598 /* NOTE: this is subtle as a TB may span two physical pages */
600 /* NOTE: tb_end may be after the end of the page, but
601 it is not a problem */
602 tb_start
= tb
->pc
& ~TARGET_PAGE_MASK
;
603 tb_end
= tb_start
+ tb
->size
;
604 if (tb_end
> TARGET_PAGE_SIZE
)
605 tb_end
= TARGET_PAGE_SIZE
;
608 tb_end
= ((tb
->pc
+ tb
->size
) & ~TARGET_PAGE_MASK
);
610 set_bits(p
->code_bitmap
, tb_start
, tb_end
- tb_start
);
611 tb
= tb
->page_next
[n
];
615 #ifdef TARGET_HAS_PRECISE_SMC
617 static void tb_gen_code(CPUState
*env
,
618 target_ulong pc
, target_ulong cs_base
, int flags
,
621 TranslationBlock
*tb
;
623 target_ulong phys_pc
, phys_page2
, virt_page2
;
626 phys_pc
= get_phys_addr_code(env
, pc
);
629 /* flush must be done */
631 /* cannot fail at this point */
634 tc_ptr
= code_gen_ptr
;
636 tb
->cs_base
= cs_base
;
639 cpu_gen_code(env
, tb
, CODE_GEN_MAX_SIZE
, &code_gen_size
);
640 code_gen_ptr
= (void *)(((unsigned long)code_gen_ptr
+ code_gen_size
+ CODE_GEN_ALIGN
- 1) & ~(CODE_GEN_ALIGN
- 1));
642 /* check next page if needed */
643 virt_page2
= (pc
+ tb
->size
- 1) & TARGET_PAGE_MASK
;
645 if ((pc
& TARGET_PAGE_MASK
) != virt_page2
) {
646 phys_page2
= get_phys_addr_code(env
, virt_page2
);
648 tb_link_phys(tb
, phys_pc
, phys_page2
);
652 /* invalidate all TBs which intersect with the target physical page
653 starting in range [start;end[. NOTE: start and end must refer to
654 the same physical page. 'is_cpu_write_access' should be true if called
655 from a real cpu write access: the virtual CPU will exit the current
656 TB if code is modified inside this TB. */
657 void tb_invalidate_phys_page_range(target_ulong start
, target_ulong end
,
658 int is_cpu_write_access
)
660 int n
, current_tb_modified
, current_tb_not_found
, current_flags
;
661 CPUState
*env
= cpu_single_env
;
663 TranslationBlock
*tb
, *tb_next
, *current_tb
, *saved_tb
;
664 target_ulong tb_start
, tb_end
;
665 target_ulong current_pc
, current_cs_base
;
667 p
= page_find(start
>> TARGET_PAGE_BITS
);
670 if (!p
->code_bitmap
&&
671 ++p
->code_write_count
>= SMC_BITMAP_USE_THRESHOLD
&&
672 is_cpu_write_access
) {
673 /* build code bitmap */
674 build_page_bitmap(p
);
677 /* we remove all the TBs in the range [start, end[ */
678 /* XXX: see if in some cases it could be faster to invalidate all the code */
679 current_tb_not_found
= is_cpu_write_access
;
680 current_tb_modified
= 0;
681 current_tb
= NULL
; /* avoid warning */
682 current_pc
= 0; /* avoid warning */
683 current_cs_base
= 0; /* avoid warning */
684 current_flags
= 0; /* avoid warning */
688 tb
= (TranslationBlock
*)((long)tb
& ~3);
689 tb_next
= tb
->page_next
[n
];
690 /* NOTE: this is subtle as a TB may span two physical pages */
692 /* NOTE: tb_end may be after the end of the page, but
693 it is not a problem */
694 tb_start
= tb
->page_addr
[0] + (tb
->pc
& ~TARGET_PAGE_MASK
);
695 tb_end
= tb_start
+ tb
->size
;
697 tb_start
= tb
->page_addr
[1];
698 tb_end
= tb_start
+ ((tb
->pc
+ tb
->size
) & ~TARGET_PAGE_MASK
);
700 if (!(tb_end
<= start
|| tb_start
>= end
)) {
701 #ifdef TARGET_HAS_PRECISE_SMC
702 if (current_tb_not_found
) {
703 current_tb_not_found
= 0;
705 if (env
->mem_write_pc
) {
706 /* now we have a real cpu fault */
707 current_tb
= tb_find_pc(env
->mem_write_pc
);
710 if (current_tb
== tb
&&
711 !(current_tb
->cflags
& CF_SINGLE_INSN
)) {
712 /* If we are modifying the current TB, we must stop
713 its execution. We could be more precise by checking
714 that the modification is after the current PC, but it
715 would require a specialized function to partially
716 restore the CPU state */
718 current_tb_modified
= 1;
719 cpu_restore_state(current_tb
, env
,
720 env
->mem_write_pc
, NULL
);
721 #if defined(TARGET_I386)
722 current_flags
= env
->hflags
;
723 current_flags
|= (env
->eflags
& (IOPL_MASK
| TF_MASK
| VM_MASK
));
724 current_cs_base
= (target_ulong
)env
->segs
[R_CS
].base
;
725 current_pc
= current_cs_base
+ env
->eip
;
727 #error unsupported CPU
730 #endif /* TARGET_HAS_PRECISE_SMC */
731 /* we need to do that to handle the case where a signal
732 occurs while doing tb_phys_invalidate() */
735 saved_tb
= env
->current_tb
;
736 env
->current_tb
= NULL
;
738 tb_phys_invalidate(tb
, -1);
740 env
->current_tb
= saved_tb
;
741 if (env
->interrupt_request
&& env
->current_tb
)
742 cpu_interrupt(env
, env
->interrupt_request
);
747 #if !defined(CONFIG_USER_ONLY)
748 /* if no code remaining, no need to continue to use slow writes */
750 invalidate_page_bitmap(p
);
751 if (is_cpu_write_access
) {
752 tlb_unprotect_code_phys(env
, start
, env
->mem_write_vaddr
);
756 #ifdef TARGET_HAS_PRECISE_SMC
757 if (current_tb_modified
) {
758 /* we generate a block containing just the instruction
759 modifying the memory. It will ensure that it cannot modify
761 env
->current_tb
= NULL
;
762 tb_gen_code(env
, current_pc
, current_cs_base
, current_flags
,
764 cpu_resume_from_signal(env
, NULL
);
769 /* len must be <= 8 and start must be a multiple of len */
770 static inline void tb_invalidate_phys_page_fast(target_ulong start
, int len
)
777 fprintf(logfile
, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
778 cpu_single_env
->mem_write_vaddr
, len
,
780 cpu_single_env
->eip
+ (long)cpu_single_env
->segs
[R_CS
].base
);
784 p
= page_find(start
>> TARGET_PAGE_BITS
);
787 if (p
->code_bitmap
) {
788 offset
= start
& ~TARGET_PAGE_MASK
;
789 b
= p
->code_bitmap
[offset
>> 3] >> (offset
& 7);
790 if (b
& ((1 << len
) - 1))
794 tb_invalidate_phys_page_range(start
, start
+ len
, 1);
798 #if !defined(CONFIG_SOFTMMU)
799 static void tb_invalidate_phys_page(target_ulong addr
,
800 unsigned long pc
, void *puc
)
802 int n
, current_flags
, current_tb_modified
;
803 target_ulong current_pc
, current_cs_base
;
805 TranslationBlock
*tb
, *current_tb
;
806 #ifdef TARGET_HAS_PRECISE_SMC
807 CPUState
*env
= cpu_single_env
;
810 addr
&= TARGET_PAGE_MASK
;
811 p
= page_find(addr
>> TARGET_PAGE_BITS
);
815 current_tb_modified
= 0;
817 current_pc
= 0; /* avoid warning */
818 current_cs_base
= 0; /* avoid warning */
819 current_flags
= 0; /* avoid warning */
820 #ifdef TARGET_HAS_PRECISE_SMC
822 current_tb
= tb_find_pc(pc
);
827 tb
= (TranslationBlock
*)((long)tb
& ~3);
828 #ifdef TARGET_HAS_PRECISE_SMC
829 if (current_tb
== tb
&&
830 !(current_tb
->cflags
& CF_SINGLE_INSN
)) {
831 /* If we are modifying the current TB, we must stop
832 its execution. We could be more precise by checking
833 that the modification is after the current PC, but it
834 would require a specialized function to partially
835 restore the CPU state */
837 current_tb_modified
= 1;
838 cpu_restore_state(current_tb
, env
, pc
, puc
);
839 #if defined(TARGET_I386)
840 current_flags
= env
->hflags
;
841 current_flags
|= (env
->eflags
& (IOPL_MASK
| TF_MASK
| VM_MASK
));
842 current_cs_base
= (target_ulong
)env
->segs
[R_CS
].base
;
843 current_pc
= current_cs_base
+ env
->eip
;
845 #error unsupported CPU
848 #endif /* TARGET_HAS_PRECISE_SMC */
849 tb_phys_invalidate(tb
, addr
);
850 tb
= tb
->page_next
[n
];
853 #ifdef TARGET_HAS_PRECISE_SMC
854 if (current_tb_modified
) {
855 /* we generate a block containing just the instruction
856 modifying the memory. It will ensure that it cannot modify
858 env
->current_tb
= NULL
;
859 tb_gen_code(env
, current_pc
, current_cs_base
, current_flags
,
861 cpu_resume_from_signal(env
, puc
);
867 /* add the tb in the target page and protect it if necessary */
868 static inline void tb_alloc_page(TranslationBlock
*tb
,
869 unsigned int n
, target_ulong page_addr
)
872 TranslationBlock
*last_first_tb
;
874 tb
->page_addr
[n
] = page_addr
;
875 p
= page_find_alloc(page_addr
>> TARGET_PAGE_BITS
);
876 tb
->page_next
[n
] = p
->first_tb
;
877 last_first_tb
= p
->first_tb
;
878 p
->first_tb
= (TranslationBlock
*)((long)tb
| n
);
879 invalidate_page_bitmap(p
);
881 #if defined(TARGET_HAS_SMC) || 1
883 #if defined(CONFIG_USER_ONLY)
884 if (p
->flags
& PAGE_WRITE
) {
889 /* force the host page as non writable (writes will have a
890 page fault + mprotect overhead) */
891 page_addr
&= qemu_host_page_mask
;
893 for(addr
= page_addr
; addr
< page_addr
+ qemu_host_page_size
;
894 addr
+= TARGET_PAGE_SIZE
) {
896 p2
= page_find (addr
>> TARGET_PAGE_BITS
);
900 p2
->flags
&= ~PAGE_WRITE
;
901 page_get_flags(addr
);
903 mprotect(g2h(page_addr
), qemu_host_page_size
,
904 (prot
& PAGE_BITS
) & ~PAGE_WRITE
);
905 #ifdef DEBUG_TB_INVALIDATE
906 printf("protecting code page: 0x%08lx\n",
911 /* if some code is already present, then the pages are already
912 protected. So we handle the case where only the first TB is
913 allocated in a physical page */
914 if (!last_first_tb
) {
915 tlb_protect_code(page_addr
);
919 #endif /* TARGET_HAS_SMC */
922 /* Allocate a new translation block. Flush the translation buffer if
923 too many translation blocks or too much generated code. */
924 TranslationBlock
*tb_alloc(target_ulong pc
)
926 TranslationBlock
*tb
;
928 if (nb_tbs
>= CODE_GEN_MAX_BLOCKS
||
929 (code_gen_ptr
- code_gen_buffer
) >= CODE_GEN_BUFFER_MAX_SIZE
)
937 /* add a new TB and link it to the physical page tables. phys_page2 is
938 (-1) to indicate that only one page contains the TB. */
939 void tb_link_phys(TranslationBlock
*tb
,
940 target_ulong phys_pc
, target_ulong phys_page2
)
943 TranslationBlock
**ptb
;
945 /* add in the physical hash table */
946 h
= tb_phys_hash_func(phys_pc
);
947 ptb
= &tb_phys_hash
[h
];
948 tb
->phys_hash_next
= *ptb
;
951 /* add in the page list */
952 tb_alloc_page(tb
, 0, phys_pc
& TARGET_PAGE_MASK
);
953 if (phys_page2
!= -1)
954 tb_alloc_page(tb
, 1, phys_page2
);
956 tb
->page_addr
[1] = -1;
958 tb
->jmp_first
= (TranslationBlock
*)((long)tb
| 2);
959 tb
->jmp_next
[0] = NULL
;
960 tb
->jmp_next
[1] = NULL
;
962 tb
->cflags
&= ~CF_FP_USED
;
963 if (tb
->cflags
& CF_TB_FP_USED
)
964 tb
->cflags
|= CF_FP_USED
;
967 /* init original jump addresses */
968 if (tb
->tb_next_offset
[0] != 0xffff)
969 tb_reset_jump(tb
, 0);
970 if (tb
->tb_next_offset
[1] != 0xffff)
971 tb_reset_jump(tb
, 1);
973 #ifdef DEBUG_TB_CHECK
978 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
979 tb[1].tc_ptr. Return NULL if not found */
980 TranslationBlock
*tb_find_pc(unsigned long tc_ptr
)
984 TranslationBlock
*tb
;
988 if (tc_ptr
< (unsigned long)code_gen_buffer
||
989 tc_ptr
>= (unsigned long)code_gen_ptr
)
991 /* binary search (cf Knuth) */
994 while (m_min
<= m_max
) {
995 m
= (m_min
+ m_max
) >> 1;
997 v
= (unsigned long)tb
->tc_ptr
;
1000 else if (tc_ptr
< v
) {
1009 static void tb_reset_jump_recursive(TranslationBlock
*tb
);
1011 static inline void tb_reset_jump_recursive2(TranslationBlock
*tb
, int n
)
1013 TranslationBlock
*tb1
, *tb_next
, **ptb
;
1016 tb1
= tb
->jmp_next
[n
];
1018 /* find head of list */
1021 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
1024 tb1
= tb1
->jmp_next
[n1
];
1026 /* we are now sure now that tb jumps to tb1 */
1029 /* remove tb from the jmp_first list */
1030 ptb
= &tb_next
->jmp_first
;
1034 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
1035 if (n1
== n
&& tb1
== tb
)
1037 ptb
= &tb1
->jmp_next
[n1
];
1039 *ptb
= tb
->jmp_next
[n
];
1040 tb
->jmp_next
[n
] = NULL
;
1042 /* suppress the jump to next tb in generated code */
1043 tb_reset_jump(tb
, n
);
1045 /* suppress jumps in the tb on which we could have jumped */
1046 tb_reset_jump_recursive(tb_next
);
1050 static void tb_reset_jump_recursive(TranslationBlock
*tb
)
1052 tb_reset_jump_recursive2(tb
, 0);
1053 tb_reset_jump_recursive2(tb
, 1);
1056 #if defined(TARGET_HAS_ICE)
1057 static void breakpoint_invalidate(CPUState
*env
, target_ulong pc
)
1059 target_phys_addr_t addr
;
1061 ram_addr_t ram_addr
;
1064 addr
= cpu_get_phys_page_debug(env
, pc
);
1065 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
1067 pd
= IO_MEM_UNASSIGNED
;
1069 pd
= p
->phys_offset
;
1071 ram_addr
= (pd
& TARGET_PAGE_MASK
) | (pc
& ~TARGET_PAGE_MASK
);
1072 tb_invalidate_phys_page_range(ram_addr
, ram_addr
+ 1, 0);
1076 /* Add a watchpoint. */
1077 int cpu_watchpoint_insert(CPUState
*env
, target_ulong addr
)
1081 for (i
= 0; i
< env
->nb_watchpoints
; i
++) {
1082 if (addr
== env
->watchpoint
[i
].vaddr
)
1085 if (env
->nb_watchpoints
>= MAX_WATCHPOINTS
)
1088 i
= env
->nb_watchpoints
++;
1089 env
->watchpoint
[i
].vaddr
= addr
;
1090 tlb_flush_page(env
, addr
);
1091 /* FIXME: This flush is needed because of the hack to make memory ops
1092 terminate the TB. It can be removed once the proper IO trap and
1093 re-execute bits are in. */
1098 /* Remove a watchpoint. */
1099 int cpu_watchpoint_remove(CPUState
*env
, target_ulong addr
)
1103 for (i
= 0; i
< env
->nb_watchpoints
; i
++) {
1104 if (addr
== env
->watchpoint
[i
].vaddr
) {
1105 env
->nb_watchpoints
--;
1106 env
->watchpoint
[i
] = env
->watchpoint
[env
->nb_watchpoints
];
1107 tlb_flush_page(env
, addr
);
1114 /* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1115 breakpoint is reached */
1116 int cpu_breakpoint_insert(CPUState
*env
, target_ulong pc
)
1118 #if defined(TARGET_HAS_ICE)
1121 for(i
= 0; i
< env
->nb_breakpoints
; i
++) {
1122 if (env
->breakpoints
[i
] == pc
)
1126 if (env
->nb_breakpoints
>= MAX_BREAKPOINTS
)
1128 env
->breakpoints
[env
->nb_breakpoints
++] = pc
;
1132 kvm_update_debugger(env
);
1135 breakpoint_invalidate(env
, pc
);
1142 /* remove a breakpoint */
1143 int cpu_breakpoint_remove(CPUState
*env
, target_ulong pc
)
1145 #if defined(TARGET_HAS_ICE)
1147 for(i
= 0; i
< env
->nb_breakpoints
; i
++) {
1148 if (env
->breakpoints
[i
] == pc
)
1153 env
->nb_breakpoints
--;
1154 if (i
< env
->nb_breakpoints
)
1155 env
->breakpoints
[i
] = env
->breakpoints
[env
->nb_breakpoints
];
1159 kvm_update_debugger(env
);
1162 breakpoint_invalidate(env
, pc
);
1169 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1170 CPU loop after each instruction */
1171 void cpu_single_step(CPUState
*env
, int enabled
)
1173 #if defined(TARGET_HAS_ICE)
1174 if (env
->singlestep_enabled
!= enabled
) {
1175 env
->singlestep_enabled
= enabled
;
1176 /* must flush all the translated code to avoid inconsistancies */
1177 /* XXX: only flush what is necessary */
1182 kvm_update_debugger(env
);
1187 /* enable or disable low levels log */
1188 void cpu_set_log(int log_flags
)
1190 loglevel
= log_flags
;
1191 if (loglevel
&& !logfile
) {
1192 logfile
= fopen(logfilename
, log_append
? "a" : "w");
1194 perror(logfilename
);
1197 #if !defined(CONFIG_SOFTMMU)
1198 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1200 static uint8_t logfile_buf
[4096];
1201 setvbuf(logfile
, logfile_buf
, _IOLBF
, sizeof(logfile_buf
));
1204 setvbuf(logfile
, NULL
, _IOLBF
, 0);
1208 if (!loglevel
&& logfile
) {
1214 void cpu_set_log_filename(const char *filename
)
1216 logfilename
= strdup(filename
);
1221 cpu_set_log(loglevel
);
1224 /* mask must never be zero, except for A20 change call */
1225 void cpu_interrupt(CPUState
*env
, int mask
)
1227 TranslationBlock
*tb
;
1228 static int interrupt_lock
;
1230 env
->interrupt_request
|= mask
;
1232 if (kvm_allowed
&& !kvm_irqchip_in_kernel(kvm_context
))
1233 kvm_update_interrupt_request(env
);
1235 /* if the cpu is currently executing code, we must unlink it and
1236 all the potentially executing TB */
1237 tb
= env
->current_tb
;
1238 if (tb
&& !testandset(&interrupt_lock
)) {
1239 env
->current_tb
= NULL
;
1240 tb_reset_jump_recursive(tb
);
1245 void cpu_reset_interrupt(CPUState
*env
, int mask
)
1247 env
->interrupt_request
&= ~mask
;
1250 CPULogItem cpu_log_items
[] = {
1251 { CPU_LOG_TB_OUT_ASM
, "out_asm",
1252 "show generated host assembly code for each compiled TB" },
1253 { CPU_LOG_TB_IN_ASM
, "in_asm",
1254 "show target assembly code for each compiled TB" },
1255 { CPU_LOG_TB_OP
, "op",
1256 "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
1258 { CPU_LOG_TB_OP_OPT
, "op_opt",
1259 "show micro ops after optimization for each compiled TB" },
1261 { CPU_LOG_INT
, "int",
1262 "show interrupts/exceptions in short format" },
1263 { CPU_LOG_EXEC
, "exec",
1264 "show trace before each executed TB (lots of logs)" },
1265 { CPU_LOG_TB_CPU
, "cpu",
1266 "show CPU state before block translation" },
1268 { CPU_LOG_PCALL
, "pcall",
1269 "show protected mode far calls/returns/exceptions" },
1272 { CPU_LOG_IOPORT
, "ioport",
1273 "show all i/o ports accesses" },
/* Return non-zero iff the first 'n' characters of 's1' are exactly
   the NUL-terminated string 's2' (i.e. strlen(s2) == n and the bytes
   match). Used to match comma-separated log-mask tokens. */
static int cmp1(const char *s1, int n, const char *s2)
{
    /* guard negative n: the original compared a size_t against an int,
       which would promote a negative n to a huge unsigned value */
    if (n < 0 || strlen(s2) != (size_t)n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}
1285 /* takes a comma separated list of log masks. Return 0 if error. */
1286 int cpu_str_to_log_mask(const char *str
)
1295 p1
= strchr(p
, ',');
1298 if(cmp1(p
,p1
-p
,"all")) {
1299 for(item
= cpu_log_items
; item
->mask
!= 0; item
++) {
1303 for(item
= cpu_log_items
; item
->mask
!= 0; item
++) {
1304 if (cmp1(p
, p1
- p
, item
->name
))
1318 void cpu_abort(CPUState
*env
, const char *fmt
, ...)
1323 fprintf(stderr
, "qemu: fatal: ");
1324 vfprintf(stderr
, fmt
, ap
);
1325 fprintf(stderr
, "\n");
1327 if(env
->intercept
& INTERCEPT_SVM_MASK
) {
1328 /* most probably the virtual machine should not
1329 be shut down but rather caught by the VMM */
1330 vmexit(SVM_EXIT_SHUTDOWN
, 0);
1332 cpu_dump_state(env
, stderr
, fprintf
, X86_DUMP_FPU
| X86_DUMP_CCOP
);
1334 cpu_dump_state(env
, stderr
, fprintf
, 0);
1337 fprintf(logfile
, "qemu: fatal: ");
1338 vfprintf(logfile
, fmt
, ap
);
1339 fprintf(logfile
, "\n");
1341 cpu_dump_state(env
, logfile
, fprintf
, X86_DUMP_FPU
| X86_DUMP_CCOP
);
1343 cpu_dump_state(env
, logfile
, fprintf
, 0);
1352 CPUState
*cpu_copy(CPUState
*env
)
1354 CPUState
*new_env
= cpu_init();
1355 /* preserve chaining and index */
1356 CPUState
*next_cpu
= new_env
->next_cpu
;
1357 int cpu_index
= new_env
->cpu_index
;
1358 memcpy(new_env
, env
, sizeof(CPUState
));
1359 new_env
->next_cpu
= next_cpu
;
1360 new_env
->cpu_index
= cpu_index
;
1364 #if !defined(CONFIG_USER_ONLY)
1366 /* NOTE: if flush_global is true, also flush global entries (not
1368 void tlb_flush(CPUState
*env
, int flush_global
)
1372 #if defined(DEBUG_TLB)
1373 printf("tlb_flush:\n");
1375 /* must reset current TB so that interrupts cannot modify the
1376 links while we are modifying them */
1377 env
->current_tb
= NULL
;
1379 for(i
= 0; i
< CPU_TLB_SIZE
; i
++) {
1380 env
->tlb_table
[0][i
].addr_read
= -1;
1381 env
->tlb_table
[0][i
].addr_write
= -1;
1382 env
->tlb_table
[0][i
].addr_code
= -1;
1383 env
->tlb_table
[1][i
].addr_read
= -1;
1384 env
->tlb_table
[1][i
].addr_write
= -1;
1385 env
->tlb_table
[1][i
].addr_code
= -1;
1386 #if (NB_MMU_MODES >= 3)
1387 env
->tlb_table
[2][i
].addr_read
= -1;
1388 env
->tlb_table
[2][i
].addr_write
= -1;
1389 env
->tlb_table
[2][i
].addr_code
= -1;
1390 #if (NB_MMU_MODES == 4)
1391 env
->tlb_table
[3][i
].addr_read
= -1;
1392 env
->tlb_table
[3][i
].addr_write
= -1;
1393 env
->tlb_table
[3][i
].addr_code
= -1;
1398 memset (env
->tb_jmp_cache
, 0, TB_JMP_CACHE_SIZE
* sizeof (void *));
1400 #if !defined(CONFIG_SOFTMMU)
1401 munmap((void *)MMAP_AREA_START
, MMAP_AREA_END
- MMAP_AREA_START
);
1404 if (env
->kqemu_enabled
) {
1405 kqemu_flush(env
, flush_global
);
1411 static inline void tlb_flush_entry(CPUTLBEntry
*tlb_entry
, target_ulong addr
)
1413 if (addr
== (tlb_entry
->addr_read
&
1414 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
)) ||
1415 addr
== (tlb_entry
->addr_write
&
1416 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
)) ||
1417 addr
== (tlb_entry
->addr_code
&
1418 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
))) {
1419 tlb_entry
->addr_read
= -1;
1420 tlb_entry
->addr_write
= -1;
1421 tlb_entry
->addr_code
= -1;
1425 void tlb_flush_page(CPUState
*env
, target_ulong addr
)
1428 TranslationBlock
*tb
;
1430 #if defined(DEBUG_TLB)
1431 printf("tlb_flush_page: " TARGET_FMT_lx
"\n", addr
);
1433 /* must reset current TB so that interrupts cannot modify the
1434 links while we are modifying them */
1435 env
->current_tb
= NULL
;
1437 addr
&= TARGET_PAGE_MASK
;
1438 i
= (addr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
1439 tlb_flush_entry(&env
->tlb_table
[0][i
], addr
);
1440 tlb_flush_entry(&env
->tlb_table
[1][i
], addr
);
1441 #if (NB_MMU_MODES >= 3)
1442 tlb_flush_entry(&env
->tlb_table
[2][i
], addr
);
1443 #if (NB_MMU_MODES == 4)
1444 tlb_flush_entry(&env
->tlb_table
[3][i
], addr
);
1448 /* Discard jump cache entries for any tb which might potentially
1449 overlap the flushed page. */
1450 i
= tb_jmp_cache_hash_page(addr
- TARGET_PAGE_SIZE
);
1451 memset (&env
->tb_jmp_cache
[i
], 0, TB_JMP_PAGE_SIZE
* sizeof(tb
));
1453 i
= tb_jmp_cache_hash_page(addr
);
1454 memset (&env
->tb_jmp_cache
[i
], 0, TB_JMP_PAGE_SIZE
* sizeof(tb
));
1456 #if !defined(CONFIG_SOFTMMU)
1457 if (addr
< MMAP_AREA_END
)
1458 munmap((void *)addr
, TARGET_PAGE_SIZE
);
1461 if (env
->kqemu_enabled
) {
1462 kqemu_flush_page(env
, addr
);
1467 /* update the TLBs so that writes to code in the virtual page 'addr'
1469 static void tlb_protect_code(ram_addr_t ram_addr
)
1471 cpu_physical_memory_reset_dirty(ram_addr
,
1472 ram_addr
+ TARGET_PAGE_SIZE
,
1476 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1477 tested for self modifying code */
1478 static void tlb_unprotect_code_phys(CPUState
*env
, ram_addr_t ram_addr
,
1481 phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
] |= CODE_DIRTY_FLAG
;
1484 static inline void tlb_reset_dirty_range(CPUTLBEntry
*tlb_entry
,
1485 unsigned long start
, unsigned long length
)
1488 if ((tlb_entry
->addr_write
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
) {
1489 addr
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) + tlb_entry
->addend
;
1490 if ((addr
- start
) < length
) {
1491 tlb_entry
->addr_write
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) | IO_MEM_NOTDIRTY
;
1496 void cpu_physical_memory_reset_dirty(ram_addr_t start
, ram_addr_t end
,
1500 unsigned long length
, start1
;
1504 start
&= TARGET_PAGE_MASK
;
1505 end
= TARGET_PAGE_ALIGN(end
);
1507 length
= end
- start
;
1510 len
= length
>> TARGET_PAGE_BITS
;
1512 /* XXX: should not depend on cpu context */
1514 if (env
->kqemu_enabled
) {
1517 for(i
= 0; i
< len
; i
++) {
1518 kqemu_set_notdirty(env
, addr
);
1519 addr
+= TARGET_PAGE_SIZE
;
1523 mask
= ~dirty_flags
;
1524 p
= phys_ram_dirty
+ (start
>> TARGET_PAGE_BITS
);
1525 for(i
= 0; i
< len
; i
++)
1528 /* we modify the TLB cache so that the dirty bit will be set again
1529 when accessing the range */
1530 start1
= start
+ (unsigned long)phys_ram_base
;
1531 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
1532 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1533 tlb_reset_dirty_range(&env
->tlb_table
[0][i
], start1
, length
);
1534 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1535 tlb_reset_dirty_range(&env
->tlb_table
[1][i
], start1
, length
);
1536 #if (NB_MMU_MODES >= 3)
1537 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1538 tlb_reset_dirty_range(&env
->tlb_table
[2][i
], start1
, length
);
1539 #if (NB_MMU_MODES == 4)
1540 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1541 tlb_reset_dirty_range(&env
->tlb_table
[3][i
], start1
, length
);
1546 #if !defined(CONFIG_SOFTMMU)
1547 /* XXX: this is expensive */
1553 for(i
= 0; i
< L1_SIZE
; i
++) {
1556 addr
= i
<< (TARGET_PAGE_BITS
+ L2_BITS
);
1557 for(j
= 0; j
< L2_SIZE
; j
++) {
1558 if (p
->valid_tag
== virt_valid_tag
&&
1559 p
->phys_addr
>= start
&& p
->phys_addr
< end
&&
1560 (p
->prot
& PROT_WRITE
)) {
1561 if (addr
< MMAP_AREA_END
) {
1562 mprotect((void *)addr
, TARGET_PAGE_SIZE
,
1563 p
->prot
& ~PROT_WRITE
);
1566 addr
+= TARGET_PAGE_SIZE
;
1575 int cpu_physical_memory_set_dirty_tracking(int enable
)
1580 r
= kvm_physical_memory_set_dirty_tracking(enable
);
1582 in_migration
= enable
;
1586 int cpu_physical_memory_get_dirty_tracking(void)
1588 return in_migration
;
1591 static inline void tlb_update_dirty(CPUTLBEntry
*tlb_entry
)
1593 ram_addr_t ram_addr
;
1595 if ((tlb_entry
->addr_write
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
) {
1596 ram_addr
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) +
1597 tlb_entry
->addend
- (unsigned long)phys_ram_base
;
1598 if (!cpu_physical_memory_is_dirty(ram_addr
)) {
1599 tlb_entry
->addr_write
|= IO_MEM_NOTDIRTY
;
1604 /* update the TLB according to the current state of the dirty bits */
1605 void cpu_tlb_update_dirty(CPUState
*env
)
1608 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1609 tlb_update_dirty(&env
->tlb_table
[0][i
]);
1610 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1611 tlb_update_dirty(&env
->tlb_table
[1][i
]);
1612 #if (NB_MMU_MODES >= 3)
1613 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1614 tlb_update_dirty(&env
->tlb_table
[2][i
]);
1615 #if (NB_MMU_MODES == 4)
1616 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1617 tlb_update_dirty(&env
->tlb_table
[3][i
]);
1622 static inline void tlb_set_dirty1(CPUTLBEntry
*tlb_entry
,
1623 unsigned long start
)
1626 if ((tlb_entry
->addr_write
& ~TARGET_PAGE_MASK
) == IO_MEM_NOTDIRTY
) {
1627 addr
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) + tlb_entry
->addend
;
1628 if (addr
== start
) {
1629 tlb_entry
->addr_write
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) | IO_MEM_RAM
;
1634 /* update the TLB corresponding to virtual page vaddr and phys addr
1635 addr so that it is no longer dirty */
1636 static inline void tlb_set_dirty(CPUState
*env
,
1637 unsigned long addr
, target_ulong vaddr
)
1641 addr
&= TARGET_PAGE_MASK
;
1642 i
= (vaddr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
1643 tlb_set_dirty1(&env
->tlb_table
[0][i
], addr
);
1644 tlb_set_dirty1(&env
->tlb_table
[1][i
], addr
);
1645 #if (NB_MMU_MODES >= 3)
1646 tlb_set_dirty1(&env
->tlb_table
[2][i
], addr
);
1647 #if (NB_MMU_MODES == 4)
1648 tlb_set_dirty1(&env
->tlb_table
[3][i
], addr
);
1653 /* add a new TLB entry. At most one entry for a given virtual address
1654 is permitted. Return 0 if OK or 2 if the page could not be mapped
1655 (can only happen in non SOFTMMU mode for I/O pages or pages
1656 conflicting with the host address space). */
1657 int tlb_set_page_exec(CPUState
*env
, target_ulong vaddr
,
1658 target_phys_addr_t paddr
, int prot
,
1659 int is_user
, int is_softmmu
)
1664 target_ulong address
;
1665 target_phys_addr_t addend
;
1670 p
= phys_page_find(paddr
>> TARGET_PAGE_BITS
);
1672 pd
= IO_MEM_UNASSIGNED
;
1674 pd
= p
->phys_offset
;
1676 #if defined(DEBUG_TLB)
1677 printf("tlb_set_page: vaddr=" TARGET_FMT_lx
" paddr=0x%08x prot=%x u=%d smmu=%d pd=0x%08lx\n",
1678 vaddr
, (int)paddr
, prot
, is_user
, is_softmmu
, pd
);
1682 #if !defined(CONFIG_SOFTMMU)
1686 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
&& !(pd
& IO_MEM_ROMD
)) {
1687 /* IO memory case */
1688 address
= vaddr
| pd
;
1691 /* standard memory */
1693 addend
= (unsigned long)phys_ram_base
+ (pd
& TARGET_PAGE_MASK
);
1696 /* Make accesses to pages with watchpoints go via the
1697 watchpoint trap routines. */
1698 for (i
= 0; i
< env
->nb_watchpoints
; i
++) {
1699 if (vaddr
== (env
->watchpoint
[i
].vaddr
& TARGET_PAGE_MASK
)) {
1700 if (address
& ~TARGET_PAGE_MASK
) {
1701 env
->watchpoint
[i
].addend
= 0;
1702 address
= vaddr
| io_mem_watch
;
1704 env
->watchpoint
[i
].addend
= pd
- paddr
+
1705 (unsigned long) phys_ram_base
;
1706 /* TODO: Figure out how to make read watchpoints coexist
1708 pd
= (pd
& TARGET_PAGE_MASK
) | io_mem_watch
| IO_MEM_ROMD
;
1713 index
= (vaddr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
1715 te
= &env
->tlb_table
[is_user
][index
];
1716 te
->addend
= addend
;
1717 if (prot
& PAGE_READ
) {
1718 te
->addr_read
= address
;
1722 if (prot
& PAGE_EXEC
) {
1723 te
->addr_code
= address
;
1727 if (prot
& PAGE_WRITE
) {
1728 if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_ROM
||
1729 (pd
& IO_MEM_ROMD
)) {
1730 /* write access calls the I/O callback */
1731 te
->addr_write
= vaddr
|
1732 (pd
& ~(TARGET_PAGE_MASK
| IO_MEM_ROMD
));
1733 } else if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
&&
1734 !cpu_physical_memory_is_dirty(pd
)) {
1735 te
->addr_write
= vaddr
| IO_MEM_NOTDIRTY
;
1737 te
->addr_write
= address
;
1740 te
->addr_write
= -1;
1743 #if !defined(CONFIG_SOFTMMU)
1745 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
) {
1746 /* IO access: no mapping is done as it will be handled by the
1748 if (!(env
->hflags
& HF_SOFTMMU_MASK
))
1753 if (vaddr
>= MMAP_AREA_END
) {
1756 if (prot
& PROT_WRITE
) {
1757 if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_ROM
||
1758 #if defined(TARGET_HAS_SMC) || 1
1761 ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
&&
1762 !cpu_physical_memory_is_dirty(pd
))) {
1763 /* ROM: we do as if code was inside */
1764 /* if code is present, we only map as read only and save the
1768 vp
= virt_page_find_alloc(vaddr
>> TARGET_PAGE_BITS
, 1);
1771 vp
->valid_tag
= virt_valid_tag
;
1772 prot
&= ~PAGE_WRITE
;
1775 map_addr
= mmap((void *)vaddr
, TARGET_PAGE_SIZE
, prot
,
1776 MAP_SHARED
| MAP_FIXED
, phys_ram_fd
, (pd
& TARGET_PAGE_MASK
));
1777 if (map_addr
== MAP_FAILED
) {
1778 cpu_abort(env
, "mmap failed when mapped physical address 0x%08x to virtual address 0x%08x\n",
1788 /* called from signal handler: invalidate the code and unprotect the
1789 page. Return TRUE if the fault was succesfully handled. */
1790 int page_unprotect(target_ulong addr
, unsigned long pc
, void *puc
)
1792 #if !defined(CONFIG_SOFTMMU)
1795 #if defined(DEBUG_TLB)
1796 printf("page_unprotect: addr=0x%08x\n", addr
);
1798 addr
&= TARGET_PAGE_MASK
;
1800 /* if it is not mapped, no need to worry here */
1801 if (addr
>= MMAP_AREA_END
)
1803 vp
= virt_page_find(addr
>> TARGET_PAGE_BITS
);
1806 /* NOTE: in this case, validate_tag is _not_ tested as it
1807 validates only the code TLB */
1808 if (vp
->valid_tag
!= virt_valid_tag
)
1810 if (!(vp
->prot
& PAGE_WRITE
))
1812 #if defined(DEBUG_TLB)
1813 printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
1814 addr
, vp
->phys_addr
, vp
->prot
);
1816 if (mprotect((void *)addr
, TARGET_PAGE_SIZE
, vp
->prot
) < 0)
1817 cpu_abort(cpu_single_env
, "error mprotect addr=0x%lx prot=%d\n",
1818 (unsigned long)addr
, vp
->prot
);
1819 /* set the dirty bit */
1820 phys_ram_dirty
[vp
->phys_addr
>> TARGET_PAGE_BITS
] = 0xff;
1821 /* flush the code inside */
1822 tb_invalidate_phys_page(vp
->phys_addr
, pc
, puc
);
1831 void tlb_flush(CPUState
*env
, int flush_global
)
1835 void tlb_flush_page(CPUState
*env
, target_ulong addr
)
1839 int tlb_set_page_exec(CPUState
*env
, target_ulong vaddr
,
1840 target_phys_addr_t paddr
, int prot
,
1841 int is_user
, int is_softmmu
)
1846 /* dump memory mappings */
1847 void page_dump(FILE *f
)
1849 unsigned long start
, end
;
1850 int i
, j
, prot
, prot1
;
1853 fprintf(f
, "%-8s %-8s %-8s %s\n",
1854 "start", "end", "size", "prot");
1858 for(i
= 0; i
<= L1_SIZE
; i
++) {
1863 for(j
= 0;j
< L2_SIZE
; j
++) {
1868 if (prot1
!= prot
) {
1869 end
= (i
<< (32 - L1_BITS
)) | (j
<< TARGET_PAGE_BITS
);
1871 fprintf(f
, "%08lx-%08lx %08lx %c%c%c\n",
1872 start
, end
, end
- start
,
1873 prot
& PAGE_READ
? 'r' : '-',
1874 prot
& PAGE_WRITE
? 'w' : '-',
1875 prot
& PAGE_EXEC
? 'x' : '-');
1889 int page_get_flags(target_ulong address
)
1893 p
= page_find(address
>> TARGET_PAGE_BITS
);
1899 /* modify the flags of a page and invalidate the code if
1900 necessary. The flag PAGE_WRITE_ORG is positionned automatically
1901 depending on PAGE_WRITE */
1902 void page_set_flags(target_ulong start
, target_ulong end
, int flags
)
1907 start
= start
& TARGET_PAGE_MASK
;
1908 end
= TARGET_PAGE_ALIGN(end
);
1909 if (flags
& PAGE_WRITE
)
1910 flags
|= PAGE_WRITE_ORG
;
1911 spin_lock(&tb_lock
);
1912 for(addr
= start
; addr
< end
; addr
+= TARGET_PAGE_SIZE
) {
1913 p
= page_find_alloc(addr
>> TARGET_PAGE_BITS
);
1914 /* if the write protection is set, then we invalidate the code
1916 if (!(p
->flags
& PAGE_WRITE
) &&
1917 (flags
& PAGE_WRITE
) &&
1919 tb_invalidate_phys_page(addr
, 0, NULL
);
1923 spin_unlock(&tb_lock
);
1926 /* called from signal handler: invalidate the code and unprotect the
1927 page. Return TRUE if the fault was succesfully handled. */
1928 int page_unprotect(target_ulong address
, unsigned long pc
, void *puc
)
1930 unsigned int page_index
, prot
, pindex
;
1932 target_ulong host_start
, host_end
, addr
;
1934 host_start
= address
& qemu_host_page_mask
;
1935 page_index
= host_start
>> TARGET_PAGE_BITS
;
1936 p1
= page_find(page_index
);
1939 host_end
= host_start
+ qemu_host_page_size
;
1942 for(addr
= host_start
;addr
< host_end
; addr
+= TARGET_PAGE_SIZE
) {
1946 /* if the page was really writable, then we change its
1947 protection back to writable */
1948 if (prot
& PAGE_WRITE_ORG
) {
1949 pindex
= (address
- host_start
) >> TARGET_PAGE_BITS
;
1950 if (!(p1
[pindex
].flags
& PAGE_WRITE
)) {
1951 mprotect((void *)g2h(host_start
), qemu_host_page_size
,
1952 (prot
& PAGE_BITS
) | PAGE_WRITE
);
1953 p1
[pindex
].flags
|= PAGE_WRITE
;
1954 /* and since the content will be modified, we must invalidate
1955 the corresponding translated code. */
1956 tb_invalidate_phys_page(address
, pc
, puc
);
1957 #ifdef DEBUG_TB_CHECK
1958 tb_invalidate_check(address
);
1966 /* call this function when system calls directly modify a memory area */
1967 /* ??? This should be redundant now we have lock_user. */
1968 void page_unprotect_range(target_ulong data
, target_ulong data_size
)
1970 target_ulong start
, end
, addr
;
1973 end
= start
+ data_size
;
1974 start
&= TARGET_PAGE_MASK
;
1975 end
= TARGET_PAGE_ALIGN(end
);
1976 for(addr
= start
; addr
< end
; addr
+= TARGET_PAGE_SIZE
) {
1977 page_unprotect(addr
, 0, NULL
);
1981 static inline void tlb_set_dirty(CPUState
*env
,
1982 unsigned long addr
, target_ulong vaddr
)
1985 #endif /* defined(CONFIG_USER_ONLY) */
1987 static int subpage_register (subpage_t
*mmio
, uint32_t start
, uint32_t end
,
1989 static void *subpage_init (target_phys_addr_t base
, uint32_t *phys
,
1991 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
1994 if (addr > start_addr) \
1997 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
1998 if (start_addr2 > 0) \
2002 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2003 end_addr2 = TARGET_PAGE_SIZE - 1; \
2005 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2006 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2011 /* register physical memory. 'size' must be a multiple of the target
2012 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2014 void cpu_register_physical_memory(target_phys_addr_t start_addr
,
2016 unsigned long phys_offset
)
2018 target_phys_addr_t addr
, end_addr
;
2021 unsigned long orig_size
= size
;
2024 size
= (size
+ TARGET_PAGE_SIZE
- 1) & TARGET_PAGE_MASK
;
2025 end_addr
= start_addr
+ (target_phys_addr_t
)size
;
2026 for(addr
= start_addr
; addr
!= end_addr
; addr
+= TARGET_PAGE_SIZE
) {
2027 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
2028 if (p
&& p
->phys_offset
!= IO_MEM_UNASSIGNED
) {
2029 unsigned long orig_memory
= p
->phys_offset
;
2030 target_phys_addr_t start_addr2
, end_addr2
;
2031 int need_subpage
= 0;
2033 CHECK_SUBPAGE(addr
, start_addr
, start_addr2
, end_addr
, end_addr2
,
2036 if (!(orig_memory
& IO_MEM_SUBPAGE
)) {
2037 subpage
= subpage_init((addr
& TARGET_PAGE_MASK
),
2038 &p
->phys_offset
, orig_memory
);
2040 subpage
= io_mem_opaque
[(orig_memory
& ~TARGET_PAGE_MASK
)
2043 subpage_register(subpage
, start_addr2
, end_addr2
, phys_offset
);
2045 p
->phys_offset
= phys_offset
;
2046 if ((phys_offset
& ~TARGET_PAGE_MASK
) <= IO_MEM_ROM
||
2047 (phys_offset
& IO_MEM_ROMD
))
2048 phys_offset
+= TARGET_PAGE_SIZE
;
2051 p
= phys_page_find_alloc(addr
>> TARGET_PAGE_BITS
, 1);
2052 p
->phys_offset
= phys_offset
;
2053 if ((phys_offset
& ~TARGET_PAGE_MASK
) <= IO_MEM_ROM
||
2054 (phys_offset
& IO_MEM_ROMD
))
2055 phys_offset
+= TARGET_PAGE_SIZE
;
2057 target_phys_addr_t start_addr2
, end_addr2
;
2058 int need_subpage
= 0;
2060 CHECK_SUBPAGE(addr
, start_addr
, start_addr2
, end_addr
,
2061 end_addr2
, need_subpage
);
2064 subpage
= subpage_init((addr
& TARGET_PAGE_MASK
),
2065 &p
->phys_offset
, IO_MEM_UNASSIGNED
);
2066 subpage_register(subpage
, start_addr2
, end_addr2
,
2073 /* since each CPU stores ram addresses in its TLB cache, we must
2074 reset the modified entries */
2076 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
2081 /* XXX: temporary until new memory mapping API */
2082 uint32_t cpu_get_physical_page_desc(target_phys_addr_t addr
)
2086 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
2088 return IO_MEM_UNASSIGNED
;
2089 return p
->phys_offset
;
2092 /* XXX: better than nothing */
2093 ram_addr_t
qemu_ram_alloc(unsigned int size
)
2096 if ((phys_ram_alloc_offset
+ size
) >= phys_ram_size
) {
2097 fprintf(stderr
, "Not enough memory (requested_size = %u, max memory = %d)\n",
2098 size
, phys_ram_size
);
2101 addr
= phys_ram_alloc_offset
;
2102 phys_ram_alloc_offset
= TARGET_PAGE_ALIGN(phys_ram_alloc_offset
+ size
);
2106 void qemu_ram_free(ram_addr_t addr
)
2110 static uint32_t unassigned_mem_readb(void *opaque
, target_phys_addr_t addr
)
2112 #ifdef DEBUG_UNASSIGNED
2113 printf("Unassigned mem read " TARGET_FMT_lx
"\n", addr
);
2116 do_unassigned_access(addr
, 0, 0, 0);
2121 static void unassigned_mem_writeb(void *opaque
, target_phys_addr_t addr
, uint32_t val
)
2123 #ifdef DEBUG_UNASSIGNED
2124 printf("Unassigned mem write " TARGET_FMT_lx
" = 0x%x\n", addr
, val
);
2127 do_unassigned_access(addr
, 1, 0, 0);
2131 static CPUReadMemoryFunc
*unassigned_mem_read
[3] = {
2132 unassigned_mem_readb
,
2133 unassigned_mem_readb
,
2134 unassigned_mem_readb
,
2137 static CPUWriteMemoryFunc
*unassigned_mem_write
[3] = {
2138 unassigned_mem_writeb
,
2139 unassigned_mem_writeb
,
2140 unassigned_mem_writeb
,
2143 static void notdirty_mem_writeb(void *opaque
, target_phys_addr_t addr
, uint32_t val
)
2145 unsigned long ram_addr
;
2147 ram_addr
= addr
- (unsigned long)phys_ram_base
;
2148 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2149 if (!(dirty_flags
& CODE_DIRTY_FLAG
)) {
2150 #if !defined(CONFIG_USER_ONLY)
2151 tb_invalidate_phys_page_fast(ram_addr
, 1);
2152 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2155 stb_p((uint8_t *)(long)addr
, val
);
2157 if (cpu_single_env
->kqemu_enabled
&&
2158 (dirty_flags
& KQEMU_MODIFY_PAGE_MASK
) != KQEMU_MODIFY_PAGE_MASK
)
2159 kqemu_modify_page(cpu_single_env
, ram_addr
);
2161 dirty_flags
|= (0xff & ~CODE_DIRTY_FLAG
);
2162 phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
] = dirty_flags
;
2163 /* we remove the notdirty callback only if the code has been
2165 if (dirty_flags
== 0xff)
2166 tlb_set_dirty(cpu_single_env
, addr
, cpu_single_env
->mem_write_vaddr
);
2169 static void notdirty_mem_writew(void *opaque
, target_phys_addr_t addr
, uint32_t val
)
2171 unsigned long ram_addr
;
2173 ram_addr
= addr
- (unsigned long)phys_ram_base
;
2174 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2175 if (!(dirty_flags
& CODE_DIRTY_FLAG
)) {
2176 #if !defined(CONFIG_USER_ONLY)
2177 tb_invalidate_phys_page_fast(ram_addr
, 2);
2178 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2181 stw_p((uint8_t *)(long)addr
, val
);
2183 if (cpu_single_env
->kqemu_enabled
&&
2184 (dirty_flags
& KQEMU_MODIFY_PAGE_MASK
) != KQEMU_MODIFY_PAGE_MASK
)
2185 kqemu_modify_page(cpu_single_env
, ram_addr
);
2187 dirty_flags
|= (0xff & ~CODE_DIRTY_FLAG
);
2188 phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
] = dirty_flags
;
2189 /* we remove the notdirty callback only if the code has been
2191 if (dirty_flags
== 0xff)
2192 tlb_set_dirty(cpu_single_env
, addr
, cpu_single_env
->mem_write_vaddr
);
2195 static void notdirty_mem_writel(void *opaque
, target_phys_addr_t addr
, uint32_t val
)
2197 unsigned long ram_addr
;
2199 ram_addr
= addr
- (unsigned long)phys_ram_base
;
2200 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2201 if (!(dirty_flags
& CODE_DIRTY_FLAG
)) {
2202 #if !defined(CONFIG_USER_ONLY)
2203 tb_invalidate_phys_page_fast(ram_addr
, 4);
2204 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2207 stl_p((uint8_t *)(long)addr
, val
);
2209 if (cpu_single_env
->kqemu_enabled
&&
2210 (dirty_flags
& KQEMU_MODIFY_PAGE_MASK
) != KQEMU_MODIFY_PAGE_MASK
)
2211 kqemu_modify_page(cpu_single_env
, ram_addr
);
2213 dirty_flags
|= (0xff & ~CODE_DIRTY_FLAG
);
2214 phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
] = dirty_flags
;
2215 /* we remove the notdirty callback only if the code has been
2217 if (dirty_flags
== 0xff)
2218 tlb_set_dirty(cpu_single_env
, addr
, cpu_single_env
->mem_write_vaddr
);
2221 static CPUReadMemoryFunc
*error_mem_read
[3] = {
2222 NULL
, /* never used */
2223 NULL
, /* never used */
2224 NULL
, /* never used */
2227 static CPUWriteMemoryFunc
*notdirty_mem_write
[3] = {
2228 notdirty_mem_writeb
,
2229 notdirty_mem_writew
,
2230 notdirty_mem_writel
,
2233 #if defined(CONFIG_SOFTMMU)
2234 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2235 so these check for a hit then pass through to the normal out-of-line
2237 static uint32_t watch_mem_readb(void *opaque
, target_phys_addr_t addr
)
2239 return ldub_phys(addr
);
2242 static uint32_t watch_mem_readw(void *opaque
, target_phys_addr_t addr
)
2244 return lduw_phys(addr
);
2247 static uint32_t watch_mem_readl(void *opaque
, target_phys_addr_t addr
)
2249 return ldl_phys(addr
);
2252 /* Generate a debug exception if a watchpoint has been hit.
2253 Returns the real physical address of the access. addr will be a host
2254 address in case of a RAM location. */
2255 static target_ulong
check_watchpoint(target_phys_addr_t addr
)
2257 CPUState
*env
= cpu_single_env
;
2259 target_ulong retaddr
;
2263 for (i
= 0; i
< env
->nb_watchpoints
; i
++) {
2264 watch
= env
->watchpoint
[i
].vaddr
;
2265 if (((env
->mem_write_vaddr
^ watch
) & TARGET_PAGE_MASK
) == 0) {
2266 retaddr
= addr
- env
->watchpoint
[i
].addend
;
2267 if (((addr
^ watch
) & ~TARGET_PAGE_MASK
) == 0) {
2268 cpu_single_env
->watchpoint_hit
= i
+ 1;
2269 cpu_interrupt(cpu_single_env
, CPU_INTERRUPT_DEBUG
);
2277 static void watch_mem_writeb(void *opaque
, target_phys_addr_t addr
,
2280 addr
= check_watchpoint(addr
);
2281 stb_phys(addr
, val
);
2284 static void watch_mem_writew(void *opaque
, target_phys_addr_t addr
,
2287 addr
= check_watchpoint(addr
);
2288 stw_phys(addr
, val
);
2291 static void watch_mem_writel(void *opaque
, target_phys_addr_t addr
,
2294 addr
= check_watchpoint(addr
);
2295 stl_phys(addr
, val
);
2298 static CPUReadMemoryFunc
*watch_mem_read
[3] = {
2304 static CPUWriteMemoryFunc
*watch_mem_write
[3] = {
2311 static inline uint32_t subpage_readlen (subpage_t
*mmio
, target_phys_addr_t addr
,
2314 CPUReadMemoryFunc
**mem_read
;
2318 idx
= SUBPAGE_IDX(addr
- mmio
->base
);
2319 #if defined(DEBUG_SUBPAGE)
2320 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
" idx %d\n", __func__
,
2321 mmio
, len
, addr
, idx
);
2323 mem_read
= mmio
->mem_read
[idx
];
2324 ret
= (*mem_read
[len
])(mmio
->opaque
[idx
], addr
);
2329 static inline void subpage_writelen (subpage_t
*mmio
, target_phys_addr_t addr
,
2330 uint32_t value
, unsigned int len
)
2332 CPUWriteMemoryFunc
**mem_write
;
2335 idx
= SUBPAGE_IDX(addr
- mmio
->base
);
2336 #if defined(DEBUG_SUBPAGE)
2337 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
" idx %d value %08x\n", __func__
,
2338 mmio
, len
, addr
, idx
, value
);
2340 mem_write
= mmio
->mem_write
[idx
];
2341 (*mem_write
[len
])(mmio
->opaque
[idx
], addr
, value
);
2344 static uint32_t subpage_readb (void *opaque
, target_phys_addr_t addr
)
2346 #if defined(DEBUG_SUBPAGE)
2347 printf("%s: addr " TARGET_FMT_plx
"\n", __func__
, addr
);
2350 return subpage_readlen(opaque
, addr
, 0);
2353 static void subpage_writeb (void *opaque
, target_phys_addr_t addr
,
2356 #if defined(DEBUG_SUBPAGE)
2357 printf("%s: addr " TARGET_FMT_plx
" val %08x\n", __func__
, addr
, value
);
2359 subpage_writelen(opaque
, addr
, value
, 0);
2362 static uint32_t subpage_readw (void *opaque
, target_phys_addr_t addr
)
2364 #if defined(DEBUG_SUBPAGE)
2365 printf("%s: addr " TARGET_FMT_plx
"\n", __func__
, addr
);
2368 return subpage_readlen(opaque
, addr
, 1);
2371 static void subpage_writew (void *opaque
, target_phys_addr_t addr
,
2374 #if defined(DEBUG_SUBPAGE)
2375 printf("%s: addr " TARGET_FMT_plx
" val %08x\n", __func__
, addr
, value
);
2377 subpage_writelen(opaque
, addr
, value
, 1);
2380 static uint32_t subpage_readl (void *opaque
, target_phys_addr_t addr
)
2382 #if defined(DEBUG_SUBPAGE)
2383 printf("%s: addr " TARGET_FMT_plx
"\n", __func__
, addr
);
2386 return subpage_readlen(opaque
, addr
, 2);
2389 static void subpage_writel (void *opaque
,
2390 target_phys_addr_t addr
, uint32_t value
)
2392 #if defined(DEBUG_SUBPAGE)
2393 printf("%s: addr " TARGET_FMT_plx
" val %08x\n", __func__
, addr
, value
);
2395 subpage_writelen(opaque
, addr
, value
, 2);
2398 static CPUReadMemoryFunc
*subpage_read
[] = {
2404 static CPUWriteMemoryFunc
*subpage_write
[] = {
2410 static int subpage_register (subpage_t
*mmio
, uint32_t start
, uint32_t end
,
2415 if (start
>= TARGET_PAGE_SIZE
|| end
>= TARGET_PAGE_SIZE
)
2417 idx
= SUBPAGE_IDX(start
);
2418 eidx
= SUBPAGE_IDX(end
);
2419 #if defined(DEBUG_SUBPAGE)
2420 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__
,
2421 mmio
, start
, end
, idx
, eidx
, memory
);
2423 memory
>>= IO_MEM_SHIFT
;
2424 for (; idx
<= eidx
; idx
++) {
2425 mmio
->mem_read
[idx
] = io_mem_read
[memory
];
2426 mmio
->mem_write
[idx
] = io_mem_write
[memory
];
2427 mmio
->opaque
[idx
] = io_mem_opaque
[memory
];
2433 static void *subpage_init (target_phys_addr_t base
, uint32_t *phys
,
2439 mmio
= qemu_mallocz(sizeof(subpage_t
));
2442 subpage_memory
= cpu_register_io_memory(0, subpage_read
, subpage_write
, mmio
);
2443 #if defined(DEBUG_SUBPAGE)
2444 printf("%s: %p base " TARGET_FMT_plx
" len %08x %d\n", __func__
,
2445 mmio
, base
, TARGET_PAGE_SIZE
, subpage_memory
);
2447 *phys
= subpage_memory
| IO_MEM_SUBPAGE
;
2448 subpage_register(mmio
, 0, TARGET_PAGE_SIZE
- 1, orig_memory
);
2454 static void io_mem_init(void)
2456 cpu_register_io_memory(IO_MEM_ROM
>> IO_MEM_SHIFT
, error_mem_read
, unassigned_mem_write
, NULL
);
2457 cpu_register_io_memory(IO_MEM_UNASSIGNED
>> IO_MEM_SHIFT
, unassigned_mem_read
, unassigned_mem_write
, NULL
);
2458 cpu_register_io_memory(IO_MEM_NOTDIRTY
>> IO_MEM_SHIFT
, error_mem_read
, notdirty_mem_write
, NULL
);
2461 #if defined(CONFIG_SOFTMMU)
2462 io_mem_watch
= cpu_register_io_memory(-1, watch_mem_read
,
2463 watch_mem_write
, NULL
);
2465 /* alloc dirty bits array */
2466 phys_ram_dirty
= qemu_vmalloc(phys_ram_size
>> TARGET_PAGE_BITS
);
2467 memset(phys_ram_dirty
, 0xff, phys_ram_size
>> TARGET_PAGE_BITS
);
2470 /* mem_read and mem_write are arrays of functions containing the
2471 function to access byte (index 0), word (index 1) and dword (index
2472 2). All functions must be supplied. If io_index is non zero, the
2473 corresponding io zone is modified. If it is zero, a new io zone is
2474 allocated. The return value can be used with
2475 cpu_register_physical_memory(). (-1) is returned if error. */
2476 int cpu_register_io_memory(int io_index
,
2477 CPUReadMemoryFunc
**mem_read
,
2478 CPUWriteMemoryFunc
**mem_write
,
2483 if (io_index
<= 0) {
2484 if (io_mem_nb
>= IO_MEM_NB_ENTRIES
)
2486 io_index
= io_mem_nb
++;
2488 if (io_index
>= IO_MEM_NB_ENTRIES
)
2492 for(i
= 0;i
< 3; i
++) {
2493 io_mem_read
[io_index
][i
] = mem_read
[i
];
2494 io_mem_write
[io_index
][i
] = mem_write
[i
];
2496 io_mem_opaque
[io_index
] = opaque
;
2497 return io_index
<< IO_MEM_SHIFT
;
2500 CPUWriteMemoryFunc
**cpu_get_io_memory_write(int io_index
)
2502 return io_mem_write
[io_index
>> IO_MEM_SHIFT
];
2505 CPUReadMemoryFunc
**cpu_get_io_memory_read(int io_index
)
2507 return io_mem_read
[io_index
>> IO_MEM_SHIFT
];
2510 /* physical memory access (slow version, mainly for debug) */
2511 #if defined(CONFIG_USER_ONLY)
2512 void cpu_physical_memory_rw(target_phys_addr_t addr
, uint8_t *buf
,
2513 int len
, int is_write
)
2520 page
= addr
& TARGET_PAGE_MASK
;
2521 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
2524 flags
= page_get_flags(page
);
2525 if (!(flags
& PAGE_VALID
))
2528 if (!(flags
& PAGE_WRITE
))
2530 p
= lock_user(addr
, len
, 0);
2531 memcpy(p
, buf
, len
);
2532 unlock_user(p
, addr
, len
);
2534 if (!(flags
& PAGE_READ
))
2536 p
= lock_user(addr
, len
, 1);
2537 memcpy(buf
, p
, len
);
2538 unlock_user(p
, addr
, 0);
2547 void cpu_physical_memory_rw(target_phys_addr_t addr
, uint8_t *buf
,
2548 int len
, int is_write
)
2553 target_phys_addr_t page
;
2558 page
= addr
& TARGET_PAGE_MASK
;
2559 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
2562 p
= phys_page_find(page
>> TARGET_PAGE_BITS
);
2564 pd
= IO_MEM_UNASSIGNED
;
2566 pd
= p
->phys_offset
;
2570 if ((pd
& ~TARGET_PAGE_MASK
) != IO_MEM_RAM
) {
2571 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
2572 /* XXX: could force cpu_single_env to NULL to avoid
2574 if (l
>= 4 && ((addr
& 3) == 0)) {
2575 /* 32 bit write access */
2577 io_mem_write
[io_index
][2](io_mem_opaque
[io_index
], addr
, val
);
2579 } else if (l
>= 2 && ((addr
& 1) == 0)) {
2580 /* 16 bit write access */
2582 io_mem_write
[io_index
][1](io_mem_opaque
[io_index
], addr
, val
);
2585 /* 8 bit write access */
2587 io_mem_write
[io_index
][0](io_mem_opaque
[io_index
], addr
, val
);
2591 unsigned long addr1
;
2592 addr1
= (pd
& TARGET_PAGE_MASK
) + (addr
& ~TARGET_PAGE_MASK
);
2594 ptr
= phys_ram_base
+ addr1
;
2595 memcpy(ptr
, buf
, l
);
2596 if (!cpu_physical_memory_is_dirty(addr1
)) {
2597 /* invalidate code */
2598 tb_invalidate_phys_page_range(addr1
, addr1
+ l
, 0);
2600 phys_ram_dirty
[addr1
>> TARGET_PAGE_BITS
] |=
2601 (0xff & ~CODE_DIRTY_FLAG
);
2605 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
&&
2606 !(pd
& IO_MEM_ROMD
)) {
2608 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
2609 if (l
>= 4 && ((addr
& 3) == 0)) {
2610 /* 32 bit read access */
2611 val
= io_mem_read
[io_index
][2](io_mem_opaque
[io_index
], addr
);
2614 } else if (l
>= 2 && ((addr
& 1) == 0)) {
2615 /* 16 bit read access */
2616 val
= io_mem_read
[io_index
][1](io_mem_opaque
[io_index
], addr
);
2620 /* 8 bit read access */
2621 val
= io_mem_read
[io_index
][0](io_mem_opaque
[io_index
], addr
);
2627 ptr
= phys_ram_base
+ (pd
& TARGET_PAGE_MASK
) +
2628 (addr
& ~TARGET_PAGE_MASK
);
2629 memcpy(buf
, ptr
, l
);
2638 /* used for ROM loading : can write in RAM and ROM */
2639 void cpu_physical_memory_write_rom(target_phys_addr_t addr
,
2640 const uint8_t *buf
, int len
)
2644 target_phys_addr_t page
;
2649 page
= addr
& TARGET_PAGE_MASK
;
2650 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
2653 p
= phys_page_find(page
>> TARGET_PAGE_BITS
);
2655 pd
= IO_MEM_UNASSIGNED
;
2657 pd
= p
->phys_offset
;
2660 if ((pd
& ~TARGET_PAGE_MASK
) != IO_MEM_RAM
&&
2661 (pd
& ~TARGET_PAGE_MASK
) != IO_MEM_ROM
&&
2662 !(pd
& IO_MEM_ROMD
)) {
2665 unsigned long addr1
;
2666 addr1
= (pd
& TARGET_PAGE_MASK
) + (addr
& ~TARGET_PAGE_MASK
);
2668 ptr
= phys_ram_base
+ addr1
;
2669 memcpy(ptr
, buf
, l
);
2678 /* warning: addr must be aligned */
2679 uint32_t ldl_phys(target_phys_addr_t addr
)
2687 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
2689 pd
= IO_MEM_UNASSIGNED
;
2691 pd
= p
->phys_offset
;
2694 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
&&
2695 !(pd
& IO_MEM_ROMD
)) {
2697 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
2698 val
= io_mem_read
[io_index
][2](io_mem_opaque
[io_index
], addr
);
2701 ptr
= phys_ram_base
+ (pd
& TARGET_PAGE_MASK
) +
2702 (addr
& ~TARGET_PAGE_MASK
);
2708 /* warning: addr must be aligned */
2709 uint64_t ldq_phys(target_phys_addr_t addr
)
2717 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
2719 pd
= IO_MEM_UNASSIGNED
;
2721 pd
= p
->phys_offset
;
2724 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
&&
2725 !(pd
& IO_MEM_ROMD
)) {
2727 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
2728 #ifdef TARGET_WORDS_BIGENDIAN
2729 val
= (uint64_t)io_mem_read
[io_index
][2](io_mem_opaque
[io_index
], addr
) << 32;
2730 val
|= io_mem_read
[io_index
][2](io_mem_opaque
[io_index
], addr
+ 4);
2732 val
= io_mem_read
[io_index
][2](io_mem_opaque
[io_index
], addr
);
2733 val
|= (uint64_t)io_mem_read
[io_index
][2](io_mem_opaque
[io_index
], addr
+ 4) << 32;
2737 ptr
= phys_ram_base
+ (pd
& TARGET_PAGE_MASK
) +
2738 (addr
& ~TARGET_PAGE_MASK
);
2745 uint32_t ldub_phys(target_phys_addr_t addr
)
2748 cpu_physical_memory_read(addr
, &val
, 1);
2753 uint32_t lduw_phys(target_phys_addr_t addr
)
2756 cpu_physical_memory_read(addr
, (uint8_t *)&val
, 2);
2757 return tswap16(val
);
/* Branch-prediction hints for the compiler. As written, unlikely() was
   defined twice unconditionally (a macro redefinition); the GNUC guard
   restores the intended builtin/no-op split. */
#if defined(__GNUC__)
#define likely(x)   __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#else
#define likely(x)   x
#define unlikely(x) x
#endif
2768 /* warning: addr must be aligned. The ram page is not masked as dirty
2769 and the code inside is not invalidated. It is useful if the dirty
2770 bits are used to track modified PTEs */
2771 void stl_phys_notdirty(target_phys_addr_t addr
, uint32_t val
)
2778 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
2780 pd
= IO_MEM_UNASSIGNED
;
2782 pd
= p
->phys_offset
;
2785 if ((pd
& ~TARGET_PAGE_MASK
) != IO_MEM_RAM
) {
2786 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
2787 io_mem_write
[io_index
][2](io_mem_opaque
[io_index
], addr
, val
);
2789 unsigned long addr1
= (pd
& TARGET_PAGE_MASK
) + (addr
& ~TARGET_PAGE_MASK
);
2790 ptr
= phys_ram_base
+ addr1
;
2793 if (unlikely(in_migration
)) {
2794 if (!cpu_physical_memory_is_dirty(addr1
)) {
2795 /* invalidate code */
2796 tb_invalidate_phys_page_range(addr1
, addr1
+ 4, 0);
2798 phys_ram_dirty
[addr1
>> TARGET_PAGE_BITS
] |=
2799 (0xff & ~CODE_DIRTY_FLAG
);
2805 void stq_phys_notdirty(target_phys_addr_t addr
, uint64_t val
)
2812 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
2814 pd
= IO_MEM_UNASSIGNED
;
2816 pd
= p
->phys_offset
;
2819 if ((pd
& ~TARGET_PAGE_MASK
) != IO_MEM_RAM
) {
2820 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
2821 #ifdef TARGET_WORDS_BIGENDIAN
2822 io_mem_write
[io_index
][2](io_mem_opaque
[io_index
], addr
, val
>> 32);
2823 io_mem_write
[io_index
][2](io_mem_opaque
[io_index
], addr
+ 4, val
);
2825 io_mem_write
[io_index
][2](io_mem_opaque
[io_index
], addr
, val
);
2826 io_mem_write
[io_index
][2](io_mem_opaque
[io_index
], addr
+ 4, val
>> 32);
2829 ptr
= phys_ram_base
+ (pd
& TARGET_PAGE_MASK
) +
2830 (addr
& ~TARGET_PAGE_MASK
);
2835 /* warning: addr must be aligned */
2836 void stl_phys(target_phys_addr_t addr
, uint32_t val
)
2843 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
2845 pd
= IO_MEM_UNASSIGNED
;
2847 pd
= p
->phys_offset
;
2850 if ((pd
& ~TARGET_PAGE_MASK
) != IO_MEM_RAM
) {
2851 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
2852 io_mem_write
[io_index
][2](io_mem_opaque
[io_index
], addr
, val
);
2854 unsigned long addr1
;
2855 addr1
= (pd
& TARGET_PAGE_MASK
) + (addr
& ~TARGET_PAGE_MASK
);
2857 ptr
= phys_ram_base
+ addr1
;
2859 if (!cpu_physical_memory_is_dirty(addr1
)) {
2860 /* invalidate code */
2861 tb_invalidate_phys_page_range(addr1
, addr1
+ 4, 0);
2863 phys_ram_dirty
[addr1
>> TARGET_PAGE_BITS
] |=
2864 (0xff & ~CODE_DIRTY_FLAG
);
2870 void stb_phys(target_phys_addr_t addr
, uint32_t val
)
2873 cpu_physical_memory_write(addr
, &v
, 1);
2877 void stw_phys(target_phys_addr_t addr
, uint32_t val
)
2879 uint16_t v
= tswap16(val
);
2880 cpu_physical_memory_write(addr
, (const uint8_t *)&v
, 2);
2884 void stq_phys(target_phys_addr_t addr
, uint64_t val
)
2887 cpu_physical_memory_write(addr
, (const uint8_t *)&val
, 8);
2892 /* virtual memory access for debug */
2893 int cpu_memory_rw_debug(CPUState
*env
, target_ulong addr
,
2894 uint8_t *buf
, int len
, int is_write
)
2897 target_phys_addr_t phys_addr
;
2901 page
= addr
& TARGET_PAGE_MASK
;
2902 phys_addr
= cpu_get_phys_page_debug(env
, page
);
2903 /* if no physical page mapped, return an error */
2904 if (phys_addr
== -1)
2906 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
2909 cpu_physical_memory_rw(phys_addr
+ (addr
& ~TARGET_PAGE_MASK
),
2918 void dump_exec_info(FILE *f
,
2919 int (*cpu_fprintf
)(FILE *f
, const char *fmt
, ...))
2921 int i
, target_code_size
, max_target_code_size
;
2922 int direct_jmp_count
, direct_jmp2_count
, cross_page
;
2923 TranslationBlock
*tb
;
2925 target_code_size
= 0;
2926 max_target_code_size
= 0;
2928 direct_jmp_count
= 0;
2929 direct_jmp2_count
= 0;
2930 for(i
= 0; i
< nb_tbs
; i
++) {
2932 target_code_size
+= tb
->size
;
2933 if (tb
->size
> max_target_code_size
)
2934 max_target_code_size
= tb
->size
;
2935 if (tb
->page_addr
[1] != -1)
2937 if (tb
->tb_next_offset
[0] != 0xffff) {
2939 if (tb
->tb_next_offset
[1] != 0xffff) {
2940 direct_jmp2_count
++;
2944 /* XXX: avoid using doubles ? */
2945 cpu_fprintf(f
, "TB count %d\n", nb_tbs
);
2946 cpu_fprintf(f
, "TB avg target size %d max=%d bytes\n",
2947 nb_tbs
? target_code_size
/ nb_tbs
: 0,
2948 max_target_code_size
);
2949 cpu_fprintf(f
, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
2950 nb_tbs
? (code_gen_ptr
- code_gen_buffer
) / nb_tbs
: 0,
2951 target_code_size
? (double) (code_gen_ptr
- code_gen_buffer
) / target_code_size
: 0);
2952 cpu_fprintf(f
, "cross page TB count %d (%d%%)\n",
2954 nb_tbs
? (cross_page
* 100) / nb_tbs
: 0);
2955 cpu_fprintf(f
, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
2957 nb_tbs
? (direct_jmp_count
* 100) / nb_tbs
: 0,
2959 nb_tbs
? (direct_jmp2_count
* 100) / nb_tbs
: 0);
2960 cpu_fprintf(f
, "TB flush count %d\n", tb_flush_count
);
2961 cpu_fprintf(f
, "TB invalidate count %d\n", tb_phys_invalidate_count
);
2962 cpu_fprintf(f
, "TLB flush count %d\n", tlb_flush_count
);
2965 #if !defined(CONFIG_USER_ONLY)
2967 #define MMUSUFFIX _cmmu
2968 #define GETPC() NULL
2969 #define env cpu_single_env
2970 #define SOFTMMU_CODE_ACCESS
2973 #include "softmmu_template.h"
2976 #include "softmmu_template.h"
2979 #include "softmmu_template.h"
2982 #include "softmmu_template.h"