/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */
#include <sys/types.h>

#include "cpu.h"
#include "exec-all.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#endif
//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif
/* threshold to flush the translated code buffer */
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START        0x00000000
#define MMAP_AREA_END          0xa8000000
#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_SPARC)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#elif defined(TARGET_ALPHA)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#define TARGET_VIRT_ADDR_SPACE_BITS 42
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#else
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif
TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
uint8_t *code_gen_ptr;

int phys_ram_size;
int phys_ram_fd;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;
static ram_addr_t phys_ram_alloc_offset = 0;
CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;
typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;
typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low 12 bits */
    uint32_t phys_offset;
} PhysPageDesc;
#define L2_BITS 10
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
/* XXX: this is a temporary hack for alpha target.
 *      In the future, this is to be replaced by a multi-level table
 *      to actually be able to handle the complete 64 bits address space.
 */
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
#else
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
#endif

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)
static void io_mem_init(void);

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
PhysPageDesc **l1_phys_map;
/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb;
#if defined(CONFIG_SOFTMMU)
static int io_mem_watch;
#endif
/* log support */
char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE];
    CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE];
    void *opaque[TARGET_PAGE_SIZE];
} subpage_t;
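/* Illustrative sketch, not from the original source: how SUBPAGE_IDX
   selects a per-byte slot.  With 4 KiB target pages (an assumption for
   the example values), TARGET_PAGE_MASK is ~0xfff, so SUBPAGE_IDX keeps
   the low 12 bits and every byte offset inside the page has its own
   mem_read/mem_write/opaque slot in subpage_t. */
#if 0
static void subpage_idx_example(void)
{
    target_phys_addr_t addr = 0x1234;
    unsigned int idx = SUBPAGE_IDX(addr);   /* 0x234 with 4 KiB pages */
    (void)idx;
}
#endif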
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;
        DWORD old_protect;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;

        VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
                       PAGE_EXECUTE_READWRITE, &old_protect);
    }
#else
    qemu_real_host_page_size = getpagesize();
    {
        unsigned long start, end;

        start = (unsigned long)code_gen_buffer;
        start &= ~(qemu_real_host_page_size - 1);

        end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
        end += qemu_real_host_page_size - 1;
        end &= ~(qemu_real_host_page_size - 1);

        mprotect((void *)start, end - start,
                 PROT_READ | PROT_WRITE | PROT_EXEC);
    }
#endif

    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
}
static inline PageDesc *page_find_alloc(unsigned int index)
{
    PageDesc **lp, *p;

    lp = &l1_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}
static inline PageDesc *page_find(unsigned int index)
{
    PageDesc *p;

    p = l1_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}
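/* Illustrative sketch, not from the original source: the arithmetic behind
   the two-level lookup above.  A page index splits into an L1 slot
   (index >> L2_BITS) and an L2 slot (index & (L2_SIZE - 1)); with
   L2_BITS == 10 each L2 table covers 1024 consecutive pages. */
#if 0
static void page_table_split_example(void)
{
    unsigned int index = 0x12345;                 /* some page index */
    unsigned int l1_slot = index >> L2_BITS;      /* 0x48 when L2_BITS == 10 */
    unsigned int l2_slot = index & (L2_SIZE - 1); /* 0x345 */
    (void)l1_slot; (void)l2_slot;
}
#endif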
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;
    PhysPageDesc *pd;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    pd = *lp;
    if (!pd) {
        int i;
        /* allocate if not found */
        if (!alloc)
            return NULL;
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        *lp = pd;
        for (i = 0; i < L2_SIZE; i++)
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
    }
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
}
static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}
#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#endif
void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

    if (!code_gen_ptr) {
        code_gen_ptr = code_gen_buffer;
        page_init();
        io_mem_init();
    }
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = (CPUState **)&(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->nb_watchpoints = 0;
    *penv = env;
}
static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}
/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
           code_gen_ptr - code_gen_buffer,
           nb_tbs,
           nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
#endif
    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}
#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;

    address &= TARGET_PAGE_MASK;
    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}
/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}
void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif
/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}
/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}
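/* Illustrative sketch, not from the original source: the jump lists used
   throughout this file tag the low two bits of each TranslationBlock
   pointer.  A tag of 0 or 1 names which jmp_next slot of the pointed-to TB
   continues the list; tag 2 marks the list head stored in jmp_first.
   TBs are at least 4-byte aligned, so the two bits are free. */
#if 0
static void tb_tag_example(TranslationBlock *tb)
{
    long tagged = (long)tb | 2;                               /* head marker */
    int n1 = tagged & 3;                                      /* == 2 */
    TranslationBlock *p = (TranslationBlock *)(tagged & ~3);  /* untagged */
    (void)n1; (void)p;
}
#endif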
static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    target_ulong phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
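/* Illustrative sketch, not from the original source: set_bits marks a run
   of bits in a byte-array bitmap, least significant bit first.  Setting 4
   bits starting at bit 6 touches two bytes: bits 6-7 of byte 0 and bits
   0-1 of byte 1. */
#if 0
static void set_bits_example(void)
{
    uint8_t bitmap[2] = { 0, 0 };
    set_bits(bitmap, 6, 4);   /* bitmap[0] == 0xc0, bitmap[1] == 0x03 */
}
#endif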
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
    if (!p->code_bitmap)
        return;
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
#ifdef TARGET_HAS_PRECISE_SMC

static void tb_gen_code(CPUState *env,
                        target_ulong pc, target_ulong cs_base, int flags,
                        int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
}
#endif
/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
                                   int is_cpu_write_access)
{
    int n, current_tb_modified, current_tb_not_found, current_flags;
    CPUState *env = cpu_single_env;
    PageDesc *p;
    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
    target_ulong tb_start, tb_end;
    target_ulong current_pc, current_cs_base;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    current_tb_not_found = is_cpu_write_access;
    current_tb_modified = 0;
    current_tb = NULL; /* avoid warning */
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_write_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_write_pc);
                }
            }
            if (current_tb == tb &&
                !(current_tb->cflags & CF_SINGLE_INSN)) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_write_pc, NULL);
#if defined(TARGET_I386)
                current_flags = env->hflags;
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
                current_cs_base = (target_ulong)env->segs[R_CS].base;
                current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}
/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        if (loglevel) {
            fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                    cpu_single_env->mem_write_vaddr, len,
                    cpu_single_env->eip,
                    cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
        }
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_ulong addr,
                                    unsigned long pc, void *puc)
{
    int n, current_flags, current_tb_modified;
    target_ulong current_pc, current_cs_base;
    PageDesc *p;
    TranslationBlock *tb, *current_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *env = cpu_single_env;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
    current_tb_modified = 0;
    current_tb = NULL;
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            !(current_tb->cflags & CF_SINGLE_INSN)) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
#if defined(TARGET_I386)
            current_flags = env->hflags;
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
            current_cs_base = (target_ulong)env->segs[R_CS].base;
            current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif
/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, target_ulong page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
            page_get_flags(addr);
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x%08lx\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}
/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
        (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;
#ifdef USE_CODE_COPY
    tb->cflags &= ~CF_FP_USED;
    if (tb->cflags & CF_TB_FP_USED)
        tb->cflags |= CF_FP_USED;
#endif

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
}
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}
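/* Illustrative sketch, not from the original source: because tbs[] is
   filled in strictly increasing tc_ptr order as code is generated, the
   binary search above can map any host PC inside the code buffer back to
   its TB, for instance from a signal handler that faulted in generated
   code. */
#if 0
static TranslationBlock *tb_for_host_pc(unsigned long host_pc)
{
    TranslationBlock *tb = tb_find_pc(host_pc);
    /* tb is NULL if host_pc lies outside the generated-code buffer */
    return tb;
}
#endif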
static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}
#if defined(TARGET_HAS_ICE)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    target_ulong pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr)
{
    int i;

    for (i = 0; i < env->nb_watchpoints; i++) {
        if (addr == env->watchpoint[i].vaddr)
            return 0;
    }
    if (env->nb_watchpoints >= MAX_WATCHPOINTS)
        return -1;

    i = env->nb_watchpoints++;
    env->watchpoint[i].vaddr = addr;
    tlb_flush_page(env, addr);
    /* FIXME: This flush is needed because of the hack to make memory ops
       terminate the TB.  It can be removed once the proper IO trap and
       re-execute bits are in.  */
    tb_flush(env);
    return i;
}
/* Remove a watchpoint.  */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
{
    int i;

    for (i = 0; i < env->nb_watchpoints; i++) {
        if (addr == env->watchpoint[i].vaddr) {
            env->nb_watchpoints--;
            env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
            tlb_flush_page(env, addr);
            return 0;
        }
    }
    return -1;
}
/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
   breakpoint is reached */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            return 0;
    }

    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
        return -1;
    env->breakpoints[env->nb_breakpoints++] = pc;

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}
/* remove a breakpoint */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;
    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            goto found;
    }
    return -1;
 found:
    env->nb_breakpoints--;
    if (i < env->nb_breakpoints)
        env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
        tb_flush(env);
    }
#endif
}
/* enable or disable low levels log */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
    }
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    cpu_set_log(loglevel);
}
/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    TranslationBlock *tb;
    static int interrupt_lock;

    env->interrupt_request |= mask;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    tb = env->current_tb;
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
        interrupt_lock = 0;
    }
}

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}
CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
#ifdef TARGET_I386
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops after optimization for each compiled TB" },
#endif
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};
static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}
/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if(cmp1(p,p1-p,"all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}
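/* Illustrative sketch, not from the original source: typical use of the
   parser above from a command-line handler.  "all" expands to every mask
   bit; an unknown name makes the whole call return 0. */
#if 0
static void log_option_example(void)
{
    int mask = cpu_str_to_log_mask("in_asm,op");
    if (mask == 0) {
        /* unknown item in the comma-separated list */
    } else {
        cpu_set_log(mask);
    }
}
#endif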
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    if(env->intercept & INTERCEPT_SVM_MASK) {
        /* most probably the virtual machine should not
           be shut down but rather caught by the VMM */
        vmexit(SVM_EXIT_SHUTDOWN, 0);
    }
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    va_end(ap);
    abort();
}
CPUState *cpu_copy(CPUState *env)
{
    CPUState *new_env = cpu_init();
    /* preserve chaining and index */
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
    memcpy(new_env, env, sizeof(CPUState));
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;
    return new_env;
}
#if !defined(CONFIG_USER_ONLY)

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_table[0][i].addr_read = -1;
        env->tlb_table[0][i].addr_write = -1;
        env->tlb_table[0][i].addr_code = -1;
        env->tlb_table[1][i].addr_read = -1;
        env->tlb_table[1][i].addr_write = -1;
        env->tlb_table[1][i].addr_code = -1;
#if (NB_MMU_MODES >= 3)
        env->tlb_table[2][i].addr_read = -1;
        env->tlb_table[2][i].addr_write = -1;
        env->tlb_table[2][i].addr_code = -1;
#if (NB_MMU_MODES == 4)
        env->tlb_table[3][i].addr_read = -1;
        env->tlb_table[3][i].addr_write = -1;
        env->tlb_table[3][i].addr_code = -1;
#endif
#endif
    }

    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

#if !defined(CONFIG_SOFTMMU)
    munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush(env, flush_global);
    }
#endif
    tlb_flush_count++;
}
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        tlb_entry->addr_read = -1;
        tlb_entry->addr_write = -1;
        tlb_entry->addr_code = -1;
    }
}
void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;
    TranslationBlock *tb;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_table[0][i], addr);
    tlb_flush_entry(&env->tlb_table[1][i], addr);
#if (NB_MMU_MODES >= 3)
    tlb_flush_entry(&env->tlb_table[2][i], addr);
#if (NB_MMU_MODES == 4)
    tlb_flush_entry(&env->tlb_table[3][i], addr);
#endif
#endif

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));

    i = tb_jmp_cache_hash_page(addr);
    memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));

#if !defined(CONFIG_SOFTMMU)
    if (addr < MMAP_AREA_END)
        munmap((void *)addr, TARGET_PAGE_SIZE);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush_page(env, addr);
    }
#endif
}
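/* Illustrative sketch, not from the original source: the direct-mapped TLB
   indexing used above.  Each MMU mode's table has CPU_TLB_SIZE entries, so
   a page can only ever occupy one slot and only that slot needs flushing. */
#if 0
static int tlb_index_example(target_ulong vaddr)
{
    /* e.g. with 4 KiB pages and CPU_TLB_SIZE == 256 (assumed values),
       vaddr 0x40001000 maps to slot 1 */
    return (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
}
#endif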
/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
}
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
        }
    }
}
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i, mask, len;
    uint8_t *p;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    len = length >> TARGET_PAGE_BITS;
#ifdef USE_KQEMU
    /* XXX: should not depend on cpu context */
    env = first_cpu;
    if (env->kqemu_enabled) {
        ram_addr_t addr;
        addr = start;
        for(i = 0; i < len; i++) {
            kqemu_set_notdirty(env, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }
#endif
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    for(i = 0; i < len; i++)
        p[i] &= mask;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = start + (unsigned long)phys_ram_base;
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
#if (NB_MMU_MODES >= 3)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
#if (NB_MMU_MODES == 4)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
#endif
#endif
    }

#if !defined(CONFIG_SOFTMMU)
    /* XXX: this is expensive */
    {
        VirtPageDesc *p;
        int j;
        target_ulong addr;

        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                addr = i << (TARGET_PAGE_BITS + L2_BITS);
                for(j = 0; j < L2_SIZE; j++) {
                    if (p->valid_tag == virt_valid_tag &&
                        p->phys_addr >= start && p->phys_addr < end &&
                        (p->prot & PROT_WRITE)) {
                        if (addr < MMAP_AREA_END) {
                            mprotect((void *)addr, TARGET_PAGE_SIZE,
                                     p->prot & ~PROT_WRITE);
                        }
                    }
                    addr += TARGET_PAGE_SIZE;
                    p++;
                }
            }
        }
    }
#endif
}
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
            tlb_entry->addend - (unsigned long)phys_ram_base;
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
        }
    }
}
/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
{
    int i;
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[0][i]);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[1][i]);
#if (NB_MMU_MODES >= 3)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[2][i]);
#if (NB_MMU_MODES == 4)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[3][i]);
#endif
#endif
}
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
                                  unsigned long start)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if (addr == start) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
        }
    }
}
/* update the TLB corresponding to virtual page vaddr and phys addr
   addr so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
    int i;

    addr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_set_dirty1(&env->tlb_table[0][i], addr);
    tlb_set_dirty1(&env->tlb_table[1][i], addr);
#if (NB_MMU_MODES >= 3)
    tlb_set_dirty1(&env->tlb_table[2][i], addr);
#if (NB_MMU_MODES == 4)
    tlb_set_dirty1(&env->tlb_table[3][i], addr);
#endif
#endif
}
/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int is_user, int is_softmmu)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_phys_addr_t addend;
    int ret;
    CPUTLBEntry *te;
    int i;

    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x u=%d smmu=%d pd=0x%08lx\n",
           vaddr, (int)paddr, prot, is_user, is_softmmu, pd);
#endif

    ret = 0;
#if !defined(CONFIG_SOFTMMU)
    if (is_softmmu)
#endif
    {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
            /* IO memory case */
            address = vaddr | pd;
            addend = paddr;
        } else {
            /* standard memory */
            address = vaddr;
            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
        }

        /* Make accesses to pages with watchpoints go via the
           watchpoint trap routines.  */
        for (i = 0; i < env->nb_watchpoints; i++) {
            if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
                if (address & ~TARGET_PAGE_MASK) {
                    env->watchpoint[i].addend = 0;
                    address = vaddr | io_mem_watch;
                } else {
                    env->watchpoint[i].addend = pd - paddr +
                        (unsigned long) phys_ram_base;
                    /* TODO: Figure out how to make read watchpoints coexist
                       with code.  */
                    pd = (pd & TARGET_PAGE_MASK) | io_mem_watch | IO_MEM_ROMD;
                }
            }
        }

        index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        addend -= vaddr;
        te = &env->tlb_table[is_user][index];
        te->addend = addend;
        if (prot & PAGE_READ) {
            te->addr_read = address;
        } else {
            te->addr_read = -1;
        }
        if (prot & PAGE_EXEC) {
            te->addr_code = address;
        } else {
            te->addr_code = -1;
        }
        if (prot & PAGE_WRITE) {
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
                (pd & IO_MEM_ROMD)) {
                /* write access calls the I/O callback */
                te->addr_write = vaddr |
                    (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
            } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                       !cpu_physical_memory_is_dirty(pd)) {
                te->addr_write = vaddr | IO_MEM_NOTDIRTY;
            } else {
                te->addr_write = address;
            }
        } else {
            te->addr_write = -1;
        }
    }
#if !defined(CONFIG_SOFTMMU)
    else {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO access: no mapping is done as it will be handled by the
               soft MMU */
            if (!(env->hflags & HF_SOFTMMU_MASK))
                ret = 2;
        } else {
            void *map_addr;

            if (vaddr >= MMAP_AREA_END) {
                ret = 2;
            } else {
                if (prot & PROT_WRITE) {
                    if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
#if defined(TARGET_HAS_SMC) || 1
                        first_tb ||
#endif
                        ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                         !cpu_physical_memory_is_dirty(pd))) {
                        /* ROM: we do as if code was inside */
                        /* if code is present, we only map as read only and save the
                           original mapping */
                        VirtPageDesc *vp;

                        vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
                        vp->phys_addr = pd;
                        vp->prot = prot;
                        vp->valid_tag = virt_valid_tag;
                        prot &= ~PAGE_WRITE;
                    }
                }
                map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
                                MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
                if (map_addr == MAP_FAILED) {
                    cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
                              paddr, vaddr);
                }
            }
        }
    }
#endif
    return ret;
}
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    VirtPageDesc *vp;

#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x\n", addr);
#endif
    addr &= TARGET_PAGE_MASK;

    /* if it is not mapped, no need to worry here */
    if (addr >= MMAP_AREA_END)
        return 0;
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (!vp)
        return 0;
    /* NOTE: in this case, validate_tag is _not_ tested as it
       validates only the code TLB */
    if (vp->valid_tag != virt_valid_tag)
        return 0;
    if (!(vp->prot & PAGE_WRITE))
        return 0;
#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
           addr, vp->phys_addr, vp->prot);
#endif
    if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
        cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
                  (unsigned long)addr, vp->prot);
    /* set the dirty bit */
    phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
    /* flush the code inside */
    tb_invalidate_phys_page(vp->phys_addr, pc, puc);
    return 1;
#else
    return 0;
#endif
}
*env
, int flush_global
)
1779 void tlb_flush_page(CPUState
*env
, target_ulong addr
)
1783 int tlb_set_page_exec(CPUState
*env
, target_ulong vaddr
,
1784 target_phys_addr_t paddr
, int prot
,
1785 int is_user
, int is_softmmu
)
/* dump memory mappings */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    start = -1;
    end = -1;
    prot = 0;
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0;j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}
int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}
/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is positioned automatically
   depending on PAGE_WRITE */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    PageDesc *p;
    target_ulong addr;

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    spin_lock(&tb_lock);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* if the write protection is set, then we invalidate the code
           inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
    spin_unlock(&tb_lock);
}
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    target_ulong host_start, host_end, addr;

    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1)
        return 0;
    host_end = host_start + qemu_host_page_size;
    p = p1;
    prot = 0;
    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)g2h(host_start), qemu_host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            return 1;
        }
    }
    return 0;
}
/* call this function when system calls directly modify a memory area */
/* ??? This should be redundant now we have lock_user.  */
void page_unprotect_range(target_ulong data, target_ulong data_size)
{
    target_ulong start, end, addr;

    start = data;
    end = start + data_size;
    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        page_unprotect(addr, 0, NULL);
    }
}
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             int memory);
static void *subpage_init (target_phys_addr_t base, uint32_t *phys,
                           int orig_memory);
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
                      need_subpage)                                     \
    do {                                                                \
        if (addr > start_addr)                                          \
            start_addr2 = 0;                                            \
        else {                                                          \
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
            if (start_addr2 > 0)                                        \
                need_subpage = 1;                                       \
        }                                                               \
                                                                        \
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
        else {                                                          \
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
                need_subpage = 1;                                       \
        }                                                               \
    } while (0)
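/* Illustrative sketch, not from the original source: what CHECK_SUBPAGE
   computes for a region that does not cover a whole target page.  The
   values below assume 4 KiB pages; a region starting or ending mid-page
   sets need_subpage so the page is split into per-byte handlers. */
#if 0
static void check_subpage_example(void)
{
    target_phys_addr_t addr = 0x1000;        /* page being examined */
    target_phys_addr_t start_addr = 0x1100;  /* region starts mid-page */
    unsigned long orig_size = 0x80;
    target_phys_addr_t end_addr = start_addr + orig_size;
    target_phys_addr_t start_addr2, end_addr2;
    int need_subpage = 0;

    CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                  need_subpage);
    /* start_addr2 == 0x100, end_addr2 == 0x17f, need_subpage == 1 */
}
#endif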
/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                  unsigned long size,
                                  unsigned long phys_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;
    unsigned long orig_size = size;
    void *subpage;

    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            unsigned long orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                          need_subpage);
            if (need_subpage) {
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory);
                } else {
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                                            >> IO_MEM_SHIFT];
                }
                subpage_register(subpage, start_addr2, end_addr2, phys_offset);
            } else {
                p->phys_offset = phys_offset;
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                    (phys_offset & IO_MEM_ROMD))
                    phys_offset += TARGET_PAGE_SIZE;
            }
        } else {
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                (phys_offset & IO_MEM_ROMD))
                phys_offset += TARGET_PAGE_SIZE;
            else {
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);

                if (need_subpage) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, IO_MEM_UNASSIGNED);
                    subpage_register(subpage, start_addr2, end_addr2,
                                     phys_offset);
                }
            }
        }
    }

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}
/* XXX: temporary until new memory mapping API */
uint32_t cpu_get_physical_page_desc(target_phys_addr_t addr)
{
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return IO_MEM_UNASSIGNED;
    return p->phys_offset;
}
/* XXX: better than nothing */
ram_addr_t qemu_ram_alloc(unsigned int size)
{
    ram_addr_t addr;
    if ((phys_ram_alloc_offset + size) >= phys_ram_size) {
        fprintf(stderr, "Not enough memory (requested_size = %u, max memory = %d)\n",
                size, phys_ram_size);
        abort();
    }
    addr = phys_ram_alloc_offset;
    phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
    return addr;
}

void qemu_ram_free(ram_addr_t addr)
{
}
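/* Illustrative sketch, not from the original source: callers treat the
   returned value as a ram_addr_t offset into phys_ram_base, typically
   handed straight to cpu_register_physical_memory().  The sizes and base
   address here are arbitrary example values. */
#if 0
static void ram_alloc_example(void)
{
    ram_addr_t ram_offset = qemu_ram_alloc(0x100000);   /* 1 MiB of RAM */
    cpu_register_physical_memory(0x00000000, 0x100000, ram_offset);
}
#endif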
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_lx "\n", addr);
#endif
#ifdef TARGET_SPARC
    do_unassigned_access(addr, 0, 0, 0);
#endif
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_lx " = 0x%x\n", addr, val);
#endif
#ifdef TARGET_SPARC
    do_unassigned_access(addr, 1, 0, 0);
#endif
}

static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readb,
    unassigned_mem_readb,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writeb,
    unassigned_mem_writeb,
};
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stb_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stw_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stl_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}
static CPUReadMemoryFunc *error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};
#if defined(CONFIG_SOFTMMU)
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    return ldub_phys(addr);
}

static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    return lduw_phys(addr);
}

static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    return ldl_phys(addr);
}

/* Generate a debug exception if a watchpoint has been hit.
   Returns the real physical address of the access.  addr will be a host
   address in case of a RAM location.  */
static target_ulong check_watchpoint(target_phys_addr_t addr)
{
    CPUState *env = cpu_single_env;
    target_ulong watch;
    target_ulong retaddr;
    int i;

    retaddr = addr;
    for (i = 0; i < env->nb_watchpoints; i++) {
        watch = env->watchpoint[i].vaddr;
        if (((env->mem_write_vaddr ^ watch) & TARGET_PAGE_MASK) == 0) {
            retaddr = addr - env->watchpoint[i].addend;
            if (((addr ^ watch) & ~TARGET_PAGE_MASK) == 0) {
                cpu_single_env->watchpoint_hit = i + 1;
                cpu_interrupt(cpu_single_env, CPU_INTERRUPT_DEBUG);
                break;
            }
        }
    }
    return retaddr;
}

static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    addr = check_watchpoint(addr);
    stb_phys(addr, val);
}

static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    addr = check_watchpoint(addr);
    stw_phys(addr, val);
}

static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    addr = check_watchpoint(addr);
    stl_phys(addr, val);
}

static CPUReadMemoryFunc *watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};

static CPUWriteMemoryFunc *watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};
#endif
static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
                                        unsigned int len)
{
    CPUReadMemoryFunc **mem_read;
    uint32_t ret;
    unsigned int idx;

    idx = SUBPAGE_IDX(addr - mmio->base);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif
    mem_read = mmio->mem_read[idx];
    ret = (*mem_read[len])(mmio->opaque[idx], addr);

    return ret;
}

static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
                                     uint32_t value, unsigned int len)
{
    CPUWriteMemoryFunc **mem_write;
    unsigned int idx;

    idx = SUBPAGE_IDX(addr - mmio->base);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
           mmio, len, addr, idx, value);
#endif
    mem_write = mmio->mem_write[idx];
    (*mem_write[len])(mmio->opaque[idx], addr, value);
}
static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 0);
}

static void subpage_writeb (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 0);
}

static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 1);
}

static void subpage_writew (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 1);
}

static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 2);
}

static void subpage_writel (void *opaque,
                            target_phys_addr_t addr, uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 2);
}

static CPUReadMemoryFunc *subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};

static CPUWriteMemoryFunc *subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             int memory)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    memory >>= IO_MEM_SHIFT;
    for (; idx <= eidx; idx++) {
        mmio->mem_read[idx] = io_mem_read[memory];
        mmio->mem_write[idx] = io_mem_write[memory];
        mmio->opaque[idx] = io_mem_opaque[memory];
    }

    return 0;
}
static void *subpage_init (target_phys_addr_t base, uint32_t *phys,
                           int orig_memory)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = qemu_mallocz(sizeof(subpage_t));
    if (mmio != NULL) {
        mmio->base = base;
        subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
#if defined(DEBUG_SUBPAGE)
        printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
               mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
        *phys = subpage_memory | IO_MEM_SUBPAGE;
        subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
    }

    return mmio;
}
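
/* Illustration only (not compiled): splitting one target page between
   two io zones.  io_a and io_b stand for values previously returned by
   cpu_register_io_memory(); base is a page-aligned physical address.
   Accesses are routed by SUBPAGE_IDX(addr - base). */
#if 0
    uint32_t phys;
    subpage_t *sp;

    sp = subpage_init(base, &phys, io_a);
    /* route the second half of the page to the other device */
    subpage_register(sp, TARGET_PAGE_SIZE / 2, TARGET_PAGE_SIZE - 1, io_b);
    /* phys now has IO_MEM_SUBPAGE set and is suitable as a PhysPageDesc
       phys_offset, so accesses are dispatched per subpage through
       subpage_readlen() / subpage_writelen(). */
#endif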
static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
    io_mem_nb = 5;

#if defined(CONFIG_SOFTMMU)
    io_mem_watch = cpu_register_io_memory(-1, watch_mem_read,
                                          watch_mem_write, NULL);
#endif
    /* alloc dirty bits array */
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
}
/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). All functions must be supplied. If io_index is non-zero, the
   corresponding I/O zone is modified. If it is zero, a new I/O zone is
   allocated. The return value can be used with
   cpu_register_physical_memory(); -1 is returned on error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i;

    if (io_index <= 0) {
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0; i < 3; i++) {
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return io_index << IO_MEM_SHIFT;
}
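
/* Illustration only (not compiled): registering a device with the
   interface described above.  The mydev_* callbacks and mydev_state
   are hypothetical names, not part of this file. */
#if 0
static CPUReadMemoryFunc *mydev_read[3] = {
    mydev_readb, mydev_readw, mydev_readl,
};
static CPUWriteMemoryFunc *mydev_write[3] = {
    mydev_writeb, mydev_writew, mydev_writel,
};

static void mydev_map(target_phys_addr_t base)
{
    int io;

    /* io_index 0 asks for a new io zone; the shifted index that comes
       back can be handed to cpu_register_physical_memory() */
    io = cpu_register_io_memory(0, mydev_read, mydev_write, mydev_state);
    if (io != -1)
        cpu_register_physical_memory(base, TARGET_PAGE_SIZE, io);
}
#endif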
CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
{
    return io_mem_write[io_index >> IO_MEM_SHIFT];
}

CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
{
    return io_mem_read[io_index >> IO_MEM_SHIFT];
}
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            /* copy at most one page per iteration so that the flags of
               every page are checked */
            p = lock_user(addr, l, 0);
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            p = lock_user(addr, l, 1);
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
#endif
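
/* Illustration only (not compiled): typical use of this slow path via
   the cpu_physical_memory_read()/write() wrappers around
   cpu_physical_memory_rw(); gpa stands for a hypothetical guest
   physical address. */
#if 0
    uint8_t buf[16];

    cpu_physical_memory_read(gpa, buf, sizeof(buf));   /* is_write = 0 */
    buf[0] ^= 1;
    cpu_physical_memory_write(gpa, buf, sizeof(buf));  /* is_write = 1 */
#endif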
/* used for ROM loading: can write to RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = phys_ram_base + addr1;
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}
/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case: two 32 bit reads in guest word order */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}
/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* XXX: optimize */
uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}
/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. This is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stl_p(ptr, val);
    }
}
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}
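
/* Illustration only (not compiled): a sketch of why the _notdirty
   variants exist.  A target MMU helper that sets the accessed bit of a
   page table entry (PG_ACCESSED_MASK is borrowed from the i386 target
   for this example) writes the PTE back without touching the dirty
   bitmap, so the dirty bits keep tracking only modifications made by
   the guest itself. */
#if 0
    uint32_t pte = ldl_phys(pte_addr);
    if (!(pte & PG_ACCESSED_MASK)) {
        pte |= PG_ACCESSED_MASK;
        stl_phys_notdirty(pte_addr, pte);
    }
#endif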
/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}
/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* XXX: optimize */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}
/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
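
/* Illustration only (not compiled): the gdb stub reaches guest virtual
   memory through this helper, e.g. when reading the instruction bytes
   at a breakpoint address. */
#if 0
    uint8_t insn[4];

    if (cpu_memory_rw_debug(env, pc, insn, sizeof(insn), 0) < 0)
        return -1; /* no physical page mapped at pc */
#endif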
void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "TB count %d\n", nb_tbs);
    cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
}
#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif