 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
#include <sys/types.h>

#if defined(CONFIG_USER_ONLY)

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */

/* threshold to flush the translated code buffer */
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START        0x00000000
#define MMAP_AREA_END          0xa8000000

#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_SPARC)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#elif defined(TARGET_ALPHA)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#define TARGET_VIRT_ADDR_SPACE_BITS 42
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
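/* Note: all generated host code lives in the statically allocated
   code_gen_buffer and every TranslationBlock descriptor in the tbs[] array;
   when either fills up, tb_flush() discards everything and translation
   starts again from an empty buffer. */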
uint8_t *code_gen_ptr;

uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;
static ram_addr_t phys_ram_alloc_offset = 0;

/* current CPU in the current thread. It is only valid inside cpu_exec() */
CPUState *cpu_single_env;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low 12 bits */
    uint32_t phys_offset;

#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
/* XXX: this is a temporary hack for alpha target.
 *      In the future, this is to be replaced by a multi-level table
 *      to actually be able to handle the complete 64 bits address space.
 */
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
#else
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)
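/* Note: pages are tracked with a two-level table: the upper bits of a page
   index select an entry in l1_map (or l1_phys_map), which points to an array
   of L2_SIZE descriptors; the low L2_BITS select the descriptor within that
   array. The L2 arrays are allocated lazily on first use. */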
static void io_mem_init(void);

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
PhysPageDesc **l1_phys_map;

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb;
#if defined(CONFIG_SOFTMMU)
static int io_mem_watch;

char *logfilename = "/tmp/qemu.log";
static int log_append = 0;

static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE];
    CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE];
    void *opaque[TARGET_PAGE_SIZE];
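/* Note: a subpage_t is used when several devices share one target page:
   SUBPAGE_IDX() turns the offset within the page into an index into the
   per-byte mem_read/mem_write/opaque dispatch tables filled in by
   subpage_register(). */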
static void page_init(void)
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
    SYSTEM_INFO system_info;

    GetSystemInfo(&system_info);
    qemu_real_host_page_size = system_info.dwPageSize;

    VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
                   PAGE_EXECUTE_READWRITE, &old_protect);

    qemu_real_host_page_size = getpagesize();
    unsigned long start, end;

    start = (unsigned long)code_gen_buffer;
    start &= ~(qemu_real_host_page_size - 1);

    end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
    end += qemu_real_host_page_size - 1;
    end &= ~(qemu_real_host_page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);

    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
static inline PageDesc *page_find_alloc(unsigned int index)
    lp = &l1_map[index >> L2_BITS];

    /* allocate if not found */
    p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
    memset(p, 0, sizeof(PageDesc) * L2_SIZE);

    return p + (index & (L2_SIZE - 1));

static inline PageDesc *page_find(unsigned int index)
    p = l1_map[index >> L2_BITS];
    return p + (index & (L2_SIZE - 1));

static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    /* allocate if not found */
    p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
    memset(p, 0, sizeof(void *) * L1_SIZE);

    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    /* allocate if not found */
    pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
    for (i = 0; i < L2_SIZE; i++)
        pd[i].phys_offset = IO_MEM_UNASSIGNED;

    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
    return phys_page_find_alloc(index, 0);
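/* Note: phys_page_find() only looks up an existing descriptor (alloc == 0),
   so callers see NULL for physical pages that were never registered and
   treat them as IO_MEM_UNASSIGNED. */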
#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,

void cpu_exec_init(CPUState *env)
    code_gen_ptr = code_gen_buffer;
    env->next_cpu = NULL;
    while (*penv != NULL) {
        penv = (CPUState **)&(*penv)->next_cpu;
    env->cpu_index = cpu_index;
    env->nb_watchpoints = 0;
static inline void invalidate_page_bitmap(PageDesc *p)
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    p->code_write_count = 0;

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
    for(i = 0; i < L1_SIZE; i++) {
        for(j = 0; j < L2_SIZE; j++) {
            invalidate_page_bitmap(p);
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs > 0 ? ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
    TranslationBlock *tb;

    address &= TARGET_PAGE_MASK;
    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);

void tb_jmp_check(TranslationBlock *tb)
    TranslationBlock *tb1;

    /* suppress any remaining jumps to this TB */
    tb1 = (TranslationBlock *)((long)tb1 & ~3);
    tb1 = tb1->jmp_next[n1];
    /* check end of list */
    printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
    TranslationBlock *tb1;

    *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
    ptb = (TranslationBlock **)((char *)tb1 + next_offset);

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
    TranslationBlock *tb1;

    tb1 = (TranslationBlock *)((long)tb1 & ~3);
    *ptb = tb1->page_next[n1];
    ptb = &tb1->page_next[n1];

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
    TranslationBlock *tb1, **ptb;

    ptb = &tb->jmp_next[n];
    /* find tb(n) in circular list */
    tb1 = (TranslationBlock *)((long)tb1 & ~3);
    if (n1 == n && tb1 == tb)
    ptb = &tb1->jmp_first;
    ptb = &tb1->jmp_next[n1];
    /* now we can suppress tb(n) from the list */
    *ptb = tb->jmp_next[n];
    tb->jmp_next[n] = NULL;
/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));

static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
    target_ulong phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = (TranslationBlock *)((long)tb1 & ~3);
    tb2 = tb1->jmp_next[n1];
    tb_reset_jump(tb1, n1);
    tb1->jmp_next[n1] = NULL;
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
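/* Note: the low two bits of the pointers stored in jmp_first/jmp_next
   encode which outgoing jump slot (0 or 1) of the pointing TB is used;
   the value ((long)tb | 2) marks the end of the circular list, which is
   why the code above masks with ~3 before dereferencing. */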
static inline void set_bits(uint8_t *tab, int start, int len)
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        mask &= ~(0xff << (end & 7));
    start = (start + 8) & ~7;
    while (start < end1) {
    mask = ~(0xff << (end & 7));

static void build_page_bitmap(PageDesc *p)
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);

    tb = (TranslationBlock *)((long)tb & ~3);
    /* NOTE: this is subtle as a TB may span two physical pages */
    /* NOTE: tb_end may be after the end of the page, but
       it is not a problem */
    tb_start = tb->pc & ~TARGET_PAGE_MASK;
    tb_end = tb_start + tb->size;
    if (tb_end > TARGET_PAGE_SIZE)
        tb_end = TARGET_PAGE_SIZE;
    tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
    set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
    tb = tb->page_next[n];
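/* Note: the code bitmap holds one bit per byte of the guest page
   (TARGET_PAGE_SIZE / 8 bytes in total); a set bit means translated code was
   generated from that byte, which lets tb_invalidate_phys_page_fast() skip
   writes that do not touch any code. */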
#ifdef TARGET_HAS_PRECISE_SMC

static void tb_gen_code(CPUState *env,
                        target_ulong pc, target_ulong cs_base, int flags,
    TranslationBlock *tb;
    target_ulong phys_pc, phys_page2, virt_page2;

    phys_pc = get_phys_addr_code(env, pc);
    /* flush must be done */
    /* cannot fail at this point */
    tc_ptr = code_gen_ptr;
    tb->cs_base = cs_base;
    cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    tb_link_phys(tb, phys_pc, phys_page2);
/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
                                   int is_cpu_write_access)
    int n, current_tb_modified, current_tb_not_found, current_flags;
    CPUState *env = cpu_single_env;
    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
    target_ulong tb_start, tb_end;
    target_ulong current_pc, current_cs_base;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    current_tb_not_found = is_cpu_write_access;
    current_tb_modified = 0;
    current_tb = NULL; /* avoid warning */
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */

    tb = (TranslationBlock *)((long)tb & ~3);
    tb_next = tb->page_next[n];
    /* NOTE: this is subtle as a TB may span two physical pages */
    /* NOTE: tb_end may be after the end of the page, but
       it is not a problem */
    tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    tb_end = tb_start + tb->size;
    tb_start = tb->page_addr[1];
    tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
    if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb_not_found) {
            current_tb_not_found = 0;
            if (env->mem_write_pc) {
                /* now we have a real cpu fault */
                current_tb = tb_find_pc(env->mem_write_pc);
        if (current_tb == tb &&
            !(current_tb->cflags & CF_SINGLE_INSN)) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */
            current_tb_modified = 1;
            cpu_restore_state(current_tb, env,
                              env->mem_write_pc, NULL);
#if defined(TARGET_I386)
            current_flags = env->hflags;
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
            current_cs_base = (target_ulong)env->segs[R_CS].base;
            current_pc = current_cs_base + env->eip;
#error unsupported CPU
#endif /* TARGET_HAS_PRECISE_SMC */
        /* we need to do that to handle the case where a signal
           occurs while doing tb_phys_invalidate() */
        saved_tb = env->current_tb;
        env->current_tb = NULL;
        tb_phys_invalidate(tb, -1);
        env->current_tb = saved_tb;
        if (env->interrupt_request && env->current_tb)
            cpu_interrupt(env, env->interrupt_request);

#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    invalidate_page_bitmap(p);
    if (is_cpu_write_access) {
        tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);

#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
        cpu_resume_from_signal(env, NULL);
/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
    fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
            cpu_single_env->mem_write_vaddr, len,
            cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    p = page_find(start >> TARGET_PAGE_BITS);
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
    tb_invalidate_phys_page_range(start, start + len, 1);
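/* Note: this fast path only falls back to the full (and expensive)
   tb_invalidate_phys_page_range() when the written bytes overlap a set bit
   in the code bitmap, or when no bitmap has been built for the page yet. */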
#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_ulong addr,
                                    unsigned long pc, void *puc)
    int n, current_flags, current_tb_modified;
    target_ulong current_pc, current_cs_base;
    TranslationBlock *tb, *current_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *env = cpu_single_env;

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    current_tb_modified = 0;
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
#ifdef TARGET_HAS_PRECISE_SMC
    current_tb = tb_find_pc(pc);
    tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb == tb &&
        !(current_tb->cflags & CF_SINGLE_INSN)) {
        /* If we are modifying the current TB, we must stop
           its execution. We could be more precise by checking
           that the modification is after the current PC, but it
           would require a specialized function to partially
           restore the CPU state */
        current_tb_modified = 1;
        cpu_restore_state(current_tb, env, pc, puc);
#if defined(TARGET_I386)
        current_flags = env->hflags;
        current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
        current_cs_base = (target_ulong)env->segs[R_CS].base;
        current_pc = current_cs_base + env->eip;
#error unsupported CPU
#endif /* TARGET_HAS_PRECISE_SMC */
    tb_phys_invalidate(tb, addr);
    tb = tb->page_next[n];
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
        cpu_resume_from_signal(env, puc);
/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, target_ulong page_addr)
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {
            p2 = page_find (addr >> TARGET_PAGE_BITS);
            p2->flags &= ~PAGE_WRITE;
            page_get_flags(addr);
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);

#endif /* TARGET_HAS_SMC */
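/* Note: tb_alloc_page() write protects guest pages containing translated
   code: in user mode by mprotect()ing the whole host page, in system mode
   through tlb_protect_code(), so that any later write is trapped and
   self-modifying code can be detected. */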
/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
    TranslationBlock *tb;

    if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
        (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
    TranslationBlock **ptb;

    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);
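/* Note: a tb_next_offset[] value of 0xffff means the corresponding jump
   slot is unused; otherwise tb_reset_jump() points the generated jump back
   at the TB's own epilogue, so execution returns to the main loop until the
   block is chained to a real successor at run time. */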
#ifdef DEBUG_TB_CHECK

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
    TranslationBlock *tb;

    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
    /* binary search (cf Knuth) */
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        v = (unsigned long)tb->tc_ptr;
        else if (tc_ptr < v) {

static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
    TranslationBlock *tb1, *tb_next, **ptb;

    tb1 = tb->jmp_next[n];
    /* find head of list */
    tb1 = (TranslationBlock *)((long)tb1 & ~3);
    tb1 = tb1->jmp_next[n1];
    /* we are now sure that tb jumps to tb1 */

    /* remove tb from the jmp_first list */
    ptb = &tb_next->jmp_first;
    tb1 = (TranslationBlock *)((long)tb1 & ~3);
    if (n1 == n && tb1 == tb)
    ptb = &tb1->jmp_next[n1];
    *ptb = tb->jmp_next[n];
    tb->jmp_next[n] = NULL;

    /* suppress the jump to next tb in generated code */
    tb_reset_jump(tb, n);

    /* suppress jumps in the tb on which we could have jumped */
    tb_reset_jump_recursive(tb_next);

static void tb_reset_jump_recursive(TranslationBlock *tb)
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
#if defined(TARGET_HAS_ICE)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
    target_phys_addr_t addr;
    ram_addr_t ram_addr;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    pd = IO_MEM_UNASSIGNED;
    pd = p->phys_offset;
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);

/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr)
    for (i = 0; i < env->nb_watchpoints; i++) {
        if (addr == env->watchpoint[i].vaddr)
    if (env->nb_watchpoints >= MAX_WATCHPOINTS)

    i = env->nb_watchpoints++;
    env->watchpoint[i].vaddr = addr;
    tlb_flush_page(env, addr);
    /* FIXME: This flush is needed because of the hack to make memory ops
       terminate the TB. It can be removed once the proper IO trap and
       re-execute bits are in. */

/* Remove a watchpoint.  */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
    for (i = 0; i < env->nb_watchpoints; i++) {
        if (addr == env->watchpoint[i].vaddr) {
            env->nb_watchpoints--;
            env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
            tlb_flush_page(env, addr);
/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
   breakpoint is reached */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
#if defined(TARGET_HAS_ICE)
    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
    env->breakpoints[env->nb_breakpoints++] = pc;
    breakpoint_invalidate(env, pc);

/* remove a breakpoint */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
#if defined(TARGET_HAS_ICE)
    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
    env->nb_breakpoints--;
    if (i < env->nb_breakpoints)
        env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
    breakpoint_invalidate(env, pc);

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
/* enable or disable low level logging */
void cpu_set_log(int log_flags)
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
            perror(logfilename);
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        static uint8_t logfile_buf[4096];
        setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        setvbuf(logfile, NULL, _IOLBF, 0);
    if (!loglevel && logfile) {

void cpu_set_log_filename(const char *filename)
    logfilename = strdup(filename);
    cpu_set_log(loglevel);

/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
    TranslationBlock *tb;
    static int interrupt_lock;

    env->interrupt_request |= mask;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    tb = env->current_tb;
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
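        /* Note: resetting the jump chains of the currently executing TB makes
           the generated code return to the main loop at the next block
           boundary, where the pending interrupt_request is examined. */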
void cpu_reset_interrupt(CPUState *env, int mask)
    env->interrupt_request &= ~mask;
CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops after optimization for each compiled TB" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
static int cmp1(const char *s1, int n, const char *s2)
    if (strlen(s2) != n)
    return memcmp(s1, s2, n) == 0;

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
    p1 = strchr(p, ',');
    if (cmp1(p, p1 - p, "all")) {
        for(item = cpu_log_items; item->mask != 0; item++) {
    for(item = cpu_log_items; item->mask != 0; item++) {
        if (cmp1(p, p1 - p, item->name))
void cpu_abort(CPUState *env, const char *fmt, ...)
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    if (env->intercept & INTERCEPT_SVM_MASK) {
        /* most probably the virtual machine should not
           be shut down but rather caught by the VMM */
        vmexit(SVM_EXIT_SHUTDOWN, 0);
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
    cpu_dump_state(env, stderr, fprintf, 0);
        fprintf(logfile, "qemu: fatal: ");
        vfprintf(logfile, fmt, ap2);
        fprintf(logfile, "\n");
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
        cpu_dump_state(env, logfile, fprintf, 0);

CPUState *cpu_copy(CPUState *env)
    /* XXX: broken, must be handled by each CPU */
    CPUState *new_env = cpu_init();
    /* preserve chaining and index */
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
    memcpy(new_env, env, sizeof(CPUState));
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;
#if !defined(CONFIG_USER_ONLY)

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_table[0][i].addr_read = -1;
        env->tlb_table[0][i].addr_write = -1;
        env->tlb_table[0][i].addr_code = -1;
        env->tlb_table[1][i].addr_read = -1;
        env->tlb_table[1][i].addr_write = -1;
        env->tlb_table[1][i].addr_code = -1;
#if (NB_MMU_MODES >= 3)
        env->tlb_table[2][i].addr_read = -1;
        env->tlb_table[2][i].addr_write = -1;
        env->tlb_table[2][i].addr_code = -1;
#if (NB_MMU_MODES == 4)
        env->tlb_table[3][i].addr_read = -1;
        env->tlb_table[3][i].addr_write = -1;
        env->tlb_table[3][i].addr_code = -1;

    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

#if !defined(CONFIG_SOFTMMU)
    munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
    if (env->kqemu_enabled) {
        kqemu_flush(env, flush_global);
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        tlb_entry->addr_read = -1;
        tlb_entry->addr_write = -1;
        tlb_entry->addr_code = -1;

void tlb_flush_page(CPUState *env, target_ulong addr)
    TranslationBlock *tb;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_table[0][i], addr);
    tlb_flush_entry(&env->tlb_table[1][i], addr);
#if (NB_MMU_MODES >= 3)
    tlb_flush_entry(&env->tlb_table[2][i], addr);
#if (NB_MMU_MODES == 4)
    tlb_flush_entry(&env->tlb_table[3][i], addr);

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));

    i = tb_jmp_cache_hash_page(addr);
    memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));

#if !defined(CONFIG_SOFTMMU)
    if (addr < MMAP_AREA_END)
        munmap((void *)addr, TARGET_PAGE_SIZE);
    if (env->kqemu_enabled) {
        kqemu_flush_page(env, addr);
/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
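/* Note: phys_ram_dirty keeps one byte of dirty flags per RAM page. While
   CODE_DIRTY_FLAG is clear, the page's TLB write entries use IO_MEM_NOTDIRTY
   so every store is intercepted and checked for self-modifying code; setting
   the flag here re-enables direct writes. */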
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;

void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
    unsigned long length, start1;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    len = length >> TARGET_PAGE_BITS;
    /* XXX: should not depend on cpu context */
    if (env->kqemu_enabled) {
        for(i = 0; i < len; i++) {
            kqemu_set_notdirty(env, addr);
            addr += TARGET_PAGE_SIZE;
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    for(i = 0; i < len; i++)

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = start + (unsigned long)phys_ram_base;
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
#if (NB_MMU_MODES >= 3)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
#if (NB_MMU_MODES == 4)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);

#if !defined(CONFIG_SOFTMMU)
    /* XXX: this is expensive */
    for(i = 0; i < L1_SIZE; i++) {
        addr = i << (TARGET_PAGE_BITS + L2_BITS);
        for(j = 0; j < L2_SIZE; j++) {
            if (p->valid_tag == virt_valid_tag &&
                p->phys_addr >= start && p->phys_addr < end &&
                (p->prot & PROT_WRITE)) {
                if (addr < MMAP_AREA_END) {
                    mprotect((void *)addr, TARGET_PAGE_SIZE,
                             p->prot & ~PROT_WRITE);
            addr += TARGET_PAGE_SIZE;
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
    ram_addr_t ram_addr;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
            tlb_entry->addend - (unsigned long)phys_ram_base;
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= IO_MEM_NOTDIRTY;

/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[0][i]);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[1][i]);
#if (NB_MMU_MODES >= 3)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[2][i]);
#if (NB_MMU_MODES == 4)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[3][i]);

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
                                  unsigned long start)
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if (addr == start) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;

/* update the TLB corresponding to virtual page vaddr and phys addr
   addr so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
    addr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_set_dirty1(&env->tlb_table[0][i], addr);
    tlb_set_dirty1(&env->tlb_table[1][i], addr);
#if (NB_MMU_MODES >= 3)
    tlb_set_dirty1(&env->tlb_table[2][i], addr);
#if (NB_MMU_MODES == 4)
    tlb_set_dirty1(&env->tlb_table[3][i], addr);
/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
    target_ulong address;
    target_phys_addr_t addend;

    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    pd = IO_MEM_UNASSIGNED;
    pd = p->phys_offset;
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);

#if !defined(CONFIG_SOFTMMU)
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
        /* IO memory case */
        address = vaddr | pd;
    /* standard memory */
    addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    for (i = 0; i < env->nb_watchpoints; i++) {
        if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
            if (address & ~TARGET_PAGE_MASK) {
                env->watchpoint[i].addend = 0;
                address = vaddr | io_mem_watch;
                env->watchpoint[i].addend = pd - paddr +
                    (unsigned long) phys_ram_base;
                /* TODO: Figure out how to make read watchpoints coexist
                   with code.  */
                pd = (pd & TARGET_PAGE_MASK) | io_mem_watch | IO_MEM_ROMD;

    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    te = &env->tlb_table[mmu_idx][index];
    te->addend = addend;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    if (prot & PAGE_EXEC) {
        te->addr_code = address;
    if (prot & PAGE_WRITE) {
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
            (pd & IO_MEM_ROMD)) {
            /* write access calls the I/O callback */
            te->addr_write = vaddr |
                (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                   !cpu_physical_memory_is_dirty(pd)) {
            te->addr_write = vaddr | IO_MEM_NOTDIRTY;
            te->addr_write = address;
        te->addr_write = -1;
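    /* Note: the low bits of addr_read/addr_write/addr_code select how an
       access is handled: zero means a direct RAM access through 'addend',
       while a nonzero io index (IO_MEM_NOTDIRTY, io_mem_watch, a device
       index, ...) routes the access through the registered callbacks. */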
#if !defined(CONFIG_SOFTMMU)
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
        /* IO access: no mapping is done as it will be handled by the
           soft MMU */
        if (!(env->hflags & HF_SOFTMMU_MASK))
    if (vaddr >= MMAP_AREA_END) {
        if (prot & PROT_WRITE) {
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
#if defined(TARGET_HAS_SMC) || 1
                ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                 !cpu_physical_memory_is_dirty(pd))) {
                /* ROM: we do as if code was inside */
                /* if code is present, we only map as read only and save the
                   original mapping */
                vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
                vp->valid_tag = virt_valid_tag;
                prot &= ~PAGE_WRITE;
        map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
                        MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
        if (map_addr == MAP_FAILED) {
            cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
#if !defined(CONFIG_SOFTMMU)
#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x\n", addr);
    addr &= TARGET_PAGE_MASK;

    /* if it is not mapped, no need to worry here */
    if (addr >= MMAP_AREA_END)
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    /* NOTE: in this case, validate_tag is _not_ tested as it
       validates only the code TLB */
    if (vp->valid_tag != virt_valid_tag)
    if (!(vp->prot & PAGE_WRITE))
#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
           addr, vp->phys_addr, vp->prot);
    if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
        cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
                  (unsigned long)addr, vp->prot);
    /* set the dirty bit */
    phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
    /* flush the code inside */
    tb_invalidate_phys_page(vp->phys_addr, pc, puc);

void tlb_flush(CPUState *env, int flush_global)

void tlb_flush_page(CPUState *env, target_ulong addr)

int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
/* dump memory mappings */
void page_dump(FILE *f)
    unsigned long start, end;
    int i, j, prot, prot1;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    for(i = 0; i <= L1_SIZE; i++) {
        for(j = 0; j < L2_SIZE; j++) {
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                        start, end, end - start,
                        prot & PAGE_READ ? 'r' : '-',
                        prot & PAGE_WRITE ? 'w' : '-',
                        prot & PAGE_EXEC ? 'x' : '-');

int page_get_flags(target_ulong address)
    p = page_find(address >> TARGET_PAGE_BITS);
/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is set automatically
   depending on PAGE_WRITE */
void page_set_flags(target_ulong start, target_ulong end, int flags)
    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    spin_lock(&tb_lock);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* if the write protection is set, then we invalidate the code
           inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            tb_invalidate_phys_page(addr, 0, NULL);
    spin_unlock(&tb_lock);

int page_check_range(target_ulong start, target_ulong len, int flags)
    end = TARGET_PAGE_ALIGN(start + len); /* must do before we lose bits in the next step */
    start = start & TARGET_PAGE_MASK;
    /* we've wrapped around */
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!(p->flags & PAGE_VALID))
        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG))
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL))
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
    unsigned int page_index, prot, pindex;
    target_ulong host_start, host_end, addr;

    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    host_end = host_start + qemu_host_page_size;
    for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)g2h(host_start), qemu_host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);

static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)

#endif /* defined(CONFIG_USER_ONLY) */
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
static void *subpage_init (target_phys_addr_t base, uint32_t *phys,

#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
        if (addr > start_addr) \
            start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
            if (start_addr2 > 0) \
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
            end_addr2 = TARGET_PAGE_SIZE - 1; \
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
            if (end_addr2 < TARGET_PAGE_SIZE - 1) \

/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
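/* Note: if a registered region does not cover whole target pages, the
   affected page is converted to a subpage (IO_MEM_SUBPAGE) so that several
   devices or memory types can coexist within a single page. */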
void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                  unsigned long phys_offset)
    target_phys_addr_t addr, end_addr;
    unsigned long orig_size = size;

    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            unsigned long orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory);
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                subpage_register(subpage, start_addr2, end_addr2, phys_offset);
                p->phys_offset = phys_offset;
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                    (phys_offset & IO_MEM_ROMD))
                    phys_offset += TARGET_PAGE_SIZE;
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                (phys_offset & IO_MEM_ROMD))
                phys_offset += TARGET_PAGE_SIZE;
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, IO_MEM_UNASSIGNED);
                    subpage_register(subpage, start_addr2, end_addr2,

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
/* XXX: temporary until new memory mapping API */
uint32_t cpu_get_physical_page_desc(target_phys_addr_t addr)
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
        return IO_MEM_UNASSIGNED;
    return p->phys_offset;

/* XXX: better than nothing */
ram_addr_t qemu_ram_alloc(unsigned int size)
    if ((phys_ram_alloc_offset + size) >= phys_ram_size) {
        fprintf(stderr, "Not enough memory (requested_size = %u, max memory = %d)\n",
                size, phys_ram_size);
    addr = phys_ram_alloc_offset;
    phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);

void qemu_ram_free(ram_addr_t addr)
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
    do_unassigned_access(addr, 0, 0, 0);
    do_unassigned_access(addr, 0, 0, 0);

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
    do_unassigned_access(addr, 1, 0, 0);
    do_unassigned_access(addr, 1, 0, 0);

static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readb,
    unassigned_mem_readb,

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writeb,
    unassigned_mem_writeb,
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
    unsigned long ram_addr;

    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    stb_p((uint8_t *)(long)addr, val);
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
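/* The 16 and 32 bit variants below follow the same pattern, differing only
   in the invalidation length and the stw_p/stl_p store. */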
static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
    unsigned long ram_addr;

    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    stw_p((uint8_t *)(long)addr, val);
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);

static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
    unsigned long ram_addr;

    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    stl_p((uint8_t *)(long)addr, val);
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);

static CPUReadMemoryFunc *error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */

static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
#if defined(CONFIG_SOFTMMU)
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   handlers. */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
    return ldub_phys(addr);

static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
    return lduw_phys(addr);

static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
    return ldl_phys(addr);

/* Generate a debug exception if a watchpoint has been hit.
   Returns the real physical address of the access.  addr will be a host
   address in case of a RAM location.  */
static target_ulong check_watchpoint(target_phys_addr_t addr)
    CPUState *env = cpu_single_env;
    target_ulong retaddr;

    for (i = 0; i < env->nb_watchpoints; i++) {
        watch = env->watchpoint[i].vaddr;
        if (((env->mem_write_vaddr ^ watch) & TARGET_PAGE_MASK) == 0) {
            retaddr = addr - env->watchpoint[i].addend;
            if (((addr ^ watch) & ~TARGET_PAGE_MASK) == 0) {
                cpu_single_env->watchpoint_hit = i + 1;
                cpu_interrupt(cpu_single_env, CPU_INTERRUPT_DEBUG);

static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
    addr = check_watchpoint(addr);
    stb_phys(addr, val);

static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
    addr = check_watchpoint(addr);
    stw_phys(addr, val);

static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
    addr = check_watchpoint(addr);
    stl_phys(addr, val);

static CPUReadMemoryFunc *watch_mem_read[3] = {

static CPUWriteMemoryFunc *watch_mem_write[3] = {
static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
    CPUReadMemoryFunc **mem_read;

    idx = SUBPAGE_IDX(addr - mmio->base);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
    mem_read = mmio->mem_read[idx];
    ret = (*mem_read[len])(mmio->opaque[idx], addr);

static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
                                     uint32_t value, unsigned int len)
    CPUWriteMemoryFunc **mem_write;

    idx = SUBPAGE_IDX(addr - mmio->base);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
           mmio, len, addr, idx, value);
    mem_write = mmio->mem_write[idx];
    (*mem_write[len])(mmio->opaque[idx], addr, value);

static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
    return subpage_readlen(opaque, addr, 0);

static void subpage_writeb (void *opaque, target_phys_addr_t addr,
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
    subpage_writelen(opaque, addr, value, 0);

static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
    return subpage_readlen(opaque, addr, 1);

static void subpage_writew (void *opaque, target_phys_addr_t addr,
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
    subpage_writelen(opaque, addr, value, 1);

static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
    return subpage_readlen(opaque, addr, 2);

static void subpage_writel (void *opaque,
                            target_phys_addr_t addr, uint32_t value)
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
    subpage_writelen(opaque, addr, value, 2);

static CPUReadMemoryFunc *subpage_read[] = {

static CPUWriteMemoryFunc *subpage_write[] = {
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             int memory)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    memory >>= IO_MEM_SHIFT;
    for (; idx <= eidx; idx++) {
        mmio->mem_read[idx] = io_mem_read[memory];
        mmio->mem_write[idx] = io_mem_write[memory];
        mmio->opaque[idx] = io_mem_opaque[memory];
    }

    return 0;
}

static void *subpage_init (target_phys_addr_t base, uint32_t *phys,
                           int orig_memory)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = qemu_mallocz(sizeof(subpage_t));
    if (mmio != NULL) {
        mmio->base = base;
        subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
#if defined(DEBUG_SUBPAGE)
        printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
               mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
        *phys = subpage_memory | IO_MEM_SUBPAGE;
        subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
    }

    return mmio;
}
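
/* Illustrative sketch (not part of the original file): why sub-page
   registration exists.  When two MMIO regions end up sharing one target
   page, the physical-memory registration path creates a subpage container
   via subpage_init() and routes each chunk with subpage_register().  The
   device io indexes and the base address below are made up; the
   cpu_register_physical_memory() call is assumed from cpu-all.h. */
#if 0
static void example_map_two_devices_in_one_page(int dev_a_io, int dev_b_io)
{
    /* dev_a_io / dev_b_io are return values of cpu_register_io_memory();
       0x10000000 is an arbitrary example guest-physical base address. */
    cpu_register_physical_memory(0x10000000, TARGET_PAGE_SIZE / 2, dev_a_io);
    cpu_register_physical_memory(0x10000000 + TARGET_PAGE_SIZE / 2,
                                 TARGET_PAGE_SIZE / 2, dev_b_io);
    /* both halves now live in one PhysPageDesc entry tagged IO_MEM_SUBPAGE */
}
#endif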
static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
    io_mem_nb = 5;

#if defined(CONFIG_SOFTMMU)
    io_mem_watch = cpu_register_io_memory(-1, watch_mem_read,
                                          watch_mem_write, NULL);
#endif
    /* alloc dirty bits array */
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
}
/* mem_read and mem_write are arrays of functions containing the
   handlers for byte (index 0), word (index 1) and dword (index 2)
   accesses. All three functions must be supplied. If io_index is
   non-zero, the corresponding io zone is modified. If it is zero, a new
   io zone is allocated. The return value can be used with
   cpu_register_physical_memory(). (-1) is returned on error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i;

    if (io_index <= 0) {
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0; i < 3; i++) {
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return io_index << IO_MEM_SHIFT;
}

CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
{
    return io_mem_write[io_index >> IO_MEM_SHIFT];
}

CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
{
    return io_mem_read[io_index >> IO_MEM_SHIFT];
}
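
/* Illustrative sketch (not part of the original file): registering a set of
   MMIO callbacks and mapping them.  The my_dev_* handlers, the device state
   pointer and the guest address are hypothetical; cpu_register_io_memory()
   is the function defined above and cpu_register_physical_memory() is
   assumed from cpu-all.h. */
#if 0
static uint32_t my_dev_readb(void *opaque, target_phys_addr_t addr) { return 0; }
static uint32_t my_dev_readw(void *opaque, target_phys_addr_t addr) { return 0; }
static uint32_t my_dev_readl(void *opaque, target_phys_addr_t addr) { return 0; }
static void my_dev_writeb(void *opaque, target_phys_addr_t addr, uint32_t val) { }
static void my_dev_writew(void *opaque, target_phys_addr_t addr, uint32_t val) { }
static void my_dev_writel(void *opaque, target_phys_addr_t addr, uint32_t val) { }

static CPUReadMemoryFunc *my_dev_read[3] = {
    my_dev_readb, my_dev_readw, my_dev_readl,
};
static CPUWriteMemoryFunc *my_dev_write[3] = {
    my_dev_writeb, my_dev_writew, my_dev_writel,
};

static void example_register_my_dev(void *dev_state)
{
    /* io_index 0 asks for a fresh slot; the return value already carries
       IO_MEM_SHIFT and can be handed to cpu_register_physical_memory() */
    int io = cpu_register_io_memory(0, my_dev_read, my_dev_write, dev_state);
    cpu_register_physical_memory(0x20000000, TARGET_PAGE_SIZE, io);
}
#endif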
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, len, 0)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(p, buf, len);
            unlock_user(p, addr, len);
        } else {
            if (!(flags & PAGE_READ))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, len, 1)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(buf, p, len);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
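
/* Illustrative sketch (not part of the original file): typical callers go
   through the cpu_physical_memory_read()/write() wrappers (assumed from
   cpu-all.h), which are thin wrappers around cpu_physical_memory_rw().
   The addresses and buffer below are made up. */
#if 0
static void example_copy_guest_physical(void)
{
    uint8_t buf[64];

    /* read 64 bytes of guest-physical memory at 0x1000 ... */
    cpu_physical_memory_read(0x1000, buf, sizeof(buf));
    /* ... and write them back elsewhere; RAM writes also update the dirty
       bitmap and invalidate any translated code in the touched range */
    cpu_physical_memory_write(0x2000, buf, sizeof(buf));
}
#endif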
/* used for ROM loading: can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = phys_ram_base + addr1;
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}

/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case: read the two 32 bit halves and combine them according
           to the target endianness */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}

/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* XXX: optimize */
uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}
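
/* Illustrative sketch (not part of the original file): the ld*_phys helpers
   are the kind of primitive target MMU code uses to walk guest page tables
   held in guest-physical memory.  The layout below is a made-up x86-style
   two-level walk, only meant to show the call pattern. */
#if 0
static uint32_t example_fetch_pte(uint32_t pde_addr, uint32_t vaddr)
{
    uint32_t pde, pte_addr;

    pde = ldl_phys(pde_addr);                    /* aligned 32 bit load */
    pte_addr = (pde & ~0xfff) + (((vaddr >> 12) & 0x3ff) << 2);
    return ldl_phys(pte_addr);
}
#endif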
/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. This is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stl_p(ptr, val);
    }
}
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}
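
/* Illustrative sketch (not part of the original file): the *_notdirty stores
   let target MMU code update accessed/dirty bits inside guest PTEs without
   flagging the RAM page as dirty or invalidating translated code, which
   matters when the dirty bitmap itself is used to track PTE modifications.
   The bit value is x86-style and only for illustration. */
#if 0
static void example_set_pte_accessed(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);

    if (!(pte & 0x20)) {                 /* accessed bit not yet set */
        stl_phys_notdirty(pte_addr, pte | 0x20);
    }
}
#endif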
/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}
/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* XXX: optimize */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}

#endif
/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
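
/* Illustrative sketch (not part of the original file): this is the kind of
   entry point a debugger front end (e.g. a gdb stub) uses to access guest
   *virtual* memory; it translates page by page with
   cpu_get_phys_page_debug() and falls back to cpu_physical_memory_rw().
   The wrapper name below is made up. */
#if 0
static int example_peek_guest_virtual(CPUState *env, target_ulong vaddr,
                                      uint8_t *out, int len)
{
    /* is_write = 0: read guest memory through the debug path */
    return cpu_memory_rw_debug(env, vaddr, out, len, 0);
}
#endif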
void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "TB count %d\n", nb_tbs);
    cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
}
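
/* Illustrative sketch (not part of the original file): dump_exec_info() takes
   a printf-like callback so the same routine can print either to a monitor
   helper or to a plain stdio stream.  Passing fprintf directly is just one
   way of supplying such a callback. */
#if 0
static void example_dump_jit_stats(void)
{
    dump_exec_info(stderr, fprintf);
}
#endif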
#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif