/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <sys/types.h>

#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#endif
//#define DEBUG_TB_INVALIDATE
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

/* threshold to flush the translated code buffer */
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)

#define SMC_BITMAP_USE_THRESHOLD 10
#define MMAP_AREA_START        0x00000000
#define MMAP_AREA_END          0xa8000000

#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_SPARC)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#elif defined(TARGET_ALPHA)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#define TARGET_VIRT_ADDR_SPACE_BITS 42
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#else
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif
TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
uint8_t *code_gen_ptr;

int phys_ram_size;
int phys_ram_fd;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;
static ram_addr_t phys_ram_alloc_offset = 0;

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;
typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low 12 bits */
    uint32_t phys_offset;
} PhysPageDesc;
#define L2_BITS 10
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
/* XXX: this is a temporary hack for alpha target.
 *      In the future, this is to be replaced by a multi-level table
 *      to actually be able to handle the complete 64 bits address space.
 */
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
#else
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
#endif

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)
static void io_mem_init(void);

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
PhysPageDesc **l1_phys_map;

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb;
#if defined(CONFIG_SOFTMMU)
static int io_mem_watch;
#endif

/* log support */
char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel = 0;
static int log_append = 0;

/* statistics */
static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE];
    CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE];
    void *opaque[TARGET_PAGE_SIZE];
} subpage_t;
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;
        DWORD old_protect;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;

        VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
                       PAGE_EXECUTE_READWRITE, &old_protect);
    }
#else
    qemu_real_host_page_size = getpagesize();
    {
        unsigned long start, end;

        start = (unsigned long)code_gen_buffer;
        start &= ~(qemu_real_host_page_size - 1);

        end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
        end += qemu_real_host_page_size - 1;
        end &= ~(qemu_real_host_page_size - 1);

        mprotect((void *)start, end - start,
                 PROT_READ | PROT_WRITE | PROT_EXEC);
    }
#endif

    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
}
static inline PageDesc *page_find_alloc(unsigned int index)
{
    PageDesc **lp, *p;

    lp = &l1_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(unsigned int index)
{
    PageDesc *p;

    p = l1_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}
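
/* Illustrative note (added comment, not from the original source): with
   4 KB target pages and L2_BITS == 10, a page index splits into a
   first-level slot (index >> L2_BITS) and a second-level slot
   (index & (L2_SIZE - 1)).  For example:

       PageDesc *p = page_find(0x08049123 >> TARGET_PAGE_BITS);
       // index = 0x8049 -> l1_map[0x20], second-level entry 0x049

   The address 0x08049123 is just an example value. */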
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;
    PhysPageDesc *pd;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif

    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    pd = *lp;
    if (!pd) {
        int i;
        /* allocate if not found */
        if (!alloc)
            return NULL;
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        *lp = pd;
        for (i = 0; i < L2_SIZE; i++)
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
    }
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}
#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#endif

void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

    if (!code_gen_ptr) {
        code_gen_ptr = code_gen_buffer;
        page_init();
        io_mem_init();
    }
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = (CPUState **)&(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->nb_watchpoints = 0;
    *penv = env;
}
static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}
#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;

    address &= TARGET_PAGE_MASK;
    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}
/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif
/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}
/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}
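
/* Illustrative note (added comment, not from the original source): the
   jump lists store tagged pointers.  The low two bits of a value read
   from jmp_first/jmp_next identify which slot of the pointing TB the
   link came from: 0 or 1 for its two outgoing jumps, 2 for the list
   head kept in jmp_first (which points back to the owning TB).  The
   decoding idiom used throughout this file is:

       n1 = (long)tb1 & 3;                           // slot tag
       tb1 = (TranslationBlock *)((long)tb1 & ~3);   // real pointer
*/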
static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    target_ulong phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
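
/* Illustrative example (added comment, not from the original source):
   set_bits() marks 'len' bits starting at bit 'start', e.g.

       uint8_t bitmap[TARGET_PAGE_SIZE / 8] = { 0 };
       set_bits(bitmap, 3, 7);
       // bits 3..9 set: bitmap[0] == 0xf8, bitmap[1] == 0x03

   which is how build_page_bitmap() below records which bytes of a
   page are covered by translated code. */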
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
    if (!p->code_bitmap)
        return;
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
#ifdef TARGET_HAS_PRECISE_SMC

static void tb_gen_code(CPUState *env,
                        target_ulong pc, target_ulong cs_base, int flags,
                        int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
}
#endif
/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
                                   int is_cpu_write_access)
{
    int n, current_tb_modified, current_tb_not_found, current_flags;
    CPUState *env = cpu_single_env;
    PageDesc *p;
    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
    target_ulong tb_start, tb_end;
    target_ulong current_pc, current_cs_base;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    current_tb_not_found = is_cpu_write_access;
    current_tb_modified = 0;
    current_tb = NULL; /* avoid warning */
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_write_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_write_pc);
                }
            }
            if (current_tb == tb &&
                !(current_tb->cflags & CF_SINGLE_INSN)) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_write_pc, NULL);
#if defined(TARGET_I386)
                current_flags = env->hflags;
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
                current_cs_base = (target_ulong)env->segs[R_CS].base;
                current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}
/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        if (loglevel) {
            fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                    cpu_single_env->mem_write_vaddr, len,
                    cpu_single_env->eip,
                    cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
        }
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
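
/* Illustrative note (added comment, not from the original source): the
   bitmap test above extracts up to 8 bits at the write offset.  For a
   4-byte write at page offset 0x123:

       offset = 0x123;                 // offset >> 3 == 0x24, offset & 7 == 3
       b = p->code_bitmap[0x24] >> 3;
       if (b & 0x0f)                   // (1 << 4) - 1
           ...;                        // the write overlaps translated code

   The offset is only an example value. */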
#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_ulong addr,
                                    unsigned long pc, void *puc)
{
    int n, current_flags, current_tb_modified;
    target_ulong current_pc, current_cs_base;
    PageDesc *p;
    TranslationBlock *tb, *current_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *env = cpu_single_env;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
    current_tb_modified = 0;
    current_tb = NULL;
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            !(current_tb->cflags & CF_SINGLE_INSN)) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
#if defined(TARGET_I386)
            current_flags = env->hflags;
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
            current_cs_base = (target_ulong)env->segs[R_CS].base;
            current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif
/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, target_ulong page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
            page_get_flags(addr);
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}
/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
        (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
}
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}
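
/* Illustrative note (added comment, not from the original source): the
   binary search is valid because TBs are carved out of
   code_gen_buffer in allocation order, so tbs[0..nb_tbs-1] is sorted
   by tc_ptr.  A typical (hypothetical) use maps a host PC inside
   generated code back to its block:

       TranslationBlock *tb = tb_find_pc((unsigned long)host_pc);

   as done by the fault-handling paths around cpu_restore_state(). */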
static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}
#if defined(TARGET_HAS_ICE)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    target_ulong pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr)
{
    int i;

    for (i = 0; i < env->nb_watchpoints; i++) {
        if (addr == env->watchpoint[i].vaddr)
            return 0;
    }
    if (env->nb_watchpoints >= MAX_WATCHPOINTS)
        return -1;

    i = env->nb_watchpoints++;
    env->watchpoint[i].vaddr = addr;
    tlb_flush_page(env, addr);
    /* FIXME: This flush is needed because of the hack to make memory ops
       terminate the TB.  It can be removed once the proper IO trap and
       re-execute bits are in.  */
    tb_flush(env);
    return i;
}

/* Remove a watchpoint.  */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
{
    int i;

    for (i = 0; i < env->nb_watchpoints; i++) {
        if (addr == env->watchpoint[i].vaddr) {
            env->nb_watchpoints--;
            env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
            tlb_flush_page(env, addr);
            return 0;
        }
    }
    return -1;
}
/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
   breakpoint is reached */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            return 0;
    }

    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
        return -1;
    env->breakpoints[env->nb_breakpoints++] = pc;

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* remove a breakpoint */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;
    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            goto found;
    }
    return -1;
 found:
    env->nb_breakpoints--;
    if (i < env->nb_breakpoints)
        env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
        tb_flush(env);
    }
#endif
}
/* enable or disable low levels log */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        log_append = 1;
    }
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    cpu_set_log(loglevel);
}
/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    TranslationBlock *tb;
    static int interrupt_lock;

    env->interrupt_request |= mask;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    tb = env->current_tb;
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
        interrupt_lock = 0;
    }
}

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}
CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops after optimization for each compiled TB" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};
static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if(cmp1(p,p1-p,"all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}
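
/* Illustrative example (added comment, not from the original source):

       int mask = cpu_str_to_log_mask("in_asm,op");
       // mask == (CPU_LOG_TB_IN_ASM | CPU_LOG_TB_OP)
       cpu_set_log(mask);

   "all" selects every entry of cpu_log_items; an unknown name makes
   the function return 0. */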
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    if(env->intercept & INTERCEPT_SVM_MASK) {
        /* most probably the virtual machine should not
           be shut down but rather caught by the VMM */
        vmexit(SVM_EXIT_SHUTDOWN, 0);
    }
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (logfile) {
        fprintf(logfile, "qemu: fatal: ");
        vfprintf(logfile, fmt, ap2);
        fprintf(logfile, "\n");
#ifdef TARGET_I386
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        cpu_dump_state(env, logfile, fprintf, 0);
#endif
        fflush(logfile);
        fclose(logfile);
    }
    va_end(ap2);
    va_end(ap);
    abort();
}
CPUState *cpu_copy(CPUState *env)
{
    CPUState *new_env = cpu_init(env->cpu_model_str);
    /* preserve chaining and index */
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
    memcpy(new_env, env, sizeof(CPUState));
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;
    return new_env;
}
#if !defined(CONFIG_USER_ONLY)

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_table[0][i].addr_read = -1;
        env->tlb_table[0][i].addr_write = -1;
        env->tlb_table[0][i].addr_code = -1;
        env->tlb_table[1][i].addr_read = -1;
        env->tlb_table[1][i].addr_write = -1;
        env->tlb_table[1][i].addr_code = -1;
#if (NB_MMU_MODES >= 3)
        env->tlb_table[2][i].addr_read = -1;
        env->tlb_table[2][i].addr_write = -1;
        env->tlb_table[2][i].addr_code = -1;
#if (NB_MMU_MODES == 4)
        env->tlb_table[3][i].addr_read = -1;
        env->tlb_table[3][i].addr_write = -1;
        env->tlb_table[3][i].addr_code = -1;
#endif
#endif
    }

    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

#if !defined(CONFIG_SOFTMMU)
    munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush(env, flush_global);
    }
#endif
    tlb_flush_count++;
}
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        tlb_entry->addr_read = -1;
        tlb_entry->addr_write = -1;
        tlb_entry->addr_code = -1;
    }
}
void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;
    TranslationBlock *tb;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_table[0][i], addr);
    tlb_flush_entry(&env->tlb_table[1][i], addr);
#if (NB_MMU_MODES >= 3)
    tlb_flush_entry(&env->tlb_table[2][i], addr);
#if (NB_MMU_MODES == 4)
    tlb_flush_entry(&env->tlb_table[3][i], addr);
#endif
#endif

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));

    i = tb_jmp_cache_hash_page(addr);
    memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));

#if !defined(CONFIG_SOFTMMU)
    if (addr < MMAP_AREA_END)
        munmap((void *)addr, TARGET_PAGE_SIZE);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush_page(env, addr);
    }
#endif
}
/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
}
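
/* Illustrative note (added comment, not from the original source): this
   pair implements the self-modifying-code protocol.  tlb_protect_code()
   clears CODE_DIRTY_FLAG for the page, which downgrades its TLB write
   entries to IO_MEM_NOTDIRTY; the notdirty_mem_write* handlers below
   then call tb_invalidate_phys_page_fast() before letting a store
   through, and tlb_unprotect_code_phys() restores the flag once no
   translated code remains on the page. */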
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
        }
    }
}
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i, mask, len;
    uint8_t *p;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    len = length >> TARGET_PAGE_BITS;
#ifdef USE_KQEMU
    /* XXX: should not depend on cpu context */
    env = first_cpu;
    if (env->kqemu_enabled) {
        ram_addr_t addr;
        addr = start;
        for(i = 0; i < len; i++) {
            kqemu_set_notdirty(env, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }
#endif
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    for(i = 0; i < len; i++)
        p[i] &= mask;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = start + (unsigned long)phys_ram_base;
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
#if (NB_MMU_MODES >= 3)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
#if (NB_MMU_MODES == 4)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
#endif
#endif
    }

#if !defined(CONFIG_SOFTMMU)
    /* XXX: this is expensive */
    {
        VirtPageDesc *p;
        int j;
        target_ulong addr;

        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                addr = i << (TARGET_PAGE_BITS + L2_BITS);
                for(j = 0; j < L2_SIZE; j++) {
                    if (p->valid_tag == virt_valid_tag &&
                        p->phys_addr >= start && p->phys_addr < end &&
                        (p->prot & PROT_WRITE)) {
                        if (addr < MMAP_AREA_END) {
                            mprotect((void *)addr, TARGET_PAGE_SIZE,
                                     p->prot & ~PROT_WRITE);
                        }
                    }
                    addr += TARGET_PAGE_SIZE;
                    p++;
                }
            }
        }
    }
#endif
}
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
            tlb_entry->addend - (unsigned long)phys_ram_base;
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
        }
    }
}

/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
{
    int i;
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[0][i]);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[1][i]);
#if (NB_MMU_MODES >= 3)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[2][i]);
#if (NB_MMU_MODES == 4)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[3][i]);
#endif
#endif
}
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
                                  unsigned long start)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if (addr == start) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
        }
    }
}

/* update the TLB corresponding to virtual page vaddr and phys addr
   addr so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
    int i;

    addr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_set_dirty1(&env->tlb_table[0][i], addr);
    tlb_set_dirty1(&env->tlb_table[1][i], addr);
#if (NB_MMU_MODES >= 3)
    tlb_set_dirty1(&env->tlb_table[2][i], addr);
#if (NB_MMU_MODES == 4)
    tlb_set_dirty1(&env->tlb_table[3][i], addr);
#endif
#endif
}
/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_phys_addr_t addend;
    int ret;
    CPUTLBEntry *te;
    int i;

    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
#endif

    ret = 0;
#if !defined(CONFIG_SOFTMMU)
    if (is_softmmu)
#endif
    {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
            /* IO memory case */
            address = vaddr | pd;
            addend = paddr;
        } else {
            /* standard memory */
            address = vaddr;
            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
        }

        /* Make accesses to pages with watchpoints go via the
           watchpoint trap routines.  */
        for (i = 0; i < env->nb_watchpoints; i++) {
            if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
                if (address & ~TARGET_PAGE_MASK) {
                    env->watchpoint[i].addend = 0;
                    address = vaddr | io_mem_watch;
                } else {
                    env->watchpoint[i].addend = pd - paddr +
                        (unsigned long) phys_ram_base;
                    /* TODO: Figure out how to make read watchpoints coexist
                       with code.  */
                    pd = (pd & TARGET_PAGE_MASK) | io_mem_watch | IO_MEM_ROMD;
                }
            }
        }

        index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        addend -= vaddr;
        te = &env->tlb_table[mmu_idx][index];
        te->addend = addend;
        if (prot & PAGE_READ) {
            te->addr_read = address;
        } else {
            te->addr_read = -1;
        }
        if (prot & PAGE_EXEC) {
            te->addr_code = address;
        } else {
            te->addr_code = -1;
        }
        if (prot & PAGE_WRITE) {
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
                (pd & IO_MEM_ROMD)) {
                /* write access calls the I/O callback */
                te->addr_write = vaddr |
                    (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
            } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                       !cpu_physical_memory_is_dirty(pd)) {
                te->addr_write = vaddr | IO_MEM_NOTDIRTY;
            } else {
                te->addr_write = address;
            }
        } else {
            te->addr_write = -1;
        }
    }
#if !defined(CONFIG_SOFTMMU)
    else {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO access: no mapping is done as it will be handled by the
               soft MMU */
            if (!(env->hflags & HF_SOFTMMU_MASK))
                ret = 2;
        } else {
            void *map_addr;

            if (vaddr >= MMAP_AREA_END) {
                ret = 2;
            } else {
                if (prot & PROT_WRITE) {
                    if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
#if defined(TARGET_HAS_SMC) || 1
                        first_tb ||
#endif
                        ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                         !cpu_physical_memory_is_dirty(pd))) {
                        /* ROM: we do as if code was inside */
                        /* if code is present, we only map as read only and save the
                           original mapping */
                        VirtPageDesc *vp;

                        vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
                        vp->phys_addr = pd;
                        vp->prot = prot;
                        vp->valid_tag = virt_valid_tag;
                        prot &= ~PAGE_WRITE;
                    }
                }
                map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
                                MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
                if (map_addr == MAP_FAILED) {
                    cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
                              paddr, vaddr);
                }
            }
        }
    }
#endif
    return ret;
}
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    VirtPageDesc *vp;

#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x\n", addr);
#endif
    addr &= TARGET_PAGE_MASK;

    /* if it is not mapped, no need to worry here */
    if (addr >= MMAP_AREA_END)
        return 0;
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (!vp)
        return 0;
    /* NOTE: in this case, validate_tag is _not_ tested as it
       validates only the code TLB */
    if (vp->valid_tag != virt_valid_tag)
        return 0;
    if (!(vp->prot & PAGE_WRITE))
        return 0;
#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
           addr, vp->phys_addr, vp->prot);
#endif
    if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
        cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
                  (unsigned long)addr, vp->prot);
    /* set the dirty bit */
    phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
    /* flush the code inside */
    tb_invalidate_phys_page(vp->phys_addr, pc, puc);
    return 1;
#else
    return 0;
#endif
}
#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
{
    return 0;
}
/* dump memory mappings */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    start = -1;
    end = -1;
    prot = 0;
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0;j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}
int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}
/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is positioned automatically
   depending on PAGE_WRITE */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    PageDesc *p;
    target_ulong addr;

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    spin_lock(&tb_lock);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* if the write protection is set, then we invalidate the code
           inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
    spin_unlock(&tb_lock);
}
int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
    start = start & TARGET_PAGE_MASK;

    if (end < start)
        /* we've wrapped around */
        return -1;
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if( !p )
            return -1;
        if( !(p->flags & PAGE_VALID) )
            return -1;

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
            return -1;
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG))
                return -1;
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL))
                    return -1;
            }
            return 0;
        }
    }
    return 0;
}
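
/* Illustrative example (added comment, not from the original source): a
   typical caller validates a guest buffer before touching it through
   the host mapping, e.g.

       if (page_check_range(guest_addr, size, PAGE_READ | PAGE_WRITE) < 0)
           return -EFAULT;

   where guest_addr and size are hypothetical syscall arguments. */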
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    target_ulong host_start, host_end, addr;

    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1)
        return 0;
    host_end = host_start + qemu_host_page_size;
    p = p1;
    prot = 0;
    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)g2h(host_start), qemu_host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            return 1;
        }
    }
    return 0;
}
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             int memory);
static void *subpage_init (target_phys_addr_t base, uint32_t *phys,
                           int orig_memory);
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
                      need_subpage)                                     \
    do {                                                                \
        if (addr > start_addr)                                          \
            start_addr2 = 0;                                            \
        else {                                                          \
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
            if (start_addr2 > 0)                                        \
                need_subpage = 1;                                       \
        }                                                               \
                                                                        \
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
        else {                                                          \
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
                need_subpage = 1;                                       \
        }                                                               \
    } while (0)
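
/* Illustrative example (added comment, not from the original source):
   registering a region that starts 0x100 bytes into a page with
   orig_size 0x200 yields start_addr2 == 0x100 and end_addr2 == 0x2ff,
   so need_subpage is set and only that slice of the page is remapped;
   a region covering the whole page leaves need_subpage == 0. */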
/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                  unsigned long size,
                                  unsigned long phys_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;
    unsigned long orig_size = size;
    void *subpage;

    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            unsigned long orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                          need_subpage);
            if (need_subpage) {
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory);
                } else {
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                                            >> IO_MEM_SHIFT];
                }
                subpage_register(subpage, start_addr2, end_addr2, phys_offset);
            } else {
                p->phys_offset = phys_offset;
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                    (phys_offset & IO_MEM_ROMD))
                    phys_offset += TARGET_PAGE_SIZE;
            }
        } else {
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                (phys_offset & IO_MEM_ROMD))
                phys_offset += TARGET_PAGE_SIZE;
            else {
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);

                if (need_subpage) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, IO_MEM_UNASSIGNED);
                    subpage_register(subpage, start_addr2, end_addr2,
                                     phys_offset);
                }
            }
        }
    }

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}
/* XXX: temporary until new memory mapping API */
uint32_t cpu_get_physical_page_desc(target_phys_addr_t addr)
{
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return IO_MEM_UNASSIGNED;
    return p->phys_offset;
}
/* XXX: better than nothing */
ram_addr_t qemu_ram_alloc(unsigned int size)
{
    ram_addr_t addr;
    if ((phys_ram_alloc_offset + size) >= phys_ram_size) {
        fprintf(stderr, "Not enough memory (requested_size = %u, max memory = %d)\n",
                size, phys_ram_size);
        abort();
    }
    addr = phys_ram_alloc_offset;
    phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
    return addr;
}

void qemu_ram_free(ram_addr_t addr)
{
}
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#ifdef TARGET_SPARC
    do_unassigned_access(addr, 0, 0, 0);
#elif TARGET_CRIS
    do_unassigned_access(addr, 0, 0, 0);
#endif
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#ifdef TARGET_SPARC
    do_unassigned_access(addr, 1, 0, 0);
#elif TARGET_CRIS
    do_unassigned_access(addr, 1, 0, 0);
#endif
}

static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readb,
    unassigned_mem_readb,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writeb,
    unassigned_mem_writeb,
};
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stb_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stw_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stl_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}
static CPUReadMemoryFunc *error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};
#if defined(CONFIG_SOFTMMU)
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    return ldub_phys(addr);
}

static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    return lduw_phys(addr);
}

static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    return ldl_phys(addr);
}

/* Generate a debug exception if a watchpoint has been hit.
   Returns the real physical address of the access.  addr will be a host
   address in case of a RAM location.  */
static target_ulong check_watchpoint(target_phys_addr_t addr)
{
    CPUState *env = cpu_single_env;
    target_ulong watch;
    target_ulong retaddr;
    int i;

    retaddr = addr;
    for (i = 0; i < env->nb_watchpoints; i++) {
        watch = env->watchpoint[i].vaddr;
        if (((env->mem_write_vaddr ^ watch) & TARGET_PAGE_MASK) == 0) {
            retaddr = addr - env->watchpoint[i].addend;
            if (((addr ^ watch) & ~TARGET_PAGE_MASK) == 0) {
                cpu_single_env->watchpoint_hit = i + 1;
                cpu_interrupt(cpu_single_env, CPU_INTERRUPT_DEBUG);
                break;
            }
        }
    }
    return retaddr;
}
static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    addr = check_watchpoint(addr);
    stb_phys(addr, val);
}

static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    addr = check_watchpoint(addr);
    stw_phys(addr, val);
}

static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    addr = check_watchpoint(addr);
    stl_phys(addr, val);
}

static CPUReadMemoryFunc *watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};

static CPUWriteMemoryFunc *watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};
#endif
static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
                                        unsigned int len)
{
    CPUReadMemoryFunc **mem_read;
    uint32_t ret;
    unsigned int idx;

    idx = SUBPAGE_IDX(addr - mmio->base);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif
    mem_read = mmio->mem_read[idx];
    ret = (*mem_read[len])(mmio->opaque[idx], addr);

    return ret;
}

static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
                                     uint32_t value, unsigned int len)
{
    CPUWriteMemoryFunc **mem_write;
    unsigned int idx;

    idx = SUBPAGE_IDX(addr - mmio->base);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
           mmio, len, addr, idx, value);
#endif
    mem_write = mmio->mem_write[idx];
    (*mem_write[len])(mmio->opaque[idx], addr, value);
}
static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 0);
}

static void subpage_writeb (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 0);
}

static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 1);
}

static void subpage_writew (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 1);
}

static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 2);
}

static void subpage_writel (void *opaque,
                            target_phys_addr_t addr, uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 2);
}
static CPUReadMemoryFunc *subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};

static CPUWriteMemoryFunc *subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             int memory)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    memory >>= IO_MEM_SHIFT;
    for (; idx <= eidx; idx++) {
        mmio->mem_read[idx] = io_mem_read[memory];
        mmio->mem_write[idx] = io_mem_write[memory];
        mmio->opaque[idx] = io_mem_opaque[memory];
    }

    return 0;
}
static void *subpage_init (target_phys_addr_t base, uint32_t *phys,
                           int orig_memory)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = qemu_mallocz(sizeof(subpage_t));
    if (mmio != NULL) {
        mmio->base = base;
        subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
#if defined(DEBUG_SUBPAGE)
        printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
               mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
        *phys = subpage_memory | IO_MEM_SUBPAGE;
        subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
    }

    return mmio;
}
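
/* Usage sketch with hypothetical names: registering an extent smaller
   than TARGET_PAGE_SIZE via cpu_register_physical_memory() is what ends
   up creating a subpage container, so e.g. a 16 byte register bank can
   share a page with the page's original mapping:

       io = cpu_register_io_memory(0, dev_read, dev_write, dev);
       cpu_register_physical_memory(0x10001000, 16, io);

   Accesses outside the 16 bytes keep the page's previous handlers. */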
static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
    io_mem_nb = 5;

#if defined(CONFIG_SOFTMMU)
    io_mem_watch = cpu_register_io_memory(-1, watch_mem_read,
                                          watch_mem_write, NULL);
#endif
    /* alloc dirty bits array */
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
}
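
/* One dirty byte is kept per RAM page, and 0xff means "dirty for every
   client"; cpu_physical_memory_is_dirty() (used below) tests for exactly
   that value, while consumers such as code invalidation or display
   refresh reset only the flag bits they track, e.g. CODE_DIRTY_FLAG. */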
/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). All functions must be supplied. If io_index is positive, the
   corresponding io zone is modified. If it is zero (or negative), a new
   io zone is allocated. The return value can be used with
   cpu_register_physical_memory(). (-1) is returned on error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i;

    if (io_index <= 0) {
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0; i < 3; i++) {
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return io_index << IO_MEM_SHIFT;
}
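
/* Registration sketch for a device, with hypothetical mydev_ callbacks
   and state: the three slots are the byte, word and dword accessors,
   and the cookie returned here is what cpu_register_physical_memory()
   expects. */
#if 0
static CPUReadMemoryFunc *mydev_read[3] = {
    mydev_readb, mydev_readw, mydev_readl,
};
static CPUWriteMemoryFunc *mydev_write[3] = {
    mydev_writeb, mydev_writew, mydev_writel,
};

static void mydev_map(void *s)
{
    int iomemtype;

    iomemtype = cpu_register_io_memory(0, mydev_read, mydev_write, s);
    cpu_register_physical_memory(0xf0000000, 0x1000, iomemtype);
}
#endif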
CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
{
    return io_mem_write[io_index >> IO_MEM_SHIFT];
}

CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
{
    return io_mem_read[io_index >> IO_MEM_SHIFT];
}
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, len, 0)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(p, buf, len);
            unlock_user(p, addr, len);
        } else {
            if (!(flags & PAGE_READ))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, len, 1)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(buf, p, len);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
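
/* Most callers use the direction-fixing wrappers, as the ldub_phys()
   and lduw_phys() helpers below do:

       cpu_physical_memory_read(addr, buf, len);    - is_write = 0
       cpu_physical_memory_write(addr, buf, len);   - is_write = 1
*/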
/* used for ROM loading: can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = phys_ram_base + addr1;
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
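
/* Sketch of the intended use at machine init, with hypothetical names:
   unlike a plain cpu_physical_memory_write(), this also stores into
   regions mapped as ROM, which is what firmware loading needs:

       cpu_physical_memory_write_rom(bios_addr, bios_data, bios_size);
*/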
/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}
/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case: a 64 bit access is split into two 32 bit accesses,
           high word first on big endian targets */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}
/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* XXX: optimize */
uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}
/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stl_p(ptr, val);
    }
}
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}
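
/* Sketch of why the notdirty stores exist, using an illustrative
   x86-style PTE update: the write must not mark the RAM page dirty,
   otherwise the emulator's own PTE bookkeeping would be
   indistinguishable from guest writes in the dirty bitmap. */
#if 0
    /* pte_addr and PG_ACCESSED_MASK are illustrative */
    pte = ldl_phys(pte_addr);
    stl_phys_notdirty(pte_addr, pte | PG_ACCESSED_MASK);
#endif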
/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}
/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* XXX: optimize */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}

#endif
/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "TB count %d\n", nb_tbs);
    cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
}
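
/* The cpu_fprintf parameter matches the fprintf() signature, so a debug
   hook can call this directly, e.g.:

       dump_exec_info(stderr, fprintf);
*/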
#if !defined(CONFIG_USER_ONLY)

/* instantiate the code-fetch ("_cmmu") softmmu helpers for each access
   size: SHIFT selects an access of 1 << SHIFT bytes */
#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif