 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
#define WIN32_LEAN_AND_MEAN

#include <sys/types.h>

#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#endif

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE
#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

/* threshold to flush the translated code buffer */
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - code_gen_max_block_size())

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START        0x00000000
#define MMAP_AREA_END          0xa8000000

#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_SPARC)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#elif defined(TARGET_ALPHA)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#define TARGET_VIRT_ADDR_SPACE_BITS 42
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#else
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif
TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];

/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
uint8_t *code_gen_ptr;

uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;
static ram_addr_t phys_ram_alloc_offset = 0;

/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;
typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low 12 bits */
    uint32_t phys_offset;
} PhysPageDesc;

#define L2_BITS 10
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
/* XXX: this is a temporary hack for alpha target.
 *      In the future, this is to be replaced by a multi-level table
 *      to actually be able to handle the complete 64 bits address space.
 */
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
#else
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
#endif

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)
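
/* Illustrative sketch (added; not part of the original file): how a target
   page index is split across the two map levels defined above. The helper
   name is hypothetical -- page_find()/page_find_alloc() below open-code this
   arithmetic. With L2_BITS == 10 and 32-bit addressing, both levels hold
   1024 entries. */
static inline PageDesc *example_two_level_lookup(PageDesc **map, unsigned int index)
{
    PageDesc *p = map[index >> L2_BITS];   /* high bits select the L1 slot */
    if (!p)
        return NULL;                       /* this L2 block was never allocated */
    return p + (index & (L2_SIZE - 1));    /* low bits index into the L2 block */
}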
static void io_mem_init(void);

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
PhysPageDesc **l1_phys_map;

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb;
#if defined(CONFIG_SOFTMMU)
static int io_mem_watch;
#endif

/* log support */
char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE];
    CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE];
    void *opaque[TARGET_PAGE_SIZE];
} subpage_t;
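
/* Illustrative sketch (added; not part of the original file): how a subpage
   access is dispatched. One read/write/opaque slot exists per byte offset
   inside the page, so a single target page can be shared by several I/O
   handlers. The helper name is hypothetical; subpage_readlen() further down
   is the real dispatcher. */
static inline uint32_t example_subpage_dispatch(subpage_t *mmio,
                                                target_phys_addr_t addr,
                                                unsigned int len)
{
    unsigned int idx = SUBPAGE_IDX(addr - mmio->base);  /* offset within page */
    return (*mmio->mem_read[idx][len])(mmio->opaque[idx], addr);
}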
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;
        DWORD old_protect;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;

        VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
                       PAGE_EXECUTE_READWRITE, &old_protect);
    }
#else
    qemu_real_host_page_size = getpagesize();
    {
        unsigned long start, end;

        start = (unsigned long)code_gen_buffer;
        start &= ~(qemu_real_host_page_size - 1);

        end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
        end += qemu_real_host_page_size - 1;
        end &= ~(qemu_real_host_page_size - 1);

        mprotect((void *)start, end - start,
                 PROT_READ | PROT_WRITE | PROT_EXEC);
    }
#endif

    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
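
    /* Worked example (added; not part of the original file): with 4 KiB host
       pages, qemu_real_host_page_size == 0x1000, the loop above leaves
       qemu_host_page_bits == 12 and qemu_host_page_mask == ~0xfffUL, so
       (addr & qemu_host_page_mask) rounds an address down to its host page
       start, e.g. 0x12345 & ~0xfffUL == 0x12000 -- the same rounding the
       mprotect() setup above applies to code_gen_buffer. */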
#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
    {
        /* mark the host pages already in use so that the guest cannot map
           over them */
        long long startaddr, endaddr;
        FILE *f;
        int n;

        f = fopen("/proc/self/maps", "r");
        if (f) {
            do {
                n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
                if (n == 2) {
                    page_set_flags(TARGET_PAGE_ALIGN(startaddr),
                                   TARGET_PAGE_ALIGN(endaddr),
                                   PAGE_RESERVED);
                }
            } while (!feof(f));
            fclose(f);
        }
    }
#endif
}
static inline PageDesc *page_find_alloc(unsigned int index)
{
    PageDesc **lp, *p;

    lp = &l1_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(unsigned int index)
{
    PageDesc *p;

    p = l1_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;
    PhysPageDesc *pd;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    pd = *lp;
    if (!pd) {
        int i;
        /* allocate if not found */
        if (!alloc)
            return NULL;
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        *lp = pd;
        for (i = 0; i < L2_SIZE; i++)
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
    }
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}
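
/* Illustrative note (added; not part of the original file): phys_page_find()
   is called with a page index (addr >> TARGET_PAGE_BITS). For targets with
   more than 32 physical address bits the index is consumed in three slices:

       top  = (index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1);   extra level
       mid  = (index >> L2_BITS) & (L1_SIZE - 1);               l1_phys_map slot
       leaf = index & (L2_SIZE - 1);                            PhysPageDesc slot

   32-bit targets skip the top slice via the #if above. */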
#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#endif

void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

    code_gen_ptr = code_gen_buffer;
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = (CPUState **)&(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->nb_watchpoints = 0;
    *penv = env;
}
static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}
#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;

    address &= TARGET_PAGE_MASK;
    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}
/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}
void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}
/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}
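
/* Illustrative sketch (added; not part of the original file): entries of the
   jmp_first/jmp_next lists carry a 2-bit tag in the low pointer bits -- 0 or
   1 names the jump slot of the pointing TB, 2 marks the list head. The
   hypothetical helper below shows the decode step that the loops in this
   file open-code. */
static inline TranslationBlock *example_jmp_list_untag(TranslationBlock *tagged,
                                                       unsigned int *slot)
{
    *slot = (long)tagged & 3;                        /* 0/1 = jump slot, 2 = head */
    return (TranslationBlock *)((long)tagged & ~3);  /* strip tag to get the TB */
}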
/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}
static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    target_ulong phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the per-CPU jump caches */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
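
/* Usage sketch (added; not part of the original file): set_bits() marks a
   bit run in a byte bitmap; build_page_bitmap() below uses it to record
   which byte offsets of a page hold translated code. */
static inline void example_set_bits_usage(void)
{
    uint8_t bitmap[TARGET_PAGE_SIZE / 8];

    memset(bitmap, 0, sizeof(bitmap));
    /* sets bits 5..11: a masked write covers bits 5-7 of byte 0 and another
       covers bits 0-3 of byte 1; any whole bytes in between become 0xff */
    set_bits(bitmap, 5, 7);
}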
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
    if (!p->code_bitmap)
        return;
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
#ifdef TARGET_HAS_PRECISE_SMC

static void tb_gen_code(CPUState *env,
                        target_ulong pc, target_ulong cs_base, int flags,
                        int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
}
#endif
/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
                                   int is_cpu_write_access)
{
    int n, current_tb_modified, current_tb_not_found, current_flags;
    CPUState *env = cpu_single_env;
    PageDesc *p;
    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
    target_ulong tb_start, tb_end;
    target_ulong current_pc, current_cs_base;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    current_tb_not_found = is_cpu_write_access;
    current_tb_modified = 0;
    current_tb = NULL; /* avoid warning */
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_write_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_write_pc);
                }
            }
            if (current_tb == tb &&
                !(current_tb->cflags & CF_SINGLE_INSN)) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */
                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_write_pc, NULL);
#if defined(TARGET_I386)
                current_flags = env->hflags;
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
                current_cs_base = (target_ulong)env->segs[R_CS].base;
                current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}
/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
{
    PageDesc *p;
    int offset, b;

#if 0
    if (loglevel) {
        fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                cpu_single_env->mem_write_vaddr, len,
                cpu_single_env->eip,
                cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_ulong addr,
                                    unsigned long pc, void *puc)
{
    int n, current_flags, current_tb_modified;
    target_ulong current_pc, current_cs_base;
    PageDesc *p;
    TranslationBlock *tb, *current_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *env = cpu_single_env;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
    current_tb_modified = 0;
    current_tb = NULL;
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            !(current_tb->cflags & CF_SINGLE_INSN)) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */
            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
#if defined(TARGET_I386)
            current_flags = env->hflags;
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
            current_cs_base = (target_ulong)env->segs[R_CS].base;
            current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif
/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, target_ulong page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
            page_get_flags(addr);
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}
/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
        (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
}
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}
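
/* Usage note (added; not part of the original file): the binary search works
   because TBs are carved out of code_gen_buffer in allocation order, so
   tbs[] is sorted by tc_ptr. A typical caller maps a faulting host PC back
   to the TB whose generated code contains it, e.g.

       current_tb = tb_find_pc(env->mem_write_pc);

   as done for precise SMC handling in tb_invalidate_phys_page_range(). */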
static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}
#if defined(TARGET_HAS_ICE)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    target_ulong pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr)
{
    int i;

    for (i = 0; i < env->nb_watchpoints; i++) {
        if (addr == env->watchpoint[i].vaddr)
            return 0;
    }
    if (env->nb_watchpoints >= MAX_WATCHPOINTS)
        return -1;

    i = env->nb_watchpoints++;
    env->watchpoint[i].vaddr = addr;
    tlb_flush_page(env, addr);
    /* FIXME: This flush is needed because of the hack to make memory ops
       terminate the TB.  It can be removed once the proper IO trap and
       re-execute bits are in.  */
    tb_flush(env);
    return i;
}

/* Remove a watchpoint.  */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
{
    int i;

    for (i = 0; i < env->nb_watchpoints; i++) {
        if (addr == env->watchpoint[i].vaddr) {
            env->nb_watchpoints--;
            env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
            tlb_flush_page(env, addr);
            return 0;
        }
    }
    return -1;
}
/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
   breakpoint is reached */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            return 0;
    }

    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
        return -1;
    env->breakpoints[env->nb_breakpoints++] = pc;

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* remove a breakpoint */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            goto found;
    }
    return -1;
 found:
    env->nb_breakpoints--;
    if (i < env->nb_breakpoints)
        env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
        tb_flush(env);
    }
#endif
}
/* enable or disable low-level logging */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        log_append = 1;
    }
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    cpu_set_log(loglevel);
}
/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    TranslationBlock *tb;
    static int interrupt_lock;

    env->interrupt_request |= mask;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    tb = env->current_tb;
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
        interrupt_lock = 0;
    }
}

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}
CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops after optimization for each compiled TB" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
    { 0, NULL, NULL },
};

static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}
/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if (cmp1(p, p1 - p, "all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        found:
            mask |= item->mask;
        }
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}
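
/* Usage sketch (added; not part of the original file): turning a -d style
   option string into a log level. A 0 return means an unknown item name;
   "all" expands to every entry of cpu_log_items[].

       int mask = cpu_str_to_log_mask("in_asm,op,int");
       if (mask)
           cpu_set_log(mask);
*/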
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    if(env->intercept & INTERCEPT_SVM_MASK) {
        /* most probably the virtual machine should not
           be shut down but rather caught by the VMM */
        vmexit(SVM_EXIT_SHUTDOWN, 0);
    }
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (logfile) {
        fprintf(logfile, "qemu: fatal: ");
        vfprintf(logfile, fmt, ap2);
        fprintf(logfile, "\n");
#ifdef TARGET_I386
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        cpu_dump_state(env, logfile, fprintf, 0);
#endif
    }
    va_end(ap2);
    va_end(ap);
    abort();
}
CPUState *cpu_copy(CPUState *env)
{
    CPUState *new_env = cpu_init(env->cpu_model_str);
    /* preserve chaining and index */
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
    memcpy(new_env, env, sizeof(CPUState));
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;
    return new_env;
}
#if !defined(CONFIG_USER_ONLY)

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_table[0][i].addr_read = -1;
        env->tlb_table[0][i].addr_write = -1;
        env->tlb_table[0][i].addr_code = -1;
        env->tlb_table[1][i].addr_read = -1;
        env->tlb_table[1][i].addr_write = -1;
        env->tlb_table[1][i].addr_code = -1;
#if (NB_MMU_MODES >= 3)
        env->tlb_table[2][i].addr_read = -1;
        env->tlb_table[2][i].addr_write = -1;
        env->tlb_table[2][i].addr_code = -1;
#if (NB_MMU_MODES == 4)
        env->tlb_table[3][i].addr_read = -1;
        env->tlb_table[3][i].addr_write = -1;
        env->tlb_table[3][i].addr_code = -1;
#endif
#endif
    }

    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

#if !defined(CONFIG_SOFTMMU)
    munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush(env, flush_global);
    }
#endif
    tlb_flush_count++;
}
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        tlb_entry->addr_read = -1;
        tlb_entry->addr_write = -1;
        tlb_entry->addr_code = -1;
    }
}
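
/* Illustrative sketch (added; not part of the original file): the TLB is a
   direct-mapped table per MMU mode, indexed by the low bits of the virtual
   page number; this is the slot that tlb_flush_page() below hands to
   tlb_flush_entry(). The helper name is hypothetical. */
static inline CPUTLBEntry *example_tlb_slot(CPUState *env, int mmu_idx,
                                            target_ulong addr)
{
    int i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    return &env->tlb_table[mmu_idx][i];
}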
void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;
    TranslationBlock *tb;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_table[0][i], addr);
    tlb_flush_entry(&env->tlb_table[1][i], addr);
#if (NB_MMU_MODES >= 3)
    tlb_flush_entry(&env->tlb_table[2][i], addr);
#if (NB_MMU_MODES == 4)
    tlb_flush_entry(&env->tlb_table[3][i], addr);
#endif
#endif

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));

    i = tb_jmp_cache_hash_page(addr);
    memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));

#if !defined(CONFIG_SOFTMMU)
    if (addr < MMAP_AREA_END)
        munmap((void *)addr, TARGET_PAGE_SIZE);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush_page(env, addr);
    }
#endif
}
/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
}
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
        }
    }
}
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i, mask, len;
    uint8_t *p;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    len = length >> TARGET_PAGE_BITS;
#ifdef USE_KQEMU
    /* XXX: should not depend on cpu context */
    env = first_cpu;
    if (env->kqemu_enabled) {
        ram_addr_t addr;
        addr = start;
        for(i = 0; i < len; i++) {
            kqemu_set_notdirty(env, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }
#endif
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    for(i = 0; i < len; i++)
        p[i] &= mask;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = start + (unsigned long)phys_ram_base;
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
#if (NB_MMU_MODES >= 3)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
#if (NB_MMU_MODES == 4)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
#endif
#endif
    }
#if !defined(CONFIG_SOFTMMU)
    /* XXX: this is expensive */
    {
        VirtPageDesc *p;
        int j;
        target_ulong addr;

        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                addr = i << (TARGET_PAGE_BITS + L2_BITS);
                for(j = 0; j < L2_SIZE; j++) {
                    if (p->valid_tag == virt_valid_tag &&
                        p->phys_addr >= start && p->phys_addr < end &&
                        (p->prot & PROT_WRITE)) {
                        if (addr < MMAP_AREA_END) {
                            mprotect((void *)addr, TARGET_PAGE_SIZE,
                                     p->prot & ~PROT_WRITE);
                        }
                    }
                    addr += TARGET_PAGE_SIZE;
                    p++;
                }
            }
        }
    }
#endif
}
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
            tlb_entry->addend - (unsigned long)phys_ram_base;
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
        }
    }
}

/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
{
    int i;
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[0][i]);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[1][i]);
#if (NB_MMU_MODES >= 3)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[2][i]);
#if (NB_MMU_MODES == 4)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[3][i]);
#endif
#endif
}
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
                                  unsigned long start)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if (addr == start) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
        }
    }
}

/* update the TLB corresponding to virtual page vaddr and phys addr
   addr so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
    int i;

    addr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_set_dirty1(&env->tlb_table[0][i], addr);
    tlb_set_dirty1(&env->tlb_table[1][i], addr);
#if (NB_MMU_MODES >= 3)
    tlb_set_dirty1(&env->tlb_table[2][i], addr);
#if (NB_MMU_MODES == 4)
    tlb_set_dirty1(&env->tlb_table[3][i], addr);
#endif
#endif
}
/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_phys_addr_t addend;
    int ret, i;
    CPUTLBEntry *te;

    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
#endif

    ret = 0;
#if !defined(CONFIG_SOFTMMU)
    if (is_softmmu)
#endif
    {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
            /* IO memory case */
            address = vaddr | pd;
            addend = paddr;
        } else {
            /* standard memory */
            address = vaddr;
            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
        }

        /* Make accesses to pages with watchpoints go via the
           watchpoint trap routines.  */
        for (i = 0; i < env->nb_watchpoints; i++) {
            if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
                if (address & ~TARGET_PAGE_MASK) {
                    env->watchpoint[i].addend = 0;
                    address = vaddr | io_mem_watch;
                } else {
                    env->watchpoint[i].addend = pd - paddr +
                        (unsigned long) phys_ram_base;
                    /* TODO: Figure out how to make read watchpoints coexist
                       with code.  */
                    pd = (pd & TARGET_PAGE_MASK) | io_mem_watch | IO_MEM_ROMD;
                }
            }
        }

        index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        te = &env->tlb_table[mmu_idx][index];
        te->addend = addend;
        if (prot & PAGE_READ) {
            te->addr_read = address;
        } else {
            te->addr_read = -1;
        }
        if (prot & PAGE_EXEC) {
            te->addr_code = address;
        } else {
            te->addr_code = -1;
        }
        if (prot & PAGE_WRITE) {
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
                (pd & IO_MEM_ROMD)) {
                /* write access calls the I/O callback */
                te->addr_write = vaddr |
                    (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
            } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                       !cpu_physical_memory_is_dirty(pd)) {
                te->addr_write = vaddr | IO_MEM_NOTDIRTY;
            } else {
                te->addr_write = address;
            }
        } else {
            te->addr_write = -1;
        }
    }
#if !defined(CONFIG_SOFTMMU)
    else {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO access: no mapping is done as it will be handled by the
               soft MMU */
            if (!(env->hflags & HF_SOFTMMU_MASK))
                ret = 2;
        } else {
            void *map_addr;

            if (vaddr >= MMAP_AREA_END) {
                ret = 2;
            } else {
                if (prot & PROT_WRITE) {
                    if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
#if defined(TARGET_HAS_SMC) || 1
                        first_tb ||
#endif
                        ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                         !cpu_physical_memory_is_dirty(pd))) {
                        /* ROM: we do as if code was inside */
                        /* if code is present, we only map as read only and save the
                           original mapping */
                        VirtPageDesc *vp;

                        vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
                        vp->phys_addr = pd;
                        vp->prot = prot;
                        vp->valid_tag = virt_valid_tag;
                        prot &= ~PAGE_WRITE;
                    }
                }
                map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
                                MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
                if (map_addr == MAP_FAILED) {
                    cpu_abort(env, "mmap failed when mapped physical address 0x%08x to virtual address 0x%08x\n",
                              paddr, vaddr);
                }
            }
        }
    }
#endif
    return ret;
}
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    VirtPageDesc *vp;

#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x\n", addr);
#endif
    addr &= TARGET_PAGE_MASK;

    /* if it is not mapped, no need to worry here */
    if (addr >= MMAP_AREA_END)
        return 0;
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (!vp)
        return 0;
    /* NOTE: in this case, validate_tag is _not_ tested as it
       validates only the code TLB */
    if (vp->valid_tag != virt_valid_tag)
        return 0;
    if (!(vp->prot & PAGE_WRITE))
        return 0;
#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
           addr, vp->phys_addr, vp->prot);
#endif
    if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
        cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
                  (unsigned long)addr, vp->prot);
    /* set the dirty bit */
    phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
    /* flush the code inside */
    tb_invalidate_phys_page(vp->phys_addr, pc, puc);
    return 1;
#else
    return 0;
#endif
}
#else

/* user mode emulation: no soft MMU, so these are stubs */
void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
{
    return 0;
}
/* dump memory mappings */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    start = -1;
    end = -1;
    prot = 0;
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0; j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}
int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}
/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is positioned automatically
   depending on PAGE_WRITE */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    PageDesc *p;
    target_ulong addr;

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    spin_lock(&tb_lock);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* if the write protection is set, then we invalidate the code
           inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
    spin_unlock(&tb_lock);
}
int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    end = TARGET_PAGE_ALIGN(start + len); /* must do before we lose bits in the next step */
    start = start & TARGET_PAGE_MASK;

    if (end < start)
        /* we've wrapped around */
        return -1;
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p)
            return -1;
        if (!(p->flags & PAGE_VALID))
            return -1;

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
            return -1;
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG))
                return -1;
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL))
                    return -1;
            }
            return 0;
        }
    }
    return 0;
}
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    target_ulong host_start, host_end, addr;

    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1)
        return 0;
    host_end = host_start + qemu_host_page_size;
    p = p1;
    prot = 0;
    for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)g2h(host_start), qemu_host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            return 1;
        }
    }
    return 0;
}
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             int memory);
static void *subpage_init (target_phys_addr_t base, uint32_t *phys,
                           int orig_memory);
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
                      need_subpage)                                     \
    do {                                                                \
        if (addr > start_addr)                                          \
            start_addr2 = 0;                                            \
        else {                                                          \
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
            if (start_addr2 > 0)                                        \
                need_subpage = 1;                                       \
        }                                                               \
                                                                        \
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
        else {                                                          \
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
                need_subpage = 1;                                       \
        }                                                               \
    } while (0)
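
/* Illustrative note (added; not part of the original file): CHECK_SUBPAGE
   asks whether the part of [start_addr, start_addr + orig_size) that falls
   into the target page containing 'addr' covers that page only partially.
   If so it sets need_subpage and leaves the covered byte range within the
   page in start_addr2/end_addr2, so the caller can route just that slice
   through a subpage_t instead of replacing the whole page mapping. */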
/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                  unsigned long size,
                                  unsigned long phys_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;
    unsigned long orig_size = size;
    void *subpage;

    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            unsigned long orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                          need_subpage);

            if (need_subpage) {
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory);
                } else {
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                                            >> IO_MEM_SHIFT];
                }
                subpage_register(subpage, start_addr2, end_addr2, phys_offset);
            } else {
                p->phys_offset = phys_offset;
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                    (phys_offset & IO_MEM_ROMD))
                    phys_offset += TARGET_PAGE_SIZE;
            }
        } else {
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                (phys_offset & IO_MEM_ROMD))
                phys_offset += TARGET_PAGE_SIZE;
            else {
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);

                if (need_subpage) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, IO_MEM_UNASSIGNED);
                    subpage_register(subpage, start_addr2, end_addr2,
                                     phys_offset);
                }
            }
        }
    }

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}
/* XXX: temporary until new memory mapping API */
uint32_t cpu_get_physical_page_desc(target_phys_addr_t addr)
{
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return IO_MEM_UNASSIGNED;
    return p->phys_offset;
}
/* XXX: better than nothing */
ram_addr_t qemu_ram_alloc(unsigned int size)
{
    ram_addr_t addr;
    if ((phys_ram_alloc_offset + size) >= phys_ram_size) {
        fprintf(stderr, "Not enough memory (requested_size = %u, max memory = %d)\n",
                size, phys_ram_size);
        abort();
    }
    addr = phys_ram_alloc_offset;
    phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
    return addr;
}

void qemu_ram_free(ram_addr_t addr)
{
}
, target_phys_addr_t addr
)
2105 #ifdef DEBUG_UNASSIGNED
2106 printf("Unassigned mem read " TARGET_FMT_plx
"\n", addr
);
2109 do_unassigned_access(addr
, 0, 0, 0);
2111 do_unassigned_access(addr
, 0, 0, 0);
2116 static void unassigned_mem_writeb(void *opaque
, target_phys_addr_t addr
, uint32_t val
)
2118 #ifdef DEBUG_UNASSIGNED
2119 printf("Unassigned mem write " TARGET_FMT_plx
" = 0x%x\n", addr
, val
);
2122 do_unassigned_access(addr
, 1, 0, 0);
2124 do_unassigned_access(addr
, 1, 0, 0);
2128 static CPUReadMemoryFunc
*unassigned_mem_read
[3] = {
2129 unassigned_mem_readb
,
2130 unassigned_mem_readb
,
2131 unassigned_mem_readb
,
2134 static CPUWriteMemoryFunc
*unassigned_mem_write
[3] = {
2135 unassigned_mem_writeb
,
2136 unassigned_mem_writeb
,
2137 unassigned_mem_writeb
,
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;

    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stb_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;

    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stw_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;

    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stl_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}

static CPUReadMemoryFunc *error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};
#if defined(CONFIG_SOFTMMU)
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   memory routines.  */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    return ldub_phys(addr);
}

static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    return lduw_phys(addr);
}

static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    return ldl_phys(addr);
}

/* Generate a debug exception if a watchpoint has been hit.
   Returns the real physical address of the access.  addr will be a host
   address in case of a RAM location.  */
static target_ulong check_watchpoint(target_phys_addr_t addr)
{
    CPUState *env = cpu_single_env;
    target_ulong watch;
    target_ulong retaddr;
    int i;

    retaddr = addr;
    for (i = 0; i < env->nb_watchpoints; i++) {
        watch = env->watchpoint[i].vaddr;
        if (((env->mem_write_vaddr ^ watch) & TARGET_PAGE_MASK) == 0) {
            retaddr = addr - env->watchpoint[i].addend;
            if (((addr ^ watch) & ~TARGET_PAGE_MASK) == 0) {
                cpu_single_env->watchpoint_hit = i + 1;
                cpu_interrupt(cpu_single_env, CPU_INTERRUPT_DEBUG);
                break;
            }
        }
    }
    return retaddr;
}

static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    addr = check_watchpoint(addr);
    stb_phys(addr, val);
}

static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    addr = check_watchpoint(addr);
    stw_phys(addr, val);
}

static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    addr = check_watchpoint(addr);
    stl_phys(addr, val);
}

static CPUReadMemoryFunc *watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};

static CPUWriteMemoryFunc *watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};
static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
                                        unsigned int len)
{
    uint32_t ret;
    CPUReadMemoryFunc **mem_read;
    unsigned int idx;

    idx = SUBPAGE_IDX(addr - mmio->base);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif
    mem_read = mmio->mem_read[idx];
    ret = (*mem_read[len])(mmio->opaque[idx], addr);

    return ret;
}

static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
                                     uint32_t value, unsigned int len)
{
    CPUWriteMemoryFunc **mem_write;
    unsigned int idx;

    idx = SUBPAGE_IDX(addr - mmio->base);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
           mmio, len, addr, idx, value);
#endif
    mem_write = mmio->mem_write[idx];
    (*mem_write[len])(mmio->opaque[idx], addr, value);
}
static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 0);
}

static void subpage_writeb (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 0);
}

static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 1);
}

static void subpage_writew (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 1);
}

static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 2);
}

static void subpage_writel (void *opaque,
                            target_phys_addr_t addr, uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 2);
}
static CPUReadMemoryFunc *subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};

static CPUWriteMemoryFunc *subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             int memory)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    memory >>= IO_MEM_SHIFT;
    for (; idx <= eidx; idx++) {
        mmio->mem_read[idx] = io_mem_read[memory];
        mmio->mem_write[idx] = io_mem_write[memory];
        mmio->opaque[idx] = io_mem_opaque[memory];
    }

    return 0;
}
static void *subpage_init (target_phys_addr_t base, uint32_t *phys,
                           int orig_memory)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = qemu_mallocz(sizeof(subpage_t));
    if (mmio != NULL) {
        mmio->base = base;
        subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
#if defined(DEBUG_SUBPAGE)
        printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
               mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
        *phys = subpage_memory | IO_MEM_SUBPAGE;
        subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
    }

    return mmio;
}
static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
    io_mem_nb = 5;

#if defined(CONFIG_SOFTMMU)
    io_mem_watch = cpu_register_io_memory(-1, watch_mem_read,
                                          watch_mem_write, NULL);
#endif
    /* alloc dirty bits array */
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
}
/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). All functions must be supplied. If io_index is non-zero, the
   corresponding io zone is modified. If it is zero, a new io zone is
   allocated. The return value can be used with
   cpu_register_physical_memory(). -1 is returned on error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i;

    if (io_index <= 0) {
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0;i < 3; i++) {
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return io_index << IO_MEM_SHIFT;
}
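
/* Usage sketch (illustrative): a device model registers its three
   width handlers and attaches them to a physical range.  The names
   mydev_*, s and MYDEV_BASE are hypothetical:

       static CPUReadMemoryFunc *mydev_read[3] = {
           mydev_readb, mydev_readw, mydev_readl,
       };
       static CPUWriteMemoryFunc *mydev_write[3] = {
           mydev_writeb, mydev_writew, mydev_writel,
       };

       int io = cpu_register_io_memory(0, mydev_read, mydev_write, s);
       cpu_register_physical_memory(MYDEV_BASE, 0x1000, io);

   Passing io_index == 0 allocates a fresh slot; the returned value is
   already shifted by IO_MEM_SHIFT, ready to be stored as a page's
   phys_offset. */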
CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
{
    return io_mem_write[io_index >> IO_MEM_SHIFT];
}

CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
{
    return io_mem_read[io_index >> IO_MEM_SHIFT];
}
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, len, 0)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(p, buf, len);
            unlock_user(p, addr, len);
        } else {
            if (!(flags & PAGE_READ))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, len, 1)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(buf, p, len);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
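
/* Usage sketch (illustrative): DMA-style callers normally go through
   the convenience wrappers built on top of this function:

       uint8_t buf[4];
       cpu_physical_memory_read(dma_addr, buf, 4);
       cpu_physical_memory_write(dma_addr, buf, 4);

   Both expand to cpu_physical_memory_rw() with is_write 0 and 1
   respectively; the loop above splits the transfer at page boundaries
   and dispatches each fragment either to RAM or to the registered
   io_mem handlers.  dma_addr is a hypothetical example variable. */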
/* used for ROM loading: can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = phys_ram_base + addr1;
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}
/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}
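
/* Worked example (exposition only): for a 64-bit MMIO value
   0x0011223344556677 at address A, a big-endian target reads the high
   word 0x00112233 at A and the low word 0x44556677 at A + 4, while a
   little-endian target reads 0x44556677 at A and 0x00112233 at A + 4.
   The pair of 32-bit reads above reassembles the value accordingly,
   since io_mem_read slots only provide byte/word/long callbacks. */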
/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* XXX: optimize */
uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}
/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stl_p(ptr, val);
    }
}
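
/* Usage sketch (assumption about typical callers): target MMU helpers
   that set accessed/dirty bits in guest page table entries use this
   variant so that the page holding the PTE is not itself flagged
   dirty, e.g.

       pte = ldl_phys(pte_addr);
       pte |= PG_ACCESSED_MASK;
       stl_phys_notdirty(pte_addr, pte);

   PG_ACCESSED_MASK is borrowed from the x86 helpers purely for
   illustration; writing through stl_phys() instead would also
   invalidate any translated blocks on that page. */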
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}
/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}
/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* XXX: optimize */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}

#endif
/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
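
/* Usage sketch (illustrative): the gdb stub reads guest virtual memory
   through this routine so that each page is translated separately:

       uint8_t buf[16];
       if (cpu_memory_rw_debug(env, pc, buf, sizeof(buf), 0) != 0)
           return;

   A -1 return means some page in the range had no physical mapping.
   The buf/pc names here are hypothetical. */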
void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "TB count %d\n", nb_tbs);
    cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
}
#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif