/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#endif
//#define DEBUG_TB_INVALIDATE
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

/* threshold to flush the translated code buffer */
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START        0x00000000
#define MMAP_AREA_END          0xa8000000
#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_SPARC)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#elif defined(TARGET_ALPHA)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#define TARGET_VIRT_ADDR_SPACE_BITS 42
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#else
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif
TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
uint8_t *code_gen_ptr;

int phys_ram_size;
int phys_ram_fd;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;
static ram_addr_t phys_ram_alloc_offset = 0;

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;
typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low 12 bits */
    uint32_t phys_offset;
} PhysPageDesc;

#define L2_BITS 10
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
/* XXX: this is a temporary hack for alpha target.
 *      In the future, this is to be replaced by a multi-level table
 *      to actually be able to handle the complete 64 bits address space.
 */
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
#else
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
#endif

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)
static void io_mem_init(void);

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
PhysPageDesc **l1_phys_map;

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb;
#if defined(CONFIG_SOFTMMU)
static int io_mem_watch;
#endif

/* log support */
char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;

/* statistics */
static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE];
    CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE];
    void *opaque[TARGET_PAGE_SIZE];
} subpage_t;
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;
        DWORD old_protect;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;

        VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
                       PAGE_EXECUTE_READWRITE, &old_protect);
    }
#else
    qemu_real_host_page_size = getpagesize();
    {
        unsigned long start, end;

        start = (unsigned long)code_gen_buffer;
        start &= ~(qemu_real_host_page_size - 1);

        end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
        end += qemu_real_host_page_size - 1;
        end &= ~(qemu_real_host_page_size - 1);

        mprotect((void *)start, end - start,
                 PROT_READ | PROT_WRITE | PROT_EXEC);
    }
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
}
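
/* Worked example (added for illustration, not from the original source):
   with a 4096-byte host page the loop above yields qemu_host_page_bits == 12
   and qemu_host_page_mask == ~4095UL, so 'addr & qemu_host_page_mask'
   rounds an address down to the start of its host page. */
#if 0
    assert(qemu_host_page_size == 4096);
    assert(qemu_host_page_bits == 12);
    assert(qemu_host_page_mask == ~4095UL);
#endif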
static inline PageDesc *page_find_alloc(unsigned int index)
{
    PageDesc **lp, *p;

    lp = &l1_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}
static inline PageDesc *page_find(unsigned int index)
{
    PageDesc *p;

    p = l1_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}
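
/* Illustrative sketch of the two-level lookup above (assuming L2_BITS == 10,
   so L2_SIZE == 1024; the constants are examples, not from this file): */
#if 0
    unsigned int index = 0x12345;                      /* target page number */
    PageDesc *chunk = l1_map[index >> L2_BITS];        /* L1 slot 0x48 */
    PageDesc *desc = chunk + (index & (L2_SIZE - 1));  /* entry 0x345 */
#endif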
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;
    PhysPageDesc *pd;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    pd = *lp;
    if (!pd) {
        int i;
        /* allocate if not found */
        if (!alloc)
            return NULL;
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        *lp = pd;
        for (i = 0; i < L2_SIZE; i++)
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
    }
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}
#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#endif
void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

    if (!code_gen_ptr) {
        code_gen_ptr = code_gen_buffer;
        page_init();
        io_mem_init();
    }
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = (CPUState **)&(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->nb_watchpoints = 0;
    *penv = env;
}
static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}
/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for (i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for (j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
           code_gen_ptr - code_gen_buffer,
           nb_tbs,
           nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
#endif
    nb_tbs = 0;

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        memset(env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof(void *));
    }

    memset(tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof(void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}
#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;

    address &= TARGET_PAGE_MASK;
    for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for (tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for (tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for (;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif
/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for (;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for (;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for (;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}
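
/* Note on the tagged pointers used by the jump lists (restating what the
   code above relies on): the two low bits of each list pointer encode which
   jump slot of the pointing TB is involved, and the value 2 marks the list
   head stored in jmp_first. A minimal sketch: */
#if 0
    TranslationBlock *tagged = (TranslationBlock *)((long)tb | 1);
    int slot = (long)tagged & 3;                          /* 1 */
    TranslationBlock *real =
        (TranslationBlock *)((long)tagged & ~3);          /* tb */
#endif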
/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}
static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    target_ulong phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for (;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
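
/* Worked example (illustration only): set_bits(tab, 5, 7) marks bits 5..11,
   i.e. it ORs tab[0] with 0xe0 (bits 5-7) and tab[1] with 0x0f (bits 8-11). */
#if 0
    uint8_t tab[2] = { 0, 0 };
    set_bits(tab, 5, 7);   /* tab[0] == 0xe0, tab[1] == 0x0f */
#endif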
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
    if (!p->code_bitmap)
        return;
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
#ifdef TARGET_HAS_PRECISE_SMC

static void tb_gen_code(CPUState *env,
                        target_ulong pc, target_ulong cs_base, int flags,
                        int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
}
#endif
/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
                                   int is_cpu_write_access)
{
    int n, current_tb_modified, current_tb_not_found, current_flags;
    CPUState *env = cpu_single_env;
    PageDesc *p;
    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
    target_ulong tb_start, tb_end;
    target_ulong current_pc, current_cs_base;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    current_tb_not_found = is_cpu_write_access;
    current_tb_modified = 0;
    current_tb = NULL; /* avoid warning */
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_write_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_write_pc);
                }
            }
            if (current_tb == tb &&
                !(current_tb->cflags & CF_SINGLE_INSN)) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_write_pc, NULL);
#if defined(TARGET_I386)
                current_flags = env->hflags;
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
                current_cs_base = (target_ulong)env->segs[R_CS].base;
                current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}
/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        if (loglevel) {
            fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                    cpu_single_env->mem_write_vaddr, len,
                    cpu_single_env->eip,
                    cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
        }
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
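
/* Worked example (illustration only): for a 1-byte write at page offset
   0x123, the fast path above reads bitmap byte 0x123 >> 3 == 0x24, shifts
   it right by 0x123 & 7 == 3, and only takes the slow invalidate path if
   the resulting low bit is set: */
#if 0
    b = p->code_bitmap[0x24] >> 3;
    if (b & 1) { /* code present at offset 0x123: must invalidate */ }
#endif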
#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_ulong addr,
                                    unsigned long pc, void *puc)
{
    int n, current_flags, current_tb_modified;
    target_ulong current_pc, current_cs_base;
    PageDesc *p;
    TranslationBlock *tb, *current_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *env = cpu_single_env;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
    current_tb_modified = 0;
    current_tb = NULL;
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            !(current_tb->cflags & CF_SINGLE_INSN)) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
#if defined(TARGET_I386)
            current_flags = env->hflags;
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
            current_cs_base = (target_ulong)env->segs[R_CS].base;
            current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif
/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, target_ulong page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for (addr = page_addr; addr < page_addr + qemu_host_page_size;
             addr += TARGET_PAGE_SIZE) {

            p2 = page_find(addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
            page_get_flags(addr);
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x%08lx\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}
/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
        (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;
#ifdef USE_CODE_COPY
    tb->cflags &= ~CF_FP_USED;
    if (tb->cflags & CF_TB_FP_USED)
        tb->cflags |= CF_FP_USED;
#endif

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
}
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}
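
/* Usage note (sketch, not from the original file): tbs[] is filled in
   allocation order and code_gen_ptr only grows, so the tc_ptr values are
   sorted by construction, which is what makes the binary search above
   valid. A typical caller maps a host fault PC (here the hypothetical
   'fault_host_pc') back to its TB: */
#if 0
    TranslationBlock *tb = tb_find_pc((unsigned long)fault_host_pc);
    if (tb)
        cpu_restore_state(tb, env, (unsigned long)fault_host_pc, NULL);
#endif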
static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for (;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for (;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}
#if defined(TARGET_HAS_ICE)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    target_ulong pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr)
{
    int i;

    for (i = 0; i < env->nb_watchpoints; i++) {
        if (addr == env->watchpoint[i].vaddr)
            return 0;
    }
    if (env->nb_watchpoints >= MAX_WATCHPOINTS)
        return -1;

    i = env->nb_watchpoints++;
    env->watchpoint[i].vaddr = addr;
    tlb_flush_page(env, addr);
    /* FIXME: This flush is needed because of the hack to make memory ops
       terminate the TB.  It can be removed once the proper IO trap and
       re-execute bits are in.  */
    tb_flush(env);
    return i;
}

/* Remove a watchpoint.  */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
{
    int i;

    for (i = 0; i < env->nb_watchpoints; i++) {
        if (addr == env->watchpoint[i].vaddr) {
            env->nb_watchpoints--;
            env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
            tlb_flush_page(env, addr);
            return 0;
        }
    }
    return -1;
}
/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
   breakpoint is reached */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;

    for (i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            return 0;
    }

    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
        return -1;
    env->breakpoints[env->nb_breakpoints++] = pc;

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* remove a breakpoint */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;

    for (i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            goto found;
    }
    return -1;
 found:
    env->nb_breakpoints--;
    if (i < env->nb_breakpoints)
        env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
        tb_flush(env);
    }
#endif
}
/* enable or disable low levels log */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
}
/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    TranslationBlock *tb;
    static int interrupt_lock;

    env->interrupt_request |= mask;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    tb = env->current_tb;
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
        interrupt_lock = 0;
    }
}

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}
CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
#ifdef TARGET_I386
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops after optimization for each compiled TB" },
#endif
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};
static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for (;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if (cmp1(p, p1 - p, "all")) {
            for (item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for (item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}
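
/* Usage example (illustration only): parsing a '-d' style option string.
   cpu_str_to_log_mask("in_asm,cpu") returns
   CPU_LOG_TB_IN_ASM | CPU_LOG_TB_CPU, and "all" enables every entry of
   cpu_log_items[]: */
#if 0
    int mask = cpu_str_to_log_mask("in_asm,cpu");
    if (mask)
        cpu_set_log(mask);
#endif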
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    va_end(ap);
    abort();
}

CPUState *cpu_copy(CPUState *env)
{
    CPUState *new_env = cpu_init();
    /* preserve chaining and index */
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
    memcpy(new_env, env, sizeof(CPUState));
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;
    return new_env;
}
#if !defined(CONFIG_USER_ONLY)

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for (i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_table[0][i].addr_read = -1;
        env->tlb_table[0][i].addr_write = -1;
        env->tlb_table[0][i].addr_code = -1;
        env->tlb_table[1][i].addr_read = -1;
        env->tlb_table[1][i].addr_write = -1;
        env->tlb_table[1][i].addr_code = -1;
#if (NB_MMU_MODES >= 3)
        env->tlb_table[2][i].addr_read = -1;
        env->tlb_table[2][i].addr_write = -1;
        env->tlb_table[2][i].addr_code = -1;
#if (NB_MMU_MODES == 4)
        env->tlb_table[3][i].addr_read = -1;
        env->tlb_table[3][i].addr_write = -1;
        env->tlb_table[3][i].addr_code = -1;
#endif
#endif
    }

    memset(env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof(void *));

#if !defined(CONFIG_SOFTMMU)
    munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush(env, flush_global);
    }
#endif
    tlb_flush_count++;
}
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        tlb_entry->addr_read = -1;
        tlb_entry->addr_write = -1;
        tlb_entry->addr_code = -1;
    }
}
*env
, target_ulong addr
)
1368 TranslationBlock
*tb
;
1370 #if defined(DEBUG_TLB)
1371 printf("tlb_flush_page: " TARGET_FMT_lx
"\n", addr
);
1373 /* must reset current TB so that interrupts cannot modify the
1374 links while we are modifying them */
1375 env
->current_tb
= NULL
;
1377 addr
&= TARGET_PAGE_MASK
;
1378 i
= (addr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
1379 tlb_flush_entry(&env
->tlb_table
[0][i
], addr
);
1380 tlb_flush_entry(&env
->tlb_table
[1][i
], addr
);
1381 #if (NB_MMU_MODES >= 3)
1382 tlb_flush_entry(&env
->tlb_table
[2][i
], addr
);
1383 #if (NB_MMU_MODES == 4)
1384 tlb_flush_entry(&env
->tlb_table
[3][i
], addr
);
1388 /* Discard jump cache entries for any tb which might potentially
1389 overlap the flushed page. */
1390 i
= tb_jmp_cache_hash_page(addr
- TARGET_PAGE_SIZE
);
1391 memset (&env
->tb_jmp_cache
[i
], 0, TB_JMP_PAGE_SIZE
* sizeof(tb
));
1393 i
= tb_jmp_cache_hash_page(addr
);
1394 memset (&env
->tb_jmp_cache
[i
], 0, TB_JMP_PAGE_SIZE
* sizeof(tb
));
1396 #if !defined(CONFIG_SOFTMMU)
1397 if (addr
< MMAP_AREA_END
)
1398 munmap((void *)addr
, TARGET_PAGE_SIZE
);
1401 if (env
->kqemu_enabled
) {
1402 kqemu_flush_page(env
, addr
);
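
/* Worked example (illustration only, assuming 4 KB target pages and a
   256-entry TLB): addr 0x12345678 selects set
   (0x12345678 >> 12) & 0xff == 0x45, so only that slot of each MMU mode
   is flushed instead of the whole table. */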
/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
}
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
        }
    }
}
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i, mask, len;
    uint8_t *p;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    len = length >> TARGET_PAGE_BITS;
#ifdef USE_KQEMU
    /* XXX: should not depend on cpu context */
    env = first_cpu;
    if (env->kqemu_enabled) {
        ram_addr_t addr;
        addr = start;
        for (i = 0; i < len; i++) {
            kqemu_set_notdirty(env, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }
#endif
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    for (i = 0; i < len; i++)
        p[i] &= mask;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = start + (unsigned long)phys_ram_base;
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        for (i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
        for (i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
#if (NB_MMU_MODES >= 3)
        for (i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
#if (NB_MMU_MODES == 4)
        for (i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
#endif
#endif
    }
#if !defined(CONFIG_SOFTMMU)
    /* XXX: this is expensive */
    {
        VirtPageDesc *p;
        int j;
        target_ulong addr;

        for (i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                addr = i << (TARGET_PAGE_BITS + L2_BITS);
                for (j = 0; j < L2_SIZE; j++) {
                    if (p->valid_tag == virt_valid_tag &&
                        p->phys_addr >= start && p->phys_addr < end &&
                        (p->prot & PROT_WRITE)) {
                        if (addr < MMAP_AREA_END) {
                            mprotect((void *)addr, TARGET_PAGE_SIZE,
                                     p->prot & ~PROT_WRITE);
                        }
                    }
                    addr += TARGET_PAGE_SIZE;
                    p++;
                }
            }
        }
    }
#endif
}
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
            tlb_entry->addend - (unsigned long)phys_ram_base;
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
        }
    }
}
/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
{
    int i;

    for (i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[0][i]);
    for (i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[1][i]);
#if (NB_MMU_MODES >= 3)
    for (i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[2][i]);
#if (NB_MMU_MODES == 4)
    for (i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[3][i]);
#endif
#endif
}
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
                                  unsigned long start)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if (addr == start) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
        }
    }
}

/* update the TLB corresponding to virtual page vaddr and phys addr
   addr so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
    int i;

    addr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_set_dirty1(&env->tlb_table[0][i], addr);
    tlb_set_dirty1(&env->tlb_table[1][i], addr);
#if (NB_MMU_MODES >= 3)
    tlb_set_dirty1(&env->tlb_table[2][i], addr);
#if (NB_MMU_MODES == 4)
    tlb_set_dirty1(&env->tlb_table[3][i], addr);
#endif
#endif
}
/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int is_user, int is_softmmu)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_phys_addr_t addend;
    int ret;
    CPUTLBEntry *te;
    int i;

    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x u=%d smmu=%d pd=0x%08lx\n",
           vaddr, (int)paddr, prot, is_user, is_softmmu, pd);
#endif

    ret = 0;
#if !defined(CONFIG_SOFTMMU)
    if (is_softmmu)
#endif
    {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
            /* IO memory case */
            address = vaddr | pd;
            addend = paddr;
        } else {
            /* standard memory */
            address = vaddr;
            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
        }

        /* Make accesses to pages with watchpoints go via the
           watchpoint trap routines.  */
        for (i = 0; i < env->nb_watchpoints; i++) {
            if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
                if (address & ~TARGET_PAGE_MASK) {
                    env->watchpoint[i].is_ram = 0;
                    address = vaddr | io_mem_watch;
                } else {
                    env->watchpoint[i].is_ram = 1;
                    /* TODO: Figure out how to make read watchpoints coexist
                       with code.  */
                    pd = (pd & TARGET_PAGE_MASK) | io_mem_watch | IO_MEM_ROMD;
                }
            }
        }

        index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        addend -= vaddr;
        te = &env->tlb_table[is_user][index];
        te->addend = addend;
        if (prot & PAGE_READ) {
            te->addr_read = address;
        } else {
            te->addr_read = -1;
        }
        if (prot & PAGE_EXEC) {
            te->addr_code = address;
        } else {
            te->addr_code = -1;
        }
        if (prot & PAGE_WRITE) {
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
                (pd & IO_MEM_ROMD)) {
                /* write access calls the I/O callback */
                te->addr_write = vaddr |
                    (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
            } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                       !cpu_physical_memory_is_dirty(pd)) {
                te->addr_write = vaddr | IO_MEM_NOTDIRTY;
            } else {
                te->addr_write = address;
            }
        } else {
            te->addr_write = -1;
        }
    }
#if !defined(CONFIG_SOFTMMU)
    else {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO access: no mapping is done as it will be handled by the
               soft MMU */
            if (!(env->hflags & HF_SOFTMMU_MASK))
                ret = 2;
        } else {
            void *map_addr;

            if (vaddr >= MMAP_AREA_END) {
                ret = 2;
            } else {
                if (prot & PROT_WRITE) {
                    if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
#if defined(TARGET_HAS_SMC) || 1
                        first_tb ||
#endif
                        ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                         !cpu_physical_memory_is_dirty(pd))) {
                        /* ROM: we do as if code was inside */
                        /* if code is present, we only map as read only and save the
                           original mapping */
                        VirtPageDesc *vp;

                        vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
                        vp->phys_addr = pd;
                        vp->prot = prot;
                        vp->valid_tag = virt_valid_tag;
                        prot &= ~PAGE_WRITE;
                    }
                }
                map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
                                MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
                if (map_addr == MAP_FAILED) {
                    cpu_abort(env, "mmap failed when mapped physical address 0x%08x to virtual address 0x%08x\n",
                              paddr, vaddr);
                }
            }
        }
    }
#endif
    return ret;
}
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    VirtPageDesc *vp;

#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x\n", addr);
#endif
    addr &= TARGET_PAGE_MASK;

    /* if it is not mapped, no need to worry here */
    if (addr >= MMAP_AREA_END)
        return 0;
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (!vp)
        return 0;
    /* NOTE: in this case, validate_tag is _not_ tested as it
       validates only the code TLB */
    if (vp->valid_tag != virt_valid_tag)
        return 0;
    if (!(vp->prot & PAGE_WRITE))
        return 0;
#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
           addr, vp->phys_addr, vp->prot);
#endif
    if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
        cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
                  (unsigned long)addr, vp->prot);
    /* set the dirty bit */
    phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
    /* flush the code inside */
    tb_invalidate_phys_page(vp->phys_addr, pc, puc);
    return 1;
#else
    return 0;
#endif
}
#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int is_user, int is_softmmu)
{
    return 0;
}
/* dump memory mappings */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    start = -1;
    end = -1;
    prot = 0;
    for (i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for (j = 0; j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}
int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}
/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is positioned automatically
   depending on PAGE_WRITE */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    PageDesc *p;
    target_ulong addr;

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    spin_lock(&tb_lock);
    for (addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* if the write protection is set, then we invalidate the code
           inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
    spin_unlock(&tb_lock);
}
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    target_ulong host_start, host_end, addr;

    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1)
        return 0;
    host_end = host_start + qemu_host_page_size;
    p = p1;
    prot = 0;
    for (addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)g2h(host_start), qemu_host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            return 1;
        }
    }
    return 0;
}
/* call this function when system calls directly modify a memory area */
/* ??? This should be redundant now we have lock_user.  */
void page_unprotect_range(target_ulong data, target_ulong data_size)
{
    target_ulong start, end, addr;

    start = data;
    end = start + data_size;
    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    for (addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        page_unprotect(addr, 0, NULL);
    }
}

static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */
static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            int memory);
static void *subpage_init(target_phys_addr_t base, uint32_t *phys,
                          unsigned int orig_memory);

#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
                      need_subpage)                                     \
    do {                                                                \
        if (addr > start_addr)                                          \
            start_addr2 = 0;                                            \
        else {                                                          \
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
            if (start_addr2 > 0)                                        \
                need_subpage = 1;                                       \
        }                                                               \
                                                                        \
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
        else {                                                          \
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
                need_subpage = 1;                                       \
        }                                                               \
    } while (0)
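
/* Worked example (illustration only, 4 KB target pages): registering a
   region with start_addr == 0x10000800 and orig_size == 0x400, evaluated
   at the page addr == 0x10000000, gives start_addr2 == 0x800,
   end_addr2 == 0xbff and need_subpage == 1: only bytes 0x800..0xbff of
   that page are routed through the subpage machinery. */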
/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                  unsigned long size,
                                  unsigned long phys_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;
    unsigned long orig_size = size;
    void *subpage;

    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;
    for (addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            unsigned long orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                          need_subpage);

            if (need_subpage) {
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory);
                } else {
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                                            >> IO_MEM_SHIFT];
                }
                subpage_register(subpage, start_addr2, end_addr2, phys_offset);
            } else {
                p->phys_offset = phys_offset;
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                    (phys_offset & IO_MEM_ROMD))
                    phys_offset += TARGET_PAGE_SIZE;
            }
        } else {
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                (phys_offset & IO_MEM_ROMD))
                phys_offset += TARGET_PAGE_SIZE;
            else {
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);

                if (need_subpage) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, IO_MEM_UNASSIGNED);
                    subpage_register(subpage, start_addr2, end_addr2,
                                     phys_offset);
                }
            }
        }
    }

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}
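
/* Usage sketch (illustration; 'ram_offset' and 'iomemtype' are stand-ins,
   not names from this file): board code typically registers RAM backed by
   qemu_ram_alloc() and an MMIO page returned by cpu_register_io_memory(): */
#if 0
    cpu_register_physical_memory(0x00000000, ram_size,
                                 ram_offset | IO_MEM_RAM);
    cpu_register_physical_memory(0x10000000, 0x1000, iomemtype);
#endif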
/* XXX: temporary until new memory mapping API */
uint32_t cpu_get_physical_page_desc(target_phys_addr_t addr)
{
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return IO_MEM_UNASSIGNED;
    return p->phys_offset;
}

/* XXX: better than nothing */
ram_addr_t qemu_ram_alloc(unsigned int size)
{
    ram_addr_t addr;
    if ((phys_ram_alloc_offset + size) >= phys_ram_size) {
        fprintf(stderr, "Not enough memory (requested_size = %u, max memory = %d)\n",
                size, phys_ram_size);
        abort();
    }
    addr = phys_ram_alloc_offset;
    phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
    return addr;
}
void qemu_ram_free(ram_addr_t addr)
{
}
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_lx "\n", addr);
#endif
#ifdef TARGET_SPARC
    do_unassigned_access(addr, 0, 0, 0);
#endif
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_lx " = 0x%x\n", addr, val);
#endif
#ifdef TARGET_SPARC
    do_unassigned_access(addr, 1, 0, 0);
#endif
}

static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readb,
    unassigned_mem_readb,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writeb,
    unassigned_mem_writeb,
};
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stb_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}
static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stw_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}
static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stl_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}
static CPUReadMemoryFunc *error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};
#if defined(CONFIG_SOFTMMU)
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    return ldub_phys(addr);
}

static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    return lduw_phys(addr);
}

static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    return ldl_phys(addr);
}
/* Generate a debug exception if a watchpoint has been hit.
   Returns the real physical address of the access.  addr will be a host
   address in the is_ram case.  */
static target_ulong check_watchpoint(target_phys_addr_t addr)
{
    CPUState *env = cpu_single_env;
    target_ulong watch;
    target_ulong retaddr;
    int i;

    retaddr = addr;
    for (i = 0; i < env->nb_watchpoints; i++) {
        watch = env->watchpoint[i].vaddr;
        if (((env->mem_write_vaddr ^ watch) & TARGET_PAGE_MASK) == 0) {
            if (env->watchpoint[i].is_ram)
                retaddr = addr - (unsigned long)phys_ram_base;
            if (((addr ^ watch) & ~TARGET_PAGE_MASK) == 0) {
                cpu_single_env->watchpoint_hit = i + 1;
                cpu_interrupt(cpu_single_env, CPU_INTERRUPT_DEBUG);
                break;
            }
        }
    }
    return retaddr;
}
static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    addr = check_watchpoint(addr);
    stb_phys(addr, val);
}

static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    addr = check_watchpoint(addr);
    stw_phys(addr, val);
}

static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    addr = check_watchpoint(addr);
    stl_phys(addr, val);
}

static CPUReadMemoryFunc *watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};

static CPUWriteMemoryFunc *watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};
#endif
static inline uint32_t subpage_readlen(subpage_t *mmio, target_phys_addr_t addr,
                                       unsigned int len)
{
    CPUReadMemoryFunc **mem_read;
    uint32_t ret;
    unsigned int idx;

    idx = SUBPAGE_IDX(addr - mmio->base);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif
    mem_read = mmio->mem_read[idx];
    ret = (*mem_read[len])(mmio->opaque[idx], addr);

    return ret;
}

static inline void subpage_writelen(subpage_t *mmio, target_phys_addr_t addr,
                                    uint32_t value, unsigned int len)
{
    CPUWriteMemoryFunc **mem_write;
    unsigned int idx;

    idx = SUBPAGE_IDX(addr - mmio->base);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
           mmio, len, addr, idx, value);
#endif
    mem_write = mmio->mem_write[idx];
    (*mem_write[len])(mmio->opaque[idx], addr, value);
}
static uint32_t subpage_readb(void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 0);
}

static void subpage_writeb(void *opaque, target_phys_addr_t addr,
                           uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 0);
}

static uint32_t subpage_readw(void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 1);
}

static void subpage_writew(void *opaque, target_phys_addr_t addr,
                           uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 1);
}

static uint32_t subpage_readl(void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 2);
}

static void subpage_writel(void *opaque,
                           target_phys_addr_t addr, uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 2);
}
static CPUReadMemoryFunc *subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};

static CPUWriteMemoryFunc *subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             int memory)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    memory >>= IO_MEM_SHIFT;
    for (; idx <= eidx; idx++) {
        mmio->mem_read[idx] = io_mem_read[memory];
        mmio->mem_write[idx] = io_mem_write[memory];
        mmio->opaque[idx] = io_mem_opaque[memory];
    }

    return 0;
}
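/* Usage sketch (editor's illustration, not original code): splitting one
   target page between RAM and a hypothetical device that was already
   registered as dev_io = cpu_register_io_memory(...):

       subpage_register(mmio, 0x000, 0x3ff, IO_MEM_RAM);
       subpage_register(mmio, 0x400, 0x7ff, dev_io);

   Offsets are relative to the start of the page and must stay below
   TARGET_PAGE_SIZE, otherwise the function bails out with -1. */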
static void *subpage_init (target_phys_addr_t base, uint32_t *phys,
                           int orig_memory)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = qemu_mallocz(sizeof(subpage_t));
    if (mmio != NULL) {
        mmio->base = base;
        subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
#if defined(DEBUG_SUBPAGE)
        printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
               mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
        *phys = subpage_memory | IO_MEM_SUBPAGE;
        subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
    }

    return mmio;
}
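/* Editor's note: the value stored through *phys is a phys_offset encoding
   whose IO_MEM_SUBPAGE bit marks the page as sub-page mapped; the whole
   page is first pointed at orig_memory, and finer-grained regions are
   carved out with further subpage_register() calls.  A caller might
   conceptually do (sketch only; the real call site is
   cpu_register_physical_memory() earlier in this file):

       subpage_t *sp = subpage_init(page_base, &p->phys_offset,
                                    p->phys_offset);
*/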
static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
    io_mem_nb = 5;

#if defined(CONFIG_SOFTMMU)
    io_mem_watch = cpu_register_io_memory(-1, watch_mem_read,
                                          watch_mem_write, NULL);
#endif
    /* alloc dirty bits array */
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
}
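/* Sizing example (editor's note): phys_ram_dirty holds one byte of dirty
   flags per target page, so with 4 KiB pages a 128 MiB guest needs
   128 MiB >> 12 = 32768 bytes.  Initializing the array to 0xff marks
   every page dirty, which is the conservative starting state. */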
/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). All functions must be supplied. If io_index is non-zero, the
   corresponding I/O zone is modified. If it is zero, a new I/O zone is
   allocated. The return value can be used with
   cpu_register_physical_memory(). (-1) is returned on error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i;

    if (io_index <= 0) {
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0;i < 3; i++) {
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return io_index << IO_MEM_SHIFT;
}
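/* Usage sketch (editor's illustration; the device callbacks and state
   pointer s are made up for the example):

       static CPUReadMemoryFunc *mydev_read[3] = {
           mydev_readb, mydev_readw, mydev_readl,
       };
       static CPUWriteMemoryFunc *mydev_write[3] = {
           mydev_writeb, mydev_writew, mydev_writel,
       };

       int io = cpu_register_io_memory(0, mydev_read, mydev_write, s);
       cpu_register_physical_memory(0x10000000, 0x1000, io);

   Passing 0 allocates a fresh slot; the shifted return value is exactly
   what cpu_register_physical_memory() expects as a phys_offset. */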
CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
{
    return io_mem_write[io_index >> IO_MEM_SHIFT];
}

CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
{
    return io_mem_read[io_index >> IO_MEM_SHIFT];
}
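/* Editor's note: these two accessors undo the IO_MEM_SHIFT encoding,
   letting a caller retrieve (and, e.g., patch) the callback table of an
   already registered region.  Sketch, with a hypothetical tracing hook:

       CPUWriteMemoryFunc **w = cpu_get_io_memory_write(io);
       w[2] = my_traced_writel;    -- replace only the 32-bit handler
*/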
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            p = lock_user(addr, len, 0);
            memcpy(p, buf, len);
            unlock_user(p, addr, len);
        } else {
            if (!(flags & PAGE_READ))
                return;
            p = lock_user(addr, len, 1);
            memcpy(buf, p, len);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
#endif
/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = phys_ram_base + addr1;
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}
/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}
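/* Editor's note on the I/O path above: 64-bit device reads are issued as
   two 32-bit reads through the [2] (longword) callback, with the halves
   recombined according to TARGET_WORDS_BIGENDIAN.  For a little-endian
   target, for example:

       val  = read32(addr);                      -- low half
       val |= (uint64_t)read32(addr + 4) << 32;  -- high half

   where read32 stands for io_mem_read[io_index][2]. */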
/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* XXX: optimize */
uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}
/* warning: addr must be aligned. The RAM page is not marked as dirty
   and the code inside is not invalidated. This is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stl_p(ptr, val);
    }
}
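/* Editor's sketch: a target's page-table walker that emulates hardware
   accessed/dirty updates would use this variant so its own PTE writes do
   not perturb the dirty tracking, conceptually:

       pte = ldl_phys(pte_addr);
       stl_phys_notdirty(pte_addr, pte | PG_ACCESSED_MASK);

   (PG_ACCESSED_MASK is the x86-style flag, used here only as an
   example). */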
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}
/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}
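/* Dirty-bit bookkeeping above, spelled out (editor's note): a store to a
   clean RAM page first invalidates any TBs derived from the 4 bytes just
   written, then sets every dirty bit for the page except CODE_DIRTY_FLAG:

       phys_ram_dirty[page_index] |= (0xff & ~CODE_DIRTY_FLAG);

   CODE_DIRTY_FLAG stays clear, so later writes to this page still go
   through the self-modifying-code checks. */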
/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}
/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
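/* Usage sketch (editor's example): this is the entry point the gdb stub
   uses to access guest *virtual* memory, one page at a time:

       uint32_t insn;
       if (cpu_memory_rw_debug(env, pc, (uint8_t *)&insn, 4, 0) < 0)
           ...                        -- page not mapped

   Each page is translated with cpu_get_phys_page_debug() and then
   accessed through cpu_physical_memory_rw(). */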
void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "TB count %d\n", nb_tbs);
    cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
}
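/* Editor's note: any fprintf-compatible callback works here, e.g.

       dump_exec_info(stderr, fprintf);

   since fprintf matches the cpu_fprintf signature exactly.  In QEMU
   proper, the monitor's "info jit" command is the usual caller. */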
#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif