2 * virtual page mapping and translated block handling
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 #define WIN32_LEAN_AND_MEAN
25 #include <sys/types.h>
38 #include "qemu-common.h"
39 #if defined(CONFIG_USER_ONLY)
43 //#define DEBUG_TB_INVALIDATE
46 //#define DEBUG_UNASSIGNED
48 /* make various TB consistency checks */
49 //#define DEBUG_TB_CHECK
50 //#define DEBUG_TLB_CHECK
52 //#define DEBUG_IOPORT
53 //#define DEBUG_SUBPAGE
55 #if !defined(CONFIG_USER_ONLY)
56 /* TB consistency checks only implemented for usermode emulation. */
60 /* threshold to flush the translated code buffer */
61 #define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - code_gen_max_block_size())
63 #define SMC_BITMAP_USE_THRESHOLD 10
65 #define MMAP_AREA_START 0x00000000
66 #define MMAP_AREA_END 0xa8000000
68 #if defined(TARGET_SPARC64)
69 #define TARGET_PHYS_ADDR_SPACE_BITS 41
70 #elif defined(TARGET_SPARC)
71 #define TARGET_PHYS_ADDR_SPACE_BITS 36
72 #elif defined(TARGET_ALPHA)
73 #define TARGET_PHYS_ADDR_SPACE_BITS 42
74 #define TARGET_VIRT_ADDR_SPACE_BITS 42
75 #elif defined(TARGET_PPC64)
76 #define TARGET_PHYS_ADDR_SPACE_BITS 42
77 #elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
78 #define TARGET_PHYS_ADDR_SPACE_BITS 42
79 #elif defined(TARGET_I386) && !defined(USE_KQEMU)
80 #define TARGET_PHYS_ADDR_SPACE_BITS 36
82 /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
83 #define TARGET_PHYS_ADDR_SPACE_BITS 32
86 TranslationBlock tbs
[CODE_GEN_MAX_BLOCKS
];
87 TranslationBlock
*tb_phys_hash
[CODE_GEN_PHYS_HASH_SIZE
];
89 /* any access to the tbs or the page table must use this lock */
90 spinlock_t tb_lock
= SPIN_LOCK_UNLOCKED
;
92 uint8_t code_gen_prologue
[1024] __attribute__((aligned (32)));
93 uint8_t code_gen_buffer
[CODE_GEN_BUFFER_SIZE
] __attribute__((aligned (32)));
94 uint8_t *code_gen_ptr
;
96 ram_addr_t phys_ram_size
;
98 uint8_t *phys_ram_base
;
99 uint8_t *phys_ram_dirty
;
100 static ram_addr_t phys_ram_alloc_offset
= 0;
103 /* current CPU in the current thread. It is only valid inside
105 CPUState
*cpu_single_env
;
107 typedef struct PageDesc
{
108 /* list of TBs intersecting this ram page */
109 TranslationBlock
*first_tb
;
110 /* in order to optimize self modifying code, we count the number
111 of lookups we do to a given page to use a bitmap */
112 unsigned int code_write_count
;
113 uint8_t *code_bitmap
;
114 #if defined(CONFIG_USER_ONLY)
119 typedef struct PhysPageDesc
{
120 /* offset in host memory of the page + io_index in the low 12 bits */
121 ram_addr_t phys_offset
;
125 #if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
126 /* XXX: this is a temporary hack for alpha target.
127 * In the future, this is to be replaced by a multi-level table
128 * to actually be able to handle the complete 64 bits address space.
130 #define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
132 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
135 #define L1_SIZE (1 << L1_BITS)
136 #define L2_SIZE (1 << L2_BITS)
138 static void io_mem_init(void);
140 unsigned long qemu_real_host_page_size
;
141 unsigned long qemu_host_page_bits
;
142 unsigned long qemu_host_page_size
;
143 unsigned long qemu_host_page_mask
;
145 /* XXX: for system emulation, it could just be an array */
146 static PageDesc
*l1_map
[L1_SIZE
];
147 PhysPageDesc
**l1_phys_map
;
149 /* io memory support */
150 CPUWriteMemoryFunc
*io_mem_write
[IO_MEM_NB_ENTRIES
][4];
151 CPUReadMemoryFunc
*io_mem_read
[IO_MEM_NB_ENTRIES
][4];
152 void *io_mem_opaque
[IO_MEM_NB_ENTRIES
];
153 static int io_mem_nb
;
154 #if defined(CONFIG_SOFTMMU)
155 static int io_mem_watch
;
159 char *logfilename
= "/tmp/qemu.log";
162 static int log_append
= 0;
165 static int tlb_flush_count
;
166 static int tb_flush_count
;
167 static int tb_phys_invalidate_count
;
169 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
170 typedef struct subpage_t
{
171 target_phys_addr_t base
;
172 CPUReadMemoryFunc
**mem_read
[TARGET_PAGE_SIZE
][4];
173 CPUWriteMemoryFunc
**mem_write
[TARGET_PAGE_SIZE
][4];
174 void *opaque
[TARGET_PAGE_SIZE
][2][4];
178 static void map_exec(void *addr
, long size
)
181 VirtualProtect(addr
, size
,
182 PAGE_EXECUTE_READWRITE
, &old_protect
);
186 static void map_exec(void *addr
, long size
)
188 unsigned long start
, end
;
190 start
= (unsigned long)addr
;
191 start
&= ~(qemu_real_host_page_size
- 1);
193 end
= (unsigned long)addr
+ size
;
194 end
+= qemu_real_host_page_size
- 1;
195 end
&= ~(qemu_real_host_page_size
- 1);
197 mprotect((void *)start
, end
- start
,
198 PROT_READ
| PROT_WRITE
| PROT_EXEC
);
202 static void page_init(void)
204 /* NOTE: we can always suppose that qemu_host_page_size >=
208 SYSTEM_INFO system_info
;
211 GetSystemInfo(&system_info
);
212 qemu_real_host_page_size
= system_info
.dwPageSize
;
215 qemu_real_host_page_size
= getpagesize();
217 map_exec(code_gen_buffer
, sizeof(code_gen_buffer
));
218 map_exec(code_gen_prologue
, sizeof(code_gen_prologue
));
220 if (qemu_host_page_size
== 0)
221 qemu_host_page_size
= qemu_real_host_page_size
;
222 if (qemu_host_page_size
< TARGET_PAGE_SIZE
)
223 qemu_host_page_size
= TARGET_PAGE_SIZE
;
224 qemu_host_page_bits
= 0;
225 while ((1 << qemu_host_page_bits
) < qemu_host_page_size
)
226 qemu_host_page_bits
++;
227 qemu_host_page_mask
= ~(qemu_host_page_size
- 1);
228 l1_phys_map
= qemu_vmalloc(L1_SIZE
* sizeof(void *));
229 memset(l1_phys_map
, 0, L1_SIZE
* sizeof(void *));
231 #if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
233 long long startaddr
, endaddr
;
237 f
= fopen("/proc/self/maps", "r");
240 n
= fscanf (f
, "%llx-%llx %*[^\n]\n", &startaddr
, &endaddr
);
242 startaddr
= MIN(startaddr
,
243 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS
) - 1);
244 endaddr
= MIN(endaddr
,
245 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS
) - 1);
246 page_set_flags(TARGET_PAGE_ALIGN(startaddr
),
247 TARGET_PAGE_ALIGN(endaddr
),
257 static inline PageDesc
*page_find_alloc(target_ulong index
)
261 lp
= &l1_map
[index
>> L2_BITS
];
264 /* allocate if not found */
265 p
= qemu_malloc(sizeof(PageDesc
) * L2_SIZE
);
266 memset(p
, 0, sizeof(PageDesc
) * L2_SIZE
);
269 return p
+ (index
& (L2_SIZE
- 1));
272 static inline PageDesc
*page_find(target_ulong index
)
276 p
= l1_map
[index
>> L2_BITS
];
279 return p
+ (index
& (L2_SIZE
- 1));
282 static PhysPageDesc
*phys_page_find_alloc(target_phys_addr_t index
, int alloc
)
287 p
= (void **)l1_phys_map
;
288 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
290 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
291 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
293 lp
= p
+ ((index
>> (L1_BITS
+ L2_BITS
)) & (L1_SIZE
- 1));
296 /* allocate if not found */
299 p
= qemu_vmalloc(sizeof(void *) * L1_SIZE
);
300 memset(p
, 0, sizeof(void *) * L1_SIZE
);
304 lp
= p
+ ((index
>> L2_BITS
) & (L1_SIZE
- 1));
308 /* allocate if not found */
311 pd
= qemu_vmalloc(sizeof(PhysPageDesc
) * L2_SIZE
);
313 for (i
= 0; i
< L2_SIZE
; i
++)
314 pd
[i
].phys_offset
= IO_MEM_UNASSIGNED
;
316 return ((PhysPageDesc
*)pd
) + (index
& (L2_SIZE
- 1));
319 static inline PhysPageDesc
*phys_page_find(target_phys_addr_t index
)
321 return phys_page_find_alloc(index
, 0);
324 #if !defined(CONFIG_USER_ONLY)
325 static void tlb_protect_code(ram_addr_t ram_addr
);
326 static void tlb_unprotect_code_phys(CPUState
*env
, ram_addr_t ram_addr
,
330 void cpu_exec_init(CPUState
*env
)
337 code_gen_ptr
= code_gen_buffer
;
341 env
->next_cpu
= NULL
;
344 while (*penv
!= NULL
) {
345 penv
= (CPUState
**)&(*penv
)->next_cpu
;
348 env
->cpu_index
= cpu_index
;
349 env
->nb_watchpoints
= 0;
353 static inline void invalidate_page_bitmap(PageDesc
*p
)
355 if (p
->code_bitmap
) {
356 qemu_free(p
->code_bitmap
);
357 p
->code_bitmap
= NULL
;
359 p
->code_write_count
= 0;
362 /* set to NULL all the 'first_tb' fields in all PageDescs */
363 static void page_flush_tb(void)
368 for(i
= 0; i
< L1_SIZE
; i
++) {
371 for(j
= 0; j
< L2_SIZE
; j
++) {
373 invalidate_page_bitmap(p
);
380 /* flush all the translation blocks */
381 /* XXX: tb_flush is currently not thread safe */
382 void tb_flush(CPUState
*env1
)
385 #if defined(DEBUG_FLUSH)
386 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
387 (unsigned long)(code_gen_ptr
- code_gen_buffer
),
389 ((unsigned long)(code_gen_ptr
- code_gen_buffer
)) / nb_tbs
: 0);
391 if ((unsigned long)(code_gen_ptr
- code_gen_buffer
) > CODE_GEN_BUFFER_SIZE
)
392 cpu_abort(env1
, "Internal error: code buffer overflow\n");
396 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
397 memset (env
->tb_jmp_cache
, 0, TB_JMP_CACHE_SIZE
* sizeof (void *));
400 memset (tb_phys_hash
, 0, CODE_GEN_PHYS_HASH_SIZE
* sizeof (void *));
403 code_gen_ptr
= code_gen_buffer
;
404 /* XXX: flush processor icache at this point if cache flush is
409 #ifdef DEBUG_TB_CHECK
411 static void tb_invalidate_check(target_ulong address
)
413 TranslationBlock
*tb
;
415 address
&= TARGET_PAGE_MASK
;
416 for(i
= 0;i
< CODE_GEN_PHYS_HASH_SIZE
; i
++) {
417 for(tb
= tb_phys_hash
[i
]; tb
!= NULL
; tb
= tb
->phys_hash_next
) {
418 if (!(address
+ TARGET_PAGE_SIZE
<= tb
->pc
||
419 address
>= tb
->pc
+ tb
->size
)) {
420 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
421 address
, (long)tb
->pc
, tb
->size
);
427 /* verify that all the pages have correct rights for code */
428 static void tb_page_check(void)
430 TranslationBlock
*tb
;
431 int i
, flags1
, flags2
;
433 for(i
= 0;i
< CODE_GEN_PHYS_HASH_SIZE
; i
++) {
434 for(tb
= tb_phys_hash
[i
]; tb
!= NULL
; tb
= tb
->phys_hash_next
) {
435 flags1
= page_get_flags(tb
->pc
);
436 flags2
= page_get_flags(tb
->pc
+ tb
->size
- 1);
437 if ((flags1
& PAGE_WRITE
) || (flags2
& PAGE_WRITE
)) {
438 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
439 (long)tb
->pc
, tb
->size
, flags1
, flags2
);
445 void tb_jmp_check(TranslationBlock
*tb
)
447 TranslationBlock
*tb1
;
450 /* suppress any remaining jumps to this TB */
454 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
457 tb1
= tb1
->jmp_next
[n1
];
459 /* check end of list */
461 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb
);
467 /* invalidate one TB */
468 static inline void tb_remove(TranslationBlock
**ptb
, TranslationBlock
*tb
,
471 TranslationBlock
*tb1
;
475 *ptb
= *(TranslationBlock
**)((char *)tb1
+ next_offset
);
478 ptb
= (TranslationBlock
**)((char *)tb1
+ next_offset
);
482 static inline void tb_page_remove(TranslationBlock
**ptb
, TranslationBlock
*tb
)
484 TranslationBlock
*tb1
;
490 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
492 *ptb
= tb1
->page_next
[n1
];
495 ptb
= &tb1
->page_next
[n1
];
499 static inline void tb_jmp_remove(TranslationBlock
*tb
, int n
)
501 TranslationBlock
*tb1
, **ptb
;
504 ptb
= &tb
->jmp_next
[n
];
507 /* find tb(n) in circular list */
511 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
512 if (n1
== n
&& tb1
== tb
)
515 ptb
= &tb1
->jmp_first
;
517 ptb
= &tb1
->jmp_next
[n1
];
520 /* now we can suppress tb(n) from the list */
521 *ptb
= tb
->jmp_next
[n
];
523 tb
->jmp_next
[n
] = NULL
;
527 /* reset the jump entry 'n' of a TB so that it is not chained to
529 static inline void tb_reset_jump(TranslationBlock
*tb
, int n
)
531 tb_set_jmp_target(tb
, n
, (unsigned long)(tb
->tc_ptr
+ tb
->tb_next_offset
[n
]));
534 static inline void tb_phys_invalidate(TranslationBlock
*tb
, target_ulong page_addr
)
539 target_phys_addr_t phys_pc
;
540 TranslationBlock
*tb1
, *tb2
;
542 /* remove the TB from the hash list */
543 phys_pc
= tb
->page_addr
[0] + (tb
->pc
& ~TARGET_PAGE_MASK
);
544 h
= tb_phys_hash_func(phys_pc
);
545 tb_remove(&tb_phys_hash
[h
], tb
,
546 offsetof(TranslationBlock
, phys_hash_next
));
548 /* remove the TB from the page list */
549 if (tb
->page_addr
[0] != page_addr
) {
550 p
= page_find(tb
->page_addr
[0] >> TARGET_PAGE_BITS
);
551 tb_page_remove(&p
->first_tb
, tb
);
552 invalidate_page_bitmap(p
);
554 if (tb
->page_addr
[1] != -1 && tb
->page_addr
[1] != page_addr
) {
555 p
= page_find(tb
->page_addr
[1] >> TARGET_PAGE_BITS
);
556 tb_page_remove(&p
->first_tb
, tb
);
557 invalidate_page_bitmap(p
);
560 tb_invalidated_flag
= 1;
562 /* remove the TB from the hash list */
563 h
= tb_jmp_cache_hash_func(tb
->pc
);
564 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
565 if (env
->tb_jmp_cache
[h
] == tb
)
566 env
->tb_jmp_cache
[h
] = NULL
;
569 /* suppress this TB from the two jump lists */
570 tb_jmp_remove(tb
, 0);
571 tb_jmp_remove(tb
, 1);
573 /* suppress any remaining jumps to this TB */
579 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
580 tb2
= tb1
->jmp_next
[n1
];
581 tb_reset_jump(tb1
, n1
);
582 tb1
->jmp_next
[n1
] = NULL
;
585 tb
->jmp_first
= (TranslationBlock
*)((long)tb
| 2); /* fail safe */
587 tb_phys_invalidate_count
++;
590 static inline void set_bits(uint8_t *tab
, int start
, int len
)
596 mask
= 0xff << (start
& 7);
597 if ((start
& ~7) == (end
& ~7)) {
599 mask
&= ~(0xff << (end
& 7));
604 start
= (start
+ 8) & ~7;
606 while (start
< end1
) {
611 mask
= ~(0xff << (end
& 7));
617 static void build_page_bitmap(PageDesc
*p
)
619 int n
, tb_start
, tb_end
;
620 TranslationBlock
*tb
;
622 p
->code_bitmap
= qemu_malloc(TARGET_PAGE_SIZE
/ 8);
625 memset(p
->code_bitmap
, 0, TARGET_PAGE_SIZE
/ 8);
630 tb
= (TranslationBlock
*)((long)tb
& ~3);
631 /* NOTE: this is subtle as a TB may span two physical pages */
633 /* NOTE: tb_end may be after the end of the page, but
634 it is not a problem */
635 tb_start
= tb
->pc
& ~TARGET_PAGE_MASK
;
636 tb_end
= tb_start
+ tb
->size
;
637 if (tb_end
> TARGET_PAGE_SIZE
)
638 tb_end
= TARGET_PAGE_SIZE
;
641 tb_end
= ((tb
->pc
+ tb
->size
) & ~TARGET_PAGE_MASK
);
643 set_bits(p
->code_bitmap
, tb_start
, tb_end
- tb_start
);
644 tb
= tb
->page_next
[n
];
648 #ifdef TARGET_HAS_PRECISE_SMC
650 static void tb_gen_code(CPUState
*env
,
651 target_ulong pc
, target_ulong cs_base
, int flags
,
654 TranslationBlock
*tb
;
656 target_ulong phys_pc
, phys_page2
, virt_page2
;
659 phys_pc
= get_phys_addr_code(env
, pc
);
662 /* flush must be done */
664 /* cannot fail at this point */
667 tc_ptr
= code_gen_ptr
;
669 tb
->cs_base
= cs_base
;
672 cpu_gen_code(env
, tb
, &code_gen_size
);
673 code_gen_ptr
= (void *)(((unsigned long)code_gen_ptr
+ code_gen_size
+ CODE_GEN_ALIGN
- 1) & ~(CODE_GEN_ALIGN
- 1));
675 /* check next page if needed */
676 virt_page2
= (pc
+ tb
->size
- 1) & TARGET_PAGE_MASK
;
678 if ((pc
& TARGET_PAGE_MASK
) != virt_page2
) {
679 phys_page2
= get_phys_addr_code(env
, virt_page2
);
681 tb_link_phys(tb
, phys_pc
, phys_page2
);
685 /* invalidate all TBs which intersect with the target physical page
686 starting in range [start;end[. NOTE: start and end must refer to
687 the same physical page. 'is_cpu_write_access' should be true if called
688 from a real cpu write access: the virtual CPU will exit the current
689 TB if code is modified inside this TB. */
690 void tb_invalidate_phys_page_range(target_phys_addr_t start
, target_phys_addr_t end
,
691 int is_cpu_write_access
)
693 int n
, current_tb_modified
, current_tb_not_found
, current_flags
;
694 CPUState
*env
= cpu_single_env
;
696 TranslationBlock
*tb
, *tb_next
, *current_tb
, *saved_tb
;
697 target_ulong tb_start
, tb_end
;
698 target_ulong current_pc
, current_cs_base
;
700 p
= page_find(start
>> TARGET_PAGE_BITS
);
703 if (!p
->code_bitmap
&&
704 ++p
->code_write_count
>= SMC_BITMAP_USE_THRESHOLD
&&
705 is_cpu_write_access
) {
706 /* build code bitmap */
707 build_page_bitmap(p
);
710 /* we remove all the TBs in the range [start, end[ */
711 /* XXX: see if in some cases it could be faster to invalidate all the code */
712 current_tb_not_found
= is_cpu_write_access
;
713 current_tb_modified
= 0;
714 current_tb
= NULL
; /* avoid warning */
715 current_pc
= 0; /* avoid warning */
716 current_cs_base
= 0; /* avoid warning */
717 current_flags
= 0; /* avoid warning */
721 tb
= (TranslationBlock
*)((long)tb
& ~3);
722 tb_next
= tb
->page_next
[n
];
723 /* NOTE: this is subtle as a TB may span two physical pages */
725 /* NOTE: tb_end may be after the end of the page, but
726 it is not a problem */
727 tb_start
= tb
->page_addr
[0] + (tb
->pc
& ~TARGET_PAGE_MASK
);
728 tb_end
= tb_start
+ tb
->size
;
730 tb_start
= tb
->page_addr
[1];
731 tb_end
= tb_start
+ ((tb
->pc
+ tb
->size
) & ~TARGET_PAGE_MASK
);
733 if (!(tb_end
<= start
|| tb_start
>= end
)) {
734 #ifdef TARGET_HAS_PRECISE_SMC
735 if (current_tb_not_found
) {
736 current_tb_not_found
= 0;
738 if (env
->mem_write_pc
) {
739 /* now we have a real cpu fault */
740 current_tb
= tb_find_pc(env
->mem_write_pc
);
743 if (current_tb
== tb
&&
744 !(current_tb
->cflags
& CF_SINGLE_INSN
)) {
745 /* If we are modifying the current TB, we must stop
746 its execution. We could be more precise by checking
747 that the modification is after the current PC, but it
748 would require a specialized function to partially
749 restore the CPU state */
751 current_tb_modified
= 1;
752 cpu_restore_state(current_tb
, env
,
753 env
->mem_write_pc
, NULL
);
754 #if defined(TARGET_I386)
755 current_flags
= env
->hflags
;
756 current_flags
|= (env
->eflags
& (IOPL_MASK
| TF_MASK
| VM_MASK
));
757 current_cs_base
= (target_ulong
)env
->segs
[R_CS
].base
;
758 current_pc
= current_cs_base
+ env
->eip
;
760 #error unsupported CPU
763 #endif /* TARGET_HAS_PRECISE_SMC */
764 /* we need to do that to handle the case where a signal
765 occurs while doing tb_phys_invalidate() */
768 saved_tb
= env
->current_tb
;
769 env
->current_tb
= NULL
;
771 tb_phys_invalidate(tb
, -1);
773 env
->current_tb
= saved_tb
;
774 if (env
->interrupt_request
&& env
->current_tb
)
775 cpu_interrupt(env
, env
->interrupt_request
);
780 #if !defined(CONFIG_USER_ONLY)
781 /* if no code remaining, no need to continue to use slow writes */
783 invalidate_page_bitmap(p
);
784 if (is_cpu_write_access
) {
785 tlb_unprotect_code_phys(env
, start
, env
->mem_write_vaddr
);
789 #ifdef TARGET_HAS_PRECISE_SMC
790 if (current_tb_modified
) {
791 /* we generate a block containing just the instruction
792 modifying the memory. It will ensure that it cannot modify
794 env
->current_tb
= NULL
;
795 tb_gen_code(env
, current_pc
, current_cs_base
, current_flags
,
797 cpu_resume_from_signal(env
, NULL
);
802 /* len must be <= 8 and start must be a multiple of len */
803 static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start
, int len
)
810 fprintf(logfile
, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
811 cpu_single_env
->mem_write_vaddr
, len
,
813 cpu_single_env
->eip
+ (long)cpu_single_env
->segs
[R_CS
].base
);
817 p
= page_find(start
>> TARGET_PAGE_BITS
);
820 if (p
->code_bitmap
) {
821 offset
= start
& ~TARGET_PAGE_MASK
;
822 b
= p
->code_bitmap
[offset
>> 3] >> (offset
& 7);
823 if (b
& ((1 << len
) - 1))
827 tb_invalidate_phys_page_range(start
, start
+ len
, 1);
831 #if !defined(CONFIG_SOFTMMU)
832 static void tb_invalidate_phys_page(target_phys_addr_t addr
,
833 unsigned long pc
, void *puc
)
835 int n
, current_flags
, current_tb_modified
;
836 target_ulong current_pc
, current_cs_base
;
838 TranslationBlock
*tb
, *current_tb
;
839 #ifdef TARGET_HAS_PRECISE_SMC
840 CPUState
*env
= cpu_single_env
;
843 addr
&= TARGET_PAGE_MASK
;
844 p
= page_find(addr
>> TARGET_PAGE_BITS
);
848 current_tb_modified
= 0;
850 current_pc
= 0; /* avoid warning */
851 current_cs_base
= 0; /* avoid warning */
852 current_flags
= 0; /* avoid warning */
853 #ifdef TARGET_HAS_PRECISE_SMC
855 current_tb
= tb_find_pc(pc
);
860 tb
= (TranslationBlock
*)((long)tb
& ~3);
861 #ifdef TARGET_HAS_PRECISE_SMC
862 if (current_tb
== tb
&&
863 !(current_tb
->cflags
& CF_SINGLE_INSN
)) {
864 /* If we are modifying the current TB, we must stop
865 its execution. We could be more precise by checking
866 that the modification is after the current PC, but it
867 would require a specialized function to partially
868 restore the CPU state */
870 current_tb_modified
= 1;
871 cpu_restore_state(current_tb
, env
, pc
, puc
);
872 #if defined(TARGET_I386)
873 current_flags
= env
->hflags
;
874 current_flags
|= (env
->eflags
& (IOPL_MASK
| TF_MASK
| VM_MASK
));
875 current_cs_base
= (target_ulong
)env
->segs
[R_CS
].base
;
876 current_pc
= current_cs_base
+ env
->eip
;
878 #error unsupported CPU
881 #endif /* TARGET_HAS_PRECISE_SMC */
882 tb_phys_invalidate(tb
, addr
);
883 tb
= tb
->page_next
[n
];
886 #ifdef TARGET_HAS_PRECISE_SMC
887 if (current_tb_modified
) {
888 /* we generate a block containing just the instruction
889 modifying the memory. It will ensure that it cannot modify
891 env
->current_tb
= NULL
;
892 tb_gen_code(env
, current_pc
, current_cs_base
, current_flags
,
894 cpu_resume_from_signal(env
, puc
);
900 /* add the tb in the target page and protect it if necessary */
901 static inline void tb_alloc_page(TranslationBlock
*tb
,
902 unsigned int n
, target_ulong page_addr
)
905 TranslationBlock
*last_first_tb
;
907 tb
->page_addr
[n
] = page_addr
;
908 p
= page_find_alloc(page_addr
>> TARGET_PAGE_BITS
);
909 tb
->page_next
[n
] = p
->first_tb
;
910 last_first_tb
= p
->first_tb
;
911 p
->first_tb
= (TranslationBlock
*)((long)tb
| n
);
912 invalidate_page_bitmap(p
);
914 #if defined(TARGET_HAS_SMC) || 1
916 #if defined(CONFIG_USER_ONLY)
917 if (p
->flags
& PAGE_WRITE
) {
922 /* force the host page as non writable (writes will have a
923 page fault + mprotect overhead) */
924 page_addr
&= qemu_host_page_mask
;
926 for(addr
= page_addr
; addr
< page_addr
+ qemu_host_page_size
;
927 addr
+= TARGET_PAGE_SIZE
) {
929 p2
= page_find (addr
>> TARGET_PAGE_BITS
);
933 p2
->flags
&= ~PAGE_WRITE
;
934 page_get_flags(addr
);
936 mprotect(g2h(page_addr
), qemu_host_page_size
,
937 (prot
& PAGE_BITS
) & ~PAGE_WRITE
);
938 #ifdef DEBUG_TB_INVALIDATE
939 printf("protecting code page: 0x" TARGET_FMT_lx
"\n",
944 /* if some code is already present, then the pages are already
945 protected. So we handle the case where only the first TB is
946 allocated in a physical page */
947 if (!last_first_tb
) {
948 tlb_protect_code(page_addr
);
952 #endif /* TARGET_HAS_SMC */
955 /* Allocate a new translation block. Flush the translation buffer if
956 too many translation blocks or too much generated code. */
957 TranslationBlock
*tb_alloc(target_ulong pc
)
959 TranslationBlock
*tb
;
961 if (nb_tbs
>= CODE_GEN_MAX_BLOCKS
||
962 (code_gen_ptr
- code_gen_buffer
) >= CODE_GEN_BUFFER_MAX_SIZE
)
970 /* add a new TB and link it to the physical page tables. phys_page2 is
971 (-1) to indicate that only one page contains the TB. */
972 void tb_link_phys(TranslationBlock
*tb
,
973 target_ulong phys_pc
, target_ulong phys_page2
)
976 TranslationBlock
**ptb
;
978 /* add in the physical hash table */
979 h
= tb_phys_hash_func(phys_pc
);
980 ptb
= &tb_phys_hash
[h
];
981 tb
->phys_hash_next
= *ptb
;
984 /* add in the page list */
985 tb_alloc_page(tb
, 0, phys_pc
& TARGET_PAGE_MASK
);
986 if (phys_page2
!= -1)
987 tb_alloc_page(tb
, 1, phys_page2
);
989 tb
->page_addr
[1] = -1;
991 tb
->jmp_first
= (TranslationBlock
*)((long)tb
| 2);
992 tb
->jmp_next
[0] = NULL
;
993 tb
->jmp_next
[1] = NULL
;
995 /* init original jump addresses */
996 if (tb
->tb_next_offset
[0] != 0xffff)
997 tb_reset_jump(tb
, 0);
998 if (tb
->tb_next_offset
[1] != 0xffff)
999 tb_reset_jump(tb
, 1);
1001 #ifdef DEBUG_TB_CHECK
1006 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1007 tb[1].tc_ptr. Return NULL if not found */
1008 TranslationBlock
*tb_find_pc(unsigned long tc_ptr
)
1010 int m_min
, m_max
, m
;
1012 TranslationBlock
*tb
;
1016 if (tc_ptr
< (unsigned long)code_gen_buffer
||
1017 tc_ptr
>= (unsigned long)code_gen_ptr
)
1019 /* binary search (cf Knuth) */
1022 while (m_min
<= m_max
) {
1023 m
= (m_min
+ m_max
) >> 1;
1025 v
= (unsigned long)tb
->tc_ptr
;
1028 else if (tc_ptr
< v
) {
1037 static void tb_reset_jump_recursive(TranslationBlock
*tb
);
1039 static inline void tb_reset_jump_recursive2(TranslationBlock
*tb
, int n
)
1041 TranslationBlock
*tb1
, *tb_next
, **ptb
;
1044 tb1
= tb
->jmp_next
[n
];
1046 /* find head of list */
1049 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
1052 tb1
= tb1
->jmp_next
[n1
];
1054 /* we are now sure now that tb jumps to tb1 */
1057 /* remove tb from the jmp_first list */
1058 ptb
= &tb_next
->jmp_first
;
1062 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
1063 if (n1
== n
&& tb1
== tb
)
1065 ptb
= &tb1
->jmp_next
[n1
];
1067 *ptb
= tb
->jmp_next
[n
];
1068 tb
->jmp_next
[n
] = NULL
;
1070 /* suppress the jump to next tb in generated code */
1071 tb_reset_jump(tb
, n
);
1073 /* suppress jumps in the tb on which we could have jumped */
1074 tb_reset_jump_recursive(tb_next
);
1078 static void tb_reset_jump_recursive(TranslationBlock
*tb
)
1080 tb_reset_jump_recursive2(tb
, 0);
1081 tb_reset_jump_recursive2(tb
, 1);
1084 #if defined(TARGET_HAS_ICE)
1085 static void breakpoint_invalidate(CPUState
*env
, target_ulong pc
)
1087 target_phys_addr_t addr
;
1089 ram_addr_t ram_addr
;
1092 addr
= cpu_get_phys_page_debug(env
, pc
);
1093 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
1095 pd
= IO_MEM_UNASSIGNED
;
1097 pd
= p
->phys_offset
;
1099 ram_addr
= (pd
& TARGET_PAGE_MASK
) | (pc
& ~TARGET_PAGE_MASK
);
1100 tb_invalidate_phys_page_range(ram_addr
, ram_addr
+ 1, 0);
1104 /* Add a watchpoint. */
1105 int cpu_watchpoint_insert(CPUState
*env
, target_ulong addr
)
1109 for (i
= 0; i
< env
->nb_watchpoints
; i
++) {
1110 if (addr
== env
->watchpoint
[i
].vaddr
)
1113 if (env
->nb_watchpoints
>= MAX_WATCHPOINTS
)
1116 i
= env
->nb_watchpoints
++;
1117 env
->watchpoint
[i
].vaddr
= addr
;
1118 tlb_flush_page(env
, addr
);
1119 /* FIXME: This flush is needed because of the hack to make memory ops
1120 terminate the TB. It can be removed once the proper IO trap and
1121 re-execute bits are in. */
1126 /* Remove a watchpoint. */
1127 int cpu_watchpoint_remove(CPUState
*env
, target_ulong addr
)
1131 for (i
= 0; i
< env
->nb_watchpoints
; i
++) {
1132 if (addr
== env
->watchpoint
[i
].vaddr
) {
1133 env
->nb_watchpoints
--;
1134 env
->watchpoint
[i
] = env
->watchpoint
[env
->nb_watchpoints
];
1135 tlb_flush_page(env
, addr
);
1142 /* Remove all watchpoints. */
1143 void cpu_watchpoint_remove_all(CPUState
*env
) {
1146 for (i
= 0; i
< env
->nb_watchpoints
; i
++) {
1147 tlb_flush_page(env
, env
->watchpoint
[i
].vaddr
);
1149 env
->nb_watchpoints
= 0;
1152 /* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1153 breakpoint is reached */
1154 int cpu_breakpoint_insert(CPUState
*env
, target_ulong pc
)
1156 #if defined(TARGET_HAS_ICE)
1159 for(i
= 0; i
< env
->nb_breakpoints
; i
++) {
1160 if (env
->breakpoints
[i
] == pc
)
1164 if (env
->nb_breakpoints
>= MAX_BREAKPOINTS
)
1166 env
->breakpoints
[env
->nb_breakpoints
++] = pc
;
1168 breakpoint_invalidate(env
, pc
);
1175 /* remove all breakpoints */
1176 void cpu_breakpoint_remove_all(CPUState
*env
) {
1177 #if defined(TARGET_HAS_ICE)
1179 for(i
= 0; i
< env
->nb_breakpoints
; i
++) {
1180 breakpoint_invalidate(env
, env
->breakpoints
[i
]);
1182 env
->nb_breakpoints
= 0;
1186 /* remove a breakpoint */
1187 int cpu_breakpoint_remove(CPUState
*env
, target_ulong pc
)
1189 #if defined(TARGET_HAS_ICE)
1191 for(i
= 0; i
< env
->nb_breakpoints
; i
++) {
1192 if (env
->breakpoints
[i
] == pc
)
1197 env
->nb_breakpoints
--;
1198 if (i
< env
->nb_breakpoints
)
1199 env
->breakpoints
[i
] = env
->breakpoints
[env
->nb_breakpoints
];
1201 breakpoint_invalidate(env
, pc
);
1208 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1209 CPU loop after each instruction */
1210 void cpu_single_step(CPUState
*env
, int enabled
)
1212 #if defined(TARGET_HAS_ICE)
1213 if (env
->singlestep_enabled
!= enabled
) {
1214 env
->singlestep_enabled
= enabled
;
1215 /* must flush all the translated code to avoid inconsistancies */
1216 /* XXX: only flush what is necessary */
1222 /* enable or disable low levels log */
1223 void cpu_set_log(int log_flags
)
1225 loglevel
= log_flags
;
1226 if (loglevel
&& !logfile
) {
1227 logfile
= fopen(logfilename
, log_append
? "a" : "w");
1229 perror(logfilename
);
1232 #if !defined(CONFIG_SOFTMMU)
1233 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1235 static uint8_t logfile_buf
[4096];
1236 setvbuf(logfile
, logfile_buf
, _IOLBF
, sizeof(logfile_buf
));
1239 setvbuf(logfile
, NULL
, _IOLBF
, 0);
1243 if (!loglevel
&& logfile
) {
1249 void cpu_set_log_filename(const char *filename
)
1251 logfilename
= strdup(filename
);
1256 cpu_set_log(loglevel
);
1259 /* mask must never be zero, except for A20 change call */
1260 void cpu_interrupt(CPUState
*env
, int mask
)
1262 TranslationBlock
*tb
;
1263 static spinlock_t interrupt_lock
= SPIN_LOCK_UNLOCKED
;
1265 env
->interrupt_request
|= mask
;
1266 /* if the cpu is currently executing code, we must unlink it and
1267 all the potentially executing TB */
1268 tb
= env
->current_tb
;
1269 if (tb
&& !testandset(&interrupt_lock
)) {
1270 env
->current_tb
= NULL
;
1271 tb_reset_jump_recursive(tb
);
1272 resetlock(&interrupt_lock
);
1276 void cpu_reset_interrupt(CPUState
*env
, int mask
)
1278 env
->interrupt_request
&= ~mask
;
1281 CPULogItem cpu_log_items
[] = {
1282 { CPU_LOG_TB_OUT_ASM
, "out_asm",
1283 "show generated host assembly code for each compiled TB" },
1284 { CPU_LOG_TB_IN_ASM
, "in_asm",
1285 "show target assembly code for each compiled TB" },
1286 { CPU_LOG_TB_OP
, "op",
1287 "show micro ops for each compiled TB" },
1288 { CPU_LOG_TB_OP_OPT
, "op_opt",
1291 "before eflags optimization and "
1293 "after liveness analysis" },
1294 { CPU_LOG_INT
, "int",
1295 "show interrupts/exceptions in short format" },
1296 { CPU_LOG_EXEC
, "exec",
1297 "show trace before each executed TB (lots of logs)" },
1298 { CPU_LOG_TB_CPU
, "cpu",
1299 "show CPU state before block translation" },
1301 { CPU_LOG_PCALL
, "pcall",
1302 "show protected mode far calls/returns/exceptions" },
1305 { CPU_LOG_IOPORT
, "ioport",
1306 "show all i/o ports accesses" },
1311 static int cmp1(const char *s1
, int n
, const char *s2
)
1313 if (strlen(s2
) != n
)
1315 return memcmp(s1
, s2
, n
) == 0;
1318 /* takes a comma separated list of log masks. Return 0 if error. */
1319 int cpu_str_to_log_mask(const char *str
)
1328 p1
= strchr(p
, ',');
1331 if(cmp1(p
,p1
-p
,"all")) {
1332 for(item
= cpu_log_items
; item
->mask
!= 0; item
++) {
1336 for(item
= cpu_log_items
; item
->mask
!= 0; item
++) {
1337 if (cmp1(p
, p1
- p
, item
->name
))
1351 void cpu_abort(CPUState
*env
, const char *fmt
, ...)
1358 fprintf(stderr
, "qemu: fatal: ");
1359 vfprintf(stderr
, fmt
, ap
);
1360 fprintf(stderr
, "\n");
1362 cpu_dump_state(env
, stderr
, fprintf
, X86_DUMP_FPU
| X86_DUMP_CCOP
);
1364 cpu_dump_state(env
, stderr
, fprintf
, 0);
1367 fprintf(logfile
, "qemu: fatal: ");
1368 vfprintf(logfile
, fmt
, ap2
);
1369 fprintf(logfile
, "\n");
1371 cpu_dump_state(env
, logfile
, fprintf
, X86_DUMP_FPU
| X86_DUMP_CCOP
);
1373 cpu_dump_state(env
, logfile
, fprintf
, 0);
1383 CPUState
*cpu_copy(CPUState
*env
)
1385 CPUState
*new_env
= cpu_init(env
->cpu_model_str
);
1386 /* preserve chaining and index */
1387 CPUState
*next_cpu
= new_env
->next_cpu
;
1388 int cpu_index
= new_env
->cpu_index
;
1389 memcpy(new_env
, env
, sizeof(CPUState
));
1390 new_env
->next_cpu
= next_cpu
;
1391 new_env
->cpu_index
= cpu_index
;
1395 #if !defined(CONFIG_USER_ONLY)
1397 static inline void tlb_flush_jmp_cache(CPUState
*env
, target_ulong addr
)
1401 /* Discard jump cache entries for any tb which might potentially
1402 overlap the flushed page. */
1403 i
= tb_jmp_cache_hash_page(addr
- TARGET_PAGE_SIZE
);
1404 memset (&env
->tb_jmp_cache
[i
], 0,
1405 TB_JMP_PAGE_SIZE
* sizeof(TranslationBlock
*));
1407 i
= tb_jmp_cache_hash_page(addr
);
1408 memset (&env
->tb_jmp_cache
[i
], 0,
1409 TB_JMP_PAGE_SIZE
* sizeof(TranslationBlock
*));
1412 /* NOTE: if flush_global is true, also flush global entries (not
1414 void tlb_flush(CPUState
*env
, int flush_global
)
1418 #if defined(DEBUG_TLB)
1419 printf("tlb_flush:\n");
1421 /* must reset current TB so that interrupts cannot modify the
1422 links while we are modifying them */
1423 env
->current_tb
= NULL
;
1425 for(i
= 0; i
< CPU_TLB_SIZE
; i
++) {
1426 env
->tlb_table
[0][i
].addr_read
= -1;
1427 env
->tlb_table
[0][i
].addr_write
= -1;
1428 env
->tlb_table
[0][i
].addr_code
= -1;
1429 env
->tlb_table
[1][i
].addr_read
= -1;
1430 env
->tlb_table
[1][i
].addr_write
= -1;
1431 env
->tlb_table
[1][i
].addr_code
= -1;
1432 #if (NB_MMU_MODES >= 3)
1433 env
->tlb_table
[2][i
].addr_read
= -1;
1434 env
->tlb_table
[2][i
].addr_write
= -1;
1435 env
->tlb_table
[2][i
].addr_code
= -1;
1436 #if (NB_MMU_MODES == 4)
1437 env
->tlb_table
[3][i
].addr_read
= -1;
1438 env
->tlb_table
[3][i
].addr_write
= -1;
1439 env
->tlb_table
[3][i
].addr_code
= -1;
1444 memset (env
->tb_jmp_cache
, 0, TB_JMP_CACHE_SIZE
* sizeof (void *));
1446 #if !defined(CONFIG_SOFTMMU)
1447 munmap((void *)MMAP_AREA_START
, MMAP_AREA_END
- MMAP_AREA_START
);
1450 if (env
->kqemu_enabled
) {
1451 kqemu_flush(env
, flush_global
);
1457 static inline void tlb_flush_entry(CPUTLBEntry
*tlb_entry
, target_ulong addr
)
1459 if (addr
== (tlb_entry
->addr_read
&
1460 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
)) ||
1461 addr
== (tlb_entry
->addr_write
&
1462 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
)) ||
1463 addr
== (tlb_entry
->addr_code
&
1464 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
))) {
1465 tlb_entry
->addr_read
= -1;
1466 tlb_entry
->addr_write
= -1;
1467 tlb_entry
->addr_code
= -1;
1471 void tlb_flush_page(CPUState
*env
, target_ulong addr
)
1475 #if defined(DEBUG_TLB)
1476 printf("tlb_flush_page: " TARGET_FMT_lx
"\n", addr
);
1478 /* must reset current TB so that interrupts cannot modify the
1479 links while we are modifying them */
1480 env
->current_tb
= NULL
;
1482 addr
&= TARGET_PAGE_MASK
;
1483 i
= (addr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
1484 tlb_flush_entry(&env
->tlb_table
[0][i
], addr
);
1485 tlb_flush_entry(&env
->tlb_table
[1][i
], addr
);
1486 #if (NB_MMU_MODES >= 3)
1487 tlb_flush_entry(&env
->tlb_table
[2][i
], addr
);
1488 #if (NB_MMU_MODES == 4)
1489 tlb_flush_entry(&env
->tlb_table
[3][i
], addr
);
1493 tlb_flush_jmp_cache(env
, addr
);
1495 #if !defined(CONFIG_SOFTMMU)
1496 if (addr
< MMAP_AREA_END
)
1497 munmap((void *)addr
, TARGET_PAGE_SIZE
);
1500 if (env
->kqemu_enabled
) {
1501 kqemu_flush_page(env
, addr
);
1506 /* update the TLBs so that writes to code in the virtual page 'addr'
1508 static void tlb_protect_code(ram_addr_t ram_addr
)
1510 cpu_physical_memory_reset_dirty(ram_addr
,
1511 ram_addr
+ TARGET_PAGE_SIZE
,
1515 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1516 tested for self modifying code */
1517 static void tlb_unprotect_code_phys(CPUState
*env
, ram_addr_t ram_addr
,
1520 phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
] |= CODE_DIRTY_FLAG
;
1523 static inline void tlb_reset_dirty_range(CPUTLBEntry
*tlb_entry
,
1524 unsigned long start
, unsigned long length
)
1527 if ((tlb_entry
->addr_write
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
) {
1528 addr
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) + tlb_entry
->addend
;
1529 if ((addr
- start
) < length
) {
1530 tlb_entry
->addr_write
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) | IO_MEM_NOTDIRTY
;
1535 void cpu_physical_memory_reset_dirty(ram_addr_t start
, ram_addr_t end
,
1539 unsigned long length
, start1
;
1543 start
&= TARGET_PAGE_MASK
;
1544 end
= TARGET_PAGE_ALIGN(end
);
1546 length
= end
- start
;
1549 len
= length
>> TARGET_PAGE_BITS
;
1551 /* XXX: should not depend on cpu context */
1553 if (env
->kqemu_enabled
) {
1556 for(i
= 0; i
< len
; i
++) {
1557 kqemu_set_notdirty(env
, addr
);
1558 addr
+= TARGET_PAGE_SIZE
;
1562 mask
= ~dirty_flags
;
1563 p
= phys_ram_dirty
+ (start
>> TARGET_PAGE_BITS
);
1564 for(i
= 0; i
< len
; i
++)
1567 /* we modify the TLB cache so that the dirty bit will be set again
1568 when accessing the range */
1569 start1
= start
+ (unsigned long)phys_ram_base
;
1570 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
1571 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1572 tlb_reset_dirty_range(&env
->tlb_table
[0][i
], start1
, length
);
1573 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1574 tlb_reset_dirty_range(&env
->tlb_table
[1][i
], start1
, length
);
1575 #if (NB_MMU_MODES >= 3)
1576 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1577 tlb_reset_dirty_range(&env
->tlb_table
[2][i
], start1
, length
);
1578 #if (NB_MMU_MODES == 4)
1579 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1580 tlb_reset_dirty_range(&env
->tlb_table
[3][i
], start1
, length
);
1585 #if !defined(CONFIG_SOFTMMU)
1586 /* XXX: this is expensive */
1592 for(i
= 0; i
< L1_SIZE
; i
++) {
1595 addr
= i
<< (TARGET_PAGE_BITS
+ L2_BITS
);
1596 for(j
= 0; j
< L2_SIZE
; j
++) {
1597 if (p
->valid_tag
== virt_valid_tag
&&
1598 p
->phys_addr
>= start
&& p
->phys_addr
< end
&&
1599 (p
->prot
& PROT_WRITE
)) {
1600 if (addr
< MMAP_AREA_END
) {
1601 mprotect((void *)addr
, TARGET_PAGE_SIZE
,
1602 p
->prot
& ~PROT_WRITE
);
1605 addr
+= TARGET_PAGE_SIZE
;
1614 static inline void tlb_update_dirty(CPUTLBEntry
*tlb_entry
)
1616 ram_addr_t ram_addr
;
1618 if ((tlb_entry
->addr_write
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
) {
1619 ram_addr
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) +
1620 tlb_entry
->addend
- (unsigned long)phys_ram_base
;
1621 if (!cpu_physical_memory_is_dirty(ram_addr
)) {
1622 tlb_entry
->addr_write
|= IO_MEM_NOTDIRTY
;
1627 /* update the TLB according to the current state of the dirty bits */
1628 void cpu_tlb_update_dirty(CPUState
*env
)
1631 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1632 tlb_update_dirty(&env
->tlb_table
[0][i
]);
1633 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1634 tlb_update_dirty(&env
->tlb_table
[1][i
]);
1635 #if (NB_MMU_MODES >= 3)
1636 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1637 tlb_update_dirty(&env
->tlb_table
[2][i
]);
1638 #if (NB_MMU_MODES == 4)
1639 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1640 tlb_update_dirty(&env
->tlb_table
[3][i
]);
1645 static inline void tlb_set_dirty1(CPUTLBEntry
*tlb_entry
,
1646 unsigned long start
)
1649 if ((tlb_entry
->addr_write
& ~TARGET_PAGE_MASK
) == IO_MEM_NOTDIRTY
) {
1650 addr
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) + tlb_entry
->addend
;
1651 if (addr
== start
) {
1652 tlb_entry
->addr_write
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) | IO_MEM_RAM
;
1657 /* update the TLB corresponding to virtual page vaddr and phys addr
1658 addr so that it is no longer dirty */
1659 static inline void tlb_set_dirty(CPUState
*env
,
1660 unsigned long addr
, target_ulong vaddr
)
1664 addr
&= TARGET_PAGE_MASK
;
1665 i
= (vaddr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
1666 tlb_set_dirty1(&env
->tlb_table
[0][i
], addr
);
1667 tlb_set_dirty1(&env
->tlb_table
[1][i
], addr
);
1668 #if (NB_MMU_MODES >= 3)
1669 tlb_set_dirty1(&env
->tlb_table
[2][i
], addr
);
1670 #if (NB_MMU_MODES == 4)
1671 tlb_set_dirty1(&env
->tlb_table
[3][i
], addr
);
1676 /* add a new TLB entry. At most one entry for a given virtual address
1677 is permitted. Return 0 if OK or 2 if the page could not be mapped
1678 (can only happen in non SOFTMMU mode for I/O pages or pages
1679 conflicting with the host address space). */
1680 int tlb_set_page_exec(CPUState
*env
, target_ulong vaddr
,
1681 target_phys_addr_t paddr
, int prot
,
1682 int mmu_idx
, int is_softmmu
)
1687 target_ulong address
;
1688 target_phys_addr_t addend
;
1693 p
= phys_page_find(paddr
>> TARGET_PAGE_BITS
);
1695 pd
= IO_MEM_UNASSIGNED
;
1697 pd
= p
->phys_offset
;
1699 #if defined(DEBUG_TLB)
1700 printf("tlb_set_page: vaddr=" TARGET_FMT_lx
" paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1701 vaddr
, (int)paddr
, prot
, mmu_idx
, is_softmmu
, pd
);
1705 #if !defined(CONFIG_SOFTMMU)
1709 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
&& !(pd
& IO_MEM_ROMD
)) {
1710 /* IO memory case */
1711 address
= vaddr
| pd
;
1714 /* standard memory */
1716 addend
= (unsigned long)phys_ram_base
+ (pd
& TARGET_PAGE_MASK
);
1719 /* Make accesses to pages with watchpoints go via the
1720 watchpoint trap routines. */
1721 for (i
= 0; i
< env
->nb_watchpoints
; i
++) {
1722 if (vaddr
== (env
->watchpoint
[i
].vaddr
& TARGET_PAGE_MASK
)) {
1723 if (address
& ~TARGET_PAGE_MASK
) {
1724 env
->watchpoint
[i
].addend
= 0;
1725 address
= vaddr
| io_mem_watch
;
1727 env
->watchpoint
[i
].addend
= pd
- paddr
+
1728 (unsigned long) phys_ram_base
;
1729 /* TODO: Figure out how to make read watchpoints coexist
1731 pd
= (pd
& TARGET_PAGE_MASK
) | io_mem_watch
| IO_MEM_ROMD
;
1736 index
= (vaddr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
1738 te
= &env
->tlb_table
[mmu_idx
][index
];
1739 te
->addend
= addend
;
1740 if (prot
& PAGE_READ
) {
1741 te
->addr_read
= address
;
1746 if (te
->addr_code
!= -1) {
1747 tlb_flush_jmp_cache(env
, te
->addr_code
);
1749 if (prot
& PAGE_EXEC
) {
1750 te
->addr_code
= address
;
1754 if (prot
& PAGE_WRITE
) {
1755 if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_ROM
||
1756 (pd
& IO_MEM_ROMD
)) {
1757 /* write access calls the I/O callback */
1758 te
->addr_write
= vaddr
|
1759 (pd
& ~(TARGET_PAGE_MASK
| IO_MEM_ROMD
));
1760 } else if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
&&
1761 !cpu_physical_memory_is_dirty(pd
)) {
1762 te
->addr_write
= vaddr
| IO_MEM_NOTDIRTY
;
1764 te
->addr_write
= address
;
1767 te
->addr_write
= -1;
1770 #if !defined(CONFIG_SOFTMMU)
1772 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
) {
1773 /* IO access: no mapping is done as it will be handled by the
1775 if (!(env
->hflags
& HF_SOFTMMU_MASK
))
1780 if (vaddr
>= MMAP_AREA_END
) {
1783 if (prot
& PROT_WRITE
) {
1784 if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_ROM
||
1785 #if defined(TARGET_HAS_SMC) || 1
1788 ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
&&
1789 !cpu_physical_memory_is_dirty(pd
))) {
1790 /* ROM: we do as if code was inside */
1791 /* if code is present, we only map as read only and save the
1795 vp
= virt_page_find_alloc(vaddr
>> TARGET_PAGE_BITS
, 1);
1798 vp
->valid_tag
= virt_valid_tag
;
1799 prot
&= ~PAGE_WRITE
;
1802 map_addr
= mmap((void *)vaddr
, TARGET_PAGE_SIZE
, prot
,
1803 MAP_SHARED
| MAP_FIXED
, phys_ram_fd
, (pd
& TARGET_PAGE_MASK
));
1804 if (map_addr
== MAP_FAILED
) {
1805 cpu_abort(env
, "mmap failed when mapped physical address 0x%08x to virtual address 0x%08x\n",
1815 /* called from signal handler: invalidate the code and unprotect the
1816 page. Return TRUE if the fault was succesfully handled. */
1817 int page_unprotect(target_ulong addr
, unsigned long pc
, void *puc
)
1819 #if !defined(CONFIG_SOFTMMU)
1822 #if defined(DEBUG_TLB)
1823 printf("page_unprotect: addr=0x%08x\n", addr
);
1825 addr
&= TARGET_PAGE_MASK
;
1827 /* if it is not mapped, no need to worry here */
1828 if (addr
>= MMAP_AREA_END
)
1830 vp
= virt_page_find(addr
>> TARGET_PAGE_BITS
);
1833 /* NOTE: in this case, validate_tag is _not_ tested as it
1834 validates only the code TLB */
1835 if (vp
->valid_tag
!= virt_valid_tag
)
1837 if (!(vp
->prot
& PAGE_WRITE
))
1839 #if defined(DEBUG_TLB)
1840 printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
1841 addr
, vp
->phys_addr
, vp
->prot
);
1843 if (mprotect((void *)addr
, TARGET_PAGE_SIZE
, vp
->prot
) < 0)
1844 cpu_abort(cpu_single_env
, "error mprotect addr=0x%lx prot=%d\n",
1845 (unsigned long)addr
, vp
->prot
);
1846 /* set the dirty bit */
1847 phys_ram_dirty
[vp
->phys_addr
>> TARGET_PAGE_BITS
] = 0xff;
1848 /* flush the code inside */
1849 tb_invalidate_phys_page(vp
->phys_addr
, pc
, puc
);
1858 void tlb_flush(CPUState
*env
, int flush_global
)
1862 void tlb_flush_page(CPUState
*env
, target_ulong addr
)
1866 int tlb_set_page_exec(CPUState
*env
, target_ulong vaddr
,
1867 target_phys_addr_t paddr
, int prot
,
1868 int mmu_idx
, int is_softmmu
)
1873 /* dump memory mappings */
1874 void page_dump(FILE *f
)
1876 unsigned long start
, end
;
1877 int i
, j
, prot
, prot1
;
1880 fprintf(f
, "%-8s %-8s %-8s %s\n",
1881 "start", "end", "size", "prot");
1885 for(i
= 0; i
<= L1_SIZE
; i
++) {
1890 for(j
= 0;j
< L2_SIZE
; j
++) {
1895 if (prot1
!= prot
) {
1896 end
= (i
<< (32 - L1_BITS
)) | (j
<< TARGET_PAGE_BITS
);
1898 fprintf(f
, "%08lx-%08lx %08lx %c%c%c\n",
1899 start
, end
, end
- start
,
1900 prot
& PAGE_READ
? 'r' : '-',
1901 prot
& PAGE_WRITE
? 'w' : '-',
1902 prot
& PAGE_EXEC
? 'x' : '-');
1916 int page_get_flags(target_ulong address
)
1920 p
= page_find(address
>> TARGET_PAGE_BITS
);
1926 /* modify the flags of a page and invalidate the code if
1927 necessary. The flag PAGE_WRITE_ORG is positionned automatically
1928 depending on PAGE_WRITE */
1929 void page_set_flags(target_ulong start
, target_ulong end
, int flags
)
1934 start
= start
& TARGET_PAGE_MASK
;
1935 end
= TARGET_PAGE_ALIGN(end
);
1936 if (flags
& PAGE_WRITE
)
1937 flags
|= PAGE_WRITE_ORG
;
1938 spin_lock(&tb_lock
);
1939 for(addr
= start
; addr
< end
; addr
+= TARGET_PAGE_SIZE
) {
1940 p
= page_find_alloc(addr
>> TARGET_PAGE_BITS
);
1941 /* if the write protection is set, then we invalidate the code
1943 if (!(p
->flags
& PAGE_WRITE
) &&
1944 (flags
& PAGE_WRITE
) &&
1946 tb_invalidate_phys_page(addr
, 0, NULL
);
1950 spin_unlock(&tb_lock
);
1953 int page_check_range(target_ulong start
, target_ulong len
, int flags
)
1959 end
= TARGET_PAGE_ALIGN(start
+len
); /* must do before we loose bits in the next step */
1960 start
= start
& TARGET_PAGE_MASK
;
1963 /* we've wrapped around */
1965 for(addr
= start
; addr
< end
; addr
+= TARGET_PAGE_SIZE
) {
1966 p
= page_find(addr
>> TARGET_PAGE_BITS
);
1969 if( !(p
->flags
& PAGE_VALID
) )
1972 if ((flags
& PAGE_READ
) && !(p
->flags
& PAGE_READ
))
1974 if (flags
& PAGE_WRITE
) {
1975 if (!(p
->flags
& PAGE_WRITE_ORG
))
1977 /* unprotect the page if it was put read-only because it
1978 contains translated code */
1979 if (!(p
->flags
& PAGE_WRITE
)) {
1980 if (!page_unprotect(addr
, 0, NULL
))
1989 /* called from signal handler: invalidate the code and unprotect the
1990 page. Return TRUE if the fault was succesfully handled. */
1991 int page_unprotect(target_ulong address
, unsigned long pc
, void *puc
)
1993 unsigned int page_index
, prot
, pindex
;
1995 target_ulong host_start
, host_end
, addr
;
1997 host_start
= address
& qemu_host_page_mask
;
1998 page_index
= host_start
>> TARGET_PAGE_BITS
;
1999 p1
= page_find(page_index
);
2002 host_end
= host_start
+ qemu_host_page_size
;
2005 for(addr
= host_start
;addr
< host_end
; addr
+= TARGET_PAGE_SIZE
) {
2009 /* if the page was really writable, then we change its
2010 protection back to writable */
2011 if (prot
& PAGE_WRITE_ORG
) {
2012 pindex
= (address
- host_start
) >> TARGET_PAGE_BITS
;
2013 if (!(p1
[pindex
].flags
& PAGE_WRITE
)) {
2014 mprotect((void *)g2h(host_start
), qemu_host_page_size
,
2015 (prot
& PAGE_BITS
) | PAGE_WRITE
);
2016 p1
[pindex
].flags
|= PAGE_WRITE
;
2017 /* and since the content will be modified, we must invalidate
2018 the corresponding translated code. */
2019 tb_invalidate_phys_page(address
, pc
, puc
);
2020 #ifdef DEBUG_TB_CHECK
2021 tb_invalidate_check(address
);
2029 static inline void tlb_set_dirty(CPUState
*env
,
2030 unsigned long addr
, target_ulong vaddr
)
2033 #endif /* defined(CONFIG_USER_ONLY) */
2035 static int subpage_register (subpage_t
*mmio
, uint32_t start
, uint32_t end
,
2037 static void *subpage_init (target_phys_addr_t base
, ram_addr_t
*phys
,
2038 ram_addr_t orig_memory
);
2039 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2042 if (addr > start_addr) \
2045 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2046 if (start_addr2 > 0) \
2050 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2051 end_addr2 = TARGET_PAGE_SIZE - 1; \
2053 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2054 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2059 /* register physical memory. 'size' must be a multiple of the target
2060 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2062 void cpu_register_physical_memory(target_phys_addr_t start_addr
,
2064 ram_addr_t phys_offset
)
2066 target_phys_addr_t addr
, end_addr
;
2069 ram_addr_t orig_size
= size
;
2072 size
= (size
+ TARGET_PAGE_SIZE
- 1) & TARGET_PAGE_MASK
;
2073 end_addr
= start_addr
+ (target_phys_addr_t
)size
;
2074 for(addr
= start_addr
; addr
!= end_addr
; addr
+= TARGET_PAGE_SIZE
) {
2075 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
2076 if (p
&& p
->phys_offset
!= IO_MEM_UNASSIGNED
) {
2077 ram_addr_t orig_memory
= p
->phys_offset
;
2078 target_phys_addr_t start_addr2
, end_addr2
;
2079 int need_subpage
= 0;
2081 CHECK_SUBPAGE(addr
, start_addr
, start_addr2
, end_addr
, end_addr2
,
2083 if (need_subpage
|| phys_offset
& IO_MEM_SUBWIDTH
) {
2084 if (!(orig_memory
& IO_MEM_SUBPAGE
)) {
2085 subpage
= subpage_init((addr
& TARGET_PAGE_MASK
),
2086 &p
->phys_offset
, orig_memory
);
2088 subpage
= io_mem_opaque
[(orig_memory
& ~TARGET_PAGE_MASK
)
2091 subpage_register(subpage
, start_addr2
, end_addr2
, phys_offset
);
2093 p
->phys_offset
= phys_offset
;
2094 if ((phys_offset
& ~TARGET_PAGE_MASK
) <= IO_MEM_ROM
||
2095 (phys_offset
& IO_MEM_ROMD
))
2096 phys_offset
+= TARGET_PAGE_SIZE
;
2099 p
= phys_page_find_alloc(addr
>> TARGET_PAGE_BITS
, 1);
2100 p
->phys_offset
= phys_offset
;
2101 if ((phys_offset
& ~TARGET_PAGE_MASK
) <= IO_MEM_ROM
||
2102 (phys_offset
& IO_MEM_ROMD
))
2103 phys_offset
+= TARGET_PAGE_SIZE
;
2105 target_phys_addr_t start_addr2
, end_addr2
;
2106 int need_subpage
= 0;
2108 CHECK_SUBPAGE(addr
, start_addr
, start_addr2
, end_addr
,
2109 end_addr2
, need_subpage
);
2111 if (need_subpage
|| phys_offset
& IO_MEM_SUBWIDTH
) {
2112 subpage
= subpage_init((addr
& TARGET_PAGE_MASK
),
2113 &p
->phys_offset
, IO_MEM_UNASSIGNED
);
2114 subpage_register(subpage
, start_addr2
, end_addr2
,
2121 /* since each CPU stores ram addresses in its TLB cache, we must
2122 reset the modified entries */
2124 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
2129 /* XXX: temporary until new memory mapping API */
2130 ram_addr_t
cpu_get_physical_page_desc(target_phys_addr_t addr
)
2134 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
2136 return IO_MEM_UNASSIGNED
;
2137 return p
->phys_offset
;
2140 /* XXX: better than nothing */
2141 ram_addr_t
qemu_ram_alloc(ram_addr_t size
)
2144 if ((phys_ram_alloc_offset
+ size
) > phys_ram_size
) {
2145 fprintf(stderr
, "Not enough memory (requested_size = %lu, max memory = %ld)\n",
2146 size
, phys_ram_size
);
2149 addr
= phys_ram_alloc_offset
;
2150 phys_ram_alloc_offset
= TARGET_PAGE_ALIGN(phys_ram_alloc_offset
+ size
);
2154 void qemu_ram_free(ram_addr_t addr
)
2158 static uint32_t unassigned_mem_readb(void *opaque
, target_phys_addr_t addr
)
2160 #ifdef DEBUG_UNASSIGNED
2161 printf("Unassigned mem read " TARGET_FMT_plx
"\n", addr
);
2164 do_unassigned_access(addr
, 0, 0, 0);
2166 do_unassigned_access(addr
, 0, 0, 0);
2171 static void unassigned_mem_writeb(void *opaque
, target_phys_addr_t addr
, uint32_t val
)
2173 #ifdef DEBUG_UNASSIGNED
2174 printf("Unassigned mem write " TARGET_FMT_plx
" = 0x%x\n", addr
, val
);
2177 do_unassigned_access(addr
, 1, 0, 0);
2179 do_unassigned_access(addr
, 1, 0, 0);
2183 static CPUReadMemoryFunc
*unassigned_mem_read
[3] = {
2184 unassigned_mem_readb
,
2185 unassigned_mem_readb
,
2186 unassigned_mem_readb
,
2189 static CPUWriteMemoryFunc
*unassigned_mem_write
[3] = {
2190 unassigned_mem_writeb
,
2191 unassigned_mem_writeb
,
2192 unassigned_mem_writeb
,
2195 static void notdirty_mem_writeb(void *opaque
, target_phys_addr_t addr
, uint32_t val
)
2197 unsigned long ram_addr
;
2199 ram_addr
= addr
- (unsigned long)phys_ram_base
;
2200 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2201 if (!(dirty_flags
& CODE_DIRTY_FLAG
)) {
2202 #if !defined(CONFIG_USER_ONLY)
2203 tb_invalidate_phys_page_fast(ram_addr
, 1);
2204 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2207 stb_p((uint8_t *)(long)addr
, val
);
2209 if (cpu_single_env
->kqemu_enabled
&&
2210 (dirty_flags
& KQEMU_MODIFY_PAGE_MASK
) != KQEMU_MODIFY_PAGE_MASK
)
2211 kqemu_modify_page(cpu_single_env
, ram_addr
);
2213 dirty_flags
|= (0xff & ~CODE_DIRTY_FLAG
);
2214 phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
] = dirty_flags
;
2215 /* we remove the notdirty callback only if the code has been
2217 if (dirty_flags
== 0xff)
2218 tlb_set_dirty(cpu_single_env
, addr
, cpu_single_env
->mem_write_vaddr
);
2221 static void notdirty_mem_writew(void *opaque
, target_phys_addr_t addr
, uint32_t val
)
2223 unsigned long ram_addr
;
2225 ram_addr
= addr
- (unsigned long)phys_ram_base
;
2226 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2227 if (!(dirty_flags
& CODE_DIRTY_FLAG
)) {
2228 #if !defined(CONFIG_USER_ONLY)
2229 tb_invalidate_phys_page_fast(ram_addr
, 2);
2230 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2233 stw_p((uint8_t *)(long)addr
, val
);
2235 if (cpu_single_env
->kqemu_enabled
&&
2236 (dirty_flags
& KQEMU_MODIFY_PAGE_MASK
) != KQEMU_MODIFY_PAGE_MASK
)
2237 kqemu_modify_page(cpu_single_env
, ram_addr
);
2239 dirty_flags
|= (0xff & ~CODE_DIRTY_FLAG
);
2240 phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
] = dirty_flags
;
2241 /* we remove the notdirty callback only if the code has been
2243 if (dirty_flags
== 0xff)
2244 tlb_set_dirty(cpu_single_env
, addr
, cpu_single_env
->mem_write_vaddr
);
2247 static void notdirty_mem_writel(void *opaque
, target_phys_addr_t addr
, uint32_t val
)
2249 unsigned long ram_addr
;
2251 ram_addr
= addr
- (unsigned long)phys_ram_base
;
2252 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2253 if (!(dirty_flags
& CODE_DIRTY_FLAG
)) {
2254 #if !defined(CONFIG_USER_ONLY)
2255 tb_invalidate_phys_page_fast(ram_addr
, 4);
2256 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2259 stl_p((uint8_t *)(long)addr
, val
);
2261 if (cpu_single_env
->kqemu_enabled
&&
2262 (dirty_flags
& KQEMU_MODIFY_PAGE_MASK
) != KQEMU_MODIFY_PAGE_MASK
)
2263 kqemu_modify_page(cpu_single_env
, ram_addr
);
2265 dirty_flags
|= (0xff & ~CODE_DIRTY_FLAG
);
2266 phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
] = dirty_flags
;
2267 /* we remove the notdirty callback only if the code has been
2269 if (dirty_flags
== 0xff)
2270 tlb_set_dirty(cpu_single_env
, addr
, cpu_single_env
->mem_write_vaddr
);
2273 static CPUReadMemoryFunc
*error_mem_read
[3] = {
2274 NULL
, /* never used */
2275 NULL
, /* never used */
2276 NULL
, /* never used */
2279 static CPUWriteMemoryFunc
*notdirty_mem_write
[3] = {
2280 notdirty_mem_writeb
,
2281 notdirty_mem_writew
,
2282 notdirty_mem_writel
,
2285 #if defined(CONFIG_SOFTMMU)
2286 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2287 so these check for a hit then pass through to the normal out-of-line
2289 static uint32_t watch_mem_readb(void *opaque
, target_phys_addr_t addr
)
2291 return ldub_phys(addr
);
2294 static uint32_t watch_mem_readw(void *opaque
, target_phys_addr_t addr
)
2296 return lduw_phys(addr
);
2299 static uint32_t watch_mem_readl(void *opaque
, target_phys_addr_t addr
)
2301 return ldl_phys(addr
);
2304 /* Generate a debug exception if a watchpoint has been hit.
2305 Returns the real physical address of the access. addr will be a host
2306 address in case of a RAM location. */
2307 static target_ulong
check_watchpoint(target_phys_addr_t addr
)
2309 CPUState
*env
= cpu_single_env
;
2311 target_ulong retaddr
;
2315 for (i
= 0; i
< env
->nb_watchpoints
; i
++) {
2316 watch
= env
->watchpoint
[i
].vaddr
;
2317 if (((env
->mem_write_vaddr
^ watch
) & TARGET_PAGE_MASK
) == 0) {
2318 retaddr
= addr
- env
->watchpoint
[i
].addend
;
2319 if (((addr
^ watch
) & ~TARGET_PAGE_MASK
) == 0) {
2320 cpu_single_env
->watchpoint_hit
= i
+ 1;
2321 cpu_interrupt(cpu_single_env
, CPU_INTERRUPT_DEBUG
);
2329 static void watch_mem_writeb(void *opaque
, target_phys_addr_t addr
,
2332 addr
= check_watchpoint(addr
);
2333 stb_phys(addr
, val
);
2336 static void watch_mem_writew(void *opaque
, target_phys_addr_t addr
,
2339 addr
= check_watchpoint(addr
);
2340 stw_phys(addr
, val
);
2343 static void watch_mem_writel(void *opaque
, target_phys_addr_t addr
,
2346 addr
= check_watchpoint(addr
);
2347 stl_phys(addr
, val
);
2350 static CPUReadMemoryFunc
*watch_mem_read
[3] = {
2356 static CPUWriteMemoryFunc
*watch_mem_write
[3] = {
static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
                                        unsigned int len)
{
    uint32_t ret;
    unsigned int idx;

    idx = SUBPAGE_IDX(addr - mmio->base);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif
    ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len], addr);

    return ret;
}

static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
                                     uint32_t value, unsigned int len)
{
    unsigned int idx;

    idx = SUBPAGE_IDX(addr - mmio->base);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
           mmio, len, addr, idx, value);
#endif
    (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len], addr, value);
}
static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 0);
}

static void subpage_writeb (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 0);
}

static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 1);
}

static void subpage_writew (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 1);
}

static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 2);
}

static void subpage_writel (void *opaque,
                            target_phys_addr_t addr, uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 2);
}

static CPUReadMemoryFunc *subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};

static CPUWriteMemoryFunc *subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             int memory)
{
    int idx, eidx;
    unsigned int i;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    memory >>= IO_MEM_SHIFT;
    for (; idx <= eidx; idx++) {
        for (i = 0; i < 4; i++) {
            if (io_mem_read[memory][i]) {
                mmio->mem_read[idx][i] = &io_mem_read[memory][i];
                mmio->opaque[idx][0][i] = io_mem_opaque[memory];
            }
            if (io_mem_write[memory][i]) {
                mmio->mem_write[idx][i] = &io_mem_write[memory][i];
                mmio->opaque[idx][1][i] = io_mem_opaque[memory];
            }
        }
    }

    return 0;
}

static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = qemu_mallocz(sizeof(subpage_t));
    if (mmio != NULL) {
        mmio->base = base;
        subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
#if defined(DEBUG_SUBPAGE)
        printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
               mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
        *phys = subpage_memory | IO_MEM_SUBPAGE;
        subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
    }

    return mmio;
}
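/* Illustrative sketch (not compiled): the subpage machinery exists so that
   two io zones can share a single target page.  Registering regions smaller
   than TARGET_PAGE_SIZE through cpu_register_physical_memory() is assumed
   to make the core build a subpage_t whose per-SUBPAGE_IDX tables dispatch
   to the right handlers.  The device io indexes and base address below are
   hypothetical. */
#if 0
static void example_split_page(int dev_a_io, int dev_b_io)
{
    /* First half of the page goes to device A, second half to device B;
       accesses are then routed via subpage_readlen()/subpage_writelen(). */
    cpu_register_physical_memory(0x10000000, TARGET_PAGE_SIZE / 2, dev_a_io);
    cpu_register_physical_memory(0x10000000 + TARGET_PAGE_SIZE / 2,
                                 TARGET_PAGE_SIZE / 2, dev_b_io);
}
#endif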
static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
    io_mem_nb = 5;

#if defined(CONFIG_SOFTMMU)
    io_mem_watch = cpu_register_io_memory(-1, watch_mem_read,
                                          watch_mem_write, NULL);
#endif
    /* alloc dirty bits array */
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
}
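/* Illustrative sketch (not compiled): phys_ram_dirty keeps one flag byte
   per RAM page, initialized above to 0xff (everything dirty).  Display
   code such as VGA is assumed to poll and clear its own flag bit via the
   cpu_physical_memory_get_dirty()/cpu_physical_memory_reset_dirty()
   helpers declared in cpu-all.h. */
#if 0
static void example_scan_dirty(ram_addr_t start, ram_addr_t end)
{
    ram_addr_t a;

    for (a = start; a < end; a += TARGET_PAGE_SIZE) {
        if (cpu_physical_memory_get_dirty(a, VGA_DIRTY_FLAG)) {
            /* redraw the page, then forget about it */
            cpu_physical_memory_reset_dirty(a, a + TARGET_PAGE_SIZE,
                                            VGA_DIRTY_FLAG);
        }
    }
}
#endif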
/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). Functions can be omitted with a NULL function pointer. The
   registered functions may be modified dynamically later.
   If io_index is non-zero, the corresponding io zone is
   modified. If it is zero, a new io zone is allocated. The return
   value can be used with cpu_register_physical_memory(). (-1) is
   returned on error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i, subwidth = 0;

    if (io_index <= 0) {
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0;i < 3; i++) {
        if (!mem_read[i] || !mem_write[i])
            subwidth = IO_MEM_SUBWIDTH;
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return (io_index << IO_MEM_SHIFT) | subwidth;
}
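/* Illustrative sketch (not compiled): registering a byte-wide MMIO device
   with the interface documented above.  Omitted widths are NULL, so the
   returned index carries IO_MEM_SUBWIDTH and wider accesses take the slow
   path.  The device type, register layout and base address below are
   hypothetical. */
#if 0
typedef struct MyDevState {
    uint8_t regs[256];
} MyDevState;

static uint32_t mydev_readb(void *opaque, target_phys_addr_t addr)
{
    MyDevState *s = opaque;
    return s->regs[addr & 0xff];
}

static void mydev_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    MyDevState *s = opaque;
    s->regs[addr & 0xff] = val;
}

static CPUReadMemoryFunc *mydev_read[3] = { mydev_readb, NULL, NULL };
static CPUWriteMemoryFunc *mydev_write[3] = { mydev_writeb, NULL, NULL };

static void mydev_map(MyDevState *s)
{
    /* io_index 0 requests a fresh io zone; the result is the phys_offset
       value to hand to cpu_register_physical_memory() */
    int io = cpu_register_io_memory(0, mydev_read, mydev_write, s);
    cpu_register_physical_memory(0x20000000, 0x100, io);
}
#endif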
CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
{
    return io_mem_write[io_index >> IO_MEM_SHIFT];
}

CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
{
    return io_mem_read[io_index >> IO_MEM_SHIFT];
}
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
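/* Illustrative sketch (not compiled): device emulation is assumed to use
   the cpu_physical_memory_read()/cpu_physical_memory_write() wrappers
   around the function above for DMA-style transfers; the 3-word descriptor
   layout below is hypothetical. */
#if 0
static void example_dma_copy(target_phys_addr_t desc_addr)
{
    uint32_t src, dst, len;
    uint8_t tmp[512];

    /* fetch a (hypothetical) descriptor: source, destination, byte count */
    src = ldl_phys(desc_addr);
    dst = ldl_phys(desc_addr + 4);
    len = ldl_phys(desc_addr + 8);
    while (len > 0) {
        uint32_t l = len < sizeof(tmp) ? len : (uint32_t)sizeof(tmp);
        cpu_physical_memory_read(src, tmp, l);
        cpu_physical_memory_write(dst, tmp, l);
        src += l;
        dst += l;
        len -= l;
    }
}
#endif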
/* used for ROM loading: can write to both RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = phys_ram_base + addr1;
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}
/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case: the 64 bit read is split into two 32 bit accesses,
           ordered according to the target endianness */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}
/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* XXX: optimize */
uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}
/* warning: addr must be aligned. The RAM page is not marked dirty
   and the code inside it is not invalidated. This is useful when the
   dirty bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stl_p(ptr, val);
    }
}
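/* Illustrative sketch (not compiled): a target MMU helper is assumed to
   update referenced/changed bits in a guest page table entry with
   stl_phys_notdirty(), so that software dirty-bit tracking built on
   phys_ram_dirty does not see the emulator's own bookkeeping stores.
   The PTE bit layout below is hypothetical. */
#if 0
static void example_set_pte_accessed(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);
    pte |= 0x100;                 /* hypothetical "accessed" bit */
    stl_phys_notdirty(pte_addr, pte);
}
#endif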
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}
/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}
/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* XXX: optimize */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}

#endif
/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
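/* Illustrative sketch (not compiled): this is the entry point a debugger
   stub is assumed to use for memory access commands; it walks the guest
   page tables via cpu_get_phys_page_debug() and therefore works on
   virtual addresses without going through the TLB or faulting. */
#if 0
static int example_debug_read_memory(CPUState *env, target_ulong vaddr,
                                     uint8_t *buf, int len)
{
    /* returns -1 when a page in the range is unmapped */
    return cpu_memory_rw_debug(env, vaddr, buf, len, 0);
}
#endif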
void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "TB count %d\n", nb_tbs);
    cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
#ifdef CONFIG_PROFILER
    {
        int64_t tot;

        tot = dyngen_interm_time + dyngen_code_time;
        cpu_fprintf(f, "JIT cycles %" PRId64 " (%0.3f s at 2.4 GHz)\n",
                    tot, tot / 2.4e9);
        cpu_fprintf(f, "translated TBs %" PRId64 " (aborted=%" PRId64 " %0.1f%%)\n",
                    dyngen_tb_count,
                    dyngen_tb_count1 - dyngen_tb_count,
                    dyngen_tb_count1 ? (double)(dyngen_tb_count1 - dyngen_tb_count) / dyngen_tb_count1 * 100.0 : 0);
        cpu_fprintf(f, "avg ops/TB %0.1f max=%d\n",
                    dyngen_tb_count ? (double)dyngen_op_count / dyngen_tb_count : 0, dyngen_op_count_max);
        cpu_fprintf(f, "old ops/total ops %0.1f%%\n",
                    dyngen_op_count ? (double)dyngen_old_op_count / dyngen_op_count * 100.0 : 0);
        cpu_fprintf(f, "deleted ops/TB %0.2f\n",
                    dyngen_tb_count ?
                    (double)dyngen_tcg_del_op_count / dyngen_tb_count : 0);
        cpu_fprintf(f, "cycles/op %0.1f\n",
                    dyngen_op_count ? (double)tot / dyngen_op_count : 0);
        cpu_fprintf(f, "cycles/in byte %0.1f\n",
                    dyngen_code_in_len ? (double)tot / dyngen_code_in_len : 0);
        cpu_fprintf(f, "cycles/out byte %0.1f\n",
                    dyngen_code_out_len ? (double)tot / dyngen_code_out_len : 0);
        if (tot == 0)
            tot = 1;
        cpu_fprintf(f, "  gen_interm time   %0.1f%%\n",
                    (double)dyngen_interm_time / tot * 100.0);
        cpu_fprintf(f, "  gen_code time     %0.1f%%\n",
                    (double)dyngen_code_time / tot * 100.0);
        cpu_fprintf(f, "cpu_restore count %" PRId64 "\n",
                    dyngen_restore_count);
        cpu_fprintf(f, "  avg cycles        %0.1f\n",
                    dyngen_restore_count ? (double)dyngen_restore_time / dyngen_restore_count : 0);
        {
            extern void dump_op_count(void);
            dump_op_count();
        }
    }
#endif
}
#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif