/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#define WIN32_LEAN_AND_MEAN
#include <sys/types.h>

#include "qemu-common.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#endif
//#define DEBUG_TB_INVALIDATE
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif
/* threshold to flush the translated code buffer */
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - code_gen_max_block_size())

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START        0x00000000
#define MMAP_AREA_END          0xa8000000
#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_SPARC)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#elif defined(TARGET_ALPHA)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#define TARGET_VIRT_ADDR_SPACE_BITS 42
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_I386) && !defined(USE_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#else
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif
TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
uint8_t *code_gen_ptr;

ram_addr_t phys_ram_size;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;
static ram_addr_t phys_ram_alloc_offset = 0;
CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;
typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low 12 bits */
    ram_addr_t phys_offset;
} PhysPageDesc;
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
/* XXX: this is a temporary hack for alpha target.
 *      In the future, this is to be replaced by a multi-level table
 *      to actually be able to handle the complete 64 bits address space.
 */
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
#else
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
#endif

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)
static void io_mem_init(void);

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
PhysPageDesc **l1_phys_map;
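
/* The virtual page table is a two-level structure: l1_map is indexed by
   the top L1_BITS of a target page index and points to lazily allocated
   arrays of L2_SIZE PageDesc entries (see page_find_alloc() below), while
   l1_phys_map plays the same role for physical pages. */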
/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb;
#if defined(CONFIG_SOFTMMU)
static int io_mem_watch;
#endif
char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;
static int log_append = 0;

static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
    CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
    void *opaque[TARGET_PAGE_SIZE][2][4];
} subpage_t;
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;
        DWORD old_protect;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;

        VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
                       PAGE_EXECUTE_READWRITE, &old_protect);
    }
#else
    qemu_real_host_page_size = getpagesize();
    {
        unsigned long start, end;

        start = (unsigned long)code_gen_buffer;
        start &= ~(qemu_real_host_page_size - 1);

        end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
        end += qemu_real_host_page_size - 1;
        end &= ~(qemu_real_host_page_size - 1);

        mprotect((void *)start, end - start,
                 PROT_READ | PROT_WRITE | PROT_EXEC);
    }
#endif

    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
    {
        long long startaddr, endaddr;
        FILE *f;
        int n;

        f = fopen("/proc/self/maps", "r");
        if (f) {
            do {
                n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
                if (n == 2) {
                    startaddr = MIN(startaddr,
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    endaddr = MIN(endaddr,
                                  (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    page_set_flags(TARGET_PAGE_ALIGN(startaddr),
                                   TARGET_PAGE_ALIGN(endaddr),
                                   PAGE_RESERVED);
                }
            } while (!feof(f));
            fclose(f);
        }
    }
#endif
}
static inline PageDesc *page_find_alloc(target_ulong index)
{
    PageDesc **lp, *p;

    lp = &l1_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(target_ulong index)
{
    PageDesc *p;

    p = l1_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;
    PhysPageDesc *pd;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    pd = *lp;
    if (!pd) {
        int i;
        /* allocate if not found */
        if (!alloc)
            return NULL;
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        *lp = pd;
        for (i = 0; i < L2_SIZE; i++)
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
    }
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}
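
/* Note: when TARGET_PHYS_ADDR_SPACE_BITS is larger than 32, l1_phys_map
   gains one extra level of indirection above, so a physical page index is
   resolved with up to three lookups before reaching its PhysPageDesc;
   phys_page_find() never allocates and returns NULL for unmapped pages. */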
#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#endif

void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

    if (!code_gen_ptr) {
        code_gen_ptr = code_gen_buffer;
        page_init();
        io_mem_init();
    }
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = (CPUState **)&(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->nb_watchpoints = 0;
    *penv = env;
}
static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > CODE_GEN_BUFFER_SIZE)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}
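
/* Note: besides explicit callers, tb_flush() is reached when tb_alloc()
   runs out of room in tbs[] or in code_gen_buffer, and when single-step
   mode is toggled or a watchpoint is inserted, since all cached
   translations become stale in those cases. */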
#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;

    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}
void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif
/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}
/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}
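
/* The jmp_first/jmp_next lists tag each TranslationBlock pointer with a
   jump-slot number in its two low bits: 0 and 1 select jmp_next[0] or
   jmp_next[1] of the pointed-to TB, while 2 marks the list head (the TB
   itself). That is why the list walkers mask pointers with ~3 and stop
   when they see a tag of 2. */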
static inline void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    target_phys_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
    if (!p->code_bitmap)
        return;
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
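
/* The bitmap built here is consulted by tb_invalidate_phys_page_fast():
   once a page has accumulated SMC_BITMAP_USE_THRESHOLD write faults, the
   bitmap lets small writes that do not overlap any translated code skip
   the expensive tb_invalidate_phys_page_range() call. */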
#ifdef TARGET_HAS_PRECISE_SMC

static void tb_gen_code(CPUState *env,
                        target_ulong pc, target_ulong cs_base, int flags,
                        int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
}
#endif
/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
                                   int is_cpu_write_access)
{
    int n, current_tb_modified, current_tb_not_found, current_flags;
    CPUState *env = cpu_single_env;
    PageDesc *p;
    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
    target_ulong tb_start, tb_end;
    target_ulong current_pc, current_cs_base;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    current_tb_not_found = is_cpu_write_access;
    current_tb_modified = 0;
    current_tb = NULL; /* avoid warning */
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_write_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_write_pc);
                }
            }
            if (current_tb == tb &&
                !(current_tb->cflags & CF_SINGLE_INSN)) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_write_pc, NULL);
#if defined(TARGET_I386)
                current_flags = env->hflags;
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
                current_cs_base = (target_ulong)env->segs[R_CS].base;
                current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}
/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (loglevel) {
        fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                cpu_single_env->mem_write_vaddr, len,
                cpu_single_env->eip,
                cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_phys_addr_t addr,
                                    unsigned long pc, void *puc)
{
    int n, current_flags, current_tb_modified;
    target_ulong current_pc, current_cs_base;
    PageDesc *p;
    TranslationBlock *tb, *current_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *env = cpu_single_env;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
    current_tb_modified = 0;
    current_tb = NULL;
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            !(current_tb->cflags & CF_SINGLE_INSN)) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
#if defined(TARGET_I386)
            current_flags = env->hflags;
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
            current_cs_base = (target_ulong)env->segs[R_CS].base;
            current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif
/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, target_ulong page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
            page_get_flags(addr);
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}
/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
        (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
}
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}
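
/* Note: the binary search above relies on tbs[] being filled sequentially
   by tb_alloc() while code_gen_ptr only grows, so tc_ptr values appear in
   increasing order until the next tb_flush() resets both. */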
static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}
#if defined(TARGET_HAS_ICE)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    target_ulong pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr)
{
    int i;

    for (i = 0; i < env->nb_watchpoints; i++) {
        if (addr == env->watchpoint[i].vaddr)
            return 0;
    }
    if (env->nb_watchpoints >= MAX_WATCHPOINTS)
        return -1;

    i = env->nb_watchpoints++;
    env->watchpoint[i].vaddr = addr;
    tlb_flush_page(env, addr);
    /* FIXME: This flush is needed because of the hack to make memory ops
       terminate the TB.  It can be removed once the proper IO trap and
       re-execute bits are in.  */
    tb_flush(env);
    return i;
}
/* Remove a watchpoint.  */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
{
    int i;

    for (i = 0; i < env->nb_watchpoints; i++) {
        if (addr == env->watchpoint[i].vaddr) {
            env->nb_watchpoints--;
            env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
            tlb_flush_page(env, addr);
            return 0;
        }
    }
    return -1;
}
/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
   breakpoint is reached */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            return 0;
    }

    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
        return -1;
    env->breakpoints[env->nb_breakpoints++] = pc;

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}
/* remove a breakpoint */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            goto found;
    }
    return -1;
 found:
    env->nb_breakpoints--;
    if (i < env->nb_breakpoints)
        env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
        tb_flush(env);
    }
#endif
}
/* enable or disable low levels log */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
    }
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    cpu_set_log(loglevel);
}
/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    env->interrupt_request |= mask;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    tb = env->current_tb;
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
        resetlock(&interrupt_lock);
    }
}

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}
CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops "
#ifdef TARGET_I386
      "before eflags optimization and "
#endif
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};
static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if(cmp1(p,p1-p,"all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        found:
            mask |= item->mask;
        }
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    if(env->intercept & INTERCEPT_SVM_MASK) {
        /* most probably the virtual machine should not
           be shut down but rather caught by the VMM */
        vmexit(SVM_EXIT_SHUTDOWN, 0);
    }
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (logfile) {
        fprintf(logfile, "qemu: fatal: ");
        vfprintf(logfile, fmt, ap2);
        fprintf(logfile, "\n");
#ifdef TARGET_I386
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        cpu_dump_state(env, logfile, fprintf, 0);
#endif
        fflush(logfile);
        fclose(logfile);
    }
    va_end(ap2);
    va_end(ap);
    abort();
}
CPUState *cpu_copy(CPUState *env)
{
    CPUState *new_env = cpu_init(env->cpu_model_str);
    /* preserve chaining and index */
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
    memcpy(new_env, env, sizeof(CPUState));
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;
    return new_env;
}
#if !defined(CONFIG_USER_ONLY)

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_table[0][i].addr_read = -1;
        env->tlb_table[0][i].addr_write = -1;
        env->tlb_table[0][i].addr_code = -1;
        env->tlb_table[1][i].addr_read = -1;
        env->tlb_table[1][i].addr_write = -1;
        env->tlb_table[1][i].addr_code = -1;
#if (NB_MMU_MODES >= 3)
        env->tlb_table[2][i].addr_read = -1;
        env->tlb_table[2][i].addr_write = -1;
        env->tlb_table[2][i].addr_code = -1;
#if (NB_MMU_MODES == 4)
        env->tlb_table[3][i].addr_read = -1;
        env->tlb_table[3][i].addr_write = -1;
        env->tlb_table[3][i].addr_code = -1;
#endif
#endif
    }

    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

#if !defined(CONFIG_SOFTMMU)
    munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush(env, flush_global);
    }
#endif
    tlb_flush_count++;
}
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        tlb_entry->addr_read = -1;
        tlb_entry->addr_write = -1;
        tlb_entry->addr_code = -1;
    }
}
*env
, target_ulong addr
)
1431 TranslationBlock
*tb
;
1433 #if defined(DEBUG_TLB)
1434 printf("tlb_flush_page: " TARGET_FMT_lx
"\n", addr
);
1436 /* must reset current TB so that interrupts cannot modify the
1437 links while we are modifying them */
1438 env
->current_tb
= NULL
;
1440 addr
&= TARGET_PAGE_MASK
;
1441 i
= (addr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
1442 tlb_flush_entry(&env
->tlb_table
[0][i
], addr
);
1443 tlb_flush_entry(&env
->tlb_table
[1][i
], addr
);
1444 #if (NB_MMU_MODES >= 3)
1445 tlb_flush_entry(&env
->tlb_table
[2][i
], addr
);
1446 #if (NB_MMU_MODES == 4)
1447 tlb_flush_entry(&env
->tlb_table
[3][i
], addr
);
1451 /* Discard jump cache entries for any tb which might potentially
1452 overlap the flushed page. */
1453 i
= tb_jmp_cache_hash_page(addr
- TARGET_PAGE_SIZE
);
1454 memset (&env
->tb_jmp_cache
[i
], 0, TB_JMP_PAGE_SIZE
* sizeof(tb
));
1456 i
= tb_jmp_cache_hash_page(addr
);
1457 memset (&env
->tb_jmp_cache
[i
], 0, TB_JMP_PAGE_SIZE
* sizeof(tb
));
1459 #if !defined(CONFIG_SOFTMMU)
1460 if (addr
< MMAP_AREA_END
)
1461 munmap((void *)addr
, TARGET_PAGE_SIZE
);
1464 if (env
->kqemu_enabled
) {
1465 kqemu_flush_page(env
, addr
);
/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
}
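
/* Clearing CODE_DIRTY_FLAG in tlb_protect_code() forces subsequent writes
   to the page through the notdirty/IO slow path, where
   tb_invalidate_phys_page_fast() can catch self-modifying code;
   tlb_unprotect_code_phys() sets the flag back once no TB remains in the
   page, re-enabling direct RAM writes. */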
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
        }
    }
}
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i, mask, len;
    uint8_t *p;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    len = length >> TARGET_PAGE_BITS;
#ifdef USE_KQEMU
    /* XXX: should not depend on cpu context */
    env = first_cpu;
    if (env->kqemu_enabled) {
        ram_addr_t addr;
        addr = start;
        for(i = 0; i < len; i++) {
            kqemu_set_notdirty(env, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }
#endif
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    for(i = 0; i < len; i++)
        p[i] &= mask;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = start + (unsigned long)phys_ram_base;
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
#if (NB_MMU_MODES >= 3)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
#if (NB_MMU_MODES == 4)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
#endif
#endif
    }
#if !defined(CONFIG_SOFTMMU)
    /* XXX: this is expensive */
    {
        VirtPageDesc *p;
        int j;
        target_ulong addr;

        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                addr = i << (TARGET_PAGE_BITS + L2_BITS);
                for(j = 0; j < L2_SIZE; j++) {
                    if (p->valid_tag == virt_valid_tag &&
                        p->phys_addr >= start && p->phys_addr < end &&
                        (p->prot & PROT_WRITE)) {
                        if (addr < MMAP_AREA_END) {
                            mprotect((void *)addr, TARGET_PAGE_SIZE,
                                     p->prot & ~PROT_WRITE);
                        }
                    }
                    addr += TARGET_PAGE_SIZE;
                    p++;
                }
            }
        }
    }
#endif
}
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
            tlb_entry->addend - (unsigned long)phys_ram_base;
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
        }
    }
}
/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
{
    int i;
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[0][i]);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[1][i]);
#if (NB_MMU_MODES >= 3)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[2][i]);
#if (NB_MMU_MODES == 4)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[3][i]);
#endif
#endif
}
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
                                  unsigned long start)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if (addr == start) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
        }
    }
}
/* update the TLB corresponding to virtual page vaddr and phys addr
   addr so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
    int i;

    addr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_set_dirty1(&env->tlb_table[0][i], addr);
    tlb_set_dirty1(&env->tlb_table[1][i], addr);
#if (NB_MMU_MODES >= 3)
    tlb_set_dirty1(&env->tlb_table[2][i], addr);
#if (NB_MMU_MODES == 4)
    tlb_set_dirty1(&env->tlb_table[3][i], addr);
#endif
#endif
}
/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_phys_addr_t addend;
    int ret;
    CPUTLBEntry *te;
    int i;

    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
#endif

    ret = 0;
#if !defined(CONFIG_SOFTMMU)
    if (is_softmmu)
#endif
    {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
            /* IO memory case */
            address = vaddr | pd;
            addend = paddr;
        } else {
            /* standard memory */
            address = vaddr;
            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
        }

        /* Make accesses to pages with watchpoints go via the
           watchpoint trap routines.  */
        for (i = 0; i < env->nb_watchpoints; i++) {
            if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
                if (address & ~TARGET_PAGE_MASK) {
                    env->watchpoint[i].addend = 0;
                    address = vaddr | io_mem_watch;
                } else {
                    env->watchpoint[i].addend = pd - paddr +
                        (unsigned long) phys_ram_base;
                    /* TODO: Figure out how to make read watchpoints coexist
                       with code.  */
                    pd = (pd & TARGET_PAGE_MASK) | io_mem_watch | IO_MEM_ROMD;
                }
            }
        }

        index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        addend -= vaddr;
        te = &env->tlb_table[mmu_idx][index];
        te->addend = addend;
        if (prot & PAGE_READ) {
            te->addr_read = address;
        } else {
            te->addr_read = -1;
        }
        if (prot & PAGE_EXEC) {
            te->addr_code = address;
        } else {
            te->addr_code = -1;
        }
        if (prot & PAGE_WRITE) {
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
                (pd & IO_MEM_ROMD)) {
                /* write access calls the I/O callback */
                te->addr_write = vaddr |
                    (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
            } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                       !cpu_physical_memory_is_dirty(pd)) {
                te->addr_write = vaddr | IO_MEM_NOTDIRTY;
            } else {
                te->addr_write = address;
            }
        } else {
            te->addr_write = -1;
        }
    }
#if !defined(CONFIG_SOFTMMU)
    else {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO access: no mapping is done as it will be handled by the
               soft MMU */
            if (!(env->hflags & HF_SOFTMMU_MASK))
                ret = 2;
        } else {
            void *map_addr;

            if (vaddr >= MMAP_AREA_END) {
                ret = 2;
            } else {
                if (prot & PROT_WRITE) {
                    if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
#if defined(TARGET_HAS_SMC) || 1
                        first_tb ||
#endif
                        ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                         !cpu_physical_memory_is_dirty(pd))) {
                        /* ROM: we do as if code was inside */
                        /* if code is present, we only map as read only and save the
                           original mapping */
                        VirtPageDesc *vp;

                        vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
                        vp->phys_addr = pd;
                        vp->prot = prot;
                        vp->valid_tag = virt_valid_tag;
                        prot &= ~PAGE_WRITE;
                    }
                }
                map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
                                MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
                if (map_addr == MAP_FAILED) {
                    cpu_abort(env, "mmap failed when mapped physical address 0x%08x to virtual address 0x%08x\n",
                              paddr, vaddr);
                }
            }
        }
    }
#endif
    return ret;
}
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    VirtPageDesc *vp;

#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x\n", addr);
#endif
    addr &= TARGET_PAGE_MASK;

    /* if it is not mapped, no need to worry here */
    if (addr >= MMAP_AREA_END)
        return 0;
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (!vp)
        return 0;
    /* NOTE: in this case, validate_tag is _not_ tested as it
       validates only the code TLB */
    if (vp->valid_tag != virt_valid_tag)
        return 0;
    if (!(vp->prot & PAGE_WRITE))
        return 0;
#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
           addr, vp->phys_addr, vp->prot);
#endif
    if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
        cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
                  (unsigned long)addr, vp->prot);
    /* set the dirty bit */
    phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
    /* flush the code inside */
    tb_invalidate_phys_page(vp->phys_addr, pc, puc);
    return 1;
#else
    return 0;
#endif
}
#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
{
    return 0;
}
1834 void page_dump(FILE *f
)
1836 unsigned long start
, end
;
1837 int i
, j
, prot
, prot1
;
1840 fprintf(f
, "%-8s %-8s %-8s %s\n",
1841 "start", "end", "size", "prot");
1845 for(i
= 0; i
<= L1_SIZE
; i
++) {
1850 for(j
= 0;j
< L2_SIZE
; j
++) {
1855 if (prot1
!= prot
) {
1856 end
= (i
<< (32 - L1_BITS
)) | (j
<< TARGET_PAGE_BITS
);
1858 fprintf(f
, "%08lx-%08lx %08lx %c%c%c\n",
1859 start
, end
, end
- start
,
1860 prot
& PAGE_READ
? 'r' : '-',
1861 prot
& PAGE_WRITE
? 'w' : '-',
1862 prot
& PAGE_EXEC
? 'x' : '-');
int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}
/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is positioned automatically
   depending on PAGE_WRITE */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    PageDesc *p;
    target_ulong addr;

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    spin_lock(&tb_lock);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* if the write protection is set, then we invalidate the code
           inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
    spin_unlock(&tb_lock);
}
int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
    start = start & TARGET_PAGE_MASK;

    if (end < start)
        /* we've wrapped around */
        return -1;
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p)
            return -1;
        if( !(p->flags & PAGE_VALID) )
            return -1;

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
            return -1;
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG))
                return -1;
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL))
                    return -1;
            }
            return 0;
        }
    }
    return 0;
}
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    target_ulong host_start, host_end, addr;

    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1)
        return 0;
    host_end = host_start + qemu_host_page_size;
    p = p1;
    prot = 0;
    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)g2h(host_start), qemu_host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            return 1;
        }
    }
    return 0;
}
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory);
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory);
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
                      need_subpage)                                     \
    do {                                                                \
        if (addr > start_addr)                                          \
            start_addr2 = 0;                                            \
        else {                                                          \
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
            if (start_addr2 > 0)                                        \
                need_subpage = 1;                                       \
        }                                                               \
                                                                        \
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
        else {                                                          \
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
                need_subpage = 1;                                       \
        }                                                               \
    } while (0)
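
/* Sub-page handling: when a registration does not cover a whole target
   page (CHECK_SUBPAGE sets need_subpage), the page is backed by a
   subpage_t whose per-offset tables dispatch each access to the memory
   region that owns that part of the page. */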
/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                  ram_addr_t size,
                                  ram_addr_t phys_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;
    ram_addr_t orig_size = size;
    void *subpage;

    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            ram_addr_t orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                          need_subpage);

            if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory);
                } else {
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                                            >> IO_MEM_SHIFT];
                }
                subpage_register(subpage, start_addr2, end_addr2, phys_offset);
            } else {
                p->phys_offset = phys_offset;
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                    (phys_offset & IO_MEM_ROMD))
                    phys_offset += TARGET_PAGE_SIZE;
            }
        } else {
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                (phys_offset & IO_MEM_ROMD))
                phys_offset += TARGET_PAGE_SIZE;
            else {
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);

                if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, IO_MEM_UNASSIGNED);
                    subpage_register(subpage, start_addr2, end_addr2,
                                     phys_offset);
                }
            }
        }
    }

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}
/* XXX: temporary until new memory mapping API */
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
{
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return IO_MEM_UNASSIGNED;
    return p->phys_offset;
}
/* XXX: better than nothing */
ram_addr_t qemu_ram_alloc(ram_addr_t size)
{
    ram_addr_t addr;
    if ((phys_ram_alloc_offset + size) > phys_ram_size) {
        fprintf(stderr, "Not enough memory (requested_size = %lu, max memory = %ld)\n",
                size, phys_ram_size);
        abort();
    }
    addr = phys_ram_alloc_offset;
    phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
    return addr;
}

void qemu_ram_free(ram_addr_t addr)
{
}
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#ifdef TARGET_SPARC
    do_unassigned_access(addr, 0, 0, 0);
#elif TARGET_CRIS
    do_unassigned_access(addr, 0, 0, 0);
#endif
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#ifdef TARGET_SPARC
    do_unassigned_access(addr, 1, 0, 0);
#elif TARGET_CRIS
    do_unassigned_access(addr, 1, 0, 0);
#endif
}

static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readb,
    unassigned_mem_readb,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writeb,
    unassigned_mem_writeb,
};
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stb_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}
static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stw_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}
static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stl_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}
static CPUReadMemoryFunc *error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};
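
/* The notdirty handlers above implement the write side of dirty tracking:
   the first write to a clean RAM page invalidates any TBs it overlaps,
   marks the page dirty, and, once no translated code is left
   (dirty_flags == 0xff), flips the TLB entry back to plain RAM so later
   writes take the fast path again. */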
#if defined(CONFIG_SOFTMMU)
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    return ldub_phys(addr);
}

static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    return lduw_phys(addr);
}

static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    return ldl_phys(addr);
}
/* Generate a debug exception if a watchpoint has been hit.
   Returns the real physical address of the access.  addr will be a host
   address in case of a RAM location.  */
static target_ulong check_watchpoint(target_phys_addr_t addr)
{
    CPUState *env = cpu_single_env;
    target_ulong watch;
    target_ulong retaddr;
    int i;

    retaddr = addr;
    for (i = 0; i < env->nb_watchpoints; i++) {
        watch = env->watchpoint[i].vaddr;
        if (((env->mem_write_vaddr ^ watch) & TARGET_PAGE_MASK) == 0) {
            retaddr = addr - env->watchpoint[i].addend;
            if (((addr ^ watch) & ~TARGET_PAGE_MASK) == 0) {
                cpu_single_env->watchpoint_hit = i + 1;
                cpu_interrupt(cpu_single_env, CPU_INTERRUPT_DEBUG);
                break;
            }
        }
    }
    return retaddr;
}
static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    addr = check_watchpoint(addr);
    stb_phys(addr, val);
}

static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    addr = check_watchpoint(addr);
    stw_phys(addr, val);
}

static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    addr = check_watchpoint(addr);
    stl_phys(addr, val);
}
static CPUReadMemoryFunc *watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};

static CPUWriteMemoryFunc *watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};
#endif
*mmio
, target_phys_addr_t addr
,
2329 idx
= SUBPAGE_IDX(addr
- mmio
->base
);
2330 #if defined(DEBUG_SUBPAGE)
2331 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
" idx %d\n", __func__
,
2332 mmio
, len
, addr
, idx
);
2334 ret
= (**mmio
->mem_read
[idx
][len
])(mmio
->opaque
[idx
][0][len
], addr
);
2339 static inline void subpage_writelen (subpage_t
*mmio
, target_phys_addr_t addr
,
2340 uint32_t value
, unsigned int len
)
2344 idx
= SUBPAGE_IDX(addr
- mmio
->base
);
2345 #if defined(DEBUG_SUBPAGE)
2346 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
" idx %d value %08x\n", __func__
,
2347 mmio
, len
, addr
, idx
, value
);
2349 (**mmio
->mem_write
[idx
][len
])(mmio
->opaque
[idx
][1][len
], addr
, value
);
2352 static uint32_t subpage_readb (void *opaque
, target_phys_addr_t addr
)
2354 #if defined(DEBUG_SUBPAGE)
2355 printf("%s: addr " TARGET_FMT_plx
"\n", __func__
, addr
);
2358 return subpage_readlen(opaque
, addr
, 0);
2361 static void subpage_writeb (void *opaque
, target_phys_addr_t addr
,
2364 #if defined(DEBUG_SUBPAGE)
2365 printf("%s: addr " TARGET_FMT_plx
" val %08x\n", __func__
, addr
, value
);
2367 subpage_writelen(opaque
, addr
, value
, 0);
2370 static uint32_t subpage_readw (void *opaque
, target_phys_addr_t addr
)
2372 #if defined(DEBUG_SUBPAGE)
2373 printf("%s: addr " TARGET_FMT_plx
"\n", __func__
, addr
);
2376 return subpage_readlen(opaque
, addr
, 1);
2379 static void subpage_writew (void *opaque
, target_phys_addr_t addr
,
2382 #if defined(DEBUG_SUBPAGE)
2383 printf("%s: addr " TARGET_FMT_plx
" val %08x\n", __func__
, addr
, value
);
2385 subpage_writelen(opaque
, addr
, value
, 1);
2388 static uint32_t subpage_readl (void *opaque
, target_phys_addr_t addr
)
2390 #if defined(DEBUG_SUBPAGE)
2391 printf("%s: addr " TARGET_FMT_plx
"\n", __func__
, addr
);
2394 return subpage_readlen(opaque
, addr
, 2);
2397 static void subpage_writel (void *opaque
,
2398 target_phys_addr_t addr
, uint32_t value
)
2400 #if defined(DEBUG_SUBPAGE)
2401 printf("%s: addr " TARGET_FMT_plx
" val %08x\n", __func__
, addr
, value
);
2403 subpage_writelen(opaque
, addr
, value
, 2);
static CPUReadMemoryFunc *subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};

static CPUWriteMemoryFunc *subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory)
{
    int idx, eidx;
    unsigned int i;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    memory >>= IO_MEM_SHIFT;
    for (; idx <= eidx; idx++) {
        for (i = 0; i < 4; i++) {
            if (io_mem_read[memory][i]) {
                mmio->mem_read[idx][i] = &io_mem_read[memory][i];
                mmio->opaque[idx][0][i] = io_mem_opaque[memory];
            }
            if (io_mem_write[memory][i]) {
                mmio->mem_write[idx][i] = &io_mem_write[memory][i];
                mmio->opaque[idx][1][i] = io_mem_opaque[memory];
            }
        }
    }

    return 0;
}
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = qemu_mallocz(sizeof(subpage_t));
    if (mmio != NULL) {
        mmio->base = base;
        subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
#if defined(DEBUG_SUBPAGE)
        printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
               mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
        *phys = subpage_memory | IO_MEM_SUBPAGE;
        subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
    }

    return mmio;
}
static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
    io_mem_nb = 5;

#if defined(CONFIG_SOFTMMU)
    io_mem_watch = cpu_register_io_memory(-1, watch_mem_read,
                                          watch_mem_write, NULL);
#endif
    /* alloc dirty bits array */
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
}
/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). Functions can be omitted with a NULL function pointer. The
   registered functions may be modified dynamically later.
   If io_index is non zero, the corresponding io zone is
   modified. If it is zero, a new io zone is allocated. The return
   value can be used with cpu_register_physical_memory(). (-1) is
   returned if error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i, subwidth = 0;

    if (io_index <= 0) {
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0;i < 3; i++) {
        if (!mem_read[i] || !mem_write[i])
            subwidth = IO_MEM_SUBWIDTH;
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return (io_index << IO_MEM_SHIFT) | subwidth;
}
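/* Illustrative sketch, not part of the original file: the comment above
   describes the calling convention, so this fragment shows a device model
   registering a fresh I/O zone.  The dummy_dev_* callbacks and
   dummy_dev_map() are hypothetical; only cpu_register_io_memory() and
   cpu_register_physical_memory() are taken from this file's API. */
#if 0
static uint32_t dummy_dev_readl(void *opaque, target_phys_addr_t addr)
{
    return 0;                        /* device register read via opaque */
}

static void dummy_dev_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    /* store val into device state reachable through opaque */
}

static CPUReadMemoryFunc *dummy_dev_read[3] = {
    NULL, NULL, dummy_dev_readl,     /* dword access only */
};

static CPUWriteMemoryFunc *dummy_dev_write[3] = {
    NULL, NULL, dummy_dev_writel,
};

static void dummy_dev_map(target_phys_addr_t base, void *dev_state)
{
    int iomemtype;

    /* io_index == 0 asks for a new io zone; the return value (which may
       carry IO_MEM_SUBWIDTH because the byte/word handlers are NULL) is
       then handed to cpu_register_physical_memory(). */
    iomemtype = cpu_register_io_memory(0, dummy_dev_read, dummy_dev_write,
                                       dev_state);
    cpu_register_physical_memory(base, TARGET_PAGE_SIZE, iomemtype);
}
#endif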
CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
{
    return io_mem_write[io_index >> IO_MEM_SHIFT];
}

CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
{
    return io_mem_read[io_index >> IO_MEM_SHIFT];
}
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
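/* Illustrative sketch, not part of the original file: typical use of the
   slow path above.  cpu_physical_memory_read()/write() are the thin
   wrappers around cpu_physical_memory_rw() already used by ldub_phys() and
   friends below; the addresses and buffer here are made up for the
   example. */
#if 0
static void copy_guest_phys_example(target_phys_addr_t src,
                                    target_phys_addr_t dst)
{
    uint8_t tmp[16];

    cpu_physical_memory_read(src, tmp, sizeof(tmp));   /* RAM or MMIO reads */
    cpu_physical_memory_write(dst, tmp, sizeof(tmp));  /* marks RAM dirty and
                                                          invalidates TBs */
}
#endif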
/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = phys_ram_base + addr1;
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
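/* Illustrative sketch, not part of the original file: the routine above is
   the one ROM loaders are expected to use, since it also copies into the
   backing memory of IO_MEM_ROM/ROMD pages that the normal write path would
   skip.  The buffer, size and base address below are hypothetical. */
#if 0
static void load_bios_example(const uint8_t *bios_data, int bios_size,
                              target_phys_addr_t bios_base)
{
    /* copies into the backing RAM of both RAM and ROM(D) pages */
    cpu_physical_memory_write_rom(bios_base, bios_data, bios_size);
}
#endif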
/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}
/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}
/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stl_p(ptr, val);
    }
}
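/* Illustrative sketch, not part of the original file: the comment above
   says this variant exists for dirty-bit based PTE tracking.  A target MMU
   helper that sets an accessed/dirty bit inside a guest page table entry
   would use stl_phys_notdirty() so its own bookkeeping write does not
   disturb the host dirty bitmap or invalidate translated code.  The
   pte_addr and the 0x20 bit below are hypothetical placeholders for a
   guest PTE format. */
#if 0
static void mark_pte_accessed_example(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);

    /* 0x20 standing in for an "accessed" bit in the guest PTE layout */
    stl_phys_notdirty(pte_addr, pte | 0x20);
}
#endif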
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}
/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}

#endif
/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
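/* Illustrative sketch, not part of the original file: this is the path a
   debugger front end (e.g. the gdb stub) is expected to take to read guest
   *virtual* memory, letting cpu_get_phys_page_debug() do the translation
   page by page.  The address and length are made up for the example. */
#if 0
static int debugger_read_example(CPUState *env, target_ulong vaddr,
                                 uint8_t *out, int len)
{
    /* is_write == 0: read guest memory through the debug translation */
    return cpu_memory_rw_debug(env, vaddr, out, len, 0);
}
#endif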
void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "TB count %d\n", nb_tbs);
    cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
#ifdef CONFIG_PROFILER
    {
        int64_t tot;
        tot = dyngen_interm_time + dyngen_code_time;
        cpu_fprintf(f, "JIT cycles %" PRId64 " (%0.3f s at 2.4 GHz)\n",
                    tot, tot / 2.4e9);
        cpu_fprintf(f, "translated TBs %" PRId64 " (aborted=%" PRId64 " %0.1f%%)\n",
                    dyngen_tb_count,
                    dyngen_tb_count1 - dyngen_tb_count,
                    dyngen_tb_count1 ? (double)(dyngen_tb_count1 - dyngen_tb_count) / dyngen_tb_count1 * 100.0 : 0);
        cpu_fprintf(f, "avg ops/TB %0.1f max=%d\n",
                    dyngen_tb_count ? (double)dyngen_op_count / dyngen_tb_count : 0, dyngen_op_count_max);
        cpu_fprintf(f, "old ops/total ops %0.1f%%\n",
                    dyngen_op_count ? (double)dyngen_old_op_count / dyngen_op_count * 100.0 : 0);
        cpu_fprintf(f, "deleted ops/TB %0.2f\n",
                    dyngen_tb_count ?
                    (double)dyngen_tcg_del_op_count / dyngen_tb_count : 0);
        cpu_fprintf(f, "cycles/op %0.1f\n",
                    dyngen_op_count ? (double)tot / dyngen_op_count : 0);
        cpu_fprintf(f, "cycles/in byte %0.1f\n",
                    dyngen_code_in_len ? (double)tot / dyngen_code_in_len : 0);
        cpu_fprintf(f, "cycles/out byte %0.1f\n",
                    dyngen_code_out_len ? (double)tot / dyngen_code_out_len : 0);
        if (tot == 0)
            tot = 1;
        cpu_fprintf(f, "  gen_interm time   %0.1f%%\n",
                    (double)dyngen_interm_time / tot * 100.0);
        cpu_fprintf(f, "  gen_code time     %0.1f%%\n",
                    (double)dyngen_code_time / tot * 100.0);
        cpu_fprintf(f, "cpu_restore count %" PRId64 "\n",
                    dyngen_restore_count);
        cpu_fprintf(f, "  avg cycles        %0.1f\n",
                    dyngen_restore_count ? (double)dyngen_restore_time / dyngen_restore_count : 0);
        {
            extern void dump_op_count(void);
            dump_op_count();
        }
    }
#endif
}
#if !defined(CONFIG_USER_ONLY)
#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS
#define SHIFT 0
#include "softmmu_template.h"
#define SHIFT 1
#include "softmmu_template.h"
#define SHIFT 2
#include "softmmu_template.h"
#define SHIFT 3
#include "softmmu_template.h"
#undef env
#endif