2 * virtual page mapping and translated block handling
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 #define WIN32_LEAN_AND_MEAN
25 #include <sys/types.h>
38 #include "qemu-common.h"
39 #if defined(CONFIG_USER_ONLY)
43 //#define DEBUG_TB_INVALIDATE
46 //#define DEBUG_UNASSIGNED
48 /* make various TB consistency checks */
49 //#define DEBUG_TB_CHECK
50 //#define DEBUG_TLB_CHECK
52 //#define DEBUG_IOPORT
53 //#define DEBUG_SUBPAGE
55 #if !defined(CONFIG_USER_ONLY)
56 /* TB consistency checks only implemented for usermode emulation. */
60 /* threshold to flush the translated code buffer */
61 #define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - code_gen_max_block_size())
63 #define SMC_BITMAP_USE_THRESHOLD 10
65 #define MMAP_AREA_START 0x00000000
66 #define MMAP_AREA_END 0xa8000000
68 #if defined(TARGET_SPARC64)
69 #define TARGET_PHYS_ADDR_SPACE_BITS 41
70 #elif defined(TARGET_SPARC)
71 #define TARGET_PHYS_ADDR_SPACE_BITS 36
72 #elif defined(TARGET_ALPHA)
73 #define TARGET_PHYS_ADDR_SPACE_BITS 42
74 #define TARGET_VIRT_ADDR_SPACE_BITS 42
75 #elif defined(TARGET_PPC64)
76 #define TARGET_PHYS_ADDR_SPACE_BITS 42
78 /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
79 #define TARGET_PHYS_ADDR_SPACE_BITS 32
82 TranslationBlock tbs
[CODE_GEN_MAX_BLOCKS
];
83 TranslationBlock
*tb_phys_hash
[CODE_GEN_PHYS_HASH_SIZE
];
85 /* any access to the tbs or the page table must use this lock */
86 spinlock_t tb_lock
= SPIN_LOCK_UNLOCKED
;
88 uint8_t code_gen_buffer
[CODE_GEN_BUFFER_SIZE
] __attribute__((aligned (32)));
89 uint8_t *code_gen_ptr
;
93 uint8_t *phys_ram_base
;
94 uint8_t *phys_ram_dirty
;
95 static ram_addr_t phys_ram_alloc_offset
= 0;
98 /* current CPU in the current thread. It is only valid inside
100 CPUState
*cpu_single_env
;
102 typedef struct PageDesc
{
103 /* list of TBs intersecting this ram page */
104 TranslationBlock
*first_tb
;
105 /* in order to optimize self modifying code, we count the number
106 of lookups we do to a given page to use a bitmap */
107 unsigned int code_write_count
;
108 uint8_t *code_bitmap
;
109 #if defined(CONFIG_USER_ONLY)
114 typedef struct PhysPageDesc
{
115 /* offset in host memory of the page + io_index in the low 12 bits */
116 uint32_t phys_offset
;
120 #if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
121 /* XXX: this is a temporary hack for alpha target.
122 * In the future, this is to be replaced by a multi-level table
123 * to actually be able to handle the complete 64 bits address space.
125 #define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
127 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
130 #define L1_SIZE (1 << L1_BITS)
131 #define L2_SIZE (1 << L2_BITS)
133 static void io_mem_init(void);
135 unsigned long qemu_real_host_page_size
;
136 unsigned long qemu_host_page_bits
;
137 unsigned long qemu_host_page_size
;
138 unsigned long qemu_host_page_mask
;
140 /* XXX: for system emulation, it could just be an array */
141 static PageDesc
*l1_map
[L1_SIZE
];
142 PhysPageDesc
**l1_phys_map
;
144 /* io memory support */
145 CPUWriteMemoryFunc
*io_mem_write
[IO_MEM_NB_ENTRIES
][4];
146 CPUReadMemoryFunc
*io_mem_read
[IO_MEM_NB_ENTRIES
][4];
147 void *io_mem_opaque
[IO_MEM_NB_ENTRIES
];
148 static int io_mem_nb
;
149 #if defined(CONFIG_SOFTMMU)
150 static int io_mem_watch
;
154 char *logfilename
= "/tmp/qemu.log";
157 static int log_append
= 0;
160 static int tlb_flush_count
;
161 static int tb_flush_count
;
162 static int tb_phys_invalidate_count
;
164 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
165 typedef struct subpage_t
{
166 target_phys_addr_t base
;
167 CPUReadMemoryFunc
**mem_read
[TARGET_PAGE_SIZE
][4];
168 CPUWriteMemoryFunc
**mem_write
[TARGET_PAGE_SIZE
][4];
169 void *opaque
[TARGET_PAGE_SIZE
][2][4];
172 static void page_init(void)
174 /* NOTE: we can always suppose that qemu_host_page_size >=
178 SYSTEM_INFO system_info
;
181 GetSystemInfo(&system_info
);
182 qemu_real_host_page_size
= system_info
.dwPageSize
;
184 VirtualProtect(code_gen_buffer
, sizeof(code_gen_buffer
),
185 PAGE_EXECUTE_READWRITE
, &old_protect
);
188 qemu_real_host_page_size
= getpagesize();
190 unsigned long start
, end
;
192 start
= (unsigned long)code_gen_buffer
;
193 start
&= ~(qemu_real_host_page_size
- 1);
195 end
= (unsigned long)code_gen_buffer
+ sizeof(code_gen_buffer
);
196 end
+= qemu_real_host_page_size
- 1;
197 end
&= ~(qemu_real_host_page_size
- 1);
199 mprotect((void *)start
, end
- start
,
200 PROT_READ
| PROT_WRITE
| PROT_EXEC
);
204 if (qemu_host_page_size
== 0)
205 qemu_host_page_size
= qemu_real_host_page_size
;
206 if (qemu_host_page_size
< TARGET_PAGE_SIZE
)
207 qemu_host_page_size
= TARGET_PAGE_SIZE
;
208 qemu_host_page_bits
= 0;
209 while ((1 << qemu_host_page_bits
) < qemu_host_page_size
)
210 qemu_host_page_bits
++;
211 qemu_host_page_mask
= ~(qemu_host_page_size
- 1);
212 l1_phys_map
= qemu_vmalloc(L1_SIZE
* sizeof(void *));
213 memset(l1_phys_map
, 0, L1_SIZE
* sizeof(void *));
215 #if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
217 long long startaddr
, endaddr
;
221 f
= fopen("/proc/self/maps", "r");
224 n
= fscanf (f
, "%llx-%llx %*[^\n]\n", &startaddr
, &endaddr
);
226 page_set_flags(TARGET_PAGE_ALIGN(startaddr
),
227 TARGET_PAGE_ALIGN(endaddr
),
237 static inline PageDesc
*page_find_alloc(unsigned int index
)
241 lp
= &l1_map
[index
>> L2_BITS
];
244 /* allocate if not found */
245 p
= qemu_malloc(sizeof(PageDesc
) * L2_SIZE
);
246 memset(p
, 0, sizeof(PageDesc
) * L2_SIZE
);
249 return p
+ (index
& (L2_SIZE
- 1));
252 static inline PageDesc
*page_find(unsigned int index
)
256 p
= l1_map
[index
>> L2_BITS
];
259 return p
+ (index
& (L2_SIZE
- 1));
262 static PhysPageDesc
*phys_page_find_alloc(target_phys_addr_t index
, int alloc
)
267 p
= (void **)l1_phys_map
;
268 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
270 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
271 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
273 lp
= p
+ ((index
>> (L1_BITS
+ L2_BITS
)) & (L1_SIZE
- 1));
276 /* allocate if not found */
279 p
= qemu_vmalloc(sizeof(void *) * L1_SIZE
);
280 memset(p
, 0, sizeof(void *) * L1_SIZE
);
284 lp
= p
+ ((index
>> L2_BITS
) & (L1_SIZE
- 1));
288 /* allocate if not found */
291 pd
= qemu_vmalloc(sizeof(PhysPageDesc
) * L2_SIZE
);
293 for (i
= 0; i
< L2_SIZE
; i
++)
294 pd
[i
].phys_offset
= IO_MEM_UNASSIGNED
;
296 return ((PhysPageDesc
*)pd
) + (index
& (L2_SIZE
- 1));
299 static inline PhysPageDesc
*phys_page_find(target_phys_addr_t index
)
301 return phys_page_find_alloc(index
, 0);
304 #if !defined(CONFIG_USER_ONLY)
305 static void tlb_protect_code(ram_addr_t ram_addr
);
306 static void tlb_unprotect_code_phys(CPUState
*env
, ram_addr_t ram_addr
,
310 void cpu_exec_init(CPUState
*env
)
317 code_gen_ptr
= code_gen_buffer
;
321 env
->next_cpu
= NULL
;
324 while (*penv
!= NULL
) {
325 penv
= (CPUState
**)&(*penv
)->next_cpu
;
328 env
->cpu_index
= cpu_index
;
329 env
->nb_watchpoints
= 0;
333 static inline void invalidate_page_bitmap(PageDesc
*p
)
335 if (p
->code_bitmap
) {
336 qemu_free(p
->code_bitmap
);
337 p
->code_bitmap
= NULL
;
339 p
->code_write_count
= 0;
342 /* set to NULL all the 'first_tb' fields in all PageDescs */
343 static void page_flush_tb(void)
348 for(i
= 0; i
< L1_SIZE
; i
++) {
351 for(j
= 0; j
< L2_SIZE
; j
++) {
353 invalidate_page_bitmap(p
);
360 /* flush all the translation blocks */
361 /* XXX: tb_flush is currently not thread safe */
362 void tb_flush(CPUState
*env1
)
365 #if defined(DEBUG_FLUSH)
366 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
367 (unsigned long)(code_gen_ptr
- code_gen_buffer
),
369 ((unsigned long)(code_gen_ptr
- code_gen_buffer
)) / nb_tbs
: 0);
371 if ((unsigned long)(code_gen_ptr
- code_gen_buffer
) > CODE_GEN_BUFFER_SIZE
)
372 cpu_abort(env1
, "Internal error: code buffer overflow\n");
376 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
377 memset (env
->tb_jmp_cache
, 0, TB_JMP_CACHE_SIZE
* sizeof (void *));
380 memset (tb_phys_hash
, 0, CODE_GEN_PHYS_HASH_SIZE
* sizeof (void *));
383 code_gen_ptr
= code_gen_buffer
;
384 /* XXX: flush processor icache at this point if cache flush is
389 #ifdef DEBUG_TB_CHECK
391 static void tb_invalidate_check(target_ulong address
)
393 TranslationBlock
*tb
;
395 address
&= TARGET_PAGE_MASK
;
396 for(i
= 0;i
< CODE_GEN_PHYS_HASH_SIZE
; i
++) {
397 for(tb
= tb_phys_hash
[i
]; tb
!= NULL
; tb
= tb
->phys_hash_next
) {
398 if (!(address
+ TARGET_PAGE_SIZE
<= tb
->pc
||
399 address
>= tb
->pc
+ tb
->size
)) {
400 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
401 address
, (long)tb
->pc
, tb
->size
);
407 /* verify that all the pages have correct rights for code */
408 static void tb_page_check(void)
410 TranslationBlock
*tb
;
411 int i
, flags1
, flags2
;
413 for(i
= 0;i
< CODE_GEN_PHYS_HASH_SIZE
; i
++) {
414 for(tb
= tb_phys_hash
[i
]; tb
!= NULL
; tb
= tb
->phys_hash_next
) {
415 flags1
= page_get_flags(tb
->pc
);
416 flags2
= page_get_flags(tb
->pc
+ tb
->size
- 1);
417 if ((flags1
& PAGE_WRITE
) || (flags2
& PAGE_WRITE
)) {
418 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
419 (long)tb
->pc
, tb
->size
, flags1
, flags2
);
425 void tb_jmp_check(TranslationBlock
*tb
)
427 TranslationBlock
*tb1
;
430 /* suppress any remaining jumps to this TB */
434 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
437 tb1
= tb1
->jmp_next
[n1
];
439 /* check end of list */
441 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb
);
447 /* invalidate one TB */
448 static inline void tb_remove(TranslationBlock
**ptb
, TranslationBlock
*tb
,
451 TranslationBlock
*tb1
;
455 *ptb
= *(TranslationBlock
**)((char *)tb1
+ next_offset
);
458 ptb
= (TranslationBlock
**)((char *)tb1
+ next_offset
);
462 static inline void tb_page_remove(TranslationBlock
**ptb
, TranslationBlock
*tb
)
464 TranslationBlock
*tb1
;
470 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
472 *ptb
= tb1
->page_next
[n1
];
475 ptb
= &tb1
->page_next
[n1
];
479 static inline void tb_jmp_remove(TranslationBlock
*tb
, int n
)
481 TranslationBlock
*tb1
, **ptb
;
484 ptb
= &tb
->jmp_next
[n
];
487 /* find tb(n) in circular list */
491 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
492 if (n1
== n
&& tb1
== tb
)
495 ptb
= &tb1
->jmp_first
;
497 ptb
= &tb1
->jmp_next
[n1
];
500 /* now we can suppress tb(n) from the list */
501 *ptb
= tb
->jmp_next
[n
];
503 tb
->jmp_next
[n
] = NULL
;
507 /* reset the jump entry 'n' of a TB so that it is not chained to
509 static inline void tb_reset_jump(TranslationBlock
*tb
, int n
)
511 tb_set_jmp_target(tb
, n
, (unsigned long)(tb
->tc_ptr
+ tb
->tb_next_offset
[n
]));
514 static inline void tb_phys_invalidate(TranslationBlock
*tb
, unsigned int page_addr
)
519 target_ulong phys_pc
;
520 TranslationBlock
*tb1
, *tb2
;
522 /* remove the TB from the hash list */
523 phys_pc
= tb
->page_addr
[0] + (tb
->pc
& ~TARGET_PAGE_MASK
);
524 h
= tb_phys_hash_func(phys_pc
);
525 tb_remove(&tb_phys_hash
[h
], tb
,
526 offsetof(TranslationBlock
, phys_hash_next
));
528 /* remove the TB from the page list */
529 if (tb
->page_addr
[0] != page_addr
) {
530 p
= page_find(tb
->page_addr
[0] >> TARGET_PAGE_BITS
);
531 tb_page_remove(&p
->first_tb
, tb
);
532 invalidate_page_bitmap(p
);
534 if (tb
->page_addr
[1] != -1 && tb
->page_addr
[1] != page_addr
) {
535 p
= page_find(tb
->page_addr
[1] >> TARGET_PAGE_BITS
);
536 tb_page_remove(&p
->first_tb
, tb
);
537 invalidate_page_bitmap(p
);
540 tb_invalidated_flag
= 1;
542 /* remove the TB from the hash list */
543 h
= tb_jmp_cache_hash_func(tb
->pc
);
544 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
545 if (env
->tb_jmp_cache
[h
] == tb
)
546 env
->tb_jmp_cache
[h
] = NULL
;
549 /* suppress this TB from the two jump lists */
550 tb_jmp_remove(tb
, 0);
551 tb_jmp_remove(tb
, 1);
553 /* suppress any remaining jumps to this TB */
559 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
560 tb2
= tb1
->jmp_next
[n1
];
561 tb_reset_jump(tb1
, n1
);
562 tb1
->jmp_next
[n1
] = NULL
;
565 tb
->jmp_first
= (TranslationBlock
*)((long)tb
| 2); /* fail safe */
567 tb_phys_invalidate_count
++;
570 static inline void set_bits(uint8_t *tab
, int start
, int len
)
576 mask
= 0xff << (start
& 7);
577 if ((start
& ~7) == (end
& ~7)) {
579 mask
&= ~(0xff << (end
& 7));
584 start
= (start
+ 8) & ~7;
586 while (start
< end1
) {
591 mask
= ~(0xff << (end
& 7));
597 static void build_page_bitmap(PageDesc
*p
)
599 int n
, tb_start
, tb_end
;
600 TranslationBlock
*tb
;
602 p
->code_bitmap
= qemu_malloc(TARGET_PAGE_SIZE
/ 8);
605 memset(p
->code_bitmap
, 0, TARGET_PAGE_SIZE
/ 8);
610 tb
= (TranslationBlock
*)((long)tb
& ~3);
611 /* NOTE: this is subtle as a TB may span two physical pages */
613 /* NOTE: tb_end may be after the end of the page, but
614 it is not a problem */
615 tb_start
= tb
->pc
& ~TARGET_PAGE_MASK
;
616 tb_end
= tb_start
+ tb
->size
;
617 if (tb_end
> TARGET_PAGE_SIZE
)
618 tb_end
= TARGET_PAGE_SIZE
;
621 tb_end
= ((tb
->pc
+ tb
->size
) & ~TARGET_PAGE_MASK
);
623 set_bits(p
->code_bitmap
, tb_start
, tb_end
- tb_start
);
624 tb
= tb
->page_next
[n
];
628 #ifdef TARGET_HAS_PRECISE_SMC
630 static void tb_gen_code(CPUState
*env
,
631 target_ulong pc
, target_ulong cs_base
, int flags
,
634 TranslationBlock
*tb
;
636 target_ulong phys_pc
, phys_page2
, virt_page2
;
639 phys_pc
= get_phys_addr_code(env
, pc
);
642 /* flush must be done */
644 /* cannot fail at this point */
647 tc_ptr
= code_gen_ptr
;
649 tb
->cs_base
= cs_base
;
652 cpu_gen_code(env
, tb
, &code_gen_size
);
653 code_gen_ptr
= (void *)(((unsigned long)code_gen_ptr
+ code_gen_size
+ CODE_GEN_ALIGN
- 1) & ~(CODE_GEN_ALIGN
- 1));
655 /* check next page if needed */
656 virt_page2
= (pc
+ tb
->size
- 1) & TARGET_PAGE_MASK
;
658 if ((pc
& TARGET_PAGE_MASK
) != virt_page2
) {
659 phys_page2
= get_phys_addr_code(env
, virt_page2
);
661 tb_link_phys(tb
, phys_pc
, phys_page2
);
665 /* invalidate all TBs which intersect with the target physical page
666 starting in range [start;end[. NOTE: start and end must refer to
667 the same physical page. 'is_cpu_write_access' should be true if called
668 from a real cpu write access: the virtual CPU will exit the current
669 TB if code is modified inside this TB. */
670 void tb_invalidate_phys_page_range(target_ulong start
, target_ulong end
,
671 int is_cpu_write_access
)
673 int n
, current_tb_modified
, current_tb_not_found
, current_flags
;
674 CPUState
*env
= cpu_single_env
;
676 TranslationBlock
*tb
, *tb_next
, *current_tb
, *saved_tb
;
677 target_ulong tb_start
, tb_end
;
678 target_ulong current_pc
, current_cs_base
;
680 p
= page_find(start
>> TARGET_PAGE_BITS
);
683 if (!p
->code_bitmap
&&
684 ++p
->code_write_count
>= SMC_BITMAP_USE_THRESHOLD
&&
685 is_cpu_write_access
) {
686 /* build code bitmap */
687 build_page_bitmap(p
);
690 /* we remove all the TBs in the range [start, end[ */
691 /* XXX: see if in some cases it could be faster to invalidate all the code */
692 current_tb_not_found
= is_cpu_write_access
;
693 current_tb_modified
= 0;
694 current_tb
= NULL
; /* avoid warning */
695 current_pc
= 0; /* avoid warning */
696 current_cs_base
= 0; /* avoid warning */
697 current_flags
= 0; /* avoid warning */
701 tb
= (TranslationBlock
*)((long)tb
& ~3);
702 tb_next
= tb
->page_next
[n
];
703 /* NOTE: this is subtle as a TB may span two physical pages */
705 /* NOTE: tb_end may be after the end of the page, but
706 it is not a problem */
707 tb_start
= tb
->page_addr
[0] + (tb
->pc
& ~TARGET_PAGE_MASK
);
708 tb_end
= tb_start
+ tb
->size
;
710 tb_start
= tb
->page_addr
[1];
711 tb_end
= tb_start
+ ((tb
->pc
+ tb
->size
) & ~TARGET_PAGE_MASK
);
713 if (!(tb_end
<= start
|| tb_start
>= end
)) {
714 #ifdef TARGET_HAS_PRECISE_SMC
715 if (current_tb_not_found
) {
716 current_tb_not_found
= 0;
718 if (env
->mem_write_pc
) {
719 /* now we have a real cpu fault */
720 current_tb
= tb_find_pc(env
->mem_write_pc
);
723 if (current_tb
== tb
&&
724 !(current_tb
->cflags
& CF_SINGLE_INSN
)) {
725 /* If we are modifying the current TB, we must stop
726 its execution. We could be more precise by checking
727 that the modification is after the current PC, but it
728 would require a specialized function to partially
729 restore the CPU state */
731 current_tb_modified
= 1;
732 cpu_restore_state(current_tb
, env
,
733 env
->mem_write_pc
, NULL
);
734 #if defined(TARGET_I386)
735 current_flags
= env
->hflags
;
736 current_flags
|= (env
->eflags
& (IOPL_MASK
| TF_MASK
| VM_MASK
));
737 current_cs_base
= (target_ulong
)env
->segs
[R_CS
].base
;
738 current_pc
= current_cs_base
+ env
->eip
;
740 #error unsupported CPU
743 #endif /* TARGET_HAS_PRECISE_SMC */
744 /* we need to do that to handle the case where a signal
745 occurs while doing tb_phys_invalidate() */
748 saved_tb
= env
->current_tb
;
749 env
->current_tb
= NULL
;
751 tb_phys_invalidate(tb
, -1);
753 env
->current_tb
= saved_tb
;
754 if (env
->interrupt_request
&& env
->current_tb
)
755 cpu_interrupt(env
, env
->interrupt_request
);
760 #if !defined(CONFIG_USER_ONLY)
761 /* if no code remaining, no need to continue to use slow writes */
763 invalidate_page_bitmap(p
);
764 if (is_cpu_write_access
) {
765 tlb_unprotect_code_phys(env
, start
, env
->mem_write_vaddr
);
769 #ifdef TARGET_HAS_PRECISE_SMC
770 if (current_tb_modified
) {
771 /* we generate a block containing just the instruction
772 modifying the memory. It will ensure that it cannot modify
774 env
->current_tb
= NULL
;
775 tb_gen_code(env
, current_pc
, current_cs_base
, current_flags
,
777 cpu_resume_from_signal(env
, NULL
);
782 /* len must be <= 8 and start must be a multiple of len */
783 static inline void tb_invalidate_phys_page_fast(target_ulong start
, int len
)
790 fprintf(logfile
, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
791 cpu_single_env
->mem_write_vaddr
, len
,
793 cpu_single_env
->eip
+ (long)cpu_single_env
->segs
[R_CS
].base
);
797 p
= page_find(start
>> TARGET_PAGE_BITS
);
800 if (p
->code_bitmap
) {
801 offset
= start
& ~TARGET_PAGE_MASK
;
802 b
= p
->code_bitmap
[offset
>> 3] >> (offset
& 7);
803 if (b
& ((1 << len
) - 1))
807 tb_invalidate_phys_page_range(start
, start
+ len
, 1);
811 #if !defined(CONFIG_SOFTMMU)
812 static void tb_invalidate_phys_page(target_ulong addr
,
813 unsigned long pc
, void *puc
)
815 int n
, current_flags
, current_tb_modified
;
816 target_ulong current_pc
, current_cs_base
;
818 TranslationBlock
*tb
, *current_tb
;
819 #ifdef TARGET_HAS_PRECISE_SMC
820 CPUState
*env
= cpu_single_env
;
823 addr
&= TARGET_PAGE_MASK
;
824 p
= page_find(addr
>> TARGET_PAGE_BITS
);
828 current_tb_modified
= 0;
830 current_pc
= 0; /* avoid warning */
831 current_cs_base
= 0; /* avoid warning */
832 current_flags
= 0; /* avoid warning */
833 #ifdef TARGET_HAS_PRECISE_SMC
835 current_tb
= tb_find_pc(pc
);
840 tb
= (TranslationBlock
*)((long)tb
& ~3);
841 #ifdef TARGET_HAS_PRECISE_SMC
842 if (current_tb
== tb
&&
843 !(current_tb
->cflags
& CF_SINGLE_INSN
)) {
844 /* If we are modifying the current TB, we must stop
845 its execution. We could be more precise by checking
846 that the modification is after the current PC, but it
847 would require a specialized function to partially
848 restore the CPU state */
850 current_tb_modified
= 1;
851 cpu_restore_state(current_tb
, env
, pc
, puc
);
852 #if defined(TARGET_I386)
853 current_flags
= env
->hflags
;
854 current_flags
|= (env
->eflags
& (IOPL_MASK
| TF_MASK
| VM_MASK
));
855 current_cs_base
= (target_ulong
)env
->segs
[R_CS
].base
;
856 current_pc
= current_cs_base
+ env
->eip
;
858 #error unsupported CPU
861 #endif /* TARGET_HAS_PRECISE_SMC */
862 tb_phys_invalidate(tb
, addr
);
863 tb
= tb
->page_next
[n
];
866 #ifdef TARGET_HAS_PRECISE_SMC
867 if (current_tb_modified
) {
868 /* we generate a block containing just the instruction
869 modifying the memory. It will ensure that it cannot modify
871 env
->current_tb
= NULL
;
872 tb_gen_code(env
, current_pc
, current_cs_base
, current_flags
,
874 cpu_resume_from_signal(env
, puc
);
880 /* add the tb in the target page and protect it if necessary */
881 static inline void tb_alloc_page(TranslationBlock
*tb
,
882 unsigned int n
, target_ulong page_addr
)
885 TranslationBlock
*last_first_tb
;
887 tb
->page_addr
[n
] = page_addr
;
888 p
= page_find_alloc(page_addr
>> TARGET_PAGE_BITS
);
889 tb
->page_next
[n
] = p
->first_tb
;
890 last_first_tb
= p
->first_tb
;
891 p
->first_tb
= (TranslationBlock
*)((long)tb
| n
);
892 invalidate_page_bitmap(p
);
894 #if defined(TARGET_HAS_SMC) || 1
896 #if defined(CONFIG_USER_ONLY)
897 if (p
->flags
& PAGE_WRITE
) {
902 /* force the host page as non writable (writes will have a
903 page fault + mprotect overhead) */
904 page_addr
&= qemu_host_page_mask
;
906 for(addr
= page_addr
; addr
< page_addr
+ qemu_host_page_size
;
907 addr
+= TARGET_PAGE_SIZE
) {
909 p2
= page_find (addr
>> TARGET_PAGE_BITS
);
913 p2
->flags
&= ~PAGE_WRITE
;
914 page_get_flags(addr
);
916 mprotect(g2h(page_addr
), qemu_host_page_size
,
917 (prot
& PAGE_BITS
) & ~PAGE_WRITE
);
918 #ifdef DEBUG_TB_INVALIDATE
919 printf("protecting code page: 0x" TARGET_FMT_lx
"\n",
924 /* if some code is already present, then the pages are already
925 protected. So we handle the case where only the first TB is
926 allocated in a physical page */
927 if (!last_first_tb
) {
928 tlb_protect_code(page_addr
);
932 #endif /* TARGET_HAS_SMC */
935 /* Allocate a new translation block. Flush the translation buffer if
936 too many translation blocks or too much generated code. */
937 TranslationBlock
*tb_alloc(target_ulong pc
)
939 TranslationBlock
*tb
;
941 if (nb_tbs
>= CODE_GEN_MAX_BLOCKS
||
942 (code_gen_ptr
- code_gen_buffer
) >= CODE_GEN_BUFFER_MAX_SIZE
)
950 /* add a new TB and link it to the physical page tables. phys_page2 is
951 (-1) to indicate that only one page contains the TB. */
952 void tb_link_phys(TranslationBlock
*tb
,
953 target_ulong phys_pc
, target_ulong phys_page2
)
956 TranslationBlock
**ptb
;
958 /* add in the physical hash table */
959 h
= tb_phys_hash_func(phys_pc
);
960 ptb
= &tb_phys_hash
[h
];
961 tb
->phys_hash_next
= *ptb
;
964 /* add in the page list */
965 tb_alloc_page(tb
, 0, phys_pc
& TARGET_PAGE_MASK
);
966 if (phys_page2
!= -1)
967 tb_alloc_page(tb
, 1, phys_page2
);
969 tb
->page_addr
[1] = -1;
971 tb
->jmp_first
= (TranslationBlock
*)((long)tb
| 2);
972 tb
->jmp_next
[0] = NULL
;
973 tb
->jmp_next
[1] = NULL
;
975 /* init original jump addresses */
976 if (tb
->tb_next_offset
[0] != 0xffff)
977 tb_reset_jump(tb
, 0);
978 if (tb
->tb_next_offset
[1] != 0xffff)
979 tb_reset_jump(tb
, 1);
981 #ifdef DEBUG_TB_CHECK
986 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
987 tb[1].tc_ptr. Return NULL if not found */
988 TranslationBlock
*tb_find_pc(unsigned long tc_ptr
)
992 TranslationBlock
*tb
;
996 if (tc_ptr
< (unsigned long)code_gen_buffer
||
997 tc_ptr
>= (unsigned long)code_gen_ptr
)
999 /* binary search (cf Knuth) */
1002 while (m_min
<= m_max
) {
1003 m
= (m_min
+ m_max
) >> 1;
1005 v
= (unsigned long)tb
->tc_ptr
;
1008 else if (tc_ptr
< v
) {
1017 static void tb_reset_jump_recursive(TranslationBlock
*tb
);
1019 static inline void tb_reset_jump_recursive2(TranslationBlock
*tb
, int n
)
1021 TranslationBlock
*tb1
, *tb_next
, **ptb
;
1024 tb1
= tb
->jmp_next
[n
];
1026 /* find head of list */
1029 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
1032 tb1
= tb1
->jmp_next
[n1
];
1034 /* we are now sure now that tb jumps to tb1 */
1037 /* remove tb from the jmp_first list */
1038 ptb
= &tb_next
->jmp_first
;
1042 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
1043 if (n1
== n
&& tb1
== tb
)
1045 ptb
= &tb1
->jmp_next
[n1
];
1047 *ptb
= tb
->jmp_next
[n
];
1048 tb
->jmp_next
[n
] = NULL
;
1050 /* suppress the jump to next tb in generated code */
1051 tb_reset_jump(tb
, n
);
1053 /* suppress jumps in the tb on which we could have jumped */
1054 tb_reset_jump_recursive(tb_next
);
1058 static void tb_reset_jump_recursive(TranslationBlock
*tb
)
1060 tb_reset_jump_recursive2(tb
, 0);
1061 tb_reset_jump_recursive2(tb
, 1);
1064 #if defined(TARGET_HAS_ICE)
1065 static void breakpoint_invalidate(CPUState
*env
, target_ulong pc
)
1067 target_phys_addr_t addr
;
1069 ram_addr_t ram_addr
;
1072 addr
= cpu_get_phys_page_debug(env
, pc
);
1073 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
1075 pd
= IO_MEM_UNASSIGNED
;
1077 pd
= p
->phys_offset
;
1079 ram_addr
= (pd
& TARGET_PAGE_MASK
) | (pc
& ~TARGET_PAGE_MASK
);
1080 tb_invalidate_phys_page_range(ram_addr
, ram_addr
+ 1, 0);
1084 /* Add a watchpoint. */
1085 int cpu_watchpoint_insert(CPUState
*env
, target_ulong addr
)
1089 for (i
= 0; i
< env
->nb_watchpoints
; i
++) {
1090 if (addr
== env
->watchpoint
[i
].vaddr
)
1093 if (env
->nb_watchpoints
>= MAX_WATCHPOINTS
)
1096 i
= env
->nb_watchpoints
++;
1097 env
->watchpoint
[i
].vaddr
= addr
;
1098 tlb_flush_page(env
, addr
);
1099 /* FIXME: This flush is needed because of the hack to make memory ops
1100 terminate the TB. It can be removed once the proper IO trap and
1101 re-execute bits are in. */
1106 /* Remove a watchpoint. */
1107 int cpu_watchpoint_remove(CPUState
*env
, target_ulong addr
)
1111 for (i
= 0; i
< env
->nb_watchpoints
; i
++) {
1112 if (addr
== env
->watchpoint
[i
].vaddr
) {
1113 env
->nb_watchpoints
--;
1114 env
->watchpoint
[i
] = env
->watchpoint
[env
->nb_watchpoints
];
1115 tlb_flush_page(env
, addr
);
1122 /* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1123 breakpoint is reached */
1124 int cpu_breakpoint_insert(CPUState
*env
, target_ulong pc
)
1126 #if defined(TARGET_HAS_ICE)
1129 for(i
= 0; i
< env
->nb_breakpoints
; i
++) {
1130 if (env
->breakpoints
[i
] == pc
)
1134 if (env
->nb_breakpoints
>= MAX_BREAKPOINTS
)
1136 env
->breakpoints
[env
->nb_breakpoints
++] = pc
;
1138 breakpoint_invalidate(env
, pc
);
1145 /* remove a breakpoint */
1146 int cpu_breakpoint_remove(CPUState
*env
, target_ulong pc
)
1148 #if defined(TARGET_HAS_ICE)
1150 for(i
= 0; i
< env
->nb_breakpoints
; i
++) {
1151 if (env
->breakpoints
[i
] == pc
)
1156 env
->nb_breakpoints
--;
1157 if (i
< env
->nb_breakpoints
)
1158 env
->breakpoints
[i
] = env
->breakpoints
[env
->nb_breakpoints
];
1160 breakpoint_invalidate(env
, pc
);
1167 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1168 CPU loop after each instruction */
1169 void cpu_single_step(CPUState
*env
, int enabled
)
1171 #if defined(TARGET_HAS_ICE)
1172 if (env
->singlestep_enabled
!= enabled
) {
1173 env
->singlestep_enabled
= enabled
;
1174 /* must flush all the translated code to avoid inconsistancies */
1175 /* XXX: only flush what is necessary */
1181 /* enable or disable low levels log */
1182 void cpu_set_log(int log_flags
)
1184 loglevel
= log_flags
;
1185 if (loglevel
&& !logfile
) {
1186 logfile
= fopen(logfilename
, log_append
? "a" : "w");
1188 perror(logfilename
);
1191 #if !defined(CONFIG_SOFTMMU)
1192 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1194 static uint8_t logfile_buf
[4096];
1195 setvbuf(logfile
, logfile_buf
, _IOLBF
, sizeof(logfile_buf
));
1198 setvbuf(logfile
, NULL
, _IOLBF
, 0);
1202 if (!loglevel
&& logfile
) {
1208 void cpu_set_log_filename(const char *filename
)
1210 logfilename
= strdup(filename
);
1215 cpu_set_log(loglevel
);
1218 /* mask must never be zero, except for A20 change call */
1219 void cpu_interrupt(CPUState
*env
, int mask
)
1221 TranslationBlock
*tb
;
1222 static spinlock_t interrupt_lock
= SPIN_LOCK_UNLOCKED
;
1224 env
->interrupt_request
|= mask
;
1225 /* if the cpu is currently executing code, we must unlink it and
1226 all the potentially executing TB */
1227 tb
= env
->current_tb
;
1228 if (tb
&& !testandset(&interrupt_lock
)) {
1229 env
->current_tb
= NULL
;
1230 tb_reset_jump_recursive(tb
);
1231 resetlock(&interrupt_lock
);
1235 void cpu_reset_interrupt(CPUState
*env
, int mask
)
1237 env
->interrupt_request
&= ~mask
;
1240 CPULogItem cpu_log_items
[] = {
1241 { CPU_LOG_TB_OUT_ASM
, "out_asm",
1242 "show generated host assembly code for each compiled TB" },
1243 { CPU_LOG_TB_IN_ASM
, "in_asm",
1244 "show target assembly code for each compiled TB" },
1245 { CPU_LOG_TB_OP
, "op",
1246 "show micro ops for each compiled TB" },
1247 { CPU_LOG_TB_OP_OPT
, "op_opt",
1250 "before eflags optimization and "
1252 "after liveness analysis" },
1253 { CPU_LOG_INT
, "int",
1254 "show interrupts/exceptions in short format" },
1255 { CPU_LOG_EXEC
, "exec",
1256 "show trace before each executed TB (lots of logs)" },
1257 { CPU_LOG_TB_CPU
, "cpu",
1258 "show CPU state before block translation" },
1260 { CPU_LOG_PCALL
, "pcall",
1261 "show protected mode far calls/returns/exceptions" },
1264 { CPU_LOG_IOPORT
, "ioport",
1265 "show all i/o ports accesses" },
1270 static int cmp1(const char *s1
, int n
, const char *s2
)
1272 if (strlen(s2
) != n
)
1274 return memcmp(s1
, s2
, n
) == 0;
1277 /* takes a comma separated list of log masks. Return 0 if error. */
1278 int cpu_str_to_log_mask(const char *str
)
1287 p1
= strchr(p
, ',');
1290 if(cmp1(p
,p1
-p
,"all")) {
1291 for(item
= cpu_log_items
; item
->mask
!= 0; item
++) {
1295 for(item
= cpu_log_items
; item
->mask
!= 0; item
++) {
1296 if (cmp1(p
, p1
- p
, item
->name
))
1310 void cpu_abort(CPUState
*env
, const char *fmt
, ...)
1317 fprintf(stderr
, "qemu: fatal: ");
1318 vfprintf(stderr
, fmt
, ap
);
1319 fprintf(stderr
, "\n");
1321 if(env
->intercept
& INTERCEPT_SVM_MASK
) {
1322 /* most probably the virtual machine should not
1323 be shut down but rather caught by the VMM */
1324 vmexit(SVM_EXIT_SHUTDOWN
, 0);
1326 cpu_dump_state(env
, stderr
, fprintf
, X86_DUMP_FPU
| X86_DUMP_CCOP
);
1328 cpu_dump_state(env
, stderr
, fprintf
, 0);
1331 fprintf(logfile
, "qemu: fatal: ");
1332 vfprintf(logfile
, fmt
, ap2
);
1333 fprintf(logfile
, "\n");
1335 cpu_dump_state(env
, logfile
, fprintf
, X86_DUMP_FPU
| X86_DUMP_CCOP
);
1337 cpu_dump_state(env
, logfile
, fprintf
, 0);
1347 CPUState
*cpu_copy(CPUState
*env
)
1349 CPUState
*new_env
= cpu_init(env
->cpu_model_str
);
1350 /* preserve chaining and index */
1351 CPUState
*next_cpu
= new_env
->next_cpu
;
1352 int cpu_index
= new_env
->cpu_index
;
1353 memcpy(new_env
, env
, sizeof(CPUState
));
1354 new_env
->next_cpu
= next_cpu
;
1355 new_env
->cpu_index
= cpu_index
;
1359 #if !defined(CONFIG_USER_ONLY)
1361 /* NOTE: if flush_global is true, also flush global entries (not
1363 void tlb_flush(CPUState
*env
, int flush_global
)
1367 #if defined(DEBUG_TLB)
1368 printf("tlb_flush:\n");
1370 /* must reset current TB so that interrupts cannot modify the
1371 links while we are modifying them */
1372 env
->current_tb
= NULL
;
1374 for(i
= 0; i
< CPU_TLB_SIZE
; i
++) {
1375 env
->tlb_table
[0][i
].addr_read
= -1;
1376 env
->tlb_table
[0][i
].addr_write
= -1;
1377 env
->tlb_table
[0][i
].addr_code
= -1;
1378 env
->tlb_table
[1][i
].addr_read
= -1;
1379 env
->tlb_table
[1][i
].addr_write
= -1;
1380 env
->tlb_table
[1][i
].addr_code
= -1;
1381 #if (NB_MMU_MODES >= 3)
1382 env
->tlb_table
[2][i
].addr_read
= -1;
1383 env
->tlb_table
[2][i
].addr_write
= -1;
1384 env
->tlb_table
[2][i
].addr_code
= -1;
1385 #if (NB_MMU_MODES == 4)
1386 env
->tlb_table
[3][i
].addr_read
= -1;
1387 env
->tlb_table
[3][i
].addr_write
= -1;
1388 env
->tlb_table
[3][i
].addr_code
= -1;
1393 memset (env
->tb_jmp_cache
, 0, TB_JMP_CACHE_SIZE
* sizeof (void *));
1395 #if !defined(CONFIG_SOFTMMU)
1396 munmap((void *)MMAP_AREA_START
, MMAP_AREA_END
- MMAP_AREA_START
);
1399 if (env
->kqemu_enabled
) {
1400 kqemu_flush(env
, flush_global
);
1406 static inline void tlb_flush_entry(CPUTLBEntry
*tlb_entry
, target_ulong addr
)
1408 if (addr
== (tlb_entry
->addr_read
&
1409 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
)) ||
1410 addr
== (tlb_entry
->addr_write
&
1411 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
)) ||
1412 addr
== (tlb_entry
->addr_code
&
1413 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
))) {
1414 tlb_entry
->addr_read
= -1;
1415 tlb_entry
->addr_write
= -1;
1416 tlb_entry
->addr_code
= -1;
1420 void tlb_flush_page(CPUState
*env
, target_ulong addr
)
1423 TranslationBlock
*tb
;
1425 #if defined(DEBUG_TLB)
1426 printf("tlb_flush_page: " TARGET_FMT_lx
"\n", addr
);
1428 /* must reset current TB so that interrupts cannot modify the
1429 links while we are modifying them */
1430 env
->current_tb
= NULL
;
1432 addr
&= TARGET_PAGE_MASK
;
1433 i
= (addr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
1434 tlb_flush_entry(&env
->tlb_table
[0][i
], addr
);
1435 tlb_flush_entry(&env
->tlb_table
[1][i
], addr
);
1436 #if (NB_MMU_MODES >= 3)
1437 tlb_flush_entry(&env
->tlb_table
[2][i
], addr
);
1438 #if (NB_MMU_MODES == 4)
1439 tlb_flush_entry(&env
->tlb_table
[3][i
], addr
);
1443 /* Discard jump cache entries for any tb which might potentially
1444 overlap the flushed page. */
1445 i
= tb_jmp_cache_hash_page(addr
- TARGET_PAGE_SIZE
);
1446 memset (&env
->tb_jmp_cache
[i
], 0, TB_JMP_PAGE_SIZE
* sizeof(tb
));
1448 i
= tb_jmp_cache_hash_page(addr
);
1449 memset (&env
->tb_jmp_cache
[i
], 0, TB_JMP_PAGE_SIZE
* sizeof(tb
));
1451 #if !defined(CONFIG_SOFTMMU)
1452 if (addr
< MMAP_AREA_END
)
1453 munmap((void *)addr
, TARGET_PAGE_SIZE
);
1456 if (env
->kqemu_enabled
) {
1457 kqemu_flush_page(env
, addr
);
1462 /* update the TLBs so that writes to code in the virtual page 'addr'
1464 static void tlb_protect_code(ram_addr_t ram_addr
)
1466 cpu_physical_memory_reset_dirty(ram_addr
,
1467 ram_addr
+ TARGET_PAGE_SIZE
,
1471 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1472 tested for self modifying code */
1473 static void tlb_unprotect_code_phys(CPUState
*env
, ram_addr_t ram_addr
,
1476 phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
] |= CODE_DIRTY_FLAG
;
1479 static inline void tlb_reset_dirty_range(CPUTLBEntry
*tlb_entry
,
1480 unsigned long start
, unsigned long length
)
1483 if ((tlb_entry
->addr_write
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
) {
1484 addr
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) + tlb_entry
->addend
;
1485 if ((addr
- start
) < length
) {
1486 tlb_entry
->addr_write
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) | IO_MEM_NOTDIRTY
;
1491 void cpu_physical_memory_reset_dirty(ram_addr_t start
, ram_addr_t end
,
1495 unsigned long length
, start1
;
1499 start
&= TARGET_PAGE_MASK
;
1500 end
= TARGET_PAGE_ALIGN(end
);
1502 length
= end
- start
;
1505 len
= length
>> TARGET_PAGE_BITS
;
1507 /* XXX: should not depend on cpu context */
1509 if (env
->kqemu_enabled
) {
1512 for(i
= 0; i
< len
; i
++) {
1513 kqemu_set_notdirty(env
, addr
);
1514 addr
+= TARGET_PAGE_SIZE
;
1518 mask
= ~dirty_flags
;
1519 p
= phys_ram_dirty
+ (start
>> TARGET_PAGE_BITS
);
1520 for(i
= 0; i
< len
; i
++)
1523 /* we modify the TLB cache so that the dirty bit will be set again
1524 when accessing the range */
1525 start1
= start
+ (unsigned long)phys_ram_base
;
1526 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
1527 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1528 tlb_reset_dirty_range(&env
->tlb_table
[0][i
], start1
, length
);
1529 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1530 tlb_reset_dirty_range(&env
->tlb_table
[1][i
], start1
, length
);
1531 #if (NB_MMU_MODES >= 3)
1532 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1533 tlb_reset_dirty_range(&env
->tlb_table
[2][i
], start1
, length
);
1534 #if (NB_MMU_MODES == 4)
1535 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1536 tlb_reset_dirty_range(&env
->tlb_table
[3][i
], start1
, length
);
1541 #if !defined(CONFIG_SOFTMMU)
1542 /* XXX: this is expensive */
1548 for(i
= 0; i
< L1_SIZE
; i
++) {
1551 addr
= i
<< (TARGET_PAGE_BITS
+ L2_BITS
);
1552 for(j
= 0; j
< L2_SIZE
; j
++) {
1553 if (p
->valid_tag
== virt_valid_tag
&&
1554 p
->phys_addr
>= start
&& p
->phys_addr
< end
&&
1555 (p
->prot
& PROT_WRITE
)) {
1556 if (addr
< MMAP_AREA_END
) {
1557 mprotect((void *)addr
, TARGET_PAGE_SIZE
,
1558 p
->prot
& ~PROT_WRITE
);
1561 addr
+= TARGET_PAGE_SIZE
;
1570 static inline void tlb_update_dirty(CPUTLBEntry
*tlb_entry
)
1572 ram_addr_t ram_addr
;
1574 if ((tlb_entry
->addr_write
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
) {
1575 ram_addr
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) +
1576 tlb_entry
->addend
- (unsigned long)phys_ram_base
;
1577 if (!cpu_physical_memory_is_dirty(ram_addr
)) {
1578 tlb_entry
->addr_write
|= IO_MEM_NOTDIRTY
;
1583 /* update the TLB according to the current state of the dirty bits */
1584 void cpu_tlb_update_dirty(CPUState
*env
)
1587 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1588 tlb_update_dirty(&env
->tlb_table
[0][i
]);
1589 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1590 tlb_update_dirty(&env
->tlb_table
[1][i
]);
1591 #if (NB_MMU_MODES >= 3)
1592 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1593 tlb_update_dirty(&env
->tlb_table
[2][i
]);
1594 #if (NB_MMU_MODES == 4)
1595 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1596 tlb_update_dirty(&env
->tlb_table
[3][i
]);
1601 static inline void tlb_set_dirty1(CPUTLBEntry
*tlb_entry
,
1602 unsigned long start
)
1605 if ((tlb_entry
->addr_write
& ~TARGET_PAGE_MASK
) == IO_MEM_NOTDIRTY
) {
1606 addr
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) + tlb_entry
->addend
;
1607 if (addr
== start
) {
1608 tlb_entry
->addr_write
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) | IO_MEM_RAM
;
1613 /* update the TLB corresponding to virtual page vaddr and phys addr
1614 addr so that it is no longer dirty */
1615 static inline void tlb_set_dirty(CPUState
*env
,
1616 unsigned long addr
, target_ulong vaddr
)
1620 addr
&= TARGET_PAGE_MASK
;
1621 i
= (vaddr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
1622 tlb_set_dirty1(&env
->tlb_table
[0][i
], addr
);
1623 tlb_set_dirty1(&env
->tlb_table
[1][i
], addr
);
1624 #if (NB_MMU_MODES >= 3)
1625 tlb_set_dirty1(&env
->tlb_table
[2][i
], addr
);
1626 #if (NB_MMU_MODES == 4)
1627 tlb_set_dirty1(&env
->tlb_table
[3][i
], addr
);
1632 /* add a new TLB entry. At most one entry for a given virtual address
1633 is permitted. Return 0 if OK or 2 if the page could not be mapped
1634 (can only happen in non SOFTMMU mode for I/O pages or pages
1635 conflicting with the host address space). */
1636 int tlb_set_page_exec(CPUState
*env
, target_ulong vaddr
,
1637 target_phys_addr_t paddr
, int prot
,
1638 int mmu_idx
, int is_softmmu
)
1643 target_ulong address
;
1644 target_phys_addr_t addend
;
1649 p
= phys_page_find(paddr
>> TARGET_PAGE_BITS
);
1651 pd
= IO_MEM_UNASSIGNED
;
1653 pd
= p
->phys_offset
;
1655 #if defined(DEBUG_TLB)
1656 printf("tlb_set_page: vaddr=" TARGET_FMT_lx
" paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1657 vaddr
, (int)paddr
, prot
, mmu_idx
, is_softmmu
, pd
);
1661 #if !defined(CONFIG_SOFTMMU)
1665 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
&& !(pd
& IO_MEM_ROMD
)) {
1666 /* IO memory case */
1667 address
= vaddr
| pd
;
1670 /* standard memory */
1672 addend
= (unsigned long)phys_ram_base
+ (pd
& TARGET_PAGE_MASK
);
1675 /* Make accesses to pages with watchpoints go via the
1676 watchpoint trap routines. */
1677 for (i
= 0; i
< env
->nb_watchpoints
; i
++) {
1678 if (vaddr
== (env
->watchpoint
[i
].vaddr
& TARGET_PAGE_MASK
)) {
1679 if (address
& ~TARGET_PAGE_MASK
) {
1680 env
->watchpoint
[i
].addend
= 0;
1681 address
= vaddr
| io_mem_watch
;
1683 env
->watchpoint
[i
].addend
= pd
- paddr
+
1684 (unsigned long) phys_ram_base
;
1685 /* TODO: Figure out how to make read watchpoints coexist
1687 pd
= (pd
& TARGET_PAGE_MASK
) | io_mem_watch
| IO_MEM_ROMD
;
1692 index
= (vaddr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
1694 te
= &env
->tlb_table
[mmu_idx
][index
];
1695 te
->addend
= addend
;
1696 if (prot
& PAGE_READ
) {
1697 te
->addr_read
= address
;
1701 if (prot
& PAGE_EXEC
) {
1702 te
->addr_code
= address
;
1706 if (prot
& PAGE_WRITE
) {
1707 if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_ROM
||
1708 (pd
& IO_MEM_ROMD
)) {
1709 /* write access calls the I/O callback */
1710 te
->addr_write
= vaddr
|
1711 (pd
& ~(TARGET_PAGE_MASK
| IO_MEM_ROMD
));
1712 } else if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
&&
1713 !cpu_physical_memory_is_dirty(pd
)) {
1714 te
->addr_write
= vaddr
| IO_MEM_NOTDIRTY
;
1716 te
->addr_write
= address
;
1719 te
->addr_write
= -1;
1722 #if !defined(CONFIG_SOFTMMU)
1724 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
) {
1725 /* IO access: no mapping is done as it will be handled by the
1727 if (!(env
->hflags
& HF_SOFTMMU_MASK
))
1732 if (vaddr
>= MMAP_AREA_END
) {
1735 if (prot
& PROT_WRITE
) {
1736 if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_ROM
||
1737 #if defined(TARGET_HAS_SMC) || 1
1740 ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
&&
1741 !cpu_physical_memory_is_dirty(pd
))) {
1742 /* ROM: we do as if code was inside */
1743 /* if code is present, we only map as read only and save the
1747 vp
= virt_page_find_alloc(vaddr
>> TARGET_PAGE_BITS
, 1);
1750 vp
->valid_tag
= virt_valid_tag
;
1751 prot
&= ~PAGE_WRITE
;
1754 map_addr
= mmap((void *)vaddr
, TARGET_PAGE_SIZE
, prot
,
1755 MAP_SHARED
| MAP_FIXED
, phys_ram_fd
, (pd
& TARGET_PAGE_MASK
));
1756 if (map_addr
== MAP_FAILED
) {
1757 cpu_abort(env
, "mmap failed when mapped physical address 0x%08x to virtual address 0x%08x\n",
1767 /* called from signal handler: invalidate the code and unprotect the
1768 page. Return TRUE if the fault was succesfully handled. */
1769 int page_unprotect(target_ulong addr
, unsigned long pc
, void *puc
)
1771 #if !defined(CONFIG_SOFTMMU)
1774 #if defined(DEBUG_TLB)
1775 printf("page_unprotect: addr=0x%08x\n", addr
);
1777 addr
&= TARGET_PAGE_MASK
;
1779 /* if it is not mapped, no need to worry here */
1780 if (addr
>= MMAP_AREA_END
)
1782 vp
= virt_page_find(addr
>> TARGET_PAGE_BITS
);
1785 /* NOTE: in this case, validate_tag is _not_ tested as it
1786 validates only the code TLB */
1787 if (vp
->valid_tag
!= virt_valid_tag
)
1789 if (!(vp
->prot
& PAGE_WRITE
))
1791 #if defined(DEBUG_TLB)
1792 printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
1793 addr
, vp
->phys_addr
, vp
->prot
);
1795 if (mprotect((void *)addr
, TARGET_PAGE_SIZE
, vp
->prot
) < 0)
1796 cpu_abort(cpu_single_env
, "error mprotect addr=0x%lx prot=%d\n",
1797 (unsigned long)addr
, vp
->prot
);
1798 /* set the dirty bit */
1799 phys_ram_dirty
[vp
->phys_addr
>> TARGET_PAGE_BITS
] = 0xff;
1800 /* flush the code inside */
1801 tb_invalidate_phys_page(vp
->phys_addr
, pc
, puc
);
1810 void tlb_flush(CPUState
*env
, int flush_global
)
1814 void tlb_flush_page(CPUState
*env
, target_ulong addr
)
1818 int tlb_set_page_exec(CPUState
*env
, target_ulong vaddr
,
1819 target_phys_addr_t paddr
, int prot
,
1820 int mmu_idx
, int is_softmmu
)
1825 /* dump memory mappings */
1826 void page_dump(FILE *f
)
1828 unsigned long start
, end
;
1829 int i
, j
, prot
, prot1
;
1832 fprintf(f
, "%-8s %-8s %-8s %s\n",
1833 "start", "end", "size", "prot");
1837 for(i
= 0; i
<= L1_SIZE
; i
++) {
1842 for(j
= 0;j
< L2_SIZE
; j
++) {
1847 if (prot1
!= prot
) {
1848 end
= (i
<< (32 - L1_BITS
)) | (j
<< TARGET_PAGE_BITS
);
1850 fprintf(f
, "%08lx-%08lx %08lx %c%c%c\n",
1851 start
, end
, end
- start
,
1852 prot
& PAGE_READ
? 'r' : '-',
1853 prot
& PAGE_WRITE
? 'w' : '-',
1854 prot
& PAGE_EXEC
? 'x' : '-');
1868 int page_get_flags(target_ulong address
)
1872 p
= page_find(address
>> TARGET_PAGE_BITS
);
1878 /* modify the flags of a page and invalidate the code if
1879 necessary. The flag PAGE_WRITE_ORG is positionned automatically
1880 depending on PAGE_WRITE */
1881 void page_set_flags(target_ulong start
, target_ulong end
, int flags
)
1886 start
= start
& TARGET_PAGE_MASK
;
1887 end
= TARGET_PAGE_ALIGN(end
);
1888 if (flags
& PAGE_WRITE
)
1889 flags
|= PAGE_WRITE_ORG
;
1890 spin_lock(&tb_lock
);
1891 for(addr
= start
; addr
< end
; addr
+= TARGET_PAGE_SIZE
) {
1892 p
= page_find_alloc(addr
>> TARGET_PAGE_BITS
);
1893 /* if the write protection is set, then we invalidate the code
1895 if (!(p
->flags
& PAGE_WRITE
) &&
1896 (flags
& PAGE_WRITE
) &&
1898 tb_invalidate_phys_page(addr
, 0, NULL
);
1902 spin_unlock(&tb_lock
);
1905 int page_check_range(target_ulong start
, target_ulong len
, int flags
)
1911 end
= TARGET_PAGE_ALIGN(start
+len
); /* must do before we loose bits in the next step */
1912 start
= start
& TARGET_PAGE_MASK
;
1915 /* we've wrapped around */
1917 for(addr
= start
; addr
< end
; addr
+= TARGET_PAGE_SIZE
) {
1918 p
= page_find(addr
>> TARGET_PAGE_BITS
);
1921 if( !(p
->flags
& PAGE_VALID
) )
1924 if ((flags
& PAGE_READ
) && !(p
->flags
& PAGE_READ
))
1926 if (flags
& PAGE_WRITE
) {
1927 if (!(p
->flags
& PAGE_WRITE_ORG
))
1929 /* unprotect the page if it was put read-only because it
1930 contains translated code */
1931 if (!(p
->flags
& PAGE_WRITE
)) {
1932 if (!page_unprotect(addr
, 0, NULL
))
1941 /* called from signal handler: invalidate the code and unprotect the
1942 page. Return TRUE if the fault was succesfully handled. */
1943 int page_unprotect(target_ulong address
, unsigned long pc
, void *puc
)
1945 unsigned int page_index
, prot
, pindex
;
1947 target_ulong host_start
, host_end
, addr
;
1949 host_start
= address
& qemu_host_page_mask
;
1950 page_index
= host_start
>> TARGET_PAGE_BITS
;
1951 p1
= page_find(page_index
);
1954 host_end
= host_start
+ qemu_host_page_size
;
1957 for(addr
= host_start
;addr
< host_end
; addr
+= TARGET_PAGE_SIZE
) {
1961 /* if the page was really writable, then we change its
1962 protection back to writable */
1963 if (prot
& PAGE_WRITE_ORG
) {
1964 pindex
= (address
- host_start
) >> TARGET_PAGE_BITS
;
1965 if (!(p1
[pindex
].flags
& PAGE_WRITE
)) {
1966 mprotect((void *)g2h(host_start
), qemu_host_page_size
,
1967 (prot
& PAGE_BITS
) | PAGE_WRITE
);
1968 p1
[pindex
].flags
|= PAGE_WRITE
;
1969 /* and since the content will be modified, we must invalidate
1970 the corresponding translated code. */
1971 tb_invalidate_phys_page(address
, pc
, puc
);
1972 #ifdef DEBUG_TB_CHECK
1973 tb_invalidate_check(address
);
1981 static inline void tlb_set_dirty(CPUState
*env
,
1982 unsigned long addr
, target_ulong vaddr
)
1985 #endif /* defined(CONFIG_USER_ONLY) */
1987 static int subpage_register (subpage_t
*mmio
, uint32_t start
, uint32_t end
,
1989 static void *subpage_init (target_phys_addr_t base
, uint32_t *phys
,
1991 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
1994 if (addr > start_addr) \
1997 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
1998 if (start_addr2 > 0) \
2002 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2003 end_addr2 = TARGET_PAGE_SIZE - 1; \
2005 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2006 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2011 /* register physical memory. 'size' must be a multiple of the target
2012 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2014 void cpu_register_physical_memory(target_phys_addr_t start_addr
,
2016 unsigned long phys_offset
)
2018 target_phys_addr_t addr
, end_addr
;
2021 unsigned long orig_size
= size
;
2024 size
= (size
+ TARGET_PAGE_SIZE
- 1) & TARGET_PAGE_MASK
;
2025 end_addr
= start_addr
+ (target_phys_addr_t
)size
;
2026 for(addr
= start_addr
; addr
!= end_addr
; addr
+= TARGET_PAGE_SIZE
) {
2027 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
2028 if (p
&& p
->phys_offset
!= IO_MEM_UNASSIGNED
) {
2029 unsigned long orig_memory
= p
->phys_offset
;
2030 target_phys_addr_t start_addr2
, end_addr2
;
2031 int need_subpage
= 0;
2033 CHECK_SUBPAGE(addr
, start_addr
, start_addr2
, end_addr
, end_addr2
,
2035 if (need_subpage
|| phys_offset
& IO_MEM_SUBWIDTH
) {
2036 if (!(orig_memory
& IO_MEM_SUBPAGE
)) {
2037 subpage
= subpage_init((addr
& TARGET_PAGE_MASK
),
2038 &p
->phys_offset
, orig_memory
);
2040 subpage
= io_mem_opaque
[(orig_memory
& ~TARGET_PAGE_MASK
)
2043 subpage_register(subpage
, start_addr2
, end_addr2
, phys_offset
);
2045 p
->phys_offset
= phys_offset
;
2046 if ((phys_offset
& ~TARGET_PAGE_MASK
) <= IO_MEM_ROM
||
2047 (phys_offset
& IO_MEM_ROMD
))
2048 phys_offset
+= TARGET_PAGE_SIZE
;
2051 p
= phys_page_find_alloc(addr
>> TARGET_PAGE_BITS
, 1);
2052 p
->phys_offset
= phys_offset
;
2053 if ((phys_offset
& ~TARGET_PAGE_MASK
) <= IO_MEM_ROM
||
2054 (phys_offset
& IO_MEM_ROMD
))
2055 phys_offset
+= TARGET_PAGE_SIZE
;
2057 target_phys_addr_t start_addr2
, end_addr2
;
2058 int need_subpage
= 0;
2060 CHECK_SUBPAGE(addr
, start_addr
, start_addr2
, end_addr
,
2061 end_addr2
, need_subpage
);
2063 if (need_subpage
|| phys_offset
& IO_MEM_SUBWIDTH
) {
2064 subpage
= subpage_init((addr
& TARGET_PAGE_MASK
),
2065 &p
->phys_offset
, IO_MEM_UNASSIGNED
);
2066 subpage_register(subpage
, start_addr2
, end_addr2
,
2073 /* since each CPU stores ram addresses in its TLB cache, we must
2074 reset the modified entries */
2076 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
2081 /* XXX: temporary until new memory mapping API */
2082 uint32_t cpu_get_physical_page_desc(target_phys_addr_t addr
)
2086 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
2088 return IO_MEM_UNASSIGNED
;
2089 return p
->phys_offset
;
2092 /* XXX: better than nothing */
2093 ram_addr_t
qemu_ram_alloc(unsigned int size
)
2096 if ((phys_ram_alloc_offset
+ size
) >= phys_ram_size
) {
2097 fprintf(stderr
, "Not enough memory (requested_size = %u, max memory = %d)\n",
2098 size
, phys_ram_size
);
2101 addr
= phys_ram_alloc_offset
;
2102 phys_ram_alloc_offset
= TARGET_PAGE_ALIGN(phys_ram_alloc_offset
+ size
);
2106 void qemu_ram_free(ram_addr_t addr
)
2110 static uint32_t unassigned_mem_readb(void *opaque
, target_phys_addr_t addr
)
2112 #ifdef DEBUG_UNASSIGNED
2113 printf("Unassigned mem read " TARGET_FMT_plx
"\n", addr
);
2116 do_unassigned_access(addr
, 0, 0, 0);
2118 do_unassigned_access(addr
, 0, 0, 0);
2123 static void unassigned_mem_writeb(void *opaque
, target_phys_addr_t addr
, uint32_t val
)
2125 #ifdef DEBUG_UNASSIGNED
2126 printf("Unassigned mem write " TARGET_FMT_plx
" = 0x%x\n", addr
, val
);
2129 do_unassigned_access(addr
, 1, 0, 0);
2131 do_unassigned_access(addr
, 1, 0, 0);
2135 static CPUReadMemoryFunc
*unassigned_mem_read
[3] = {
2136 unassigned_mem_readb
,
2137 unassigned_mem_readb
,
2138 unassigned_mem_readb
,
2141 static CPUWriteMemoryFunc
*unassigned_mem_write
[3] = {
2142 unassigned_mem_writeb
,
2143 unassigned_mem_writeb
,
2144 unassigned_mem_writeb
,
2147 static void notdirty_mem_writeb(void *opaque
, target_phys_addr_t addr
, uint32_t val
)
2149 unsigned long ram_addr
;
2151 ram_addr
= addr
- (unsigned long)phys_ram_base
;
2152 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2153 if (!(dirty_flags
& CODE_DIRTY_FLAG
)) {
2154 #if !defined(CONFIG_USER_ONLY)
2155 tb_invalidate_phys_page_fast(ram_addr
, 1);
2156 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2159 stb_p((uint8_t *)(long)addr
, val
);
2161 if (cpu_single_env
->kqemu_enabled
&&
2162 (dirty_flags
& KQEMU_MODIFY_PAGE_MASK
) != KQEMU_MODIFY_PAGE_MASK
)
2163 kqemu_modify_page(cpu_single_env
, ram_addr
);
2165 dirty_flags
|= (0xff & ~CODE_DIRTY_FLAG
);
2166 phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
] = dirty_flags
;
2167 /* we remove the notdirty callback only if the code has been
2169 if (dirty_flags
== 0xff)
2170 tlb_set_dirty(cpu_single_env
, addr
, cpu_single_env
->mem_write_vaddr
);
2173 static void notdirty_mem_writew(void *opaque
, target_phys_addr_t addr
, uint32_t val
)
2175 unsigned long ram_addr
;
2177 ram_addr
= addr
- (unsigned long)phys_ram_base
;
2178 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2179 if (!(dirty_flags
& CODE_DIRTY_FLAG
)) {
2180 #if !defined(CONFIG_USER_ONLY)
2181 tb_invalidate_phys_page_fast(ram_addr
, 2);
2182 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2185 stw_p((uint8_t *)(long)addr
, val
);
2187 if (cpu_single_env
->kqemu_enabled
&&
2188 (dirty_flags
& KQEMU_MODIFY_PAGE_MASK
) != KQEMU_MODIFY_PAGE_MASK
)
2189 kqemu_modify_page(cpu_single_env
, ram_addr
);
2191 dirty_flags
|= (0xff & ~CODE_DIRTY_FLAG
);
2192 phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
] = dirty_flags
;
2193 /* we remove the notdirty callback only if the code has been
2195 if (dirty_flags
== 0xff)
2196 tlb_set_dirty(cpu_single_env
, addr
, cpu_single_env
->mem_write_vaddr
);
2199 static void notdirty_mem_writel(void *opaque
, target_phys_addr_t addr
, uint32_t val
)
2201 unsigned long ram_addr
;
2203 ram_addr
= addr
- (unsigned long)phys_ram_base
;
2204 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2205 if (!(dirty_flags
& CODE_DIRTY_FLAG
)) {
2206 #if !defined(CONFIG_USER_ONLY)
2207 tb_invalidate_phys_page_fast(ram_addr
, 4);
2208 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2211 stl_p((uint8_t *)(long)addr
, val
);
2213 if (cpu_single_env
->kqemu_enabled
&&
2214 (dirty_flags
& KQEMU_MODIFY_PAGE_MASK
) != KQEMU_MODIFY_PAGE_MASK
)
2215 kqemu_modify_page(cpu_single_env
, ram_addr
);
2217 dirty_flags
|= (0xff & ~CODE_DIRTY_FLAG
);
2218 phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
] = dirty_flags
;
2219 /* we remove the notdirty callback only if the code has been
2221 if (dirty_flags
== 0xff)
2222 tlb_set_dirty(cpu_single_env
, addr
, cpu_single_env
->mem_write_vaddr
);
2225 static CPUReadMemoryFunc
*error_mem_read
[3] = {
2226 NULL
, /* never used */
2227 NULL
, /* never used */
2228 NULL
, /* never used */
2231 static CPUWriteMemoryFunc
*notdirty_mem_write
[3] = {
2232 notdirty_mem_writeb
,
2233 notdirty_mem_writew
,
2234 notdirty_mem_writel
,
2237 #if defined(CONFIG_SOFTMMU)
2238 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2239 so these check for a hit then pass through to the normal out-of-line
2241 static uint32_t watch_mem_readb(void *opaque
, target_phys_addr_t addr
)
2243 return ldub_phys(addr
);
2246 static uint32_t watch_mem_readw(void *opaque
, target_phys_addr_t addr
)
2248 return lduw_phys(addr
);
2251 static uint32_t watch_mem_readl(void *opaque
, target_phys_addr_t addr
)
2253 return ldl_phys(addr
);
2256 /* Generate a debug exception if a watchpoint has been hit.
2257 Returns the real physical address of the access. addr will be a host
2258 address in case of a RAM location. */
2259 static target_ulong
check_watchpoint(target_phys_addr_t addr
)
2261 CPUState
*env
= cpu_single_env
;
2263 target_ulong retaddr
;
2267 for (i
= 0; i
< env
->nb_watchpoints
; i
++) {
2268 watch
= env
->watchpoint
[i
].vaddr
;
2269 if (((env
->mem_write_vaddr
^ watch
) & TARGET_PAGE_MASK
) == 0) {
2270 retaddr
= addr
- env
->watchpoint
[i
].addend
;
2271 if (((addr
^ watch
) & ~TARGET_PAGE_MASK
) == 0) {
2272 cpu_single_env
->watchpoint_hit
= i
+ 1;
2273 cpu_interrupt(cpu_single_env
, CPU_INTERRUPT_DEBUG
);
2281 static void watch_mem_writeb(void *opaque
, target_phys_addr_t addr
,
2284 addr
= check_watchpoint(addr
);
2285 stb_phys(addr
, val
);
2288 static void watch_mem_writew(void *opaque
, target_phys_addr_t addr
,
2291 addr
= check_watchpoint(addr
);
2292 stw_phys(addr
, val
);
2295 static void watch_mem_writel(void *opaque
, target_phys_addr_t addr
,
2298 addr
= check_watchpoint(addr
);
2299 stl_phys(addr
, val
);
2302 static CPUReadMemoryFunc
*watch_mem_read
[3] = {
2308 static CPUWriteMemoryFunc
*watch_mem_write
[3] = {
2315 static inline uint32_t subpage_readlen (subpage_t
*mmio
, target_phys_addr_t addr
,
2321 idx
= SUBPAGE_IDX(addr
- mmio
->base
);
2322 #if defined(DEBUG_SUBPAGE)
2323 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
" idx %d\n", __func__
,
2324 mmio
, len
, addr
, idx
);
2326 ret
= (**mmio
->mem_read
[idx
][len
])(mmio
->opaque
[idx
][0][len
], addr
);
2331 static inline void subpage_writelen (subpage_t
*mmio
, target_phys_addr_t addr
,
2332 uint32_t value
, unsigned int len
)
2336 idx
= SUBPAGE_IDX(addr
- mmio
->base
);
2337 #if defined(DEBUG_SUBPAGE)
2338 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
" idx %d value %08x\n", __func__
,
2339 mmio
, len
, addr
, idx
, value
);
2341 (**mmio
->mem_write
[idx
][len
])(mmio
->opaque
[idx
][1][len
], addr
, value
);
static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif
    return subpage_readlen(opaque, addr, 0);
}

static void subpage_writeb (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 0);
}

static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif
    return subpage_readlen(opaque, addr, 1);
}

static void subpage_writew (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 1);
}

static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif
    return subpage_readlen(opaque, addr, 2);
}

static void subpage_writel (void *opaque,
                            target_phys_addr_t addr, uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 2);
}
static CPUReadMemoryFunc *subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};

static CPUWriteMemoryFunc *subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             int memory)
{
    int idx, eidx;
    unsigned int i;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    memory >>= IO_MEM_SHIFT;
    for (; idx <= eidx; idx++) {
        for (i = 0; i < 4; i++) {
            if (io_mem_read[memory][i]) {
                mmio->mem_read[idx][i] = &io_mem_read[memory][i];
                mmio->opaque[idx][0][i] = io_mem_opaque[memory];
            }
            if (io_mem_write[memory][i]) {
                mmio->mem_write[idx][i] = &io_mem_write[memory][i];
                mmio->opaque[idx][1][i] = io_mem_opaque[memory];
            }
        }
    }

    return 0;
}
static void *subpage_init (target_phys_addr_t base, uint32_t *phys,
                           int orig_memory)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = qemu_mallocz(sizeof(subpage_t));
    if (mmio != NULL) {
        mmio->base = base;
        subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
#if defined(DEBUG_SUBPAGE)
        printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
               mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
        *phys = subpage_memory | IO_MEM_SUBPAGE;
        subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
    }

    return mmio;
}
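/* subpage_init() is not meant to be called by device code directly; it is
   expected to be triggered from cpu_register_physical_memory() when a
   registered region does not cover a whole target page, so the remainder of
   the page keeps its original handlers.  Illustrative sketch only (kept
   under #if 0, not compiled) of a mapping that would take the subpage path;
   "mydev_iomem" and the 0x100-byte size are placeholders: */
#if 0
    cpu_register_physical_memory(0x10001000, 0x100, mydev_iomem);
#endif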
static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
    io_mem_nb = 5;

#if defined(CONFIG_SOFTMMU)
    io_mem_watch = cpu_register_io_memory(-1, watch_mem_read,
                                          watch_mem_write, NULL);
#endif
    /* alloc dirty bits array */
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
}
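/* io_mem_init() pins the low io_index slots: IO_MEM_ROM, IO_MEM_UNASSIGNED
   and IO_MEM_NOTDIRTY get fixed entries (ROM reads are served from the RAM
   fast path, which is why error_mem_read's entries are "never used"), and
   the dirty bitmap starts out as all ones so freshly allocated RAM is
   treated as dirty until code is translated from it. */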
/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). Functions can be omitted with a NULL function pointer. The
   registered functions may be modified dynamically later.
   If io_index is non-zero, the corresponding io zone is
   modified. If it is zero, a new io zone is allocated. The return
   value can be used with cpu_register_physical_memory(). (-1) is
   returned on error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i, subwidth = 0;

    if (io_index <= 0) {
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for (i = 0; i < 3; i++) {
        if (!mem_read[i] || !mem_write[i])
            subwidth = IO_MEM_SUBWIDTH;
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return (io_index << IO_MEM_SHIFT) | subwidth;
}
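/* Illustrative sketch only (kept under #if 0, not compiled): how a device
   model would typically hook its MMIO callbacks up using the API documented
   above.  "mydev_*" and MYDEV_BASE are placeholders, not symbols defined in
   this file; cpu_register_physical_memory() is assumed here to take the
   usual (start_addr, size, phys_offset) arguments. */
#if 0
static CPUReadMemoryFunc *mydev_read[3] = {
    mydev_readb, mydev_readw, mydev_readl,
};
static CPUWriteMemoryFunc *mydev_write[3] = {
    mydev_writeb, mydev_writew, mydev_writel,
};

static void mydev_map(void *dev_state)
{
    int iomemtype;

    /* io_index 0 requests a fresh slot; the returned token is what
       cpu_register_physical_memory() expects as phys_offset */
    iomemtype = cpu_register_io_memory(0, mydev_read, mydev_write, dev_state);
    cpu_register_physical_memory(MYDEV_BASE, TARGET_PAGE_SIZE, iomemtype);
}
#endif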
CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
{
    return io_mem_write[io_index >> IO_MEM_SHIFT];
}

CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
{
    return io_mem_read[io_index >> IO_MEM_SHIFT];
}
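/* These two accessors expose the handler tables of an already registered io
   zone, matching the note above that registered functions may be modified
   dynamically later: a caller can fetch the current table and swap
   individual byte/word/dword entries in place. */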
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, len, 0)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(p, buf, len);
            unlock_user(p, addr, len);
        } else {
            if (!(flags & PAGE_READ))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, len, 1)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(buf, p, len);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
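/* cpu_physical_memory_rw() is the generic slow path used by device models
   for DMA-style transfers: it walks the physical page table page by page,
   uses memcpy for RAM (updating dirty bits and invalidating TBs on writes)
   and falls back to the registered io handlers for MMIO.  Illustrative
   sketch only (kept under #if 0, not compiled), as it might appear inside a
   device's transfer routine; "dma_dest" is a placeholder guest physical
   address: */
#if 0
    uint8_t sector[512];
    /* host side fills sector[] ... */
    cpu_physical_memory_write(dma_dest, sector, sizeof(sector));
    /* and reads go the other way */
    cpu_physical_memory_read(dma_dest, sector, sizeof(sector));
#endif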
/* used for ROM loading: can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = phys_ram_base + addr1;
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
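/* Unlike cpu_physical_memory_rw(), this variant also writes into regions
   registered as ROM (or ROMD) and deliberately skips the dirty-bit and
   TB-invalidation bookkeeping: it is meant for the machine init / ROM loader
   path, before any code from those pages has been translated. */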
/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}
/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case: the 64 bit access is split into two 32 bit accesses,
           high word first on big endian targets */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}
/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stl_p(ptr, val);
    }
}
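/* Typical use of the *_notdirty stores is target MMU emulation updating
   accessed/dirty bits inside a guest page table entry: that write must not
   disturb the dirty bitmap, since the bitmap may itself be used to detect
   guest page-table modifications.  Illustrative sketch only (kept under
   #if 0, not compiled); "pte_addr" and PG_ACCESSED_MASK stand in for
   target-specific details: */
#if 0
    uint32_t pte = ldl_phys(pte_addr);
    if (!(pte & PG_ACCESSED_MASK)) {
        pte |= PG_ACCESSED_MASK;
        stl_phys_notdirty(pte_addr, pte);
    }
#endif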
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}
/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}

#endif
/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
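/* cpu_memory_rw_debug() goes through the guest MMU (cpu_get_phys_page_debug)
   rather than the physical map directly, which is what a debugger stub wants
   when it is handed guest virtual addresses.  Illustrative sketch only (kept
   under #if 0, not compiled); "env" and "vaddr" are placeholders: */
#if 0
    uint8_t insn[4];
    if (cpu_memory_rw_debug(env, vaddr, insn, sizeof(insn), 0) == 0) {
        /* insn[] now holds the bytes mapped at vaddr, if any */
    }
#endif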
void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for (i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "TB count %d\n", nb_tbs);
    cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
#ifdef CONFIG_PROFILER
    {
        int64_t tot;

        tot = dyngen_interm_time + dyngen_code_time;
        cpu_fprintf(f, "JIT cycles %" PRId64 " (%0.3f s at 2.4 GHz)\n",
                    tot, tot / 2.4e9);
        cpu_fprintf(f, "translated TBs %" PRId64 " (aborted=%" PRId64 " %0.1f%%)\n",
                    dyngen_tb_count,
                    dyngen_tb_count1 - dyngen_tb_count,
                    dyngen_tb_count1 ? (double)(dyngen_tb_count1 - dyngen_tb_count) / dyngen_tb_count1 * 100.0 : 0);
        cpu_fprintf(f, "avg ops/TB %0.1f max=%d\n",
                    dyngen_tb_count ? (double)dyngen_op_count / dyngen_tb_count : 0, dyngen_op_count_max);
        cpu_fprintf(f, "old ops/total ops %0.1f%%\n",
                    dyngen_op_count ? (double)dyngen_old_op_count / dyngen_op_count * 100.0 : 0);
        cpu_fprintf(f, "deleted ops/TB %0.2f\n",
                    dyngen_tb_count ?
                    (double)dyngen_tcg_del_op_count / dyngen_tb_count : 0);
        cpu_fprintf(f, "cycles/op %0.1f\n",
                    dyngen_op_count ? (double)tot / dyngen_op_count : 0);
        cpu_fprintf(f, "cycles/in byte %0.1f\n",
                    dyngen_code_in_len ? (double)tot / dyngen_code_in_len : 0);
        cpu_fprintf(f, "cycles/out byte %0.1f\n",
                    dyngen_code_out_len ? (double)tot / dyngen_code_out_len : 0);
        cpu_fprintf(f, " gen_interm time %0.1f%%\n",
                    (double)dyngen_interm_time / tot * 100.0);
        cpu_fprintf(f, " gen_code time %0.1f%%\n",
                    (double)dyngen_code_time / tot * 100.0);
        cpu_fprintf(f, "cpu_restore count %" PRId64 "\n",
                    dyngen_restore_count);
        cpu_fprintf(f, " avg cycles %0.1f\n",
                    dyngen_restore_count ? (double)dyngen_restore_time / dyngen_restore_count : 0);
        {
            extern void dump_op_count(void);
            dump_op_count();
        }
    }
#endif
}
#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif