/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#ifdef _WIN32
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#else
#include <sys/types.h>
#endif

#include "qemu-common.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#endif

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE
#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

/* threshold to flush the translated code buffer */
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - code_gen_max_block_size())

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START        0x00000000
#define MMAP_AREA_END          0xa8000000
#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_SPARC)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#elif defined(TARGET_ALPHA)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#define TARGET_VIRT_ADDR_SPACE_BITS 42
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_I386) && !defined(USE_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#else
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif
TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_prologue[1024] __attribute__((aligned (32)));
uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
uint8_t *code_gen_ptr;

ram_addr_t phys_ram_size;
int phys_ram_fd;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;
static ram_addr_t phys_ram_alloc_offset = 0;

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;
typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low 12 bits */
    ram_addr_t phys_offset;
} PhysPageDesc;
#define L2_BITS 10
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
/* XXX: this is a temporary hack for alpha target.
 *      In the future, this is to be replaced by a multi-level table
 *      to actually be able to handle the complete 64 bits address space.
 */
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
#else
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
#endif

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)
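
/* The page tables here are two-level: a page number of
   (L1_BITS + L2_BITS) bits is split into an L1 index (the top L1_BITS
   bits) that selects an array of L2_SIZE descriptors, and an L2 index
   (the low L2_BITS bits) that selects the descriptor itself.  For
   example, on a target with 4 KB pages (TARGET_PAGE_BITS = 12) and
   L2_BITS = 10, a 32-bit address leaves L1_BITS = 32 - 10 - 12 = 10,
   so each level indexes 1024 entries and unused L2 arrays can be
   allocated lazily. */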
static void io_mem_init(void);

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
PhysPageDesc **l1_phys_map;
/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb;
#if defined(CONFIG_SOFTMMU)
static int io_mem_watch;
#endif
/* log support */
char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
    CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
    void *opaque[TARGET_PAGE_SIZE][2][4];
} subpage_t;
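
/* A subpage_t splits a single target page into byte-granular I/O
   regions: SUBPAGE_IDX() keeps only the offset bits inside the page,
   and that offset indexes per-byte handler tables.  The last dimension
   selects the access size and the [2] dimension in 'opaque'
   distinguishes read (0) from write (1) callbacks. */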
#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end;

    start = (unsigned long)addr;
    start &= ~(qemu_real_host_page_size - 1);

    end = (unsigned long)addr + size;
    end += qemu_real_host_page_size - 1;
    end &= ~(qemu_real_host_page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif
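
/* The POSIX variant rounds the range out to host page boundaries
   because mprotect() operates on whole pages: the start address is
   rounded down and the end address rounded up, so e.g. a small region
   straddling a page boundary ends up covering both pages. */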
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    map_exec(code_gen_buffer, sizeof(code_gen_buffer));
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));

    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
    {
        long long startaddr, endaddr;
        FILE *f;
        int n;

        f = fopen("/proc/self/maps", "r");
        if (f) {
            do {
                n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
                if (n == 2) {
                    startaddr = MIN(startaddr,
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    endaddr = MIN(endaddr,
                                  (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    page_set_flags(TARGET_PAGE_ALIGN(startaddr),
                                   TARGET_PAGE_ALIGN(endaddr),
                                   PAGE_RESERVED);
                }
            } while (!feof(f));
            fclose(f);
        }
    }
#endif
}
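
/* On user-mode hosts, the existing mappings of the QEMU process
   itself are scanned from /proc/self/maps and flagged so that guest
   mappings are never placed on top of them. */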
static inline PageDesc *page_find_alloc(target_ulong index)
{
    PageDesc **lp, *p;

    lp = &l1_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}
static inline PageDesc *page_find(target_ulong index)
{
    PageDesc *p;

    p = l1_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;
    PhysPageDesc *pd;
    int i;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    pd = *lp;
    if (!pd) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        *lp = pd;
        for (i = 0; i < L2_SIZE; i++)
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
    }
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}
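
/* When the physical address space is wider than 32 bits an extra
   indirection level is inserted above the (L1, L2) split, so the page
   number is consumed as (top bits) + L1_BITS + L2_BITS.  With
   alloc == 0 the walk returns NULL on a missing level instead of
   populating it, which is how phys_page_find() probes without side
   effects. */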
#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#endif
void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

    if (!code_gen_ptr) {
        code_gen_ptr = code_gen_buffer;
        page_init();
        io_mem_init();
    }
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = (CPUState **)&(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->nb_watchpoints = 0;
    *penv = env;
}
static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}
/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > CODE_GEN_BUFFER_SIZE)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}
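
/* tb_flush() is deliberately coarse: when the code buffer fills up,
   every TB, hash table and per-CPU jump cache is discarded at once,
   and blocks are simply retranslated on demand afterwards. */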
#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;

    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}
/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}
void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif
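
/* Throughout this file, list links such as jmp_first/jmp_next and
   page_next store a small tag in the two low bits of the pointer
   (possible because TBs are at least 4-byte aligned): 0 or 1 encodes
   which of the two slots of the pointing TB the link came from, and 2
   marks the list head.  Masking with ~3 recovers the real pointer, as
   in (TranslationBlock *)((long)tb1 & ~3). */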
/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}
/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}
static inline void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    target_phys_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
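
/* Example: set_bits(tab, 6, 4) must set bits 6..9.  The head byte is
   or-ed with 0xff << 6 = 0xc0 (bits 6-7), start is then rounded up to
   8, no full bytes remain, and the tail byte is or-ed with
   ~(0xff << 2) = 0x03 (bits 8-9). */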
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
    if (!p->code_bitmap)
        return;
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
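
/* The bitmap has one bit per byte of the page; a write is treated as
   hitting code only if it overlaps a set bit, so most data writes to
   a page that also holds code can skip the expensive invalidation
   path (see tb_invalidate_phys_page_fast below). */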
#ifdef TARGET_HAS_PRECISE_SMC

static void tb_gen_code(CPUState *env,
                        target_ulong pc, target_ulong cs_base, int flags,
                        int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
}
#endif
/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
                                   int is_cpu_write_access)
{
    int n, current_tb_modified, current_tb_not_found, current_flags;
    CPUState *env = cpu_single_env;
    PageDesc *p;
    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
    target_ulong tb_start, tb_end;
    target_ulong current_pc, current_cs_base;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    current_tb_not_found = is_cpu_write_access;
    current_tb_modified = 0;
    current_tb = NULL; /* avoid warning */
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_write_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_write_pc);
                }
            }
            if (current_tb == tb &&
                !(current_tb->cflags & CF_SINGLE_INSN)) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_write_pc, NULL);
#if defined(TARGET_I386)
                current_flags = env->hflags;
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
                current_cs_base = (target_ulong)env->segs[R_CS].base;
                current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}
/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (loglevel) {
        fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                cpu_single_env->mem_write_vaddr, len,
                cpu_single_env->eip,
                cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_phys_addr_t addr,
                                    unsigned long pc, void *puc)
{
    int n, current_flags, current_tb_modified;
    target_ulong current_pc, current_cs_base;
    PageDesc *p;
    TranslationBlock *tb, *current_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *env = cpu_single_env;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
    current_tb_modified = 0;
    current_tb = NULL;
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            !(current_tb->cflags & CF_SINGLE_INSN)) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
#if defined(TARGET_I386)
            current_flags = env->hflags;
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
            current_cs_base = (target_ulong)env->segs[R_CS].base;
            current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif
/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, target_ulong page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
            page_get_flags(addr);
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}
/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
        (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
}
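
/* A tb_next_offset[] value of 0xffff marks a jump slot that was never
   emitted for this TB, so only real slots are reset to point back
   into the TB's own code until they are chained to another TB. */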
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}
static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}
#if defined(TARGET_HAS_ICE)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    target_ulong pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr)
{
    int i;

    for (i = 0; i < env->nb_watchpoints; i++) {
        if (addr == env->watchpoint[i].vaddr)
            return 0;
    }
    if (env->nb_watchpoints >= MAX_WATCHPOINTS)
        return -1;

    i = env->nb_watchpoints++;
    env->watchpoint[i].vaddr = addr;
    tlb_flush_page(env, addr);
    /* FIXME: This flush is needed because of the hack to make memory ops
       terminate the TB.  It can be removed once the proper IO trap and
       re-execute bits are in.  */
    tb_flush(env);
    return i;
}
/* Remove a watchpoint.  */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
{
    int i;

    for (i = 0; i < env->nb_watchpoints; i++) {
        if (addr == env->watchpoint[i].vaddr) {
            env->nb_watchpoints--;
            env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
            tlb_flush_page(env, addr);
            return 0;
        }
    }
    return -1;
}
/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
   breakpoint is reached */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            return 0;
    }

    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
        return -1;
    env->breakpoints[env->nb_breakpoints++] = pc;

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}
/* remove a breakpoint */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            goto found;
    }
    return -1;
 found:
    env->nb_breakpoints--;
    if (i < env->nb_breakpoints)
        env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
        tb_flush(env);
    }
#endif
}
/* enable or disable low levels log */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        log_append = 1;
    }
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    cpu_set_log(loglevel);
}
/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    env->interrupt_request |= mask;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    tb = env->current_tb;
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
        resetlock(&interrupt_lock);
    }
}

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}
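
/* Unlinking the chained jumps via tb_reset_jump_recursive() forces
   the executing CPU back into the main execution loop after the
   current TB, where interrupt_request is finally examined;
   testandset() merely keeps two threads from unlinking the same chain
   at once. */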
CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops "
#ifdef TARGET_I386
      "before eflags optimization and "
#endif
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};
static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if(cmp1(p,p1-p,"all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        found:
            mask |= item->mask;
        }
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    if(env->intercept & INTERCEPT_SVM_MASK) {
        /* most probably the virtual machine should not
           be shut down but rather caught by the VMM */
        vmexit(SVM_EXIT_SHUTDOWN, 0);
    }
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (logfile) {
        fprintf(logfile, "qemu: fatal: ");
        vfprintf(logfile, fmt, ap2);
        fprintf(logfile, "\n");
#ifdef TARGET_I386
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        cpu_dump_state(env, logfile, fprintf, 0);
#endif
    }
    va_end(ap2);
    va_end(ap);
    abort();
}
CPUState *cpu_copy(CPUState *env)
{
    CPUState *new_env = cpu_init(env->cpu_model_str);
    /* preserve chaining and index */
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
    memcpy(new_env, env, sizeof(CPUState));
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;
    return new_env;
}
#if !defined(CONFIG_USER_ONLY)

static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}
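
/* Two hash pages are cleared because a TB can start on the page
   preceding 'addr' and still spill onto the flushed page. */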
/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_table[0][i].addr_read = -1;
        env->tlb_table[0][i].addr_write = -1;
        env->tlb_table[0][i].addr_code = -1;
        env->tlb_table[1][i].addr_read = -1;
        env->tlb_table[1][i].addr_write = -1;
        env->tlb_table[1][i].addr_code = -1;
#if (NB_MMU_MODES >= 3)
        env->tlb_table[2][i].addr_read = -1;
        env->tlb_table[2][i].addr_write = -1;
        env->tlb_table[2][i].addr_code = -1;
#if (NB_MMU_MODES == 4)
        env->tlb_table[3][i].addr_read = -1;
        env->tlb_table[3][i].addr_write = -1;
        env->tlb_table[3][i].addr_code = -1;
#endif
#endif
    }

    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

#if !defined(CONFIG_SOFTMMU)
    munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush(env, flush_global);
    }
#endif
    tlb_flush_count++;
}
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        tlb_entry->addr_read = -1;
        tlb_entry->addr_write = -1;
        tlb_entry->addr_code = -1;
    }
}
void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_table[0][i], addr);
    tlb_flush_entry(&env->tlb_table[1][i], addr);
#if (NB_MMU_MODES >= 3)
    tlb_flush_entry(&env->tlb_table[2][i], addr);
#if (NB_MMU_MODES == 4)
    tlb_flush_entry(&env->tlb_table[3][i], addr);
#endif
#endif

    tlb_flush_jmp_cache(env, addr);

#if !defined(CONFIG_SOFTMMU)
    if (addr < MMAP_AREA_END)
        munmap((void *)addr, TARGET_PAGE_SIZE);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush_page(env, addr);
    }
#endif
}
/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
}
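
/* tlb_protect_code() clears CODE_DIRTY_FLAG for the page, so the next
   write to it is routed through the notdirty handlers below and can
   invalidate the translated code; tlb_unprotect_code_phys() sets the
   flag back once no TBs remain on the page. */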
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
        }
    }
}
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i, mask, len;
    uint8_t *p;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    len = length >> TARGET_PAGE_BITS;
#ifdef USE_KQEMU
    /* XXX: should not depend on cpu context */
    env = first_cpu;
    if (env->kqemu_enabled) {
        ram_addr_t addr;
        addr = start;
        for(i = 0; i < len; i++) {
            kqemu_set_notdirty(env, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }
#endif
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    for(i = 0; i < len; i++)
        p[i] &= mask;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = start + (unsigned long)phys_ram_base;
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
#if (NB_MMU_MODES >= 3)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
#if (NB_MMU_MODES == 4)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
#endif
#endif
    }

#if !defined(CONFIG_SOFTMMU)
    /* XXX: this is expensive */
    {
        VirtPageDesc *p;
        int j;
        target_ulong addr;

        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                addr = i << (TARGET_PAGE_BITS + L2_BITS);
                for(j = 0; j < L2_SIZE; j++) {
                    if (p->valid_tag == virt_valid_tag &&
                        p->phys_addr >= start && p->phys_addr < end &&
                        (p->prot & PROT_WRITE)) {
                        if (addr < MMAP_AREA_END) {
                            mprotect((void *)addr, TARGET_PAGE_SIZE,
                                     p->prot & ~PROT_WRITE);
                        }
                    }
                    addr += TARGET_PAGE_SIZE;
                    p++;
                }
            }
        }
    }
#endif
}
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
            tlb_entry->addend - (unsigned long)phys_ram_base;
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
        }
    }
}
/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
{
    int i;

    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[0][i]);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[1][i]);
#if (NB_MMU_MODES >= 3)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[2][i]);
#if (NB_MMU_MODES == 4)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[3][i]);
#endif
#endif
}
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
                                  unsigned long start)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if (addr == start) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
        }
    }
}
/* update the TLB corresponding to virtual page vaddr and phys addr
   addr so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
    int i;

    addr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_set_dirty1(&env->tlb_table[0][i], addr);
    tlb_set_dirty1(&env->tlb_table[1][i], addr);
#if (NB_MMU_MODES >= 3)
    tlb_set_dirty1(&env->tlb_table[2][i], addr);
#if (NB_MMU_MODES == 4)
    tlb_set_dirty1(&env->tlb_table[3][i], addr);
#endif
#endif
}
/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_phys_addr_t addend;
    int ret;
    CPUTLBEntry *te;
    int i;

    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
#endif

    ret = 0;
#if !defined(CONFIG_SOFTMMU)
    if (is_softmmu)
#endif
    {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
            /* IO memory case */
            address = vaddr | pd;
            addend = paddr;
        } else {
            /* standard memory */
            address = vaddr;
            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
        }

        /* Make accesses to pages with watchpoints go via the
           watchpoint trap routines.  */
        for (i = 0; i < env->nb_watchpoints; i++) {
            if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
                if (address & ~TARGET_PAGE_MASK) {
                    env->watchpoint[i].addend = 0;
                    address = vaddr | io_mem_watch;
                } else {
                    env->watchpoint[i].addend = pd - paddr +
                        (unsigned long) phys_ram_base;
                    /* TODO: Figure out how to make read watchpoints coexist
                       with code.  */
                    pd = (pd & TARGET_PAGE_MASK) | io_mem_watch | IO_MEM_ROMD;
                }
            }
        }

        index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        addend -= vaddr;
        te = &env->tlb_table[mmu_idx][index];
        te->addend = addend;
        if (prot & PAGE_READ) {
            te->addr_read = address;
        } else {
            te->addr_read = -1;
        }

        if (te->addr_code != -1) {
            tlb_flush_jmp_cache(env, te->addr_code);
        }
        if (prot & PAGE_EXEC) {
            te->addr_code = address;
        } else {
            te->addr_code = -1;
        }
        if (prot & PAGE_WRITE) {
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
                (pd & IO_MEM_ROMD)) {
                /* write access calls the I/O callback */
                te->addr_write = vaddr |
                    (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
            } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                       !cpu_physical_memory_is_dirty(pd)) {
                te->addr_write = vaddr | IO_MEM_NOTDIRTY;
            } else {
                te->addr_write = address;
            }
        } else {
            te->addr_write = -1;
        }
    }
#if !defined(CONFIG_SOFTMMU)
    else {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO access: no mapping is done as it will be handled by the
               soft MMU */
            if (!(env->hflags & HF_SOFTMMU_MASK))
                ret = 2;
        } else {
            void *map_addr;

            if (vaddr >= MMAP_AREA_END) {
                ret = 2;
            } else {
                if (prot & PROT_WRITE) {
                    if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
#if defined(TARGET_HAS_SMC) || 1
                        first_tb ||
#endif
                        ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                         !cpu_physical_memory_is_dirty(pd))) {
                        /* ROM: we do as if code was inside */
                        /* if code is present, we only map as read only and save the
                           original mapping */
                        VirtPageDesc *vp;

                        vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
                        vp->phys_addr = pd;
                        vp->prot = prot;
                        vp->valid_tag = virt_valid_tag;
                        prot &= ~PAGE_WRITE;
                    }
                }
                map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
                                MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
                if (map_addr == MAP_FAILED) {
                    cpu_abort(env, "mmap failed when mapped physical address 0x%08x to virtual address 0x%08x\n",
                              paddr, vaddr);
                }
            }
        }
    }
#endif
    return ret;
}
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    VirtPageDesc *vp;

#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x\n", addr);
#endif
    addr &= TARGET_PAGE_MASK;

    /* if it is not mapped, no need to worry here */
    if (addr >= MMAP_AREA_END)
        return 0;
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (!vp)
        return 0;
    /* NOTE: in this case, validate_tag is _not_ tested as it
       validates only the code TLB */
    if (vp->valid_tag != virt_valid_tag)
        return 0;
    if (!(vp->prot & PAGE_WRITE))
        return 0;
#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
           addr, vp->phys_addr, vp->prot);
#endif
    if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
        cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
                  (unsigned long)addr, vp->prot);
    /* set the dirty bit */
    phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
    /* flush the code inside */
    tb_invalidate_phys_page(vp->phys_addr, pc, puc);
    return 1;
#else
    return 0;
#endif
}
#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
{
    return 0;
}
/* dump memory mappings */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    start = -1;
    end = -1;
    prot = 0;
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0;j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}
int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}
/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is positioned automatically
   depending on PAGE_WRITE */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    PageDesc *p;
    target_ulong addr;

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    spin_lock(&tb_lock);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* if the write protection is set, then we invalidate the code
           inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
    spin_unlock(&tb_lock);
}
int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
    start = start & TARGET_PAGE_MASK;

    if( end < start )
        /* we've wrapped around */
        return -1;
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if( !p )
            return -1;
        if( !(p->flags & PAGE_VALID) )
            return -1;

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
            return -1;
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG))
                return -1;
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL))
                    return -1;
            }
            return 0;
        }
    }
    return 0;
}
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    target_ulong host_start, host_end, addr;

    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1)
        return 0;
    host_end = host_start + qemu_host_page_size;
    p = p1;
    prot = 0;
    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)g2h(host_start), qemu_host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            return 1;
        }
    }
    return 0;
}
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */
#if !defined(CONFIG_USER_ONLY)
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory);
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory);
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
                      need_subpage)                                     \
    do {                                                                \
        if (addr > start_addr)                                          \
            start_addr2 = 0;                                            \
        else {                                                          \
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
            if (start_addr2 > 0)                                        \
                need_subpage = 1;                                       \
        }                                                               \
                                                                        \
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
        else {                                                          \
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
                need_subpage = 1;                                       \
        }                                                               \
    } while (0)
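
/* CHECK_SUBPAGE decides whether the portion of the registered range
   [start_addr, start_addr + orig_size) that falls into the page
   containing 'addr' covers that page only partially; if so, it
   computes the first/last byte offsets inside the page and requests a
   subpage. */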
/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                  ram_addr_t size,
                                  ram_addr_t phys_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;
    ram_addr_t orig_size = size;
    void *subpage;

    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            ram_addr_t orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                          need_subpage);

            if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory);
                } else {
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                                            >> IO_MEM_SHIFT];
                }
                subpage_register(subpage, start_addr2, end_addr2, phys_offset);
            } else {
                p->phys_offset = phys_offset;
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                    (phys_offset & IO_MEM_ROMD))
                    phys_offset += TARGET_PAGE_SIZE;
            }
        } else {
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                (phys_offset & IO_MEM_ROMD))
                phys_offset += TARGET_PAGE_SIZE;
            else {
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);

                if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, IO_MEM_UNASSIGNED);
                    subpage_register(subpage, start_addr2, end_addr2,
                                     phys_offset);
                }
            }
        }
    }

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}
/* XXX: temporary until new memory mapping API */
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
{
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return IO_MEM_UNASSIGNED;
    return p->phys_offset;
}
/* XXX: better than nothing */
ram_addr_t qemu_ram_alloc(ram_addr_t size)
{
    ram_addr_t addr;
    if ((phys_ram_alloc_offset + size) > phys_ram_size) {
        fprintf(stderr, "Not enough memory (requested_size = %lu, max memory = %ld)\n",
                size, phys_ram_size);
        abort();
    }
    addr = phys_ram_alloc_offset;
    phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
    return addr;
}

void qemu_ram_free(ram_addr_t addr)
{
}
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#ifdef TARGET_SPARC
    do_unassigned_access(addr, 0, 0, 0);
#elif TARGET_CRIS
    do_unassigned_access(addr, 0, 0, 0);
#endif
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#ifdef TARGET_SPARC
    do_unassigned_access(addr, 1, 0, 0);
#elif TARGET_CRIS
    do_unassigned_access(addr, 1, 0, 0);
#endif
}

static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readb,
    unassigned_mem_readb,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writeb,
    unassigned_mem_writeb,
};
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stb_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stw_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stl_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}

static CPUReadMemoryFunc *error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};
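
/* The notdirty handlers are where dirty tracking and SMC detection
   meet: a clean RAM page is entered in the TLB with IO_MEM_NOTDIRTY,
   so its first write lands here, invalidates any translated code on
   the page, marks the page dirty, and downgrades the TLB entry back
   to plain RAM once no code remains. */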
#if defined(CONFIG_SOFTMMU)
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   load/store code.  */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    return ldub_phys(addr);
}

static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    return lduw_phys(addr);
}

static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    return ldl_phys(addr);
}

/* Generate a debug exception if a watchpoint has been hit.
   Returns the real physical address of the access.  addr will be a host
   address in case of a RAM location.  */
static target_ulong check_watchpoint(target_phys_addr_t addr)
{
    CPUState *env = cpu_single_env;
    target_ulong watch;
    target_ulong retaddr;
    int i;

    retaddr = addr;
    for (i = 0; i < env->nb_watchpoints; i++) {
        watch = env->watchpoint[i].vaddr;
        if (((env->mem_write_vaddr ^ watch) & TARGET_PAGE_MASK) == 0) {
            retaddr = addr - env->watchpoint[i].addend;
            if (((addr ^ watch) & ~TARGET_PAGE_MASK) == 0) {
                cpu_single_env->watchpoint_hit = i + 1;
                cpu_interrupt(cpu_single_env, CPU_INTERRUPT_DEBUG);
                break;
            }
        }
    }
    return retaddr;
}

static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    addr = check_watchpoint(addr);
    stb_phys(addr, val);
}

static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    addr = check_watchpoint(addr);
    stw_phys(addr, val);
}

static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    addr = check_watchpoint(addr);
    stl_phys(addr, val);
}
static CPUReadMemoryFunc *watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};

static CPUWriteMemoryFunc *watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};
#endif
2347 static inline uint32_t subpage_readlen (subpage_t
*mmio
, target_phys_addr_t addr
,
2353 idx
= SUBPAGE_IDX(addr
- mmio
->base
);
2354 #if defined(DEBUG_SUBPAGE)
2355 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
" idx %d\n", __func__
,
2356 mmio
, len
, addr
, idx
);
2358 ret
= (**mmio
->mem_read
[idx
][len
])(mmio
->opaque
[idx
][0][len
], addr
);
static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
                                     uint32_t value, unsigned int len)
{
    unsigned int idx;

    idx = SUBPAGE_IDX(addr - mmio->base);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n",
           __func__, mmio, len, addr, idx, value);
#endif
    (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len], addr, value);
}
static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 0);
}

static void subpage_writeb (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 0);
}

static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 1);
}

static void subpage_writew (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 1);
}

static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 2);
}

static void subpage_writel (void *opaque,
                            target_phys_addr_t addr, uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 2);
}
static CPUReadMemoryFunc *subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};

static CPUWriteMemoryFunc *subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             int memory)
{
    int idx, eidx;
    unsigned int i;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    memory >>= IO_MEM_SHIFT;
    for (; idx <= eidx; idx++) {
        for (i = 0; i < 4; i++) {
            if (io_mem_read[memory][i]) {
                mmio->mem_read[idx][i] = &io_mem_read[memory][i];
                mmio->opaque[idx][0][i] = io_mem_opaque[memory];
            }
            if (io_mem_write[memory][i]) {
                mmio->mem_write[idx][i] = &io_mem_write[memory][i];
                mmio->opaque[idx][1][i] = io_mem_opaque[memory];
            }
        }
    }

    return 0;
}
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = qemu_mallocz(sizeof(subpage_t));
    if (mmio != NULL) {
        mmio->base = base;
        subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
#if defined(DEBUG_SUBPAGE)
        printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
               mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
        *phys = subpage_memory | IO_MEM_SUBPAGE;
        subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
    }

    return mmio;
}
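
/* Illustrative sketch (not part of the original flow): the situation that
   creates a subpage. When two I/O regions land in the same target page,
   cpu_register_physical_memory() falls back to subpage_init()/
   subpage_register() so each sub-range keeps its own handlers. The base
   address and the dev_a_io/dev_b_io tokens (as returned by
   cpu_register_io_memory()) are placeholders. */
static void example_map_two_devices_in_one_page(int dev_a_io, int dev_b_io)
{
    target_phys_addr_t base = 0x10000000;

    /* first half of the page -> device A, second half -> device B */
    cpu_register_physical_memory(base, TARGET_PAGE_SIZE / 2, dev_a_io);
    cpu_register_physical_memory(base + TARGET_PAGE_SIZE / 2,
                                 TARGET_PAGE_SIZE / 2, dev_b_io);
}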
static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read,
                           unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read,
                           unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read,
                           notdirty_mem_write, NULL);
    io_mem_nb = 5;

#if defined(CONFIG_SOFTMMU)
    io_mem_watch = cpu_register_io_memory(-1, watch_mem_read,
                                          watch_mem_write, NULL);
#endif
    /* alloc dirty bits array */
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
}
/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). Functions can be omitted with a NULL function pointer. The
   registered functions may be modified dynamically later.
   If io_index is non zero, the corresponding io zone is
   modified. If it is zero, a new io zone is allocated. The return
   value can be used with cpu_register_physical_memory(); -1 is
   returned on error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i, subwidth = 0;

    if (io_index <= 0) {
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for (i = 0; i < 3; i++) {
        /* a missing handler at any width marks the region as subwidth */
        if (!mem_read[i] || !mem_write[i])
            subwidth = IO_MEM_SUBWIDTH;
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return (io_index << IO_MEM_SHIFT) | subwidth;
}
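
/* Illustrative sketch (not part of the original flow) of the registration
   API documented above: a hypothetical device exposing a single 32-bit
   register. Only the dword callbacks are provided, so the returned token
   also carries IO_MEM_SUBWIDTH, flagging that not every access width is
   implemented. */
static uint32_t example_dev_readl(void *opaque, target_phys_addr_t addr)
{
    uint32_t *reg = opaque;
    return *reg;
}

static void example_dev_writel(void *opaque, target_phys_addr_t addr,
                               uint32_t val)
{
    uint32_t *reg = opaque;
    *reg = val;
}

static CPUReadMemoryFunc *example_dev_read[3] = {
    NULL, NULL, example_dev_readl,
};

static CPUWriteMemoryFunc *example_dev_write[3] = {
    NULL, NULL, example_dev_writel,
};

static int example_register_dev(uint32_t *reg)
{
    /* io_index 0 allocates a fresh slot; the result is suitable for
       cpu_register_physical_memory() */
    return cpu_register_io_memory(0, example_dev_read, example_dev_write, reg);
}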
CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
{
    return io_mem_write[io_index >> IO_MEM_SHIFT];
}

CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
{
    return io_mem_read[io_index >> IO_MEM_SHIFT];
}
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
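
/* Illustrative sketch (not part of the original flow): typical use of the
   slow path above through the cpu_physical_memory_read()/write() wrappers
   from cpu-all.h, e.g. a device model DMA'ing a buffer into guest RAM. The
   destination address is a placeholder. */
static void example_dma_to_guest(const uint8_t *data, int size)
{
    target_phys_addr_t dma_addr = 0x100000; /* hypothetical destination */

    /* expands to cpu_physical_memory_rw(dma_addr, data, size, 1); the
       I/O, ROM and dirty/SMC bookkeeping is all handled internally */
    cpu_physical_memory_write(dma_addr, data, size);
}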
/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = phys_ram_base + addr1;
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}
/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}
/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* XXX: optimize */
uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}
/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stl_p(ptr, val);
    }
}
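
/* Illustrative sketch (not part of the original flow) of why the _notdirty
   variant exists: a target MMU helper setting the accessed bit of a
   hypothetical i386-style PTE in guest RAM. Going through stl_phys() here
   would set the page's dirty bits and defeat their use for PTE-change
   tracking; 0x20 stands in for the target's PG_ACCESSED_MASK. */
static void example_set_pte_accessed(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);

    if (!(pte & 0x20)) {
        /* update the PTE without touching the dirty bitmap */
        stl_phys_notdirty(pte_addr, pte | 0x20);
    }
}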
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}
/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}
/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* XXX: optimize */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}

#endif
/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
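
/* Illustrative sketch (not part of the original flow): this is the routine
   the gdb stub uses to peek at guest virtual memory. It walks the guest
   page tables page by page via cpu_get_phys_page_debug(), so it works even
   for pages not currently in the TLB. */
static int example_peek_guest_u32(CPUState *env, target_ulong vaddr,
                                  uint32_t *out)
{
    /* read 4 bytes; returns -1 if the virtual page is unmapped */
    return cpu_memory_rw_debug(env, vaddr, (uint8_t *)out, 4, 0);
}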
void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for (i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "TB count %d\n", nb_tbs);
    cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
#ifdef CONFIG_PROFILER
    {
        int64_t tot;
        tot = dyngen_interm_time + dyngen_code_time;
        cpu_fprintf(f, "JIT cycles %" PRId64 " (%0.3f s at 2.4 GHz)\n",
                    tot, tot / 2.4e9);
        cpu_fprintf(f, "translated TBs %" PRId64 " (aborted=%" PRId64 " %0.1f%%)\n",
                    dyngen_tb_count,
                    dyngen_tb_count1 - dyngen_tb_count,
                    dyngen_tb_count1 ? (double)(dyngen_tb_count1 - dyngen_tb_count) / dyngen_tb_count1 * 100.0 : 0);
        cpu_fprintf(f, "avg ops/TB %0.1f max=%d\n",
                    dyngen_tb_count ? (double)dyngen_op_count / dyngen_tb_count : 0, dyngen_op_count_max);
        cpu_fprintf(f, "old ops/total ops %0.1f%%\n",
                    dyngen_op_count ? (double)dyngen_old_op_count / dyngen_op_count * 100.0 : 0);
        cpu_fprintf(f, "deleted ops/TB %0.2f\n",
                    dyngen_tb_count ?
                    (double)dyngen_tcg_del_op_count / dyngen_tb_count : 0);
        cpu_fprintf(f, "cycles/op %0.1f\n",
                    dyngen_op_count ? (double)tot / dyngen_op_count : 0);
        cpu_fprintf(f, "cycles/in byte %0.1f\n",
                    dyngen_code_in_len ? (double)tot / dyngen_code_in_len : 0);
        cpu_fprintf(f, "cycles/out byte %0.1f\n",
                    dyngen_code_out_len ? (double)tot / dyngen_code_out_len : 0);
        if (tot == 0)
            tot = 1;
        cpu_fprintf(f, "  gen_interm time %0.1f%%\n",
                    (double)dyngen_interm_time / tot * 100.0);
        cpu_fprintf(f, "  gen_code time %0.1f%%\n",
                    (double)dyngen_code_time / tot * 100.0);
        cpu_fprintf(f, "cpu_restore count %" PRId64 "\n",
                    dyngen_restore_count);
        cpu_fprintf(f, "  avg cycles %0.1f\n",
                    dyngen_restore_count ? (double)dyngen_restore_time / dyngen_restore_count : 0);
        {
            extern void dump_op_count(void);
            dump_op_count();
        }
    }
#endif
}
#if !defined(CONFIG_USER_ONLY)

/* expand the softmmu template once per access width to generate the
   code-access (_cmmu) load helpers used by the translator */
#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif