/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#ifdef _WIN32
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"
#include "qemu-common.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#endif

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE
#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

/* threshold to flush the translated code buffer */
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - code_gen_max_block_size())

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START        0x00000000
#define MMAP_AREA_END          0xa8000000
#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_SPARC)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#elif defined(TARGET_ALPHA)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#define TARGET_VIRT_ADDR_SPACE_BITS 42
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_I386) && !defined(USE_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#else
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif
TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
uint8_t *code_gen_ptr;

ram_addr_t phys_ram_size;
int phys_ram_fd;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;
static ram_addr_t phys_ram_alloc_offset = 0;

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;
typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low 12 bits */
    ram_addr_t phys_offset;
} PhysPageDesc;
#define L2_BITS 10
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
/* XXX: this is a temporary hack for alpha target.
 *      In the future, this is to be replaced by a multi-level table
 *      to actually be able to handle the complete 64 bits address space.
 */
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
#else
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
#endif

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)
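/* Example of the resulting address split (assuming TARGET_PAGE_BITS is
   12 and L2_BITS is 10, as on most 32-bit targets): for an address A,
   the page index is A >> TARGET_PAGE_BITS, the L1 slot is
   (A >> (TARGET_PAGE_BITS + L2_BITS)) and the L2 slot is
   (A >> TARGET_PAGE_BITS) & (L2_SIZE - 1).  This is exactly the
   arithmetic that page_find_alloc() and phys_page_find_alloc() perform
   on the two-level tables below. */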
static void io_mem_init(void);

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
PhysPageDesc **l1_phys_map;
/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb;
#if defined(CONFIG_SOFTMMU)
static int io_mem_watch;
#endif

/* log support */
char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
    CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
    void *opaque[TARGET_PAGE_SIZE][2][4];
} subpage_t;
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;
        DWORD old_protect;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;

        VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
                       PAGE_EXECUTE_READWRITE, &old_protect);
    }
#else
    qemu_real_host_page_size = getpagesize();
    {
        unsigned long start, end;

        start = (unsigned long)code_gen_buffer;
        start &= ~(qemu_real_host_page_size - 1);

        end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
        end += qemu_real_host_page_size - 1;
        end &= ~(qemu_real_host_page_size - 1);

        mprotect((void *)start, end - start,
                 PROT_READ | PROT_WRITE | PROT_EXEC);
    }
#endif

    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));

#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
    {
        long long startaddr, endaddr;
        FILE *f;
        int n;

        f = fopen("/proc/self/maps", "r");
        if (f) {
            do {
                n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
                if (n == 2) {
                    page_set_flags(TARGET_PAGE_ALIGN(startaddr),
                                   TARGET_PAGE_ALIGN(endaddr),
                                   PAGE_RESERVED);
                }
            } while (!feof(f));
            fclose(f);
        }
    }
#endif
}
static inline PageDesc *page_find_alloc(target_ulong index)
{
    PageDesc **lp, *p;

    lp = &l1_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(target_ulong index)
{
    PageDesc *p;

    p = l1_map[index >> L2_BITS];
    if (!p)
        return NULL;
    return p + (index & (L2_SIZE - 1));
}
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;
    PhysPageDesc *pd;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    pd = *lp;
    if (!pd) {
        int i;
        /* allocate if not found */
        if (!alloc)
            return NULL;
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        *lp = pd;
        for (i = 0; i < L2_SIZE; i++)
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
    }
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}
#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#endif
void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

    if (!code_gen_ptr) {
        code_gen_ptr = code_gen_buffer;
        page_init();
        io_mem_init();
    }
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = (CPUState **)&(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->nb_watchpoints = 0;
    *penv = env;
}
static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > CODE_GEN_BUFFER_SIZE)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}
#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;

    address &= TARGET_PAGE_MASK;
    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif
/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}
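/* Note on the jmp list encoding used by the helpers above: the two low
   bits of each pointer in a jmp list carry a tag.  Tags 0 and 1 select
   which of the two jump slots of the pointed-to TB continues the
   chain; tag 2 marks the end of the circular list, i.e. the owning TB
   itself (hence jmp_first is initialised to (long)tb | 2 elsewhere in
   this file).  Masking with ~3 recovers the actual pointer. */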
/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}
static inline void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    target_phys_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
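/* Example: set_bits(tab, 3, 7) marks bits 3..9, i.e. the five high
   bits of tab[0] and the two low bits of tab[1].  The first branch
   handles ranges contained in a single byte; the second handles the
   head byte, any whole middle bytes, and the tail byte separately. */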
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
    if (!p->code_bitmap)
        return;
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
#ifdef TARGET_HAS_PRECISE_SMC

static void tb_gen_code(CPUState *env,
                        target_ulong pc, target_ulong cs_base, int flags,
                        int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
}
#endif
/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
                                   int is_cpu_write_access)
{
    int n, current_tb_modified, current_tb_not_found, current_flags;
    CPUState *env = cpu_single_env;
    PageDesc *p;
    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
    target_ulong tb_start, tb_end;
    target_ulong current_pc, current_cs_base;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    current_tb_not_found = is_cpu_write_access;
    current_tb_modified = 0;
    current_tb = NULL; /* avoid warning */
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_write_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_write_pc);
                }
            }
            if (current_tb == tb &&
                !(current_tb->cflags & CF_SINGLE_INSN)) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_write_pc, NULL);
#if defined(TARGET_I386)
                current_flags = env->hflags;
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
                current_cs_base = (target_ulong)env->segs[R_CS].base;
                current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}
/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        if (loglevel) {
            fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                   cpu_single_env->mem_write_vaddr, len,
                   cpu_single_env->eip,
                   cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
        }
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_phys_addr_t addr,
                                    unsigned long pc, void *puc)
{
    int n, current_flags, current_tb_modified;
    target_ulong current_pc, current_cs_base;
    PageDesc *p;
    TranslationBlock *tb, *current_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *env = cpu_single_env;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
    current_tb_modified = 0;
    current_tb = NULL;
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            !(current_tb->cflags & CF_SINGLE_INSN)) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
#if defined(TARGET_I386)
            current_flags = env->hflags;
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
            current_cs_base = (target_ulong)env->segs[R_CS].base;
            current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif
/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, target_ulong page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {
            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
            page_get_flags(addr);
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}
/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
        (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
}
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}
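/* The binary search above is valid because tb_alloc hands out tbs[]
   entries in allocation order and tb_gen_code advances code_gen_ptr
   monotonically, so tbs[0..nb_tbs-1] stays sorted by tc_ptr until the
   next tb_flush resets both. */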
static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}
#if defined(TARGET_HAS_ICE)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    target_ulong pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr)
{
    int i;

    for (i = 0; i < env->nb_watchpoints; i++) {
        if (addr == env->watchpoint[i].vaddr)
            return 0;
    }
    if (env->nb_watchpoints >= MAX_WATCHPOINTS)
        return -1;

    i = env->nb_watchpoints++;
    env->watchpoint[i].vaddr = addr;
    tlb_flush_page(env, addr);
    /* FIXME: This flush is needed because of the hack to make memory ops
       terminate the TB. It can be removed once the proper IO trap and
       re-execute bits are in. */
    tb_flush(env);
    return i;
}

/* Remove a watchpoint.  */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
{
    int i;

    for (i = 0; i < env->nb_watchpoints; i++) {
        if (addr == env->watchpoint[i].vaddr) {
            env->nb_watchpoints--;
            env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
            tlb_flush_page(env, addr);
            return 0;
        }
    }
    return -1;
}
/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
   breakpoint is reached */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            return 0;
    }

    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
        return -1;
    env->breakpoints[env->nb_breakpoints++] = pc;

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* remove a breakpoint */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;
    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            goto found;
    }
    return -1;
 found:
    env->nb_breakpoints--;
    if (i < env->nb_breakpoints)
        env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
        tb_flush(env);
    }
#endif
}
1186 void cpu_set_log(int log_flags
)
1188 loglevel
= log_flags
;
1189 if (loglevel
&& !logfile
) {
1190 logfile
= fopen(logfilename
, log_append
? "a" : "w");
1192 perror(logfilename
);
1195 #if !defined(CONFIG_SOFTMMU)
1196 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1198 static uint8_t logfile_buf
[4096];
1199 setvbuf(logfile
, logfile_buf
, _IOLBF
, sizeof(logfile_buf
));
1202 setvbuf(logfile
, NULL
, _IOLBF
, 0);
1206 if (!loglevel
&& logfile
) {
1212 void cpu_set_log_filename(const char *filename
)
1214 logfilename
= strdup(filename
);
1219 cpu_set_log(loglevel
);
/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    env->interrupt_request |= mask;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    tb = env->current_tb;
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
        resetlock(&interrupt_lock);
    }
}
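/* Design note: cpu_interrupt deliberately avoids a full tb_flush; it
   only unlinks the chained jumps reachable from the currently
   executing TB, so the CPU loop regains control at the next block
   boundary while all translations stay valid for later reuse. */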
void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}
CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops "
#ifdef TARGET_I386
      "before eflags optimization and "
#endif
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};
static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if(cmp1(p,p1-p,"all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    if(env->intercept & INTERCEPT_SVM_MASK) {
        /* most probably the virtual machine should not
           be shut down but rather caught by the VMM */
        vmexit(SVM_EXIT_SHUTDOWN, 0);
    }
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (logfile) {
        fprintf(logfile, "qemu: fatal: ");
        vfprintf(logfile, fmt, ap2);
        fprintf(logfile, "\n");
#ifdef TARGET_I386
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        cpu_dump_state(env, logfile, fprintf, 0);
#endif
        fflush(logfile);
        fclose(logfile);
    }
    va_end(ap2);
    va_end(ap);
    abort();
}
CPUState *cpu_copy(CPUState *env)
{
    CPUState *new_env = cpu_init(env->cpu_model_str);
    /* preserve chaining and index */
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
    memcpy(new_env, env, sizeof(CPUState));
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;
    return new_env;
}
#if !defined(CONFIG_USER_ONLY)

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_table[0][i].addr_read = -1;
        env->tlb_table[0][i].addr_write = -1;
        env->tlb_table[0][i].addr_code = -1;
        env->tlb_table[1][i].addr_read = -1;
        env->tlb_table[1][i].addr_write = -1;
        env->tlb_table[1][i].addr_code = -1;
#if (NB_MMU_MODES >= 3)
        env->tlb_table[2][i].addr_read = -1;
        env->tlb_table[2][i].addr_write = -1;
        env->tlb_table[2][i].addr_code = -1;
#if (NB_MMU_MODES == 4)
        env->tlb_table[3][i].addr_read = -1;
        env->tlb_table[3][i].addr_write = -1;
        env->tlb_table[3][i].addr_code = -1;
#endif
#endif
    }

    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

#if !defined(CONFIG_SOFTMMU)
    munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush(env, flush_global);
    }
#endif
    tlb_flush_count++;
}
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        tlb_entry->addr_read = -1;
        tlb_entry->addr_write = -1;
        tlb_entry->addr_code = -1;
    }
}
void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;
    TranslationBlock *tb;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_table[0][i], addr);
    tlb_flush_entry(&env->tlb_table[1][i], addr);
#if (NB_MMU_MODES >= 3)
    tlb_flush_entry(&env->tlb_table[2][i], addr);
#if (NB_MMU_MODES == 4)
    tlb_flush_entry(&env->tlb_table[3][i], addr);
#endif
#endif

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));

    i = tb_jmp_cache_hash_page(addr);
    memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));

#if !defined(CONFIG_SOFTMMU)
    if (addr < MMAP_AREA_END)
        munmap((void *)addr, TARGET_PAGE_SIZE);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush_page(env, addr);
    }
#endif
}
/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
}
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
        }
    }
}
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i, mask, len;
    uint8_t *p;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    len = length >> TARGET_PAGE_BITS;
#ifdef USE_KQEMU
    /* XXX: should not depend on cpu context */
    env = first_cpu;
    if (env->kqemu_enabled) {
        ram_addr_t addr;
        addr = start;
        for(i = 0; i < len; i++) {
            kqemu_set_notdirty(env, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }
#endif
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    for(i = 0; i < len; i++)
        p[i] &= mask;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = start + (unsigned long)phys_ram_base;
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
#if (NB_MMU_MODES >= 3)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
#if (NB_MMU_MODES == 4)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
#endif
#endif
    }

#if !defined(CONFIG_SOFTMMU)
    /* XXX: this is expensive */
    {
        VirtPageDesc *p;
        int j;
        target_ulong addr;

        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                addr = i << (TARGET_PAGE_BITS + L2_BITS);
                for(j = 0; j < L2_SIZE; j++) {
                    if (p->valid_tag == virt_valid_tag &&
                        p->phys_addr >= start && p->phys_addr < end &&
                        (p->prot & PROT_WRITE)) {
                        if (addr < MMAP_AREA_END) {
                            mprotect((void *)addr, TARGET_PAGE_SIZE,
                                     p->prot & ~PROT_WRITE);
                        }
                    }
                    addr += TARGET_PAGE_SIZE;
                    p++;
                }
            }
        }
    }
#endif
}
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
            tlb_entry->addend - (unsigned long)phys_ram_base;
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
        }
    }
}

/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
{
    int i;
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[0][i]);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[1][i]);
#if (NB_MMU_MODES >= 3)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[2][i]);
#if (NB_MMU_MODES == 4)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[3][i]);
#endif
#endif
}
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
                                  unsigned long start)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if (addr == start) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
        }
    }
}

/* update the TLB corresponding to virtual page vaddr and phys addr
   addr so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
    int i;

    addr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_set_dirty1(&env->tlb_table[0][i], addr);
    tlb_set_dirty1(&env->tlb_table[1][i], addr);
#if (NB_MMU_MODES >= 3)
    tlb_set_dirty1(&env->tlb_table[2][i], addr);
#if (NB_MMU_MODES == 4)
    tlb_set_dirty1(&env->tlb_table[3][i], addr);
#endif
#endif
}
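/* How dirty tracking fits together: tlb_set_page_exec installs
   writable RAM pages whose dirty flag is clear with addr_write tagged
   IO_MEM_NOTDIRTY, so the first store goes through the slow
   notdirty_mem_write* handlers defined later in this file; those
   handlers invalidate any translated code on the page, set the dirty
   bits, and call tlb_set_dirty to restore the fast IO_MEM_RAM path
   for subsequent stores. */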
/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_phys_addr_t addend;
    int ret;
    CPUTLBEntry *te;
    int i;

    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
#endif

    ret = 0;
#if !defined(CONFIG_SOFTMMU)
    if (is_softmmu)
#endif
    {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
            /* IO memory case */
            address = vaddr | pd;
            addend = paddr;
        } else {
            /* standard memory */
            address = vaddr;
            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
        }

        /* Make accesses to pages with watchpoints go via the
           watchpoint trap routines.  */
        for (i = 0; i < env->nb_watchpoints; i++) {
            if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
                if (address & ~TARGET_PAGE_MASK) {
                    env->watchpoint[i].addend = 0;
                    address = vaddr | io_mem_watch;
                } else {
                    env->watchpoint[i].addend = pd - paddr +
                        (unsigned long) phys_ram_base;
                    /* TODO: Figure out how to make read watchpoints coexist
                       with code.  */
                    pd = (pd & TARGET_PAGE_MASK) | io_mem_watch | IO_MEM_ROMD;
                }
            }
        }

        index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        addend -= vaddr;
        te = &env->tlb_table[mmu_idx][index];
        te->addend = addend;
        if (prot & PAGE_READ) {
            te->addr_read = address;
        } else {
            te->addr_read = -1;
        }
        if (prot & PAGE_EXEC) {
            te->addr_code = address;
        } else {
            te->addr_code = -1;
        }
        if (prot & PAGE_WRITE) {
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
                (pd & IO_MEM_ROMD)) {
                /* write access calls the I/O callback */
                te->addr_write = vaddr |
                    (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
            } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                       !cpu_physical_memory_is_dirty(pd)) {
                te->addr_write = vaddr | IO_MEM_NOTDIRTY;
            } else {
                te->addr_write = address;
            }
        } else {
            te->addr_write = -1;
        }
    }
#if !defined(CONFIG_SOFTMMU)
    else {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO access: no mapping is done as it will be handled by the
               soft MMU */
            if (!(env->hflags & HF_SOFTMMU_MASK))
                ret = 2;
        } else {
            void *map_addr;

            if (vaddr >= MMAP_AREA_END) {
                ret = 2;
            } else {
                if (prot & PROT_WRITE) {
                    if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
#if defined(TARGET_HAS_SMC) || 1
                        first_tb ||
#endif
                        ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                         !cpu_physical_memory_is_dirty(pd))) {
                        /* ROM: we do as if code was inside */
                        /* if code is present, we only map as read only and save the
                           original mapping */
                        VirtPageDesc *vp;

                        vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
                        vp->phys_addr = pd;
                        vp->prot = prot;
                        vp->valid_tag = virt_valid_tag;
                        prot &= ~PAGE_WRITE;
                    }
                }
                map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
                                MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
                if (map_addr == MAP_FAILED) {
                    cpu_abort(env, "mmap failed when mapped physical address 0x%08x to virtual address 0x%08x\n",
                              paddr, vaddr);
                }
            }
        }
    }
#endif
    return ret;
}
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    VirtPageDesc *vp;

#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x\n", addr);
#endif
    addr &= TARGET_PAGE_MASK;

    /* if it is not mapped, no need to worry here */
    if (addr >= MMAP_AREA_END)
        return 0;
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (!vp)
        return 0;
    /* NOTE: in this case, validate_tag is _not_ tested as it
       validates only the code TLB */
    if (vp->valid_tag != virt_valid_tag)
        return 0;
    if (!(vp->prot & PAGE_WRITE))
        return 0;
#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
           addr, vp->phys_addr, vp->prot);
#endif
    if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
        cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
                  (unsigned long)addr, vp->prot);
    /* set the dirty bit */
    phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
    /* flush the code inside */
    tb_invalidate_phys_page(vp->phys_addr, pc, puc);
    return 1;
#else
    return 0;
#endif
}
#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
{
    return 0;
}
/* dump memory mappings */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    start = -1;
    end = -1;
    prot = 0;
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0; j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}

int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}
/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is positioned automatically
   depending on PAGE_WRITE */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    PageDesc *p;
    target_ulong addr;

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    spin_lock(&tb_lock);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* if the write protection is set, then we invalidate the code
           inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
    spin_unlock(&tb_lock);
}
int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    end = TARGET_PAGE_ALIGN(start + len); /* must do before we lose bits in the next step */
    start = start & TARGET_PAGE_MASK;

    if (end < start)
        /* we've wrapped around */
        return -1;
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p)
            return -1;
        if (!(p->flags & PAGE_VALID))
            return -1;

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
            return -1;
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG))
                return -1;
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL))
                    return -1;
            }
            return 0;
        }
    }
    return 0;
}
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    target_ulong host_start, host_end, addr;

    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1)
        return 0;
    host_end = host_start + qemu_host_page_size;
    p = p1;
    prot = 0;
    for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)g2h(host_start), qemu_host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            return 1;
        }
    }
    return 0;
}

static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory);
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory);
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
                      need_subpage)                                     \
    do {                                                                \
        if (addr > start_addr)                                          \
            start_addr2 = 0;                                            \
        else {                                                          \
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
            if (start_addr2 > 0)                                        \
                need_subpage = 1;                                       \
        }                                                               \
                                                                        \
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
        else {                                                          \
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
                need_subpage = 1;                                       \
        }                                                               \
    } while (0)
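/* Why subpages exist: cpu_register_physical_memory below works on
   whole target pages.  When a region's start or end is not page
   aligned (or the region advertises IO_MEM_SUBWIDTH), the containing
   page is converted to a subpage_t, which re-dispatches each access
   by its offset within the page through the mem_read/mem_write tables
   that subpage_register fills in. */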
/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                  ram_addr_t size,
                                  ram_addr_t phys_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;
    ram_addr_t orig_size = size;
    void *subpage;

    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            ram_addr_t orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                          need_subpage);
            if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory);
                } else {
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                                            >> IO_MEM_SHIFT];
                }
                subpage_register(subpage, start_addr2, end_addr2, phys_offset);
            } else {
                p->phys_offset = phys_offset;
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                    (phys_offset & IO_MEM_ROMD))
                    phys_offset += TARGET_PAGE_SIZE;
            }
        } else {
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                (phys_offset & IO_MEM_ROMD))
                phys_offset += TARGET_PAGE_SIZE;
            else {
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);

                if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, IO_MEM_UNASSIGNED);
                    subpage_register(subpage, start_addr2, end_addr2,
                                     phys_offset);
                }
            }
        }
    }

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}
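/* Typical use (illustrative values only): RAM is registered with a
   page-aligned ram address as phys_offset, e.g.
       cpu_register_physical_memory(0, ram_size, ram_offset);
   whereas an MMIO region passes a value built from the index returned
   by cpu_register_io_memory, making (phys_offset & ~TARGET_PAGE_MASK)
   non-zero so that the TLB routes accesses to the I/O callbacks. */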
/* XXX: temporary until new memory mapping API */
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
{
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return IO_MEM_UNASSIGNED;
    return p->phys_offset;
}

/* XXX: better than nothing */
ram_addr_t qemu_ram_alloc(ram_addr_t size)
{
    ram_addr_t addr;
    if ((phys_ram_alloc_offset + size) > phys_ram_size) {
        fprintf(stderr, "Not enough memory (requested_size = %lu, max memory = %ld)\n",
                size, phys_ram_size);
        abort();
    }
    addr = phys_ram_alloc_offset;
    phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
    return addr;
}

void qemu_ram_free(ram_addr_t addr)
{
}
, target_phys_addr_t addr
)
2116 #ifdef DEBUG_UNASSIGNED
2117 printf("Unassigned mem read " TARGET_FMT_plx
"\n", addr
);
2120 do_unassigned_access(addr
, 0, 0, 0);
2122 do_unassigned_access(addr
, 0, 0, 0);
2127 static void unassigned_mem_writeb(void *opaque
, target_phys_addr_t addr
, uint32_t val
)
2129 #ifdef DEBUG_UNASSIGNED
2130 printf("Unassigned mem write " TARGET_FMT_plx
" = 0x%x\n", addr
, val
);
2133 do_unassigned_access(addr
, 1, 0, 0);
2135 do_unassigned_access(addr
, 1, 0, 0);
2139 static CPUReadMemoryFunc
*unassigned_mem_read
[3] = {
2140 unassigned_mem_readb
,
2141 unassigned_mem_readb
,
2142 unassigned_mem_readb
,
2145 static CPUWriteMemoryFunc
*unassigned_mem_write
[3] = {
2146 unassigned_mem_writeb
,
2147 unassigned_mem_writeb
,
2148 unassigned_mem_writeb
,
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stb_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stw_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stl_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}
static CPUReadMemoryFunc *error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};
#if defined(CONFIG_SOFTMMU)
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    return ldub_phys(addr);
}

static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    return lduw_phys(addr);
}

static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    return ldl_phys(addr);
}
/* Generate a debug exception if a watchpoint has been hit.
   Returns the real physical address of the access.  addr will be a host
   address in case of a RAM location.  */
static target_ulong check_watchpoint(target_phys_addr_t addr)
{
    CPUState *env = cpu_single_env;
    target_ulong watch;
    target_ulong retaddr;
    int i;

    retaddr = addr;
    for (i = 0; i < env->nb_watchpoints; i++) {
        watch = env->watchpoint[i].vaddr;
        if (((env->mem_write_vaddr ^ watch) & TARGET_PAGE_MASK) == 0) {
            retaddr = addr - env->watchpoint[i].addend;
            if (((addr ^ watch) & ~TARGET_PAGE_MASK) == 0) {
                cpu_single_env->watchpoint_hit = i + 1;
                cpu_interrupt(cpu_single_env, CPU_INTERRUPT_DEBUG);
                break;
            }
        }
    }
    return retaddr;
}
static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    addr = check_watchpoint(addr);
    stb_phys(addr, val);
}

static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    addr = check_watchpoint(addr);
    stw_phys(addr, val);
}

static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    addr = check_watchpoint(addr);
    stl_phys(addr, val);
}

static CPUReadMemoryFunc *watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};

static CPUWriteMemoryFunc *watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};
#endif
*mmio
, target_phys_addr_t addr
,
2325 idx
= SUBPAGE_IDX(addr
- mmio
->base
);
2326 #if defined(DEBUG_SUBPAGE)
2327 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
" idx %d\n", __func__
,
2328 mmio
, len
, addr
, idx
);
2330 ret
= (**mmio
->mem_read
[idx
][len
])(mmio
->opaque
[idx
][0][len
], addr
);
2335 static inline void subpage_writelen (subpage_t
*mmio
, target_phys_addr_t addr
,
2336 uint32_t value
, unsigned int len
)
2340 idx
= SUBPAGE_IDX(addr
- mmio
->base
);
2341 #if defined(DEBUG_SUBPAGE)
2342 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
" idx %d value %08x\n", __func__
,
2343 mmio
, len
, addr
, idx
, value
);
2345 (**mmio
->mem_write
[idx
][len
])(mmio
->opaque
[idx
][1][len
], addr
, value
);
static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 0);
}

static void subpage_writeb (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 0);
}

static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 1);
}

static void subpage_writew (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 1);
}

static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 2);
}

static void subpage_writel (void *opaque,
                            target_phys_addr_t addr, uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 2);
}
static CPUReadMemoryFunc *subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};

static CPUWriteMemoryFunc *subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             int memory)
{
    int idx, eidx;
    unsigned int i;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    memory >>= IO_MEM_SHIFT;
    for (; idx <= eidx; idx++) {
        for (i = 0; i < 4; i++) {
            if (io_mem_read[memory][i]) {
                mmio->mem_read[idx][i] = &io_mem_read[memory][i];
                mmio->opaque[idx][0][i] = io_mem_opaque[memory];
            }
            if (io_mem_write[memory][i]) {
                mmio->mem_write[idx][i] = &io_mem_write[memory][i];
                mmio->opaque[idx][1][i] = io_mem_opaque[memory];
            }
        }
    }

    return 0;
}
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = qemu_mallocz(sizeof(subpage_t));
    if (mmio != NULL) {
        mmio->base = base;
        subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
#if defined(DEBUG_SUBPAGE)
        printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
               mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
        *phys = subpage_memory | IO_MEM_SUBPAGE;
        subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
    }

    return mmio;
}
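
/* Usage sketch (illustrative, hypothetical base address): carving one
   target page so that only byte offsets 0x200..0x3ff go to a device
   while the rest of the page keeps its original memory type.  device_io
   is assumed to be a token previously returned by
   cpu_register_io_memory(). */
static void *example_split_page(int device_io)
{
    ram_addr_t phys = 0;
    subpage_t *mmio;

    /* make the whole page a subpage container backed by the old type */
    mmio = subpage_init(0x10000000, &phys, IO_MEM_UNASSIGNED);
    /* then redirect a 512-byte window inside it to the device */
    subpage_register(mmio, 0x200, 0x3ff, device_io);
    return mmio;
}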
static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
    io_mem_nb = 5;

#if defined(CONFIG_SOFTMMU)
    io_mem_watch = cpu_register_io_memory(-1, watch_mem_read,
                                          watch_mem_write, NULL);
#endif
    /* alloc dirty bits array */
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
}
/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). Functions can be omitted with a NULL function pointer. The
   registered functions may be modified dynamically later.
   If io_index is non zero, the corresponding io zone is
   modified. If it is zero, a new io zone is allocated. The return
   value can be used with cpu_register_physical_memory(). (-1) is
   returned if error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i, subwidth = 0;

    if (io_index <= 0) {
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0; i < 3; i++) {
        if (!mem_read[i] || !mem_write[i])
            subwidth = IO_MEM_SUBWIDTH;
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return (io_index << IO_MEM_SHIFT) | subwidth;
}
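
/* Usage sketch (illustrative): a device with byte-only registers.  The
   handler names, base address and size below are hypothetical; the flow
   is: register the handler triples, then map the returned token with
   cpu_register_physical_memory(). */
static uint32_t example_dev_readb(void *opaque, target_phys_addr_t addr)
{
    return 0; /* a real device would decode addr here */
}

static void example_dev_writeb(void *opaque, target_phys_addr_t addr,
                               uint32_t val)
{
    /* a real device would latch val here */
}

static CPUReadMemoryFunc *example_dev_read[3] = {
    example_dev_readb,
    NULL, /* 16 bit handler omitted: zone gets IO_MEM_SUBWIDTH */
    NULL, /* 32 bit handler omitted */
};

static CPUWriteMemoryFunc *example_dev_write[3] = {
    example_dev_writeb,
    NULL,
    NULL,
};

static void example_dev_map(void *dev_state)
{
    int io;

    /* io_index 0 asks for a fresh io zone */
    io = cpu_register_io_memory(0, example_dev_read, example_dev_write,
                                dev_state);
    cpu_register_physical_memory(0xfe000000, TARGET_PAGE_SIZE, io);
}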
CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
{
    return io_mem_write[io_index >> IO_MEM_SHIFT];
}

CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
{
    return io_mem_read[io_index >> IO_MEM_SHIFT];
}
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
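
/* Usage sketch (illustrative, hypothetical address): the common way into
   the function above is through the cpu_physical_memory_read()/write()
   wrappers from cpu-all.h. */
static void example_patch_guest_word(target_phys_addr_t pa)
{
    uint8_t buf[4];

    cpu_physical_memory_read(pa, buf, 4);
    buf[0] ^= 1; /* flip one bit in place */
    cpu_physical_memory_write(pa, buf, 4);
}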
/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = phys_ram_base + addr1;
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}
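
/* A minimal sketch (illustrative helper, not in the original file) of
   the phys_offset decoding ldl_phys() just performed: the page-aligned
   high bits of pd are an offset into phys_ram_base, while the low bits
   select the memory type and, for MMIO, the io zone. */
static inline int example_pd_is_mmio(unsigned long pd)
{
    /* anything above IO_MEM_ROM that is not a ROMD device is routed
       through io_mem_read[(pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1)] */
    return (pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD);
}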
/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}
/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* XXX: optimize */
uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}
/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stl_p(ptr, val);
    }
}
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}
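
/* Illustrative only: why the _notdirty stores exist.  An MMU emulation
   that uses the dirty bitmap to detect guest page-table changes must
   update PTEs without setting the dirty bits itself, otherwise every
   emulated TLB fill would look like a guest modification.  pte_addr and
   the helper name below are hypothetical. */
static void example_set_accessed_bit(target_phys_addr_t pte_addr,
                                     uint32_t pte)
{
    /* a plain stl_phys() here would mark the page dirty and defeat
       PTE-change tracking */
    stl_phys_notdirty(pte_addr, pte | 0x20 /* x86 PG_ACCESSED */);
}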
/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}
/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* XXX: optimize */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}
#endif

/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
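
/* Usage sketch (illustrative): this is the path a gdb stub takes to read
   guest *virtual* memory, since the function above resolves each page
   through cpu_get_phys_page_debug() first. */
static int example_read_guest_virt(CPUState *env, target_ulong vaddr,
                                   uint8_t *buf, int len)
{
    return cpu_memory_rw_debug(env, vaddr, buf, len, 0);
}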
void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "TB count %d\n", nb_tbs);
    cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
#ifdef CONFIG_PROFILER
    {
        int64_t tot;
        tot = dyngen_interm_time + dyngen_code_time;
        cpu_fprintf(f, "JIT cycles          %" PRId64 " (%0.3f s at 2.4 GHz)\n",
                    tot, tot / 2.4e9);
        cpu_fprintf(f, "translated TBs      %" PRId64 " (aborted=%" PRId64 " %0.1f%%)\n",
                    dyngen_tb_count,
                    dyngen_tb_count1 - dyngen_tb_count,
                    dyngen_tb_count1 ? (double)(dyngen_tb_count1 - dyngen_tb_count) / dyngen_tb_count1 * 100.0 : 0);
        cpu_fprintf(f, "avg ops/TB          %0.1f max=%d\n",
                    dyngen_tb_count ? (double)dyngen_op_count / dyngen_tb_count : 0, dyngen_op_count_max);
        cpu_fprintf(f, "old ops/total ops   %0.1f%%\n",
                    dyngen_op_count ? (double)dyngen_old_op_count / dyngen_op_count * 100.0 : 0);
        cpu_fprintf(f, "deleted ops/TB      %0.2f\n",
                    dyngen_tb_count ?
                    (double)dyngen_tcg_del_op_count / dyngen_tb_count : 0);
        cpu_fprintf(f, "cycles/op           %0.1f\n",
                    dyngen_op_count ? (double)tot / dyngen_op_count : 0);
        cpu_fprintf(f, "cycles/in byte      %0.1f\n",
                    dyngen_code_in_len ? (double)tot / dyngen_code_in_len : 0);
        cpu_fprintf(f, "cycles/out byte     %0.1f\n",
                    dyngen_code_out_len ? (double)tot / dyngen_code_out_len : 0);
        if (tot == 0)
            tot = 1;
        cpu_fprintf(f, "  gen_interm time   %0.1f%%\n",
                    (double)dyngen_interm_time / tot * 100.0);
        cpu_fprintf(f, "  gen_code time     %0.1f%%\n",
                    (double)dyngen_code_time / tot * 100.0);
        cpu_fprintf(f, "cpu_restore count   %" PRId64 "\n",
                    dyngen_restore_count);
        cpu_fprintf(f, "  avg cycles        %0.1f\n",
                    dyngen_restore_count ? (double)dyngen_restore_time / dyngen_restore_count : 0);
        {
            extern void dump_op_count(void);
            dump_op_count();
        }
    }
#endif
}
#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif