2 * virtual page mapping and translated block handling
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 #define WIN32_LEAN_AND_MEAN
25 #include <sys/types.h>
39 #if !defined(NO_CPU_EMULATION)
40 #include "tcg-target.h"
44 #include "qemu-common.h"
46 #if defined(CONFIG_USER_ONLY)
50 //#define DEBUG_TB_INVALIDATE
53 //#define DEBUG_UNASSIGNED
55 /* make various TB consistency checks */
56 //#define DEBUG_TB_CHECK
57 //#define DEBUG_TLB_CHECK
59 //#define DEBUG_IOPORT
60 //#define DEBUG_SUBPAGE
62 #if !defined(CONFIG_USER_ONLY)
63 /* TB consistency checks only implemented for usermode emulation. */
67 /* threshold to flush the translated code buffer */
68 #define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - code_gen_max_block_size())
70 #define SMC_BITMAP_USE_THRESHOLD 10
72 #define MMAP_AREA_START 0x00000000
73 #define MMAP_AREA_END 0xa8000000
75 #if defined(TARGET_SPARC64)
76 #define TARGET_PHYS_ADDR_SPACE_BITS 41
77 #elif defined(TARGET_SPARC)
78 #define TARGET_PHYS_ADDR_SPACE_BITS 36
79 #elif defined(TARGET_ALPHA)
80 #define TARGET_PHYS_ADDR_SPACE_BITS 42
81 #define TARGET_VIRT_ADDR_SPACE_BITS 42
82 #elif defined(TARGET_PPC64)
83 #define TARGET_PHYS_ADDR_SPACE_BITS 42
85 /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
86 #define TARGET_PHYS_ADDR_SPACE_BITS 32
88 #define TARGET_PHYS_ADDR_SPACE_BITS 42
89 #elif defined(TARGET_IA64)
90 #define TARGET_PHYS_ADDR_SPACE_BITS 36
92 #define TARGET_PHYS_ADDR_SPACE_BITS 32
95 TranslationBlock tbs
[CODE_GEN_MAX_BLOCKS
];
96 TranslationBlock
*tb_phys_hash
[CODE_GEN_PHYS_HASH_SIZE
];
98 /* any access to the tbs or the page table must use this lock */
99 spinlock_t tb_lock
= SPIN_LOCK_UNLOCKED
;
101 uint8_t code_gen_buffer
[CODE_GEN_BUFFER_SIZE
] __attribute__((aligned (32)));
102 uint8_t *code_gen_ptr
;
104 ram_addr_t phys_ram_size
;
106 uint8_t *phys_ram_base
;
107 uint8_t *phys_ram_dirty
;
109 static int in_migration
;
110 static ram_addr_t phys_ram_alloc_offset
= 0;
113 /* current CPU in the current thread. It is only valid inside
115 CPUState
*cpu_single_env
;
117 typedef struct PageDesc
{
118 /* list of TBs intersecting this ram page */
119 TranslationBlock
*first_tb
;
120 /* in order to optimize self modifying code, we count the number
121 of lookups we do to a given page to use a bitmap */
122 unsigned int code_write_count
;
123 uint8_t *code_bitmap
;
124 #if defined(CONFIG_USER_ONLY)
129 typedef struct PhysPageDesc
{
130 /* offset in host memory of the page + io_index in the low 12 bits */
131 ram_addr_t phys_offset
;
135 #if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
136 /* XXX: this is a temporary hack for alpha target.
137 * In the future, this is to be replaced by a multi-level table
138 * to actually be able to handle the complete 64 bits address space.
140 #define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
142 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
145 #define L1_SIZE (1 << L1_BITS)
146 #define L2_SIZE (1 << L2_BITS)
148 static void io_mem_init(void);
150 unsigned long qemu_real_host_page_size
;
151 unsigned long qemu_host_page_bits
;
152 unsigned long qemu_host_page_size
;
153 unsigned long qemu_host_page_mask
;
155 /* XXX: for system emulation, it could just be an array */
156 static PageDesc
*l1_map
[L1_SIZE
];
157 PhysPageDesc
**l1_phys_map
;
159 /* io memory support */
160 CPUWriteMemoryFunc
*io_mem_write
[IO_MEM_NB_ENTRIES
][4];
161 CPUReadMemoryFunc
*io_mem_read
[IO_MEM_NB_ENTRIES
][4];
162 void *io_mem_opaque
[IO_MEM_NB_ENTRIES
];
163 char io_mem_used
[IO_MEM_NB_ENTRIES
];
164 #if defined(CONFIG_SOFTMMU)
165 static int io_mem_watch
;
169 char *logfilename
= "/tmp/qemu.log";
172 static int log_append
= 0;
175 static int tlb_flush_count
;
176 static int tb_flush_count
;
177 static int tb_phys_invalidate_count
;
179 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
180 typedef struct subpage_t
{
181 target_phys_addr_t base
;
182 CPUReadMemoryFunc
**mem_read
[TARGET_PAGE_SIZE
][4];
183 CPUWriteMemoryFunc
**mem_write
[TARGET_PAGE_SIZE
][4];
184 void *opaque
[TARGET_PAGE_SIZE
][2][4];
187 static void page_init(void)
189 /* NOTE: we can always suppose that qemu_host_page_size >=
193 SYSTEM_INFO system_info
;
196 GetSystemInfo(&system_info
);
197 qemu_real_host_page_size
= system_info
.dwPageSize
;
199 VirtualProtect(code_gen_buffer
, sizeof(code_gen_buffer
),
200 PAGE_EXECUTE_READWRITE
, &old_protect
);
203 qemu_real_host_page_size
= getpagesize();
205 unsigned long start
, end
;
207 start
= (unsigned long)code_gen_buffer
;
208 start
&= ~(qemu_real_host_page_size
- 1);
210 end
= (unsigned long)code_gen_buffer
+ sizeof(code_gen_buffer
);
211 end
+= qemu_real_host_page_size
- 1;
212 end
&= ~(qemu_real_host_page_size
- 1);
214 mprotect((void *)start
, end
- start
,
215 PROT_READ
| PROT_WRITE
| PROT_EXEC
);
219 if (qemu_host_page_size
== 0)
220 qemu_host_page_size
= qemu_real_host_page_size
;
221 if (qemu_host_page_size
< TARGET_PAGE_SIZE
)
222 qemu_host_page_size
= TARGET_PAGE_SIZE
;
223 qemu_host_page_bits
= 0;
224 while ((1 << qemu_host_page_bits
) < qemu_host_page_size
)
225 qemu_host_page_bits
++;
226 qemu_host_page_mask
= ~(qemu_host_page_size
- 1);
227 l1_phys_map
= qemu_vmalloc(L1_SIZE
* sizeof(void *));
228 memset(l1_phys_map
, 0, L1_SIZE
* sizeof(void *));
230 #if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
232 long long startaddr
, endaddr
;
236 f
= fopen("/proc/self/maps", "r");
239 n
= fscanf (f
, "%llx-%llx %*[^\n]\n", &startaddr
, &endaddr
);
241 page_set_flags(TARGET_PAGE_ALIGN(startaddr
),
242 TARGET_PAGE_ALIGN(endaddr
),
252 static inline PageDesc
*page_find_alloc(unsigned int index
)
256 lp
= &l1_map
[index
>> L2_BITS
];
259 /* allocate if not found */
260 p
= qemu_malloc(sizeof(PageDesc
) * L2_SIZE
);
261 memset(p
, 0, sizeof(PageDesc
) * L2_SIZE
);
264 return p
+ (index
& (L2_SIZE
- 1));
267 static inline PageDesc
*page_find(unsigned int index
)
271 p
= l1_map
[index
>> L2_BITS
];
274 return p
+ (index
& (L2_SIZE
- 1));
277 static PhysPageDesc
*phys_page_find_alloc(target_phys_addr_t index
, int alloc
)
282 p
= (void **)l1_phys_map
;
283 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
285 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
286 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
288 lp
= p
+ ((index
>> (L1_BITS
+ L2_BITS
)) & (L1_SIZE
- 1));
291 /* allocate if not found */
294 p
= qemu_vmalloc(sizeof(void *) * L1_SIZE
);
295 memset(p
, 0, sizeof(void *) * L1_SIZE
);
299 lp
= p
+ ((index
>> L2_BITS
) & (L1_SIZE
- 1));
303 /* allocate if not found */
306 pd
= qemu_vmalloc(sizeof(PhysPageDesc
) * L2_SIZE
);
308 for (i
= 0; i
< L2_SIZE
; i
++)
309 pd
[i
].phys_offset
= IO_MEM_UNASSIGNED
;
311 return ((PhysPageDesc
*)pd
) + (index
& (L2_SIZE
- 1));
314 static inline PhysPageDesc
*phys_page_find(target_phys_addr_t index
)
316 return phys_page_find_alloc(index
, 0);
319 #if !defined(CONFIG_USER_ONLY)
320 static void tlb_protect_code(ram_addr_t ram_addr
);
321 static void tlb_unprotect_code_phys(CPUState
*env
, ram_addr_t ram_addr
,
325 void cpu_exec_init(CPUState
*env
)
332 code_gen_ptr
= code_gen_buffer
;
336 env
->next_cpu
= NULL
;
339 while (*penv
!= NULL
) {
340 penv
= (CPUState
**)&(*penv
)->next_cpu
;
343 env
->cpu_index
= cpu_index
;
344 env
->nb_watchpoints
= 0;
346 env
->thread_id
= GetCurrentProcessId();
348 env
->thread_id
= getpid();
353 static inline void invalidate_page_bitmap(PageDesc
*p
)
355 if (p
->code_bitmap
) {
356 qemu_free(p
->code_bitmap
);
357 p
->code_bitmap
= NULL
;
359 p
->code_write_count
= 0;
362 /* set to NULL all the 'first_tb' fields in all PageDescs */
363 static void page_flush_tb(void)
368 for(i
= 0; i
< L1_SIZE
; i
++) {
371 for(j
= 0; j
< L2_SIZE
; j
++) {
373 invalidate_page_bitmap(p
);
380 /* flush all the translation blocks */
381 /* XXX: tb_flush is currently not thread safe */
382 void tb_flush(CPUState
*env1
)
385 #if defined(DEBUG_FLUSH)
386 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
387 (unsigned long)(code_gen_ptr
- code_gen_buffer
),
389 ((unsigned long)(code_gen_ptr
- code_gen_buffer
)) / nb_tbs
: 0);
391 if ((unsigned long)(code_gen_ptr
- code_gen_buffer
) > CODE_GEN_BUFFER_SIZE
)
392 cpu_abort(env1
, "Internal error: code buffer overflow\n");
396 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
397 memset (env
->tb_jmp_cache
, 0, TB_JMP_CACHE_SIZE
* sizeof (void *));
400 memset (tb_phys_hash
, 0, CODE_GEN_PHYS_HASH_SIZE
* sizeof (void *));
403 code_gen_ptr
= code_gen_buffer
;
404 /* XXX: flush processor icache at this point if cache flush is
409 #ifdef DEBUG_TB_CHECK
411 static void tb_invalidate_check(target_ulong address
)
413 TranslationBlock
*tb
;
415 address
&= TARGET_PAGE_MASK
;
416 for(i
= 0;i
< CODE_GEN_PHYS_HASH_SIZE
; i
++) {
417 for(tb
= tb_phys_hash
[i
]; tb
!= NULL
; tb
= tb
->phys_hash_next
) {
418 if (!(address
+ TARGET_PAGE_SIZE
<= tb
->pc
||
419 address
>= tb
->pc
+ tb
->size
)) {
420 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
421 address
, (long)tb
->pc
, tb
->size
);
427 /* verify that all the pages have correct rights for code */
428 static void tb_page_check(void)
430 TranslationBlock
*tb
;
431 int i
, flags1
, flags2
;
433 for(i
= 0;i
< CODE_GEN_PHYS_HASH_SIZE
; i
++) {
434 for(tb
= tb_phys_hash
[i
]; tb
!= NULL
; tb
= tb
->phys_hash_next
) {
435 flags1
= page_get_flags(tb
->pc
);
436 flags2
= page_get_flags(tb
->pc
+ tb
->size
- 1);
437 if ((flags1
& PAGE_WRITE
) || (flags2
& PAGE_WRITE
)) {
438 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
439 (long)tb
->pc
, tb
->size
, flags1
, flags2
);
445 void tb_jmp_check(TranslationBlock
*tb
)
447 TranslationBlock
*tb1
;
450 /* suppress any remaining jumps to this TB */
454 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
457 tb1
= tb1
->jmp_next
[n1
];
459 /* check end of list */
461 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb
);
467 /* invalidate one TB */
468 static inline void tb_remove(TranslationBlock
**ptb
, TranslationBlock
*tb
,
471 TranslationBlock
*tb1
;
475 *ptb
= *(TranslationBlock
**)((char *)tb1
+ next_offset
);
478 ptb
= (TranslationBlock
**)((char *)tb1
+ next_offset
);
482 static inline void tb_page_remove(TranslationBlock
**ptb
, TranslationBlock
*tb
)
484 TranslationBlock
*tb1
;
490 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
492 *ptb
= tb1
->page_next
[n1
];
495 ptb
= &tb1
->page_next
[n1
];
499 static inline void tb_jmp_remove(TranslationBlock
*tb
, int n
)
501 TranslationBlock
*tb1
, **ptb
;
504 ptb
= &tb
->jmp_next
[n
];
507 /* find tb(n) in circular list */
511 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
512 if (n1
== n
&& tb1
== tb
)
515 ptb
= &tb1
->jmp_first
;
517 ptb
= &tb1
->jmp_next
[n1
];
520 /* now we can suppress tb(n) from the list */
521 *ptb
= tb
->jmp_next
[n
];
523 tb
->jmp_next
[n
] = NULL
;
527 /* reset the jump entry 'n' of a TB so that it is not chained to
529 static inline void tb_reset_jump(TranslationBlock
*tb
, int n
)
531 tb_set_jmp_target(tb
, n
, (unsigned long)(tb
->tc_ptr
+ tb
->tb_next_offset
[n
]));
534 static inline void tb_phys_invalidate(TranslationBlock
*tb
, unsigned int page_addr
)
539 target_ulong phys_pc
;
540 TranslationBlock
*tb1
, *tb2
;
542 /* remove the TB from the hash list */
543 phys_pc
= tb
->page_addr
[0] + (tb
->pc
& ~TARGET_PAGE_MASK
);
544 h
= tb_phys_hash_func(phys_pc
);
545 tb_remove(&tb_phys_hash
[h
], tb
,
546 offsetof(TranslationBlock
, phys_hash_next
));
548 /* remove the TB from the page list */
549 if (tb
->page_addr
[0] != page_addr
) {
550 p
= page_find(tb
->page_addr
[0] >> TARGET_PAGE_BITS
);
551 tb_page_remove(&p
->first_tb
, tb
);
552 invalidate_page_bitmap(p
);
554 if (tb
->page_addr
[1] != -1 && tb
->page_addr
[1] != page_addr
) {
555 p
= page_find(tb
->page_addr
[1] >> TARGET_PAGE_BITS
);
556 tb_page_remove(&p
->first_tb
, tb
);
557 invalidate_page_bitmap(p
);
560 tb_invalidated_flag
= 1;
562 /* remove the TB from the hash list */
563 h
= tb_jmp_cache_hash_func(tb
->pc
);
564 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
565 if (env
->tb_jmp_cache
[h
] == tb
)
566 env
->tb_jmp_cache
[h
] = NULL
;
569 /* suppress this TB from the two jump lists */
570 tb_jmp_remove(tb
, 0);
571 tb_jmp_remove(tb
, 1);
573 /* suppress any remaining jumps to this TB */
579 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
580 tb2
= tb1
->jmp_next
[n1
];
581 tb_reset_jump(tb1
, n1
);
582 tb1
->jmp_next
[n1
] = NULL
;
585 tb
->jmp_first
= (TranslationBlock
*)((long)tb
| 2); /* fail safe */
587 tb_phys_invalidate_count
++;
590 static inline void set_bits(uint8_t *tab
, int start
, int len
)
596 mask
= 0xff << (start
& 7);
597 if ((start
& ~7) == (end
& ~7)) {
599 mask
&= ~(0xff << (end
& 7));
604 start
= (start
+ 8) & ~7;
606 while (start
< end1
) {
611 mask
= ~(0xff << (end
& 7));
617 static void build_page_bitmap(PageDesc
*p
)
619 int n
, tb_start
, tb_end
;
620 TranslationBlock
*tb
;
622 p
->code_bitmap
= qemu_malloc(TARGET_PAGE_SIZE
/ 8);
625 memset(p
->code_bitmap
, 0, TARGET_PAGE_SIZE
/ 8);
630 tb
= (TranslationBlock
*)((long)tb
& ~3);
631 /* NOTE: this is subtle as a TB may span two physical pages */
633 /* NOTE: tb_end may be after the end of the page, but
634 it is not a problem */
635 tb_start
= tb
->pc
& ~TARGET_PAGE_MASK
;
636 tb_end
= tb_start
+ tb
->size
;
637 if (tb_end
> TARGET_PAGE_SIZE
)
638 tb_end
= TARGET_PAGE_SIZE
;
641 tb_end
= ((tb
->pc
+ tb
->size
) & ~TARGET_PAGE_MASK
);
643 set_bits(p
->code_bitmap
, tb_start
, tb_end
- tb_start
);
644 tb
= tb
->page_next
[n
];
648 #ifdef TARGET_HAS_PRECISE_SMC
650 static void tb_gen_code(CPUState
*env
,
651 target_ulong pc
, target_ulong cs_base
, int flags
,
654 TranslationBlock
*tb
;
656 target_ulong phys_pc
, phys_page2
, virt_page2
;
659 phys_pc
= get_phys_addr_code(env
, pc
);
662 /* flush must be done */
664 /* cannot fail at this point */
667 tc_ptr
= code_gen_ptr
;
669 tb
->cs_base
= cs_base
;
672 cpu_gen_code(env
, tb
, &code_gen_size
);
673 code_gen_ptr
= (void *)(((unsigned long)code_gen_ptr
+ code_gen_size
+ CODE_GEN_ALIGN
- 1) & ~(CODE_GEN_ALIGN
- 1));
675 /* check next page if needed */
676 virt_page2
= (pc
+ tb
->size
- 1) & TARGET_PAGE_MASK
;
678 if ((pc
& TARGET_PAGE_MASK
) != virt_page2
) {
679 phys_page2
= get_phys_addr_code(env
, virt_page2
);
681 tb_link_phys(tb
, phys_pc
, phys_page2
);
685 /* invalidate all TBs which intersect with the target physical page
686 starting in range [start;end[. NOTE: start and end must refer to
687 the same physical page. 'is_cpu_write_access' should be true if called
688 from a real cpu write access: the virtual CPU will exit the current
689 TB if code is modified inside this TB. */
690 void tb_invalidate_phys_page_range(target_ulong start
, target_ulong end
,
691 int is_cpu_write_access
)
693 int n
, current_tb_modified
, current_tb_not_found
, current_flags
;
694 CPUState
*env
= cpu_single_env
;
696 TranslationBlock
*tb
, *tb_next
, *current_tb
, *saved_tb
;
697 target_ulong tb_start
, tb_end
;
698 target_ulong current_pc
, current_cs_base
;
700 p
= page_find(start
>> TARGET_PAGE_BITS
);
703 if (!p
->code_bitmap
&&
704 ++p
->code_write_count
>= SMC_BITMAP_USE_THRESHOLD
&&
705 is_cpu_write_access
) {
706 /* build code bitmap */
707 build_page_bitmap(p
);
710 /* we remove all the TBs in the range [start, end[ */
711 /* XXX: see if in some cases it could be faster to invalidate all the code */
712 current_tb_not_found
= is_cpu_write_access
;
713 current_tb_modified
= 0;
714 current_tb
= NULL
; /* avoid warning */
715 current_pc
= 0; /* avoid warning */
716 current_cs_base
= 0; /* avoid warning */
717 current_flags
= 0; /* avoid warning */
721 tb
= (TranslationBlock
*)((long)tb
& ~3);
722 tb_next
= tb
->page_next
[n
];
723 /* NOTE: this is subtle as a TB may span two physical pages */
725 /* NOTE: tb_end may be after the end of the page, but
726 it is not a problem */
727 tb_start
= tb
->page_addr
[0] + (tb
->pc
& ~TARGET_PAGE_MASK
);
728 tb_end
= tb_start
+ tb
->size
;
730 tb_start
= tb
->page_addr
[1];
731 tb_end
= tb_start
+ ((tb
->pc
+ tb
->size
) & ~TARGET_PAGE_MASK
);
733 if (!(tb_end
<= start
|| tb_start
>= end
)) {
734 #ifdef TARGET_HAS_PRECISE_SMC
735 if (current_tb_not_found
) {
736 current_tb_not_found
= 0;
738 if (env
->mem_write_pc
) {
739 /* now we have a real cpu fault */
740 current_tb
= tb_find_pc(env
->mem_write_pc
);
743 if (current_tb
== tb
&&
744 !(current_tb
->cflags
& CF_SINGLE_INSN
)) {
745 /* If we are modifying the current TB, we must stop
746 its execution. We could be more precise by checking
747 that the modification is after the current PC, but it
748 would require a specialized function to partially
749 restore the CPU state */
751 current_tb_modified
= 1;
752 cpu_restore_state(current_tb
, env
,
753 env
->mem_write_pc
, NULL
);
754 #if defined(TARGET_I386)
755 current_flags
= env
->hflags
;
756 current_flags
|= (env
->eflags
& (IOPL_MASK
| TF_MASK
| VM_MASK
));
757 current_cs_base
= (target_ulong
)env
->segs
[R_CS
].base
;
758 current_pc
= current_cs_base
+ env
->eip
;
760 #error unsupported CPU
763 #endif /* TARGET_HAS_PRECISE_SMC */
764 /* we need to do that to handle the case where a signal
765 occurs while doing tb_phys_invalidate() */
768 saved_tb
= env
->current_tb
;
769 env
->current_tb
= NULL
;
771 tb_phys_invalidate(tb
, -1);
773 env
->current_tb
= saved_tb
;
774 if (env
->interrupt_request
&& env
->current_tb
)
775 cpu_interrupt(env
, env
->interrupt_request
);
780 #if !defined(CONFIG_USER_ONLY)
781 /* if no code remaining, no need to continue to use slow writes */
783 invalidate_page_bitmap(p
);
784 if (is_cpu_write_access
) {
785 tlb_unprotect_code_phys(env
, start
, env
->mem_write_vaddr
);
789 #ifdef TARGET_HAS_PRECISE_SMC
790 if (current_tb_modified
) {
791 /* we generate a block containing just the instruction
792 modifying the memory. It will ensure that it cannot modify
794 env
->current_tb
= NULL
;
795 tb_gen_code(env
, current_pc
, current_cs_base
, current_flags
,
797 cpu_resume_from_signal(env
, NULL
);
802 /* len must be <= 8 and start must be a multiple of len */
803 static inline void tb_invalidate_phys_page_fast(target_ulong start
, int len
)
810 fprintf(logfile
, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
811 cpu_single_env
->mem_write_vaddr
, len
,
813 cpu_single_env
->eip
+ (long)cpu_single_env
->segs
[R_CS
].base
);
817 p
= page_find(start
>> TARGET_PAGE_BITS
);
820 if (p
->code_bitmap
) {
821 offset
= start
& ~TARGET_PAGE_MASK
;
822 b
= p
->code_bitmap
[offset
>> 3] >> (offset
& 7);
823 if (b
& ((1 << len
) - 1))
827 tb_invalidate_phys_page_range(start
, start
+ len
, 1);
831 #if !defined(CONFIG_SOFTMMU)
832 static void tb_invalidate_phys_page(target_ulong addr
,
833 unsigned long pc
, void *puc
)
835 int n
, current_flags
, current_tb_modified
;
836 target_ulong current_pc
, current_cs_base
;
838 TranslationBlock
*tb
, *current_tb
;
839 #ifdef TARGET_HAS_PRECISE_SMC
840 CPUState
*env
= cpu_single_env
;
843 addr
&= TARGET_PAGE_MASK
;
844 p
= page_find(addr
>> TARGET_PAGE_BITS
);
848 current_tb_modified
= 0;
850 current_pc
= 0; /* avoid warning */
851 current_cs_base
= 0; /* avoid warning */
852 current_flags
= 0; /* avoid warning */
853 #ifdef TARGET_HAS_PRECISE_SMC
855 current_tb
= tb_find_pc(pc
);
860 tb
= (TranslationBlock
*)((long)tb
& ~3);
861 #ifdef TARGET_HAS_PRECISE_SMC
862 if (current_tb
== tb
&&
863 !(current_tb
->cflags
& CF_SINGLE_INSN
)) {
864 /* If we are modifying the current TB, we must stop
865 its execution. We could be more precise by checking
866 that the modification is after the current PC, but it
867 would require a specialized function to partially
868 restore the CPU state */
870 current_tb_modified
= 1;
871 cpu_restore_state(current_tb
, env
, pc
, puc
);
872 #if defined(TARGET_I386)
873 current_flags
= env
->hflags
;
874 current_flags
|= (env
->eflags
& (IOPL_MASK
| TF_MASK
| VM_MASK
));
875 current_cs_base
= (target_ulong
)env
->segs
[R_CS
].base
;
876 current_pc
= current_cs_base
+ env
->eip
;
878 #error unsupported CPU
881 #endif /* TARGET_HAS_PRECISE_SMC */
882 tb_phys_invalidate(tb
, addr
);
883 tb
= tb
->page_next
[n
];
886 #ifdef TARGET_HAS_PRECISE_SMC
887 if (current_tb_modified
) {
888 /* we generate a block containing just the instruction
889 modifying the memory. It will ensure that it cannot modify
891 env
->current_tb
= NULL
;
892 tb_gen_code(env
, current_pc
, current_cs_base
, current_flags
,
894 cpu_resume_from_signal(env
, puc
);
900 /* add the tb in the target page and protect it if necessary */
901 static inline void tb_alloc_page(TranslationBlock
*tb
,
902 unsigned int n
, target_ulong page_addr
)
905 TranslationBlock
*last_first_tb
;
907 tb
->page_addr
[n
] = page_addr
;
908 p
= page_find_alloc(page_addr
>> TARGET_PAGE_BITS
);
909 tb
->page_next
[n
] = p
->first_tb
;
910 last_first_tb
= p
->first_tb
;
911 p
->first_tb
= (TranslationBlock
*)((long)tb
| n
);
912 invalidate_page_bitmap(p
);
914 #if defined(TARGET_HAS_SMC) || 1
916 #if defined(CONFIG_USER_ONLY)
917 if (p
->flags
& PAGE_WRITE
) {
922 /* force the host page as non writable (writes will have a
923 page fault + mprotect overhead) */
924 page_addr
&= qemu_host_page_mask
;
926 for(addr
= page_addr
; addr
< page_addr
+ qemu_host_page_size
;
927 addr
+= TARGET_PAGE_SIZE
) {
929 p2
= page_find (addr
>> TARGET_PAGE_BITS
);
933 p2
->flags
&= ~PAGE_WRITE
;
934 page_get_flags(addr
);
936 mprotect(g2h(page_addr
), qemu_host_page_size
,
937 (prot
& PAGE_BITS
) & ~PAGE_WRITE
);
938 #ifdef DEBUG_TB_INVALIDATE
939 printf("protecting code page: 0x" TARGET_FMT_lx
"\n",
944 /* if some code is already present, then the pages are already
945 protected. So we handle the case where only the first TB is
946 allocated in a physical page */
947 if (!last_first_tb
) {
948 tlb_protect_code(page_addr
);
952 #endif /* TARGET_HAS_SMC */
955 /* Allocate a new translation block. Flush the translation buffer if
956 too many translation blocks or too much generated code. */
957 TranslationBlock
*tb_alloc(target_ulong pc
)
959 TranslationBlock
*tb
;
961 if (nb_tbs
>= CODE_GEN_MAX_BLOCKS
||
962 (code_gen_ptr
- code_gen_buffer
) >= CODE_GEN_BUFFER_MAX_SIZE
)
970 /* add a new TB and link it to the physical page tables. phys_page2 is
971 (-1) to indicate that only one page contains the TB. */
972 void tb_link_phys(TranslationBlock
*tb
,
973 target_ulong phys_pc
, target_ulong phys_page2
)
976 TranslationBlock
**ptb
;
978 /* add in the physical hash table */
979 h
= tb_phys_hash_func(phys_pc
);
980 ptb
= &tb_phys_hash
[h
];
981 tb
->phys_hash_next
= *ptb
;
984 /* add in the page list */
985 tb_alloc_page(tb
, 0, phys_pc
& TARGET_PAGE_MASK
);
986 if (phys_page2
!= -1)
987 tb_alloc_page(tb
, 1, phys_page2
);
989 tb
->page_addr
[1] = -1;
991 tb
->jmp_first
= (TranslationBlock
*)((long)tb
| 2);
992 tb
->jmp_next
[0] = NULL
;
993 tb
->jmp_next
[1] = NULL
;
995 /* init original jump addresses */
996 if (tb
->tb_next_offset
[0] != 0xffff)
997 tb_reset_jump(tb
, 0);
998 if (tb
->tb_next_offset
[1] != 0xffff)
999 tb_reset_jump(tb
, 1);
1001 #ifdef DEBUG_TB_CHECK
1006 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1007 tb[1].tc_ptr. Return NULL if not found */
1008 TranslationBlock
*tb_find_pc(unsigned long tc_ptr
)
1010 int m_min
, m_max
, m
;
1012 TranslationBlock
*tb
;
1016 if (tc_ptr
< (unsigned long)code_gen_buffer
||
1017 tc_ptr
>= (unsigned long)code_gen_ptr
)
1019 /* binary search (cf Knuth) */
1022 while (m_min
<= m_max
) {
1023 m
= (m_min
+ m_max
) >> 1;
1025 v
= (unsigned long)tb
->tc_ptr
;
1028 else if (tc_ptr
< v
) {
1037 static void tb_reset_jump_recursive(TranslationBlock
*tb
);
1039 static inline void tb_reset_jump_recursive2(TranslationBlock
*tb
, int n
)
1041 TranslationBlock
*tb1
, *tb_next
, **ptb
;
1044 tb1
= tb
->jmp_next
[n
];
1046 /* find head of list */
1049 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
1052 tb1
= tb1
->jmp_next
[n1
];
1054 /* we are now sure now that tb jumps to tb1 */
1057 /* remove tb from the jmp_first list */
1058 ptb
= &tb_next
->jmp_first
;
1062 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
1063 if (n1
== n
&& tb1
== tb
)
1065 ptb
= &tb1
->jmp_next
[n1
];
1067 *ptb
= tb
->jmp_next
[n
];
1068 tb
->jmp_next
[n
] = NULL
;
1070 /* suppress the jump to next tb in generated code */
1071 tb_reset_jump(tb
, n
);
1073 /* suppress jumps in the tb on which we could have jumped */
1074 tb_reset_jump_recursive(tb_next
);
1078 static void tb_reset_jump_recursive(TranslationBlock
*tb
)
1080 tb_reset_jump_recursive2(tb
, 0);
1081 tb_reset_jump_recursive2(tb
, 1);
1084 #if defined(TARGET_HAS_ICE)
1085 static void breakpoint_invalidate(CPUState
*env
, target_ulong pc
)
1087 target_phys_addr_t addr
;
1089 ram_addr_t ram_addr
;
1092 addr
= cpu_get_phys_page_debug(env
, pc
);
1093 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
1095 pd
= IO_MEM_UNASSIGNED
;
1097 pd
= p
->phys_offset
;
1099 ram_addr
= (pd
& TARGET_PAGE_MASK
) | (pc
& ~TARGET_PAGE_MASK
);
1100 tb_invalidate_phys_page_range(ram_addr
, ram_addr
+ 1, 0);
1104 /* Add a watchpoint. */
1105 int cpu_watchpoint_insert(CPUState
*env
, target_ulong addr
)
1109 for (i
= 0; i
< env
->nb_watchpoints
; i
++) {
1110 if (addr
== env
->watchpoint
[i
].vaddr
)
1113 if (env
->nb_watchpoints
>= MAX_WATCHPOINTS
)
1116 i
= env
->nb_watchpoints
++;
1117 env
->watchpoint
[i
].vaddr
= addr
;
1118 tlb_flush_page(env
, addr
);
1119 /* FIXME: This flush is needed because of the hack to make memory ops
1120 terminate the TB. It can be removed once the proper IO trap and
1121 re-execute bits are in. */
1126 /* Remove a watchpoint. */
1127 int cpu_watchpoint_remove(CPUState
*env
, target_ulong addr
)
1131 for (i
= 0; i
< env
->nb_watchpoints
; i
++) {
1132 if (addr
== env
->watchpoint
[i
].vaddr
) {
1133 env
->nb_watchpoints
--;
1134 env
->watchpoint
[i
] = env
->watchpoint
[env
->nb_watchpoints
];
1135 tlb_flush_page(env
, addr
);
1142 /* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1143 breakpoint is reached */
1144 int cpu_breakpoint_insert(CPUState
*env
, target_ulong pc
)
1146 #if defined(TARGET_HAS_ICE)
1149 for(i
= 0; i
< env
->nb_breakpoints
; i
++) {
1150 if (env
->breakpoints
[i
] == pc
)
1154 if (env
->nb_breakpoints
>= MAX_BREAKPOINTS
)
1156 env
->breakpoints
[env
->nb_breakpoints
++] = pc
;
1159 kvm_update_debugger(env
);
1161 breakpoint_invalidate(env
, pc
);
1168 /* remove a breakpoint */
1169 int cpu_breakpoint_remove(CPUState
*env
, target_ulong pc
)
1171 #if defined(TARGET_HAS_ICE)
1173 for(i
= 0; i
< env
->nb_breakpoints
; i
++) {
1174 if (env
->breakpoints
[i
] == pc
)
1179 env
->nb_breakpoints
--;
1180 if (i
< env
->nb_breakpoints
)
1181 env
->breakpoints
[i
] = env
->breakpoints
[env
->nb_breakpoints
];
1184 kvm_update_debugger(env
);
1186 breakpoint_invalidate(env
, pc
);
1193 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1194 CPU loop after each instruction */
1195 void cpu_single_step(CPUState
*env
, int enabled
)
1197 #if defined(TARGET_HAS_ICE)
1198 if (env
->singlestep_enabled
!= enabled
) {
1199 env
->singlestep_enabled
= enabled
;
1200 /* must flush all the translated code to avoid inconsistancies */
1201 /* XXX: only flush what is necessary */
1205 kvm_update_debugger(env
);
1209 /* enable or disable low levels log */
1210 void cpu_set_log(int log_flags
)
1212 loglevel
= log_flags
;
1213 if (loglevel
&& !logfile
) {
1214 logfile
= fopen(logfilename
, log_append
? "a" : "w");
1216 perror(logfilename
);
1219 #if !defined(CONFIG_SOFTMMU)
1220 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1222 static uint8_t logfile_buf
[4096];
1223 setvbuf(logfile
, logfile_buf
, _IOLBF
, sizeof(logfile_buf
));
1226 setvbuf(logfile
, NULL
, _IOLBF
, 0);
1230 if (!loglevel
&& logfile
) {
1236 void cpu_set_log_filename(const char *filename
)
1238 logfilename
= strdup(filename
);
1243 cpu_set_log(loglevel
);
1246 /* mask must never be zero, except for A20 change call */
1247 void cpu_interrupt(CPUState
*env
, int mask
)
1249 TranslationBlock
*tb
;
1250 static spinlock_t interrupt_lock
= SPIN_LOCK_UNLOCKED
;
1252 env
->interrupt_request
|= mask
;
1253 if (kvm_enabled() && !qemu_kvm_irqchip_in_kernel())
1254 kvm_update_interrupt_request(env
);
1256 /* if the cpu is currently executing code, we must unlink it and
1257 all the potentially executing TB */
1258 tb
= env
->current_tb
;
1259 if (tb
&& !testandset(&interrupt_lock
)) {
1260 env
->current_tb
= NULL
;
1261 tb_reset_jump_recursive(tb
);
1262 resetlock(&interrupt_lock
);
1266 void cpu_reset_interrupt(CPUState
*env
, int mask
)
1268 env
->interrupt_request
&= ~mask
;
1271 CPULogItem cpu_log_items
[] = {
1272 { CPU_LOG_TB_OUT_ASM
, "out_asm",
1273 "show generated host assembly code for each compiled TB" },
1274 { CPU_LOG_TB_IN_ASM
, "in_asm",
1275 "show target assembly code for each compiled TB" },
1276 { CPU_LOG_TB_OP
, "op",
1277 "show micro ops for each compiled TB" },
1278 { CPU_LOG_TB_OP_OPT
, "op_opt",
1281 "before eflags optimization and "
1283 "after liveness analysis" },
1284 { CPU_LOG_INT
, "int",
1285 "show interrupts/exceptions in short format" },
1286 { CPU_LOG_EXEC
, "exec",
1287 "show trace before each executed TB (lots of logs)" },
1288 { CPU_LOG_TB_CPU
, "cpu",
1289 "show CPU state before block translation" },
1291 { CPU_LOG_PCALL
, "pcall",
1292 "show protected mode far calls/returns/exceptions" },
1295 { CPU_LOG_IOPORT
, "ioport",
1296 "show all i/o ports accesses" },
/* Compare the first 'n' characters of 's1' against the NUL-terminated
 * string 's2'.  Returns 1 only when 's2' is exactly 'n' characters long
 * and those characters match; 0 otherwise.  Used to match log-mask
 * item names in a comma separated list without copying substrings. */
static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}
1308 /* takes a comma separated list of log masks. Return 0 if error. */
1309 int cpu_str_to_log_mask(const char *str
)
1318 p1
= strchr(p
, ',');
1321 if(cmp1(p
,p1
-p
,"all")) {
1322 for(item
= cpu_log_items
; item
->mask
!= 0; item
++) {
1326 for(item
= cpu_log_items
; item
->mask
!= 0; item
++) {
1327 if (cmp1(p
, p1
- p
, item
->name
))
1341 void cpu_abort(CPUState
*env
, const char *fmt
, ...)
1348 fprintf(stderr
, "qemu: fatal: ");
1349 vfprintf(stderr
, fmt
, ap
);
1350 fprintf(stderr
, "\n");
1352 if(env
->intercept
& INTERCEPT_SVM_MASK
) {
1353 /* most probably the virtual machine should not
1354 be shut down but rather caught by the VMM */
1355 vmexit(SVM_EXIT_SHUTDOWN
, 0);
1357 cpu_dump_state(env
, stderr
, fprintf
, X86_DUMP_FPU
| X86_DUMP_CCOP
);
1359 cpu_dump_state(env
, stderr
, fprintf
, 0);
1362 fprintf(logfile
, "qemu: fatal: ");
1363 vfprintf(logfile
, fmt
, ap2
);
1364 fprintf(logfile
, "\n");
1366 cpu_dump_state(env
, logfile
, fprintf
, X86_DUMP_FPU
| X86_DUMP_CCOP
);
1368 cpu_dump_state(env
, logfile
, fprintf
, 0);
1378 CPUState
*cpu_copy(CPUState
*env
)
1380 CPUState
*new_env
= cpu_init(env
->cpu_model_str
);
1381 /* preserve chaining and index */
1382 CPUState
*next_cpu
= new_env
->next_cpu
;
1383 int cpu_index
= new_env
->cpu_index
;
1384 memcpy(new_env
, env
, sizeof(CPUState
));
1385 new_env
->next_cpu
= next_cpu
;
1386 new_env
->cpu_index
= cpu_index
;
1390 #if !defined(CONFIG_USER_ONLY)
1392 /* NOTE: if flush_global is true, also flush global entries (not
1394 void tlb_flush(CPUState
*env
, int flush_global
)
1398 #if defined(DEBUG_TLB)
1399 printf("tlb_flush:\n");
1401 /* must reset current TB so that interrupts cannot modify the
1402 links while we are modifying them */
1403 env
->current_tb
= NULL
;
1405 for(i
= 0; i
< CPU_TLB_SIZE
; i
++) {
1406 env
->tlb_table
[0][i
].addr_read
= -1;
1407 env
->tlb_table
[0][i
].addr_write
= -1;
1408 env
->tlb_table
[0][i
].addr_code
= -1;
1409 env
->tlb_table
[1][i
].addr_read
= -1;
1410 env
->tlb_table
[1][i
].addr_write
= -1;
1411 env
->tlb_table
[1][i
].addr_code
= -1;
1412 #if (NB_MMU_MODES >= 3)
1413 env
->tlb_table
[2][i
].addr_read
= -1;
1414 env
->tlb_table
[2][i
].addr_write
= -1;
1415 env
->tlb_table
[2][i
].addr_code
= -1;
1416 #if (NB_MMU_MODES == 4)
1417 env
->tlb_table
[3][i
].addr_read
= -1;
1418 env
->tlb_table
[3][i
].addr_write
= -1;
1419 env
->tlb_table
[3][i
].addr_code
= -1;
1424 memset (env
->tb_jmp_cache
, 0, TB_JMP_CACHE_SIZE
* sizeof (void *));
1426 #if !defined(CONFIG_SOFTMMU)
1427 munmap((void *)MMAP_AREA_START
, MMAP_AREA_END
- MMAP_AREA_START
);
1430 if (env
->kqemu_enabled
) {
1431 kqemu_flush(env
, flush_global
);
1437 static inline void tlb_flush_entry(CPUTLBEntry
*tlb_entry
, target_ulong addr
)
1439 if (addr
== (tlb_entry
->addr_read
&
1440 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
)) ||
1441 addr
== (tlb_entry
->addr_write
&
1442 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
)) ||
1443 addr
== (tlb_entry
->addr_code
&
1444 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
))) {
1445 tlb_entry
->addr_read
= -1;
1446 tlb_entry
->addr_write
= -1;
1447 tlb_entry
->addr_code
= -1;
1451 void tlb_flush_page(CPUState
*env
, target_ulong addr
)
1454 TranslationBlock
*tb
;
1456 #if defined(DEBUG_TLB)
1457 printf("tlb_flush_page: " TARGET_FMT_lx
"\n", addr
);
1459 /* must reset current TB so that interrupts cannot modify the
1460 links while we are modifying them */
1461 env
->current_tb
= NULL
;
1463 addr
&= TARGET_PAGE_MASK
;
1464 i
= (addr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
1465 tlb_flush_entry(&env
->tlb_table
[0][i
], addr
);
1466 tlb_flush_entry(&env
->tlb_table
[1][i
], addr
);
1467 #if (NB_MMU_MODES >= 3)
1468 tlb_flush_entry(&env
->tlb_table
[2][i
], addr
);
1469 #if (NB_MMU_MODES == 4)
1470 tlb_flush_entry(&env
->tlb_table
[3][i
], addr
);
1474 /* Discard jump cache entries for any tb which might potentially
1475 overlap the flushed page. */
1476 i
= tb_jmp_cache_hash_page(addr
- TARGET_PAGE_SIZE
);
1477 memset (&env
->tb_jmp_cache
[i
], 0, TB_JMP_PAGE_SIZE
* sizeof(tb
));
1479 i
= tb_jmp_cache_hash_page(addr
);
1480 memset (&env
->tb_jmp_cache
[i
], 0, TB_JMP_PAGE_SIZE
* sizeof(tb
));
1482 #if !defined(CONFIG_SOFTMMU)
1483 if (addr
< MMAP_AREA_END
)
1484 munmap((void *)addr
, TARGET_PAGE_SIZE
);
1487 if (env
->kqemu_enabled
) {
1488 kqemu_flush_page(env
, addr
);
1493 /* update the TLBs so that writes to code in the virtual page 'addr'
1495 static void tlb_protect_code(ram_addr_t ram_addr
)
1497 cpu_physical_memory_reset_dirty(ram_addr
,
1498 ram_addr
+ TARGET_PAGE_SIZE
,
1502 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1503 tested for self modifying code */
1504 static void tlb_unprotect_code_phys(CPUState
*env
, ram_addr_t ram_addr
,
1507 phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
] |= CODE_DIRTY_FLAG
;
1510 static inline void tlb_reset_dirty_range(CPUTLBEntry
*tlb_entry
,
1511 unsigned long start
, unsigned long length
)
1514 if ((tlb_entry
->addr_write
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
) {
1515 addr
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) + tlb_entry
->addend
;
1516 if ((addr
- start
) < length
) {
1517 tlb_entry
->addr_write
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) | IO_MEM_NOTDIRTY
;
1522 void cpu_physical_memory_reset_dirty(ram_addr_t start
, ram_addr_t end
,
1526 unsigned long length
, start1
;
1530 start
&= TARGET_PAGE_MASK
;
1531 end
= TARGET_PAGE_ALIGN(end
);
1533 length
= end
- start
;
1536 len
= length
>> TARGET_PAGE_BITS
;
1538 /* XXX: should not depend on cpu context */
1540 if (env
->kqemu_enabled
) {
1543 for(i
= 0; i
< len
; i
++) {
1544 kqemu_set_notdirty(env
, addr
);
1545 addr
+= TARGET_PAGE_SIZE
;
1549 mask
= ~dirty_flags
;
1550 p
= phys_ram_dirty
+ (start
>> TARGET_PAGE_BITS
);
1551 for(i
= 0; i
< len
; i
++)
1554 /* we modify the TLB cache so that the dirty bit will be set again
1555 when accessing the range */
1556 start1
= start
+ (unsigned long)phys_ram_base
;
1557 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
1558 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1559 tlb_reset_dirty_range(&env
->tlb_table
[0][i
], start1
, length
);
1560 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1561 tlb_reset_dirty_range(&env
->tlb_table
[1][i
], start1
, length
);
1562 #if (NB_MMU_MODES >= 3)
1563 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1564 tlb_reset_dirty_range(&env
->tlb_table
[2][i
], start1
, length
);
1565 #if (NB_MMU_MODES == 4)
1566 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1567 tlb_reset_dirty_range(&env
->tlb_table
[3][i
], start1
, length
);
1572 #if !defined(CONFIG_SOFTMMU)
1573 /* XXX: this is expensive */
1579 for(i
= 0; i
< L1_SIZE
; i
++) {
1582 addr
= i
<< (TARGET_PAGE_BITS
+ L2_BITS
);
1583 for(j
= 0; j
< L2_SIZE
; j
++) {
1584 if (p
->valid_tag
== virt_valid_tag
&&
1585 p
->phys_addr
>= start
&& p
->phys_addr
< end
&&
1586 (p
->prot
& PROT_WRITE
)) {
1587 if (addr
< MMAP_AREA_END
) {
1588 mprotect((void *)addr
, TARGET_PAGE_SIZE
,
1589 p
->prot
& ~PROT_WRITE
);
1592 addr
+= TARGET_PAGE_SIZE
;
1601 int cpu_physical_memory_set_dirty_tracking(int enable
)
1606 r
= kvm_physical_memory_set_dirty_tracking(enable
);
1607 in_migration
= enable
;
1611 int cpu_physical_memory_get_dirty_tracking(void)
1613 return in_migration
;
1616 static inline void tlb_update_dirty(CPUTLBEntry
*tlb_entry
)
1618 ram_addr_t ram_addr
;
1620 if ((tlb_entry
->addr_write
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
) {
1621 ram_addr
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) +
1622 tlb_entry
->addend
- (unsigned long)phys_ram_base
;
1623 if (!cpu_physical_memory_is_dirty(ram_addr
)) {
1624 tlb_entry
->addr_write
|= IO_MEM_NOTDIRTY
;
1629 /* update the TLB according to the current state of the dirty bits */
1630 void cpu_tlb_update_dirty(CPUState
*env
)
1633 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1634 tlb_update_dirty(&env
->tlb_table
[0][i
]);
1635 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1636 tlb_update_dirty(&env
->tlb_table
[1][i
]);
1637 #if (NB_MMU_MODES >= 3)
1638 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1639 tlb_update_dirty(&env
->tlb_table
[2][i
]);
1640 #if (NB_MMU_MODES == 4)
1641 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1642 tlb_update_dirty(&env
->tlb_table
[3][i
]);
1647 static inline void tlb_set_dirty1(CPUTLBEntry
*tlb_entry
,
1648 unsigned long start
)
1651 if ((tlb_entry
->addr_write
& ~TARGET_PAGE_MASK
) == IO_MEM_NOTDIRTY
) {
1652 addr
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) + tlb_entry
->addend
;
1653 if (addr
== start
) {
1654 tlb_entry
->addr_write
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) | IO_MEM_RAM
;
1659 /* update the TLB corresponding to virtual page vaddr and phys addr
1660 addr so that it is no longer dirty */
1661 static inline void tlb_set_dirty(CPUState
*env
,
1662 unsigned long addr
, target_ulong vaddr
)
1666 addr
&= TARGET_PAGE_MASK
;
1667 i
= (vaddr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
1668 tlb_set_dirty1(&env
->tlb_table
[0][i
], addr
);
1669 tlb_set_dirty1(&env
->tlb_table
[1][i
], addr
);
1670 #if (NB_MMU_MODES >= 3)
1671 tlb_set_dirty1(&env
->tlb_table
[2][i
], addr
);
1672 #if (NB_MMU_MODES == 4)
1673 tlb_set_dirty1(&env
->tlb_table
[3][i
], addr
);
1678 /* add a new TLB entry. At most one entry for a given virtual address
1679 is permitted. Return 0 if OK or 2 if the page could not be mapped
1680 (can only happen in non SOFTMMU mode for I/O pages or pages
1681 conflicting with the host address space). */
1682 int tlb_set_page_exec(CPUState
*env
, target_ulong vaddr
,
1683 target_phys_addr_t paddr
, int prot
,
1684 int mmu_idx
, int is_softmmu
)
1689 target_ulong address
;
1690 target_phys_addr_t addend
;
1695 p
= phys_page_find(paddr
>> TARGET_PAGE_BITS
);
1697 pd
= IO_MEM_UNASSIGNED
;
1699 pd
= p
->phys_offset
;
1701 #if defined(DEBUG_TLB)
1702 printf("tlb_set_page: vaddr=" TARGET_FMT_lx
" paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1703 vaddr
, (int)paddr
, prot
, mmu_idx
, is_softmmu
, pd
);
1707 #if !defined(CONFIG_SOFTMMU)
1711 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
&& !(pd
& IO_MEM_ROMD
)) {
1712 /* IO memory case */
1713 address
= vaddr
| pd
;
1716 /* standard memory */
1718 addend
= (unsigned long)phys_ram_base
+ (pd
& TARGET_PAGE_MASK
);
1721 /* Make accesses to pages with watchpoints go via the
1722 watchpoint trap routines. */
1723 for (i
= 0; i
< env
->nb_watchpoints
; i
++) {
1724 if (vaddr
== (env
->watchpoint
[i
].vaddr
& TARGET_PAGE_MASK
)) {
1725 if (address
& ~TARGET_PAGE_MASK
) {
1726 env
->watchpoint
[i
].addend
= 0;
1727 address
= vaddr
| io_mem_watch
;
1729 env
->watchpoint
[i
].addend
= pd
- paddr
+
1730 (unsigned long) phys_ram_base
;
1731 /* TODO: Figure out how to make read watchpoints coexist
1733 pd
= (pd
& TARGET_PAGE_MASK
) | io_mem_watch
| IO_MEM_ROMD
;
1738 index
= (vaddr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
1740 te
= &env
->tlb_table
[mmu_idx
][index
];
1741 te
->addend
= addend
;
1742 if (prot
& PAGE_READ
) {
1743 te
->addr_read
= address
;
1747 if (prot
& PAGE_EXEC
) {
1748 te
->addr_code
= address
;
1752 if (prot
& PAGE_WRITE
) {
1753 if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_ROM
||
1754 (pd
& IO_MEM_ROMD
)) {
1755 /* write access calls the I/O callback */
1756 te
->addr_write
= vaddr
|
1757 (pd
& ~(TARGET_PAGE_MASK
| IO_MEM_ROMD
));
1758 } else if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
&&
1759 !cpu_physical_memory_is_dirty(pd
)) {
1760 te
->addr_write
= vaddr
| IO_MEM_NOTDIRTY
;
1762 te
->addr_write
= address
;
1765 te
->addr_write
= -1;
1768 #if !defined(CONFIG_SOFTMMU)
1770 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
) {
1771 /* IO access: no mapping is done as it will be handled by the
1773 if (!(env
->hflags
& HF_SOFTMMU_MASK
))
1778 if (vaddr
>= MMAP_AREA_END
) {
1781 if (prot
& PROT_WRITE
) {
1782 if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_ROM
||
1783 #if defined(TARGET_HAS_SMC) || 1
1786 ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
&&
1787 !cpu_physical_memory_is_dirty(pd
))) {
1788 /* ROM: we do as if code was inside */
1789 /* if code is present, we only map as read only and save the
1793 vp
= virt_page_find_alloc(vaddr
>> TARGET_PAGE_BITS
, 1);
1796 vp
->valid_tag
= virt_valid_tag
;
1797 prot
&= ~PAGE_WRITE
;
1800 map_addr
= mmap((void *)vaddr
, TARGET_PAGE_SIZE
, prot
,
1801 MAP_SHARED
| MAP_FIXED
, phys_ram_fd
, (pd
& TARGET_PAGE_MASK
));
1802 if (map_addr
== MAP_FAILED
) {
1803 cpu_abort(env
, "mmap failed when mapped physical address 0x%08x to virtual address 0x%08x\n",
1813 /* called from signal handler: invalidate the code and unprotect the
1814 page. Return TRUE if the fault was succesfully handled. */
1815 int page_unprotect(target_ulong addr
, unsigned long pc
, void *puc
)
1817 #if !defined(CONFIG_SOFTMMU)
1820 #if defined(DEBUG_TLB)
1821 printf("page_unprotect: addr=0x%08x\n", addr
);
1823 addr
&= TARGET_PAGE_MASK
;
1825 /* if it is not mapped, no need to worry here */
1826 if (addr
>= MMAP_AREA_END
)
1828 vp
= virt_page_find(addr
>> TARGET_PAGE_BITS
);
1831 /* NOTE: in this case, validate_tag is _not_ tested as it
1832 validates only the code TLB */
1833 if (vp
->valid_tag
!= virt_valid_tag
)
1835 if (!(vp
->prot
& PAGE_WRITE
))
1837 #if defined(DEBUG_TLB)
1838 printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
1839 addr
, vp
->phys_addr
, vp
->prot
);
1841 if (mprotect((void *)addr
, TARGET_PAGE_SIZE
, vp
->prot
) < 0)
1842 cpu_abort(cpu_single_env
, "error mprotect addr=0x%lx prot=%d\n",
1843 (unsigned long)addr
, vp
->prot
);
1844 /* set the dirty bit */
1845 phys_ram_dirty
[vp
->phys_addr
>> TARGET_PAGE_BITS
] = 0xff;
1846 /* flush the code inside */
1847 tb_invalidate_phys_page(vp
->phys_addr
, pc
, puc
);
1856 void tlb_flush(CPUState
*env
, int flush_global
)
1860 void tlb_flush_page(CPUState
*env
, target_ulong addr
)
1864 int tlb_set_page_exec(CPUState
*env
, target_ulong vaddr
,
1865 target_phys_addr_t paddr
, int prot
,
1866 int mmu_idx
, int is_softmmu
)
1871 /* dump memory mappings */
1872 void page_dump(FILE *f
)
1874 unsigned long start
, end
;
1875 int i
, j
, prot
, prot1
;
1878 fprintf(f
, "%-8s %-8s %-8s %s\n",
1879 "start", "end", "size", "prot");
1883 for(i
= 0; i
<= L1_SIZE
; i
++) {
1888 for(j
= 0;j
< L2_SIZE
; j
++) {
1893 if (prot1
!= prot
) {
1894 end
= (i
<< (32 - L1_BITS
)) | (j
<< TARGET_PAGE_BITS
);
1896 fprintf(f
, "%08lx-%08lx %08lx %c%c%c\n",
1897 start
, end
, end
- start
,
1898 prot
& PAGE_READ
? 'r' : '-',
1899 prot
& PAGE_WRITE
? 'w' : '-',
1900 prot
& PAGE_EXEC
? 'x' : '-');
1914 int page_get_flags(target_ulong address
)
1918 p
= page_find(address
>> TARGET_PAGE_BITS
);
1924 /* modify the flags of a page and invalidate the code if
1925 necessary. The flag PAGE_WRITE_ORG is positionned automatically
1926 depending on PAGE_WRITE */
1927 void page_set_flags(target_ulong start
, target_ulong end
, int flags
)
1932 start
= start
& TARGET_PAGE_MASK
;
1933 end
= TARGET_PAGE_ALIGN(end
);
1934 if (flags
& PAGE_WRITE
)
1935 flags
|= PAGE_WRITE_ORG
;
1936 spin_lock(&tb_lock
);
1937 for(addr
= start
; addr
< end
; addr
+= TARGET_PAGE_SIZE
) {
1938 p
= page_find_alloc(addr
>> TARGET_PAGE_BITS
);
1939 /* if the write protection is set, then we invalidate the code
1941 if (!(p
->flags
& PAGE_WRITE
) &&
1942 (flags
& PAGE_WRITE
) &&
1944 tb_invalidate_phys_page(addr
, 0, NULL
);
1948 spin_unlock(&tb_lock
);
1951 int page_check_range(target_ulong start
, target_ulong len
, int flags
)
1957 end
= TARGET_PAGE_ALIGN(start
+len
); /* must do before we loose bits in the next step */
1958 start
= start
& TARGET_PAGE_MASK
;
1961 /* we've wrapped around */
1963 for(addr
= start
; addr
< end
; addr
+= TARGET_PAGE_SIZE
) {
1964 p
= page_find(addr
>> TARGET_PAGE_BITS
);
1967 if( !(p
->flags
& PAGE_VALID
) )
1970 if ((flags
& PAGE_READ
) && !(p
->flags
& PAGE_READ
))
1972 if (flags
& PAGE_WRITE
) {
1973 if (!(p
->flags
& PAGE_WRITE_ORG
))
1975 /* unprotect the page if it was put read-only because it
1976 contains translated code */
1977 if (!(p
->flags
& PAGE_WRITE
)) {
1978 if (!page_unprotect(addr
, 0, NULL
))
1987 /* called from signal handler: invalidate the code and unprotect the
1988 page. Return TRUE if the fault was succesfully handled. */
1989 int page_unprotect(target_ulong address
, unsigned long pc
, void *puc
)
1991 unsigned int page_index
, prot
, pindex
;
1993 target_ulong host_start
, host_end
, addr
;
1995 host_start
= address
& qemu_host_page_mask
;
1996 page_index
= host_start
>> TARGET_PAGE_BITS
;
1997 p1
= page_find(page_index
);
2000 host_end
= host_start
+ qemu_host_page_size
;
2003 for(addr
= host_start
;addr
< host_end
; addr
+= TARGET_PAGE_SIZE
) {
2007 /* if the page was really writable, then we change its
2008 protection back to writable */
2009 if (prot
& PAGE_WRITE_ORG
) {
2010 pindex
= (address
- host_start
) >> TARGET_PAGE_BITS
;
2011 if (!(p1
[pindex
].flags
& PAGE_WRITE
)) {
2012 mprotect((void *)g2h(host_start
), qemu_host_page_size
,
2013 (prot
& PAGE_BITS
) | PAGE_WRITE
);
2014 p1
[pindex
].flags
|= PAGE_WRITE
;
2015 /* and since the content will be modified, we must invalidate
2016 the corresponding translated code. */
2017 tb_invalidate_phys_page(address
, pc
, puc
);
2018 #ifdef DEBUG_TB_CHECK
2019 tb_invalidate_check(address
);
2027 static inline void tlb_set_dirty(CPUState
*env
,
2028 unsigned long addr
, target_ulong vaddr
)
2031 #endif /* defined(CONFIG_USER_ONLY) */
2033 static int subpage_register (subpage_t
*mmio
, uint32_t start
, uint32_t end
,
2035 static void *subpage_init (target_phys_addr_t base
, uint32_t *phys
,
2037 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2040 if (addr > start_addr) \
2043 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2044 if (start_addr2 > 0) \
2048 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2049 end_addr2 = TARGET_PAGE_SIZE - 1; \
2051 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2052 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2057 /* register physical memory. 'size' must be a multiple of the target
2058 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2060 void cpu_register_physical_memory(target_phys_addr_t start_addr
,
2062 unsigned long phys_offset
)
2064 target_phys_addr_t addr
, end_addr
;
2067 unsigned long orig_size
= size
;
2070 size
= (size
+ TARGET_PAGE_SIZE
- 1) & TARGET_PAGE_MASK
;
2071 end_addr
= start_addr
+ (target_phys_addr_t
)size
;
2072 for(addr
= start_addr
; addr
!= end_addr
; addr
+= TARGET_PAGE_SIZE
) {
2073 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
2074 if (p
&& p
->phys_offset
!= IO_MEM_UNASSIGNED
) {
2075 unsigned long orig_memory
= p
->phys_offset
;
2076 target_phys_addr_t start_addr2
, end_addr2
;
2077 int need_subpage
= 0;
2079 CHECK_SUBPAGE(addr
, start_addr
, start_addr2
, end_addr
, end_addr2
,
2081 if (need_subpage
|| phys_offset
& IO_MEM_SUBWIDTH
) {
2082 if (!(orig_memory
& IO_MEM_SUBPAGE
)) {
2083 subpage
= subpage_init((addr
& TARGET_PAGE_MASK
),
2084 &p
->phys_offset
, orig_memory
);
2086 subpage
= io_mem_opaque
[(orig_memory
& ~TARGET_PAGE_MASK
)
2089 subpage_register(subpage
, start_addr2
, end_addr2
, phys_offset
);
2091 p
->phys_offset
= phys_offset
;
2092 if ((phys_offset
& ~TARGET_PAGE_MASK
) <= IO_MEM_ROM
||
2093 (phys_offset
& IO_MEM_ROMD
))
2094 phys_offset
+= TARGET_PAGE_SIZE
;
2097 p
= phys_page_find_alloc(addr
>> TARGET_PAGE_BITS
, 1);
2098 p
->phys_offset
= phys_offset
;
2099 if ((phys_offset
& ~TARGET_PAGE_MASK
) <= IO_MEM_ROM
||
2100 (phys_offset
& IO_MEM_ROMD
))
2101 phys_offset
+= TARGET_PAGE_SIZE
;
2103 target_phys_addr_t start_addr2
, end_addr2
;
2104 int need_subpage
= 0;
2106 CHECK_SUBPAGE(addr
, start_addr
, start_addr2
, end_addr
,
2107 end_addr2
, need_subpage
);
2109 if (need_subpage
|| phys_offset
& IO_MEM_SUBWIDTH
) {
2110 subpage
= subpage_init((addr
& TARGET_PAGE_MASK
),
2111 &p
->phys_offset
, IO_MEM_UNASSIGNED
);
2112 subpage_register(subpage
, start_addr2
, end_addr2
,
2119 /* since each CPU stores ram addresses in its TLB cache, we must
2120 reset the modified entries */
2122 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
2127 /* XXX: temporary until new memory mapping API */
2128 uint32_t cpu_get_physical_page_desc(target_phys_addr_t addr
)
2132 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
2134 return IO_MEM_UNASSIGNED
;
2135 return p
->phys_offset
;
2138 /* XXX: better than nothing */
2139 ram_addr_t
qemu_ram_alloc(unsigned long size
)
2142 if ((phys_ram_alloc_offset
+ size
) > phys_ram_size
) {
2143 fprintf(stderr
, "Not enough memory (requested_size = %lu, max memory = %d)\n",
2144 size
, phys_ram_size
);
2147 addr
= phys_ram_alloc_offset
;
2148 phys_ram_alloc_offset
= TARGET_PAGE_ALIGN(phys_ram_alloc_offset
+ size
);
2152 void qemu_ram_free(ram_addr_t addr
)
2156 static uint32_t unassigned_mem_readb(void *opaque
, target_phys_addr_t addr
)
2158 #ifdef DEBUG_UNASSIGNED
2159 printf("Unassigned mem read " TARGET_FMT_plx
"\n", addr
);
2162 do_unassigned_access(addr
, 0, 0, 0);
2164 do_unassigned_access(addr
, 0, 0, 0);
2169 static void unassigned_mem_writeb(void *opaque
, target_phys_addr_t addr
, uint32_t val
)
2171 #ifdef DEBUG_UNASSIGNED
2172 printf("Unassigned mem write " TARGET_FMT_plx
" = 0x%x\n", addr
, val
);
2175 do_unassigned_access(addr
, 1, 0, 0);
2177 do_unassigned_access(addr
, 1, 0, 0);
2181 static CPUReadMemoryFunc
*unassigned_mem_read
[3] = {
2182 unassigned_mem_readb
,
2183 unassigned_mem_readb
,
2184 unassigned_mem_readb
,
2187 static CPUWriteMemoryFunc
*unassigned_mem_write
[3] = {
2188 unassigned_mem_writeb
,
2189 unassigned_mem_writeb
,
2190 unassigned_mem_writeb
,
2193 static void notdirty_mem_writeb(void *opaque
, target_phys_addr_t addr
, uint32_t val
)
2195 unsigned long ram_addr
;
2197 ram_addr
= addr
- (unsigned long)phys_ram_base
;
2198 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2199 if (!(dirty_flags
& CODE_DIRTY_FLAG
)) {
2200 #if !defined(CONFIG_USER_ONLY)
2201 tb_invalidate_phys_page_fast(ram_addr
, 1);
2202 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2205 stb_p((uint8_t *)(long)addr
, val
);
2207 if (cpu_single_env
->kqemu_enabled
&&
2208 (dirty_flags
& KQEMU_MODIFY_PAGE_MASK
) != KQEMU_MODIFY_PAGE_MASK
)
2209 kqemu_modify_page(cpu_single_env
, ram_addr
);
2211 dirty_flags
|= (0xff & ~CODE_DIRTY_FLAG
);
2212 phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
] = dirty_flags
;
2213 /* we remove the notdirty callback only if the code has been
2215 if (dirty_flags
== 0xff)
2216 tlb_set_dirty(cpu_single_env
, addr
, cpu_single_env
->mem_write_vaddr
);
2219 static void notdirty_mem_writew(void *opaque
, target_phys_addr_t addr
, uint32_t val
)
2221 unsigned long ram_addr
;
2223 ram_addr
= addr
- (unsigned long)phys_ram_base
;
2224 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2225 if (!(dirty_flags
& CODE_DIRTY_FLAG
)) {
2226 #if !defined(CONFIG_USER_ONLY)
2227 tb_invalidate_phys_page_fast(ram_addr
, 2);
2228 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2231 stw_p((uint8_t *)(long)addr
, val
);
2233 if (cpu_single_env
->kqemu_enabled
&&
2234 (dirty_flags
& KQEMU_MODIFY_PAGE_MASK
) != KQEMU_MODIFY_PAGE_MASK
)
2235 kqemu_modify_page(cpu_single_env
, ram_addr
);
2237 dirty_flags
|= (0xff & ~CODE_DIRTY_FLAG
);
2238 phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
] = dirty_flags
;
2239 /* we remove the notdirty callback only if the code has been
2241 if (dirty_flags
== 0xff)
2242 tlb_set_dirty(cpu_single_env
, addr
, cpu_single_env
->mem_write_vaddr
);
2245 static void notdirty_mem_writel(void *opaque
, target_phys_addr_t addr
, uint32_t val
)
2247 unsigned long ram_addr
;
2249 ram_addr
= addr
- (unsigned long)phys_ram_base
;
2250 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2251 if (!(dirty_flags
& CODE_DIRTY_FLAG
)) {
2252 #if !defined(CONFIG_USER_ONLY)
2253 tb_invalidate_phys_page_fast(ram_addr
, 4);
2254 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2257 stl_p((uint8_t *)(long)addr
, val
);
2259 if (cpu_single_env
->kqemu_enabled
&&
2260 (dirty_flags
& KQEMU_MODIFY_PAGE_MASK
) != KQEMU_MODIFY_PAGE_MASK
)
2261 kqemu_modify_page(cpu_single_env
, ram_addr
);
2263 dirty_flags
|= (0xff & ~CODE_DIRTY_FLAG
);
2264 phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
] = dirty_flags
;
2265 /* we remove the notdirty callback only if the code has been
2267 if (dirty_flags
== 0xff)
2268 tlb_set_dirty(cpu_single_env
, addr
, cpu_single_env
->mem_write_vaddr
);
2271 static CPUReadMemoryFunc
*error_mem_read
[3] = {
2272 NULL
, /* never used */
2273 NULL
, /* never used */
2274 NULL
, /* never used */
2277 static CPUWriteMemoryFunc
*notdirty_mem_write
[3] = {
2278 notdirty_mem_writeb
,
2279 notdirty_mem_writew
,
2280 notdirty_mem_writel
,
2283 #if defined(CONFIG_SOFTMMU)
2284 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2285 so these check for a hit then pass through to the normal out-of-line
2287 static uint32_t watch_mem_readb(void *opaque
, target_phys_addr_t addr
)
2289 return ldub_phys(addr
);
2292 static uint32_t watch_mem_readw(void *opaque
, target_phys_addr_t addr
)
2294 return lduw_phys(addr
);
2297 static uint32_t watch_mem_readl(void *opaque
, target_phys_addr_t addr
)
2299 return ldl_phys(addr
);
2302 /* Generate a debug exception if a watchpoint has been hit.
2303 Returns the real physical address of the access. addr will be a host
2304 address in case of a RAM location. */
2305 static target_ulong
check_watchpoint(target_phys_addr_t addr
)
2307 CPUState
*env
= cpu_single_env
;
2309 target_ulong retaddr
;
2313 for (i
= 0; i
< env
->nb_watchpoints
; i
++) {
2314 watch
= env
->watchpoint
[i
].vaddr
;
2315 if (((env
->mem_write_vaddr
^ watch
) & TARGET_PAGE_MASK
) == 0) {
2316 retaddr
= addr
- env
->watchpoint
[i
].addend
;
2317 if (((addr
^ watch
) & ~TARGET_PAGE_MASK
) == 0) {
2318 cpu_single_env
->watchpoint_hit
= i
+ 1;
2319 cpu_interrupt(cpu_single_env
, CPU_INTERRUPT_DEBUG
);
2327 static void watch_mem_writeb(void *opaque
, target_phys_addr_t addr
,
2330 addr
= check_watchpoint(addr
);
2331 stb_phys(addr
, val
);
2334 static void watch_mem_writew(void *opaque
, target_phys_addr_t addr
,
2337 addr
= check_watchpoint(addr
);
2338 stw_phys(addr
, val
);
2341 static void watch_mem_writel(void *opaque
, target_phys_addr_t addr
,
2344 addr
= check_watchpoint(addr
);
2345 stl_phys(addr
, val
);
2348 static CPUReadMemoryFunc
*watch_mem_read
[3] = {
2354 static CPUWriteMemoryFunc
*watch_mem_write
[3] = {
2361 static inline uint32_t subpage_readlen (subpage_t
*mmio
, target_phys_addr_t addr
,
2367 idx
= SUBPAGE_IDX(addr
- mmio
->base
);
2368 #if defined(DEBUG_SUBPAGE)
2369 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
" idx %d\n", __func__
,
2370 mmio
, len
, addr
, idx
);
2372 ret
= (**mmio
->mem_read
[idx
][len
])(mmio
->opaque
[idx
][0][len
], addr
);
2377 static inline void subpage_writelen (subpage_t
*mmio
, target_phys_addr_t addr
,
2378 uint32_t value
, unsigned int len
)
2382 idx
= SUBPAGE_IDX(addr
- mmio
->base
);
2383 #if defined(DEBUG_SUBPAGE)
2384 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
" idx %d value %08x\n", __func__
,
2385 mmio
, len
, addr
, idx
, value
);
2387 (**mmio
->mem_write
[idx
][len
])(mmio
->opaque
[idx
][1][len
], addr
, value
);
2390 static uint32_t subpage_readb (void *opaque
, target_phys_addr_t addr
)
2392 #if defined(DEBUG_SUBPAGE)
2393 printf("%s: addr " TARGET_FMT_plx
"\n", __func__
, addr
);
2396 return subpage_readlen(opaque
, addr
, 0);
2399 static void subpage_writeb (void *opaque
, target_phys_addr_t addr
,
2402 #if defined(DEBUG_SUBPAGE)
2403 printf("%s: addr " TARGET_FMT_plx
" val %08x\n", __func__
, addr
, value
);
2405 subpage_writelen(opaque
, addr
, value
, 0);
2408 static uint32_t subpage_readw (void *opaque
, target_phys_addr_t addr
)
2410 #if defined(DEBUG_SUBPAGE)
2411 printf("%s: addr " TARGET_FMT_plx
"\n", __func__
, addr
);
2414 return subpage_readlen(opaque
, addr
, 1);
2417 static void subpage_writew (void *opaque
, target_phys_addr_t addr
,
2420 #if defined(DEBUG_SUBPAGE)
2421 printf("%s: addr " TARGET_FMT_plx
" val %08x\n", __func__
, addr
, value
);
2423 subpage_writelen(opaque
, addr
, value
, 1);
2426 static uint32_t subpage_readl (void *opaque
, target_phys_addr_t addr
)
2428 #if defined(DEBUG_SUBPAGE)
2429 printf("%s: addr " TARGET_FMT_plx
"\n", __func__
, addr
);
2432 return subpage_readlen(opaque
, addr
, 2);
2435 static void subpage_writel (void *opaque
,
2436 target_phys_addr_t addr
, uint32_t value
)
2438 #if defined(DEBUG_SUBPAGE)
2439 printf("%s: addr " TARGET_FMT_plx
" val %08x\n", __func__
, addr
, value
);
2441 subpage_writelen(opaque
, addr
, value
, 2);
2444 static CPUReadMemoryFunc
*subpage_read
[] = {
2450 static CPUWriteMemoryFunc
*subpage_write
[] = {
2456 static int subpage_register (subpage_t
*mmio
, uint32_t start
, uint32_t end
,
2462 if (start
>= TARGET_PAGE_SIZE
|| end
>= TARGET_PAGE_SIZE
)
2464 idx
= SUBPAGE_IDX(start
);
2465 eidx
= SUBPAGE_IDX(end
);
2466 #if defined(DEBUG_SUBPAGE)
2467 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__
,
2468 mmio
, start
, end
, idx
, eidx
, memory
);
2470 memory
>>= IO_MEM_SHIFT
;
2471 for (; idx
<= eidx
; idx
++) {
2472 for (i
= 0; i
< 4; i
++) {
2473 if (io_mem_read
[memory
][i
]) {
2474 mmio
->mem_read
[idx
][i
] = &io_mem_read
[memory
][i
];
2475 mmio
->opaque
[idx
][0][i
] = io_mem_opaque
[memory
];
2477 if (io_mem_write
[memory
][i
]) {
2478 mmio
->mem_write
[idx
][i
] = &io_mem_write
[memory
][i
];
2479 mmio
->opaque
[idx
][1][i
] = io_mem_opaque
[memory
];
2487 static void *subpage_init (target_phys_addr_t base
, uint32_t *phys
,
2493 mmio
= qemu_mallocz(sizeof(subpage_t
));
2496 subpage_memory
= cpu_register_io_memory(0, subpage_read
, subpage_write
, mmio
);
2497 #if defined(DEBUG_SUBPAGE)
2498 printf("%s: %p base " TARGET_FMT_plx
" len %08x %d\n", __func__
,
2499 mmio
, base
, TARGET_PAGE_SIZE
, subpage_memory
);
2501 *phys
= subpage_memory
| IO_MEM_SUBPAGE
;
2502 subpage_register(mmio
, 0, TARGET_PAGE_SIZE
- 1, orig_memory
);
2508 static int get_free_io_mem_idx(void)
2512 for (i
= 0; i
<IO_MEM_NB_ENTRIES
; i
++)
2513 if (!io_mem_used
[i
]) {
2521 static void io_mem_init(void)
2525 cpu_register_io_memory(IO_MEM_ROM
>> IO_MEM_SHIFT
, error_mem_read
, unassigned_mem_write
, NULL
);
2526 cpu_register_io_memory(IO_MEM_UNASSIGNED
>> IO_MEM_SHIFT
, unassigned_mem_read
, unassigned_mem_write
, NULL
);
2527 cpu_register_io_memory(IO_MEM_NOTDIRTY
>> IO_MEM_SHIFT
, error_mem_read
, notdirty_mem_write
, NULL
);
2531 #if defined(CONFIG_SOFTMMU)
2532 io_mem_watch
= cpu_register_io_memory(-1, watch_mem_read
,
2533 watch_mem_write
, NULL
);
2535 /* alloc dirty bits array */
2536 phys_ram_dirty
= qemu_vmalloc(phys_ram_size
>> TARGET_PAGE_BITS
);
2537 memset(phys_ram_dirty
, 0xff, phys_ram_size
>> TARGET_PAGE_BITS
);
2540 /* mem_read and mem_write are arrays of functions containing the
2541 function to access byte (index 0), word (index 1) and dword (index
2542 2). Functions can be omitted with a NULL function pointer. The
2543 registered functions may be modified dynamically later.
2544 If io_index is non zero, the corresponding io zone is
2545 modified. If it is zero, a new io zone is allocated. The return
2546 value can be used with cpu_register_physical_memory(). (-1) is
2547 returned if error. */
2548 int cpu_register_io_memory(int io_index
,
2549 CPUReadMemoryFunc
**mem_read
,
2550 CPUWriteMemoryFunc
**mem_write
,
2553 int i
, subwidth
= 0;
2555 if (io_index
<= 0) {
2556 io_index
= get_free_io_mem_idx();
2560 if (io_index
>= IO_MEM_NB_ENTRIES
)
2564 for(i
= 0;i
< 3; i
++) {
2565 if (!mem_read
[i
] || !mem_write
[i
])
2566 subwidth
= IO_MEM_SUBWIDTH
;
2567 io_mem_read
[io_index
][i
] = mem_read
[i
];
2568 io_mem_write
[io_index
][i
] = mem_write
[i
];
2570 io_mem_opaque
[io_index
] = opaque
;
2571 return (io_index
<< IO_MEM_SHIFT
) | subwidth
;
2574 void cpu_unregister_io_memory(int io_table_address
)
2577 int io_index
= io_table_address
>> IO_MEM_SHIFT
;
2579 for (i
=0;i
< 3; i
++) {
2580 io_mem_read
[io_index
][i
] = unassigned_mem_read
[i
];
2581 io_mem_write
[io_index
][i
] = unassigned_mem_write
[i
];
2583 io_mem_opaque
[io_index
] = NULL
;
2584 io_mem_used
[io_index
] = 0;
2587 CPUWriteMemoryFunc
**cpu_get_io_memory_write(int io_index
)
2589 return io_mem_write
[io_index
>> IO_MEM_SHIFT
];
2592 CPUReadMemoryFunc
**cpu_get_io_memory_read(int io_index
)
2594 return io_mem_read
[io_index
>> IO_MEM_SHIFT
];
2597 /* physical memory access (slow version, mainly for debug) */
2598 #if defined(CONFIG_USER_ONLY)
2599 void cpu_physical_memory_rw(target_phys_addr_t addr
, uint8_t *buf
,
2600 int len
, int is_write
)
2607 page
= addr
& TARGET_PAGE_MASK
;
2608 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
2611 flags
= page_get_flags(page
);
2612 if (!(flags
& PAGE_VALID
))
2615 if (!(flags
& PAGE_WRITE
))
2617 /* XXX: this code should not depend on lock_user */
2618 if (!(p
= lock_user(VERIFY_WRITE
, addr
, len
, 0)))
2619 /* FIXME - should this return an error rather than just fail? */
2621 memcpy(p
, buf
, len
);
2622 unlock_user(p
, addr
, len
);
2624 if (!(flags
& PAGE_READ
))
2626 /* XXX: this code should not depend on lock_user */
2627 if (!(p
= lock_user(VERIFY_READ
, addr
, len
, 1)))
2628 /* FIXME - should this return an error rather than just fail? */
2630 memcpy(buf
, p
, len
);
2631 unlock_user(p
, addr
, 0);
/* Softmmu variant: copy 'len' bytes between a guest physical address
 * range and 'buf', dispatching to I/O callbacks for non-RAM pages and
 * doing direct memcpy (plus dirty/TB bookkeeping) for RAM.
 * NOTE(review): extraction fragment — the per-page loop, several local
 * declarations, braces, the ldl_p/lduw_p/ldub_p loads of 'val' and the
 * len/buf/addr advance statements of the original are missing here. */
2640 void cpu_physical_memory_rw(target_phys_addr_t addr
, uint8_t *buf
,
2641 int len
, int is_write
)
2646 target_phys_addr_t page
;
2651 page
= addr
& TARGET_PAGE_MASK
;
2652 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
/* look up the physical page descriptor; unmapped pages fall back to
   IO_MEM_UNASSIGNED */
2655 p
= phys_page_find(page
>> TARGET_PAGE_BITS
);
2657 pd
= IO_MEM_UNASSIGNED
;
2659 pd
= p
->phys_offset
;
/* write path, non-RAM page: route through registered I/O callbacks,
   picking the widest access the alignment and残 length allow */
2663 if ((pd
& ~TARGET_PAGE_MASK
) != IO_MEM_RAM
) {
2664 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
2665 /* XXX: could force cpu_single_env to NULL to avoid
2667 if (l
>= 4 && ((addr
& 3) == 0)) {
2668 /* 32 bit write access */
2670 io_mem_write
[io_index
][2](io_mem_opaque
[io_index
], addr
, val
);
2672 } else if (l
>= 2 && ((addr
& 1) == 0)) {
2673 /* 16 bit write access */
2675 io_mem_write
[io_index
][1](io_mem_opaque
[io_index
], addr
, val
);
2678 /* 8 bit write access */
2680 io_mem_write
[io_index
][0](io_mem_opaque
[io_index
], addr
, val
);
/* write path, RAM page: direct copy, then invalidate any translated
   code on the page and mark it dirty */
2684 unsigned long addr1
;
2685 addr1
= (pd
& TARGET_PAGE_MASK
) + (addr
& ~TARGET_PAGE_MASK
);
2687 ptr
= phys_ram_base
+ addr1
;
2688 memcpy(ptr
, buf
, l
);
2689 if (!cpu_physical_memory_is_dirty(addr1
)) {
2690 /* invalidate code */
2691 tb_invalidate_phys_page_range(addr1
, addr1
+ l
, 0);
2693 phys_ram_dirty
[addr1
>> TARGET_PAGE_BITS
] |=
2694 (0xff & ~CODE_DIRTY_FLAG
);
2696 /* qemu doesn't execute guest code directly, but kvm does
2697 therefore flush instruction caches */
2699 flush_icache_range((unsigned long)ptr
,
2700 ((unsigned long)ptr
)+l
);
/* read path, I/O page (anything above IO_MEM_ROM that is not ROMD) */
2703 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
&&
2704 !(pd
& IO_MEM_ROMD
)) {
2706 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
2707 if (l
>= 4 && ((addr
& 3) == 0)) {
2708 /* 32 bit read access */
2709 val
= io_mem_read
[io_index
][2](io_mem_opaque
[io_index
], addr
);
2712 } else if (l
>= 2 && ((addr
& 1) == 0)) {
2713 /* 16 bit read access */
2714 val
= io_mem_read
[io_index
][1](io_mem_opaque
[io_index
], addr
);
2718 /* 8 bit read access */
2719 val
= io_mem_read
[io_index
][0](io_mem_opaque
[io_index
], addr
);
/* read path, RAM/ROM page: straight copy from host-mapped guest RAM */
2725 ptr
= phys_ram_base
+ (pd
& TARGET_PAGE_MASK
) +
2726 (addr
& ~TARGET_PAGE_MASK
);
2727 memcpy(buf
, ptr
, l
);
2736 /* used for ROM loading : can write in RAM and ROM */
/* Write 'len' bytes from 'buf' to guest physical memory, allowing the
 * target to be ROM as well as RAM (used by ROM/firmware loaders).
 * NOTE(review): extraction fragment — the per-page loop, local
 * declarations, braces and advance statements are not visible here. */
2737 void cpu_physical_memory_write_rom(target_phys_addr_t addr
,
2738 const uint8_t *buf
, int len
)
2742 target_phys_addr_t page
;
2747 page
= addr
& TARGET_PAGE_MASK
;
2748 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
2751 p
= phys_page_find(page
>> TARGET_PAGE_BITS
);
2753 pd
= IO_MEM_UNASSIGNED
;
2755 pd
= p
->phys_offset
;
/* pages that are neither RAM, ROM nor ROMD cannot be written to */
2758 if ((pd
& ~TARGET_PAGE_MASK
) != IO_MEM_RAM
&&
2759 (pd
& ~TARGET_PAGE_MASK
) != IO_MEM_ROM
&&
2760 !(pd
& IO_MEM_ROMD
)) {
/* direct copy into host-mapped guest memory */
2763 unsigned long addr1
;
2764 addr1
= (pd
& TARGET_PAGE_MASK
) + (addr
& ~TARGET_PAGE_MASK
);
2766 ptr
= phys_ram_base
+ addr1
;
2767 memcpy(ptr
, buf
, l
);
2776 /* warning: addr must be aligned */
/* Load a 32-bit word from a guest physical address (addr must be
 * 4-byte aligned). I/O pages go through the registered read callback;
 * RAM/ROM pages are read directly (the ldl_p load from 'ptr' and the
 * returns are in lines missing from this view). */
2777 uint32_t ldl_phys(target_phys_addr_t addr
)
2785 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
2787 pd
= IO_MEM_UNASSIGNED
;
2789 pd
= p
->phys_offset
;
/* I/O page: dispatch a 32-bit read to the slot's callback */
2792 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
&&
2793 !(pd
& IO_MEM_ROMD
)) {
2795 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
2796 val
= io_mem_read
[io_index
][2](io_mem_opaque
[io_index
], addr
);
/* RAM/ROM page: compute host pointer into guest memory */
2799 ptr
= phys_ram_base
+ (pd
& TARGET_PAGE_MASK
) +
2800 (addr
& ~TARGET_PAGE_MASK
);
2806 /* warning: addr must be aligned */
/* Load a 64-bit word from a guest physical address (addr must be
 * aligned). On I/O pages the access is split into two 32-bit reads
 * combined in target endianness order.
 * NOTE(review): extraction fragment — braces, #else/#endif of the
 * endianness switch, the RAM-side ldq_p load and returns are missing. */
2807 uint64_t ldq_phys(target_phys_addr_t addr
)
2815 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
2817 pd
= IO_MEM_UNASSIGNED
;
2819 pd
= p
->phys_offset
;
2822 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
&&
2823 !(pd
& IO_MEM_ROMD
)) {
2825 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
2826 #ifdef TARGET_WORDS_BIGENDIAN
/* big-endian target: high word first */
2827 val
= (uint64_t)io_mem_read
[io_index
][2](io_mem_opaque
[io_index
], addr
) << 32;
2828 val
|= io_mem_read
[io_index
][2](io_mem_opaque
[io_index
], addr
+ 4);
/* little-endian target: low word first */
2830 val
= io_mem_read
[io_index
][2](io_mem_opaque
[io_index
], addr
);
2831 val
|= (uint64_t)io_mem_read
[io_index
][2](io_mem_opaque
[io_index
], addr
+ 4) << 32;
2835 ptr
= phys_ram_base
+ (pd
& TARGET_PAGE_MASK
) +
2836 (addr
& ~TARGET_PAGE_MASK
);
/* Load one byte from a guest physical address via the generic slow
 * path. NOTE(review): the declaration of 'val' and the return statement
 * are in lines missing from this view. */
2843 uint32_t ldub_phys(target_phys_addr_t addr
)
2846 cpu_physical_memory_read(addr
, &val
, 1);
/* Load a 16-bit value from a guest physical address and byte-swap it
 * to host order with tswap16. NOTE(review): the declaration of 'val'
 * (presumably uint16_t) and braces are missing from this view. */
2851 uint32_t lduw_phys(target_phys_addr_t addr
)
2854 cpu_physical_memory_read(addr
, (uint8_t *)&val
, 2);
2855 return tswap16(val
);
/* Branch-prediction hints: on GCC map likely/unlikely onto
 * __builtin_expect; otherwise they are no-ops.
 * NOTE(review): the #if/#else/#endif lines (and the fallback 'likely')
 * surrounding these definitions are not visible in this view. */
2859 #define likely(x) __builtin_expect(!!(x), 1)
2860 #define unlikely(x) __builtin_expect(!!(x), 0)
2863 #define unlikely(x) x
2866 /* warning: addr must be aligned. The ram page is not masked as dirty
2867 and the code inside is not invalidated. It is useful if the dirty
2868 bits are used to track modified PTEs */
/* Store a 32-bit word at an aligned guest physical address WITHOUT the
 * usual dirty marking / code invalidation — except during migration
 * (in_migration), when dirty tracking must still happen so the page is
 * re-sent. NOTE(review): extraction fragment — local declarations,
 * braces and the RAM-side stl_p store are missing from this view. */
2869 void stl_phys_notdirty(target_phys_addr_t addr
, uint32_t val
)
2876 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
2878 pd
= IO_MEM_UNASSIGNED
;
2880 pd
= p
->phys_offset
;
/* non-RAM page: route through the slot's 32-bit write callback */
2883 if ((pd
& ~TARGET_PAGE_MASK
) != IO_MEM_RAM
) {
2884 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
2885 io_mem_write
[io_index
][2](io_mem_opaque
[io_index
], addr
, val
);
2887 unsigned long addr1
= (pd
& TARGET_PAGE_MASK
) + (addr
& ~TARGET_PAGE_MASK
);
2888 ptr
= phys_ram_base
+ addr1
;
/* migration in progress: keep the dirty bitmap coherent anyway */
2891 if (unlikely(in_migration
)) {
2892 if (!cpu_physical_memory_is_dirty(addr1
)) {
2893 /* invalidate code */
2894 tb_invalidate_phys_page_range(addr1
, addr1
+ 4, 0);
2896 phys_ram_dirty
[addr1
>> TARGET_PAGE_BITS
] |=
2897 (0xff & ~CODE_DIRTY_FLAG
);
/* Store a 64-bit word at an aligned guest physical address without
 * dirty marking; I/O pages are written as two 32-bit accesses in
 * target endianness order. NOTE(review): extraction fragment — local
 * declarations, #else/#endif, braces and the RAM-side stq_p store are
 * missing from this view. */
2903 void stq_phys_notdirty(target_phys_addr_t addr
, uint64_t val
)
2910 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
2912 pd
= IO_MEM_UNASSIGNED
;
2914 pd
= p
->phys_offset
;
2917 if ((pd
& ~TARGET_PAGE_MASK
) != IO_MEM_RAM
) {
2918 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
2919 #ifdef TARGET_WORDS_BIGENDIAN
/* big-endian target: high word at addr, low word at addr + 4 */
2920 io_mem_write
[io_index
][2](io_mem_opaque
[io_index
], addr
, val
>> 32);
2921 io_mem_write
[io_index
][2](io_mem_opaque
[io_index
], addr
+ 4, val
);
/* little-endian target: low word at addr, high word at addr + 4 */
2923 io_mem_write
[io_index
][2](io_mem_opaque
[io_index
], addr
, val
);
2924 io_mem_write
[io_index
][2](io_mem_opaque
[io_index
], addr
+ 4, val
>> 32);
2927 ptr
= phys_ram_base
+ (pd
& TARGET_PAGE_MASK
) +
2928 (addr
& ~TARGET_PAGE_MASK
);
2933 /* warning: addr must be aligned */
/* Store a 32-bit word at an aligned guest physical address with full
 * bookkeeping: RAM stores invalidate any translated code on the page
 * and mark it dirty. NOTE(review): extraction fragment — local
 * declarations, braces and the stl_p store through 'ptr' are missing
 * from this view. */
2934 void stl_phys(target_phys_addr_t addr
, uint32_t val
)
2941 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
2943 pd
= IO_MEM_UNASSIGNED
;
2945 pd
= p
->phys_offset
;
/* non-RAM page: dispatch to the slot's 32-bit write callback */
2948 if ((pd
& ~TARGET_PAGE_MASK
) != IO_MEM_RAM
) {
2949 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
2950 io_mem_write
[io_index
][2](io_mem_opaque
[io_index
], addr
, val
);
2952 unsigned long addr1
;
2953 addr1
= (pd
& TARGET_PAGE_MASK
) + (addr
& ~TARGET_PAGE_MASK
);
2955 ptr
= phys_ram_base
+ addr1
;
/* keep TBs and the dirty bitmap coherent with the RAM store */
2957 if (!cpu_physical_memory_is_dirty(addr1
)) {
2958 /* invalidate code */
2959 tb_invalidate_phys_page_range(addr1
, addr1
+ 4, 0);
2961 phys_ram_dirty
[addr1
>> TARGET_PAGE_BITS
] |=
2962 (0xff & ~CODE_DIRTY_FLAG
);
/* Store one byte at a guest physical address via the generic slow
 * path. NOTE(review): the declaration of 'v' (presumably uint8_t
 * v = val) is in lines missing from this view. */
2968 void stb_phys(target_phys_addr_t addr
, uint32_t val
)
2971 cpu_physical_memory_write(addr
, &v
, 1);
2975 void stw_phys(target_phys_addr_t addr
, uint32_t val
)
2977 uint16_t v
= tswap16(val
);
2978 cpu_physical_memory_write(addr
, (const uint8_t *)&v
, 2);
/* Store a 64-bit value at a guest physical address via the generic
 * slow path. NOTE(review): lines between the signature and the write
 * call are missing from this view — presumably the original byte-swaps
 * val to target order (tswap64) there, matching stw_phys; verify
 * against the full source. */
2982 void stq_phys(target_phys_addr_t addr
, uint64_t val
)
2985 cpu_physical_memory_write(addr
, (const uint8_t *)&val
, 8);
2990 /* virtual memory access for debug */
/* Debugger access to guest VIRTUAL memory: translate each page with
 * cpu_get_phys_page_debug and forward to cpu_physical_memory_rw.
 * Returns an error when a page has no physical mapping.
 * NOTE(review): extraction fragment — the per-page loop, local
 * declarations, the return statements and the advance statements are
 * missing from this view. */
2991 int cpu_memory_rw_debug(CPUState
*env
, target_ulong addr
,
2992 uint8_t *buf
, int len
, int is_write
)
2995 target_phys_addr_t phys_addr
;
2999 page
= addr
& TARGET_PAGE_MASK
;
3000 phys_addr
= cpu_get_phys_page_debug(env
, page
);
3001 /* if no physical page mapped, return an error */
3002 if (phys_addr
== -1)
3004 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
/* forward the page-sized chunk to the physical slow path */
3007 cpu_physical_memory_rw(phys_addr
+ (addr
& ~TARGET_PAGE_MASK
),
/* Print translation-buffer statistics (TB counts, sizes, jump-chaining
 * ratios, flush/invalidate counters) and, under CONFIG_PROFILER,
 * dyngen timing counters, through the caller-supplied cpu_fprintf.
 * NOTE(review): extraction fragment — the loop body's tb assignment,
 * cross_page/direct_jmp increments, several braces and some cpu_fprintf
 * argument lines of the original are missing from this view. */
3016 void dump_exec_info(FILE *f
,
3017 int (*cpu_fprintf
)(FILE *f
, const char *fmt
, ...))
3019 int i
, target_code_size
, max_target_code_size
;
3020 int direct_jmp_count
, direct_jmp2_count
, cross_page
;
3021 TranslationBlock
*tb
;
3023 target_code_size
= 0;
3024 max_target_code_size
= 0;
3026 direct_jmp_count
= 0;
3027 direct_jmp2_count
= 0;
/* scan every translated block and accumulate size/jump statistics */
3028 for(i
= 0; i
< nb_tbs
; i
++) {
3030 target_code_size
+= tb
->size
;
3031 if (tb
->size
> max_target_code_size
)
3032 max_target_code_size
= tb
->size
;
3033 if (tb
->page_addr
[1] != -1)
3035 if (tb
->tb_next_offset
[0] != 0xffff) {
3037 if (tb
->tb_next_offset
[1] != 0xffff) {
3038 direct_jmp2_count
++;
3042 /* XXX: avoid using doubles ? */
3043 cpu_fprintf(f
, "Translation buffer state:\n");
3044 cpu_fprintf(f
, "TB count %d\n", nb_tbs
);
3045 cpu_fprintf(f
, "TB avg target size %d max=%d bytes\n",
3046 nb_tbs
? target_code_size
/ nb_tbs
: 0,
3047 max_target_code_size
);
3048 cpu_fprintf(f
, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
3049 nb_tbs
? (code_gen_ptr
- code_gen_buffer
) / nb_tbs
: 0,
3050 target_code_size
? (double) (code_gen_ptr
- code_gen_buffer
) / target_code_size
: 0);
3051 cpu_fprintf(f
, "cross page TB count %d (%d%%)\n",
3053 nb_tbs
? (cross_page
* 100) / nb_tbs
: 0);
3054 cpu_fprintf(f
, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
3056 nb_tbs
? (direct_jmp_count
* 100) / nb_tbs
: 0,
3058 nb_tbs
? (direct_jmp2_count
* 100) / nb_tbs
: 0);
3059 cpu_fprintf(f
, "\nStatistics:\n");
3060 cpu_fprintf(f
, "TB flush count %d\n", tb_flush_count
);
3061 cpu_fprintf(f
, "TB invalidate count %d\n", tb_phys_invalidate_count
);
3062 cpu_fprintf(f
, "TLB flush count %d\n", tlb_flush_count
);
/* profiler-only section: dyngen cycle/op counters */
3063 #ifdef CONFIG_PROFILER
3066 tot
= dyngen_interm_time
+ dyngen_code_time
;
3067 cpu_fprintf(f
, "JIT cycles %" PRId64
" (%0.3f s at 2.4 GHz)\n",
3069 cpu_fprintf(f
, "translated TBs %" PRId64
" (aborted=%" PRId64
" %0.1f%%)\n",
3071 dyngen_tb_count1
- dyngen_tb_count
,
3072 dyngen_tb_count1
? (double)(dyngen_tb_count1
- dyngen_tb_count
) / dyngen_tb_count1
* 100.0 : 0);
3073 cpu_fprintf(f
, "avg ops/TB %0.1f max=%d\n",
3074 dyngen_tb_count
? (double)dyngen_op_count
/ dyngen_tb_count
: 0, dyngen_op_count_max
);
3075 cpu_fprintf(f
, "old ops/total ops %0.1f%%\n",
3076 dyngen_op_count
? (double)dyngen_old_op_count
/ dyngen_op_count
* 100.0 : 0);
3077 cpu_fprintf(f
, "deleted ops/TB %0.2f\n",
3079 (double)dyngen_tcg_del_op_count
/ dyngen_tb_count
: 0);
3080 cpu_fprintf(f
, "cycles/op %0.1f\n",
3081 dyngen_op_count
? (double)tot
/ dyngen_op_count
: 0);
3082 cpu_fprintf(f
, "cycles/in byte %0.1f\n",
3083 dyngen_code_in_len
? (double)tot
/ dyngen_code_in_len
: 0);
3084 cpu_fprintf(f
, "cycles/out byte %0.1f\n",
3085 dyngen_code_out_len
? (double)tot
/ dyngen_code_out_len
: 0);
3088 cpu_fprintf(f
, " gen_interm time %0.1f%%\n",
3089 (double)dyngen_interm_time
/ tot
* 100.0);
3090 cpu_fprintf(f
, " gen_code time %0.1f%%\n",
3091 (double)dyngen_code_time
/ tot
* 100.0);
3092 cpu_fprintf(f
, "cpu_restore count %" PRId64
"\n",
3093 dyngen_restore_count
);
3094 cpu_fprintf(f
, " avg cycles %0.1f\n",
3095 dyngen_restore_count
? (double)dyngen_restore_time
/ dyngen_restore_count
: 0);
3097 extern void dump_op_count(void);
/* System-emulation only: instantiate the code-access ("_cmmu") softmmu
 * accessors by including softmmu_template.h once per access size.
 * NOTE(review): the per-size "#define SHIFT n" lines expected before
 * each include are not visible in this view — verify against the full
 * source. */
3104 #if !defined(CONFIG_USER_ONLY)
3106 #define MMUSUFFIX _cmmu
3107 #define GETPC() NULL
3108 #define env cpu_single_env
3109 #define SOFTMMU_CODE_ACCESS
3112 #include "softmmu_template.h"
3115 #include "softmmu_template.h"
3118 #include "softmmu_template.h"
3121 #include "softmmu_template.h"