/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
 */
#include <sys/types.h>

#include "qemu-common.h"
#include "cache-utils.h"

#if !defined(TARGET_IA64)
#if defined(CONFIG_USER_ONLY)

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10
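/* Note: once a page has taken SMC_BITMAP_USE_THRESHOLD write faults from
   code that was translated out of it, build_page_bitmap() below switches the
   page to a per-byte code bitmap so later writes only invalidate the TBs
   they actually overlap. */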
#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_SPARC)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#elif defined(TARGET_ALPHA)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#define TARGET_VIRT_ADDR_SPACE_BITS 42
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_X86_64) && !defined(CONFIG_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_I386) && !defined(CONFIG_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#elif defined(TARGET_IA64)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#else
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif
static TranslationBlock *tbs;
int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump. ARM and Sparc64
 have limited branch ranges (possibly also PPC) so place it in a
 section close to code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
uint8_t *code_gen_ptr;
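/* Note: code_gen_buffer receives the host code emitted by the translator;
   code_gen_ptr is the bump-allocation cursor and code_gen_buffer_max_size
   leaves headroom for one worst-case block, so the block being emitted can
   always be finished before a flush is forced. */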
#if !defined(CONFIG_USER_ONLY)

uint8_t *phys_ram_dirty;
static int in_migration;

typedef struct RAMBlock {
    struct RAMBlock *next;
} RAMBlock;

static RAMBlock *ram_blocks;
/* TODO: When we implement (and use) ram deallocation (e.g. for hotplug)
   then we can no longer assume contiguous ram offsets, and external uses
   of this variable will break.  */
ram_addr_t last_ram_offset;
#endif
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */

/* Current instruction counter.  While executing translated code this may
   include some instructions that have not yet been executed. */

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
    ram_addr_t region_offset;
} PhysPageDesc;

#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
/* XXX: this is a temporary hack for alpha target.
 *      In the future, this is to be replaced by a multi-level table
 *      to actually be able to handle the complete 64 bits address space.
 */
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
#else
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
#endif

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)
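/* Example of the two-level split (assuming L2_BITS is 10 and 4 KB target
   pages): a 32-bit address gives a 20-bit page index, of which the top
   L1_BITS select an l1_map[] slot and the remaining L2_BITS select a
   PageDesc inside that second-level array. */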
unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
static PhysPageDesc **l1_phys_map;
#if !defined(CONFIG_USER_ONLY)
static void io_mem_init(void);

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static char io_mem_used[IO_MEM_NB_ENTRIES];
static int io_mem_watch;
#endif

static const char *logfilename = "/tmp/qemu.log";
static int log_append = 0;

static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
    CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
    void *opaque[TARGET_PAGE_SIZE][2][4];
    ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
} subpage_t;
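/* A subpage_t lets several memory regions share one target page: the
   per-offset dispatch tables route each access to the handler registered
   for that byte range (they are filled by subpage_register() further
   below). */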
static void map_exec(void *addr, long size)
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);

static void map_exec(void *addr, long size)
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
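/* Note: both map_exec() variants (the VirtualProtect one for Win32 hosts and
   the mprotect one for POSIX hosts) round the range out to whole host pages,
   since the underlying calls only operate at page granularity. */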
static void page_init(void)
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
    SYSTEM_INFO system_info;

    GetSystemInfo(&system_info);
    qemu_real_host_page_size = system_info.dwPageSize;

    qemu_real_host_page_size = getpagesize();

    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));

#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
    long long startaddr, endaddr;

    last_brk = (unsigned long)sbrk(0);
    f = fopen("/proc/self/maps", "r");
    n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
    startaddr = MIN(startaddr,
                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
    endaddr = MIN(endaddr,
                  (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
    page_set_flags(startaddr & TARGET_PAGE_MASK,
                   TARGET_PAGE_ALIGN(endaddr),
static inline PageDesc **page_l1_map(target_ulong index)
#if TARGET_LONG_BITS > 32
    /* Host memory outside guest VM.  For 32-bit targets we have already
       excluded high addresses.  */
    if (index > ((target_ulong)L2_SIZE * L1_SIZE))
    return &l1_map[index >> L2_BITS];

static inline PageDesc *page_find_alloc(target_ulong index)
    lp = page_l1_map(index);

    /* allocate if not found */
#if defined(CONFIG_USER_ONLY)
    size_t len = sizeof(PageDesc) * L2_SIZE;
    /* Don't use qemu_malloc because it may recurse.  */
    p = mmap(0, len, PROT_READ | PROT_WRITE,
             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

    unsigned long addr = h2g(p);
    page_set_flags(addr & TARGET_PAGE_MASK,
                   TARGET_PAGE_ALIGN(addr + len),
#else
    p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
#endif
    return p + (index & (L2_SIZE - 1));

static inline PageDesc *page_find(target_ulong index)
    lp = page_l1_map(index);
    return p + (index & (L2_SIZE - 1));
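/* page_find() and page_find_alloc() differ only in that the latter allocates
   the second-level PageDesc array on a miss; both return the entry at
   (index & (L2_SIZE - 1)) inside that array. */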
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    /* allocate if not found */
    p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
    memset(p, 0, sizeof(void *) * L1_SIZE);

    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    /* allocate if not found */
    pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
    for (i = 0; i < L2_SIZE; i++) {
        pd[i].phys_offset = IO_MEM_UNASSIGNED;
        pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
    }
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
    return phys_page_find_alloc(index, 0);
#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc will be used */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
#endif
static void code_gen_alloc(unsigned long tb_size)
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        /* in user mode, phys_ram_size is not meaningful */
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
    /* Cannot map more than that */
    if (code_gen_buffer_size > (800 * 1024 * 1024))
        code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
    // Map the buffer below 2G, so we can use direct calls and branches
    start = (void *) 0x60000000UL;
    if (code_gen_buffer_size > (512 * 1024 * 1024))
        code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
    /* Map the buffer below 32M, so we can use direct calls and branches */
    start = (void *) 0x01000000UL;
    if (code_gen_buffer_size > 16 * 1024 * 1024)
        code_gen_buffer_size = 16 * 1024 * 1024;
#endif
    code_gen_buffer = mmap(start, code_gen_buffer_size,
                           PROT_WRITE | PROT_READ | PROT_EXEC,
    if (code_gen_buffer == MAP_FAILED) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
    }
#elif defined(__FreeBSD__) || defined(__DragonFly__)
    flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
    /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
     * 0x40000000 is free */
    addr = (void *)0x40000000;
    /* Cannot map more than that */
    if (code_gen_buffer_size > (800 * 1024 * 1024))
        code_gen_buffer_size = (800 * 1024 * 1024);
#endif
    code_gen_buffer = mmap(addr, code_gen_buffer_size,
                           PROT_WRITE | PROT_READ | PROT_EXEC,
    if (code_gen_buffer == MAP_FAILED) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
    }
#else
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        code_gen_max_block_size();
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
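/* Note: code_gen_max_blocks is derived from the buffer size divided by an
   average block size, so either the TB array or the code buffer itself can
   be the resource that eventually triggers a flush. */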
/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void cpu_exec_init_all(unsigned long tb_size)
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
#if !defined(CONFIG_USER_ONLY)
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

#define CPU_COMMON_SAVE_VERSION 1

static void cpu_common_save(QEMUFile *f, void *opaque)
    CPUState *env = opaque;

    cpu_synchronize_state(env, 0);

    qemu_put_be32s(f, &env->halted);
    qemu_put_be32s(f, &env->interrupt_request);

static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
    CPUState *env = opaque;

    if (version_id != CPU_COMMON_SAVE_VERSION)

    qemu_get_be32s(f, &env->halted);
    qemu_get_be32s(f, &env->interrupt_request);
    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;

    cpu_synchronize_state(env, 1);
CPUState *qemu_get_cpu(int cpu)
    CPUState *env = first_cpu;

        if (env->cpu_index == cpu)

void cpu_exec_init(CPUState *env)
#if defined(CONFIG_USER_ONLY)
    env->next_cpu = NULL;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
    }
    env->cpu_index = cpu_index;
    TAILQ_INIT(&env->breakpoints);
    TAILQ_INIT(&env->watchpoints);
    env->thread_id = GetCurrentProcessId();
    env->thread_id = getpid();
#if defined(CONFIG_USER_ONLY)
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
                    cpu_common_save, cpu_common_load, env);
    register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
static inline void invalidate_page_bitmap(PageDesc *p)
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
    for(i = 0; i < L1_SIZE; i++) {
        for(j = 0; j < L2_SIZE; j++) {
            invalidate_page_bitmap(p);
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
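/* Note: tb_flush() simply resets code_gen_ptr and clears the hash tables and
   per-CPU jump caches; evicting individual TBs would cost more than just
   re-translating them on demand. */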
#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
    TranslationBlock *tb;

    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);

static void tb_jmp_check(TranslationBlock *tb)
    TranslationBlock *tb1;

    /* suppress any remaining jumps to this TB */
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb1 = tb1->jmp_next[n1];
    /* check end of list */
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
    TranslationBlock *tb1;

        *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
    TranslationBlock *tb1;

        tb1 = (TranslationBlock *)((long)tb1 & ~3);
            *ptb = tb1->page_next[n1];
        ptb = &tb1->page_next[n1];

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
    TranslationBlock *tb1, **ptb;

    ptb = &tb->jmp_next[n];
        /* find tb(n) in circular list */
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                ptb = &tb1->jmp_first;
                ptb = &tb1->jmp_next[n1];
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;
/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));

void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
    target_phys_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
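/* Note: the jmp_first/jmp_next links carry the jump slot number (0 or 1)
   in their two low pointer bits, which is why every walker masks with ~3
   before dereferencing; "| 2" marks the end of the circular list. */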
static inline void set_bits(uint8_t *tab, int start, int len)
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
            mask &= ~(0xff << (end & 7));
        start = (start + 8) & ~7;
        while (start < end1) {
            mask = ~(0xff << (end & 7));

static void build_page_bitmap(PageDesc *p)
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);

        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
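/* Note: the code bitmap holds one bit per byte of the target page
   (TARGET_PAGE_SIZE / 8 bytes), so tb_invalidate_phys_page_fast() can check
   a store of up to 8 bytes with a single shift and mask. */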
TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
    TranslationBlock *tb;
    target_ulong phys_pc, phys_page2, virt_page2;

    phys_pc = get_phys_addr_code(env, pc);
        /* flush must be done */
        /* cannot fail at this point */
        /* Don't forget to invalidate previous TB info.  */
        tb_invalidated_flag = 1;
    tc_ptr = code_gen_ptr;
    tb->cs_base = cs_base;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
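/* Note: a TB whose guest code crosses a page boundary is given a second
   physical page (phys_page2), so invalidating either page also invalidates
   the TB. */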
/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
                                   int is_cpu_write_access)
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env;
    target_ulong tb_start, tb_end;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_io_pc, NULL);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            tb_phys_invalidate(tb, -1);
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
        }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                 cpu_single_env->mem_io_vaddr, len,
                 cpu_single_env->eip,
                 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    p = page_find(start >> TARGET_PAGE_BITS);
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
        tb_invalidate_phys_page_range(start, start + len, 1);
#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_phys_addr_t addr,
                                    unsigned long pc, void *puc)
    TranslationBlock *tb;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, target_ulong page_addr)
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            p2->flags &= ~PAGE_WRITE;
            page_get_flags(addr);
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
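/* Note: in user mode, host pages containing translated code are
   write-protected with mprotect() and writes are caught as faults in
   page_unprotect(); in system mode the softmmu TLB performs the same job
   via tlb_protect_code()/tlb_unprotect_code_phys(). */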
/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
    tb = &tbs[nb_tbs++];

void tb_free(TranslationBlock *tb)
    /* In practice this is mostly used for single use temporary TB
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);
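/* Note: a tb_next_offset[] entry of 0xffff denotes a jump slot that was not
   emitted for this TB, so only slots that really exist are reset above. */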
#ifdef DEBUG_TB_CHECK

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
    int m_min, m_max, m;
    TranslationBlock *tb;

    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
    /* binary search (cf Knuth) */
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        v = (unsigned long)tb->tc_ptr;
        else if (tc_ptr < v) {
static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
    TranslationBlock *tb1, *tb_next, **ptb;

    tb1 = tb->jmp_next[n];
        /* find head of list */
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            tb1 = tb1->jmp_next[n1];
        /* we are now sure that tb jumps to tb1 */

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
            ptb = &tb1->jmp_next[n1];
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);

static void tb_reset_jump_recursive(TranslationBlock *tb)
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
#if defined(TARGET_HAS_ICE)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
    target_phys_addr_t addr;
    ram_addr_t ram_addr;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
        pd = IO_MEM_UNASSIGNED;
        pd = p->phys_offset;
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
    target_ulong len_mask = ~(len - 1);

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
    }
    wp = qemu_malloc(sizeof(*wp));

    wp->len_mask = len_mask;

    /* keep all GDB-injected watchpoints in front */
        TAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
        TAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
    target_ulong len_mask = ~(len - 1);

    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
            && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
        }
    }

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
    TAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    qemu_free(watchpoint);

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *env, int mask)
    CPUWatchpoint *wp, *next;

    TAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
#if defined(TARGET_HAS_ICE)
    bp = qemu_malloc(sizeof(*bp));

    /* keep all GDB-injected breakpoints in front */
        TAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
        TAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
#if defined(TARGET_HAS_ICE)
    TAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
        }
    }

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
#if defined(TARGET_HAS_ICE)
    TAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    qemu_free(breakpoint);

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *env, int mask)
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    TAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
            kvm_update_guest_debug(env, 0);
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
/* enable or disable low levels log */
void cpu_set_log(int log_flags)
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
            perror(logfilename);
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
            static char logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
            setvbuf(logfile, NULL, _IOLBF, 0);
    if (!loglevel && logfile) {

void cpu_set_log_filename(const char *filename)
    logfilename = strdup(filename);
    cpu_set_log(loglevel);
static void cpu_unlink_tb(CPUState *env)
#if defined(USE_NPTL)
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls. */
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    tb = env->current_tb;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
        resetlock(&interrupt_lock);
    }
/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
    old_mask = env->interrupt_request;
    env->interrupt_request |= mask;
    if (kvm_enabled() && !qemu_kvm_irqchip_in_kernel())
        kvm_update_interrupt_request(env);

#ifndef CONFIG_USER_ONLY
    /* If called from iothread context, wake the target cpu in ... */
    if (!qemu_cpu_self(env)) {

        env->icount_decr.u16.high = 0xffff;
#ifndef CONFIG_USER_ONLY
            && (mask & ~old_mask) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");

void cpu_reset_interrupt(CPUState *env, int mask)
    env->interrupt_request &= ~mask;

void cpu_exit(CPUState *env)
    env->exit_request = 1;
const CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "before eflags optimization and "
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
    { CPU_LOG_RESET, "cpu_reset",
      "show CPU state before CPU resets" },
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },

static int cmp1(const char *s1, int n, const char *s2)
    if (strlen(s2) != n)
    return memcmp(s1, s2, n) == 0;
/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
    const CPULogItem *item;

        p1 = strchr(p, ',');
        if(cmp1(p,p1-p,"all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
void cpu_abort(CPUState *env, const char *fmt, ...)
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
    cpu_dump_state(env, stderr, fprintf, 0);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
        log_cpu_state(env, 0);
CPUState *cpu_copy(CPUState *env)
    CPUState *new_env = cpu_init(env->cpu_model_str);
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
#if defined(TARGET_HAS_ICE)
    memcpy(new_env, env, sizeof(CPUState));

    /* Preserve chaining and index. */
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    TAILQ_INIT(&env->breakpoints);
    TAILQ_INIT(&env->watchpoints);
#if defined(TARGET_HAS_ICE)
    TAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
#if !defined(CONFIG_USER_ONLY)

static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            env->tlb_table[mmu_idx][i].addr_read = -1;
            env->tlb_table[mmu_idx][i].addr_write = -1;
            env->tlb_table[mmu_idx][i].addr_code = -1;
        }
    }

    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

    if (env->kqemu_enabled) {
        kqemu_flush(env, flush_global);
    }
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        tlb_entry->addr_read = -1;
        tlb_entry->addr_write = -1;
        tlb_entry->addr_code = -1;
    }
void tlb_flush_page(CPUState *env, target_ulong addr)
#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);

    tlb_flush_jmp_cache(env, addr);

    if (env->kqemu_enabled) {
        kqemu_flush_page(env, addr);
    }
/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
        }
    }
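/* Note: marking an entry TLB_NOTDIRTY does not evict it; it only forces the
   next guest store through the slow path so the dirty bitmap can be updated
   again (see tlb_set_page_exec below). */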
/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
    unsigned long length, start1;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;

    len = length >> TARGET_PAGE_BITS;
    /* XXX: should not depend on cpu context */
    if (env->kqemu_enabled) {
        for(i = 0; i < len; i++) {
            kqemu_set_notdirty(env, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    for(i = 0; i < len; i++)

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (unsigned long)qemu_get_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((unsigned long)qemu_get_ram_ptr(end - 1) - start1
        != (end - 1) - start) {

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            for(i = 0; i < CPU_TLB_SIZE; i++)
                tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
int cpu_physical_memory_set_dirty_tracking(int enable)
    if (kvm_enabled()) {
        return kvm_set_migration_log(enable);
    }

int cpu_physical_memory_get_dirty_tracking(void)
    return in_migration;

int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
                                   target_phys_addr_t end_addr)
    ret = kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
    ram_addr_t ram_addr;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
            + tlb_entry->addend);
        ram_addr = qemu_ram_addr_from_host(p);
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }

/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
    }
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
        tlb_entry->addr_write = vaddr;

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
    target_ulong address;
    target_ulong code_address;
    target_phys_addr_t addend;
    target_phys_addr_t iotlb;

    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
        pd = IO_MEM_UNASSIGNED;
        pd = p->phys_offset;
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
#endif

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
        /* IO memory case (romd handled later) */
        address |= TLB_MMIO;
    }
    addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
        iotlb = pd & TARGET_PAGE_MASK;
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
            iotlb |= IO_MEM_NOTDIRTY;
            iotlb |= IO_MEM_ROM;
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
        iotlb = (pd & ~TARGET_PAGE_MASK);
        iotlb += p->region_offset;

    code_address = address;
    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            iotlb = io_mem_watch + paddr;
            /* TODO: The memory case can be optimized by not trapping
               reads of pages with a write breakpoint. */
            address |= TLB_MMIO;
        }
    }

    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
    te = &env->tlb_table[mmu_idx][index];
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    }
    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    }
    if (prot & PAGE_WRITE) {
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
            (pd & IO_MEM_ROMD)) {
            /* Write access calls the I/O callback.  */
            te->addr_write = address | TLB_MMIO;
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                   !cpu_physical_memory_is_dirty(pd)) {
            te->addr_write = address | TLB_NOTDIRTY;
            te->addr_write = address;
        te->addr_write = -1;
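/* Note: the low bits of a TLB entry's address field are flag bits, not
   address bits: TLB_MMIO routes the access to an I/O handler and
   TLB_NOTDIRTY selects the slow store path so dirty tracking keeps working. */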
void tlb_flush(CPUState *env, int flush_global)

void tlb_flush_page(CPUState *env, target_ulong addr)

int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */
int walk_memory_regions(void *priv,
                        int (*fn)(void *, unsigned long, unsigned long, unsigned long))
    unsigned long start, end;
    int i, j, prot, prot1;

    for (i = 0; i <= L1_SIZE; i++) {
        p = (i < L1_SIZE) ? l1_map[i] : NULL;
        for (j = 0; j < L2_SIZE; j++) {
            prot1 = (p == NULL) ? 0 : p[j].flags;
            /*
             * "region" is one continuous chunk of memory
             * that has same protection flags set.
             */
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                rc = (*fn)(priv, start, end, prot);
                /* callback can stop iteration by returning != 0 */
static int dump_region(void *priv, unsigned long start,
                       unsigned long end, unsigned long prot)
    FILE *f = (FILE *)priv;

    (void) fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                   start, end, end - start,
                   ((prot & PAGE_READ) ? 'r' : '-'),
                   ((prot & PAGE_WRITE) ? 'w' : '-'),
                   ((prot & PAGE_EXEC) ? 'x' : '-'));

/* dump memory mappings */
void page_dump(FILE *f)
    (void) fprintf(f, "%-8s %-8s %-8s %s\n",
                   "start", "end", "size", "prot");
    walk_memory_regions(f, dump_region);
int page_get_flags(target_ulong address)
    p = page_find(address >> TARGET_PAGE_BITS);
/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is positioned automatically
   depending on PAGE_WRITE */
void page_set_flags(target_ulong start, target_ulong end, int flags)
    /* mmap_lock should already be held.  */
    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* We may be called for host regions that are outside guest
           address space.  */
        /* if the write protection is set, then we invalidate the code
           inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            tb_invalidate_phys_page(addr, 0, NULL);
int page_check_range(target_ulong start, target_ulong len, int flags)
    if (start + len < start)
        /* we've wrapped around */

    end = TARGET_PAGE_ALIGN(start + len); /* must do before we lose bits in the next step */
    start = start & TARGET_PAGE_MASK;

    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!(p->flags & PAGE_VALID))
        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG))
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL))
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
    unsigned int page_index, prot, pindex;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */

    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);

    host_end = host_start + qemu_host_page_size;

    for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {

    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)g2h(host_start), qemu_host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
        }
    }

static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)

#endif /* defined(CONFIG_USER_ONLY) */
#if !defined(CONFIG_USER_ONLY)

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset);
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory, ram_addr_t region_offset);
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
        if (addr > start_addr)                                          \
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
            if (start_addr2 > 0)                                        \
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \

/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page. The address used when calling the IO function is
   the offset from the start of the region, plus region_offset. Both
   start_addr and region_offset are rounded down to a page boundary
   before calculating this offset. This should not be a problem unless
   the low bits of start_addr and region_offset differ. */
2343 void cpu_register_physical_memory_offset(target_phys_addr_t start_addr
,
2345 ram_addr_t phys_offset
,
2346 ram_addr_t region_offset
)
2348 target_phys_addr_t addr
, end_addr
;
2351 ram_addr_t orig_size
= size
;
2355 /* XXX: should not depend on cpu context */
2357 if (env
->kqemu_enabled
) {
2358 kqemu_set_phys_mem(start_addr
, size
, phys_offset
);
2362 kvm_set_phys_mem(start_addr
, size
, phys_offset
);
2364 if (phys_offset
== IO_MEM_UNASSIGNED
) {
2365 region_offset
= start_addr
;
2367 region_offset
&= TARGET_PAGE_MASK
;
2368 size
= (size
+ TARGET_PAGE_SIZE
- 1) & TARGET_PAGE_MASK
;
2369 end_addr
= start_addr
+ (target_phys_addr_t
)size
;
2370 for(addr
= start_addr
; addr
!= end_addr
; addr
+= TARGET_PAGE_SIZE
) {
2371 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
2372 if (p
&& p
->phys_offset
!= IO_MEM_UNASSIGNED
) {
2373 ram_addr_t orig_memory
= p
->phys_offset
;
2374 target_phys_addr_t start_addr2
, end_addr2
;
2375 int need_subpage
= 0;
2377 CHECK_SUBPAGE(addr
, start_addr
, start_addr2
, end_addr
, end_addr2
,
2379 if (need_subpage
|| phys_offset
& IO_MEM_SUBWIDTH
) {
2380 if (!(orig_memory
& IO_MEM_SUBPAGE
)) {
2381 subpage
= subpage_init((addr
& TARGET_PAGE_MASK
),
2382 &p
->phys_offset
, orig_memory
,
2385 subpage
= io_mem_opaque
[(orig_memory
& ~TARGET_PAGE_MASK
)
2388 subpage_register(subpage
, start_addr2
, end_addr2
, phys_offset
,
2390 p
->region_offset
= 0;
2392 p
->phys_offset
= phys_offset
;
2393 if ((phys_offset
& ~TARGET_PAGE_MASK
) <= IO_MEM_ROM
||
2394 (phys_offset
& IO_MEM_ROMD
))
2395 phys_offset
+= TARGET_PAGE_SIZE
;
2398 p
= phys_page_find_alloc(addr
>> TARGET_PAGE_BITS
, 1);
2399 p
->phys_offset
= phys_offset
;
2400 p
->region_offset
= region_offset
;
2401 if ((phys_offset
& ~TARGET_PAGE_MASK
) <= IO_MEM_ROM
||
2402 (phys_offset
& IO_MEM_ROMD
)) {
2403 phys_offset
+= TARGET_PAGE_SIZE
;
2405 target_phys_addr_t start_addr2
, end_addr2
;
2406 int need_subpage
= 0;
2408 CHECK_SUBPAGE(addr
, start_addr
, start_addr2
, end_addr
,
2409 end_addr2
, need_subpage
);
2411 if (need_subpage
|| phys_offset
& IO_MEM_SUBWIDTH
) {
2412 subpage
= subpage_init((addr
& TARGET_PAGE_MASK
),
2413 &p
->phys_offset
, IO_MEM_UNASSIGNED
,
2414 addr
& TARGET_PAGE_MASK
);
2415 subpage_register(subpage
, start_addr2
, end_addr2
,
2416 phys_offset
, region_offset
);
2417 p
->region_offset
= 0;
2421 region_offset
+= TARGET_PAGE_SIZE
;
2424 /* since each CPU stores ram addresses in its TLB cache, we must
2425 reset the modified entries */
2427 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
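
/* Usage sketch (editor's illustration, not part of the original file):
 * board and device models normally map memory through the
 * cpu_register_physical_memory() wrapper, which forwards here with a
 * region_offset of 0.  The addresses, sizes and "mmio_index" below are
 * hypothetical; mmio_index would come from cpu_register_io_memory()
 * further down in this file.
 *
 *     ram_addr_t ram_offset = qemu_ram_alloc(64 * 1024 * 1024);
 *     cpu_register_physical_memory(0x00000000, 64 * 1024 * 1024,
 *                                  ram_offset | IO_MEM_RAM);
 *
 *     // An I/O region: the low bits of phys_offset select an entry in
 *     // the io_mem_* handler tables instead of a RAM page.  A region
 *     // smaller than TARGET_PAGE_SIZE takes the subpage path above.
 *     cpu_register_physical_memory(0x10001000, 0x100, mmio_index);
 */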
/* XXX: temporary until new memory mapping API */
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
{
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return IO_MEM_UNASSIGNED;
    return p->phys_offset;
}
void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_coalesce_mmio_region(addr, size);
}

void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_uncoalesce_mmio_region(addr, size);
}
#ifdef CONFIG_KQEMU
/* XXX: better than nothing */
static ram_addr_t kqemu_ram_alloc(ram_addr_t size)
{
    ram_addr_t addr;
    if ((last_ram_offset + size) > kqemu_phys_ram_size) {
        fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
                (uint64_t)size, (uint64_t)kqemu_phys_ram_size);
        abort();
    }
    addr = last_ram_offset;
    last_ram_offset = TARGET_PAGE_ALIGN(last_ram_offset + size);
    return addr;
}
#endif
#ifdef __linux__

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC       0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror("statfs");
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}
static void *file_ram_alloc(ram_addr_t memory, const char *path)
{
    char *filename;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;
    extern int mem_prealloc;

    hpagesize = gethugepagesize(path);
    if (!hpagesize)
        return NULL;

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks mmu notifiers, disabling --mem-path\n");
        return NULL;
    }

    if (asprintf(&filename, "%s/kvm.XXXXXX", path) == -1) {
        return NULL;
    }

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("mkstemp");
        free(filename);
        return NULL;
    }
    unlink(filename);
    free(filename);

    memory = (memory+hpagesize-1) & ~(hpagesize-1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother checking for errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    ftruncate(fd, memory);

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE|MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ|PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ|PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("alloc_mem_area: can't mmap hugetlbfs pages");
        close(fd);
        return NULL;
    }
    return area;
}

#else /* !__linux__ */

static void *file_ram_alloc(ram_addr_t memory, const char *path)
{
    return NULL;
}

#endif
extern const char *mem_path;

ram_addr_t qemu_ram_alloc(ram_addr_t size)
{
    RAMBlock *new_block;

#ifdef CONFIG_KQEMU
    if (kqemu_phys_ram_base) {
        return kqemu_ram_alloc(size);
    }
#endif

    size = TARGET_PAGE_ALIGN(size);
    new_block = qemu_malloc(sizeof(*new_block));

    new_block->host = file_ram_alloc(size, mem_path);
    if (!new_block->host) {
        new_block->host = qemu_vmalloc(size);
    }
    new_block->offset = last_ram_offset;
    new_block->length = size;

    new_block->next = ram_blocks;
    ram_blocks = new_block;

    phys_ram_dirty = qemu_realloc(phys_ram_dirty,
        (last_ram_offset + size) >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty + (last_ram_offset >> TARGET_PAGE_BITS),
           0xff, size >> TARGET_PAGE_BITS);

    last_ram_offset += size;

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}
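
/* Illustrative sketch (editor's addition, not in the original source):
 * device-local memory such as video RAM is usually allocated here and
 * then accessed on the host side via qemu_get_ram_ptr() below.
 * "vga_ram_size" and the guest address are hypothetical values.
 *
 *     ram_addr_t vga_ram_offset = qemu_ram_alloc(vga_ram_size);
 *     cpu_register_physical_memory(0xf0000000, vga_ram_size,
 *                                  vga_ram_offset | IO_MEM_RAM);
 *     uint8_t *vram = qemu_get_ram_ptr(vga_ram_offset);
 */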
void qemu_ram_free(ram_addr_t addr)
{
    /* TODO: implement this.  */
}
/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *prev;
    RAMBlock **prevp;
    RAMBlock *block;

#ifdef CONFIG_KQEMU
    if (kqemu_phys_ram_base) {
        return kqemu_phys_ram_base + addr;
    }
#endif
    prev = NULL;
    prevp = &ram_blocks;
    block = ram_blocks;
    while (block && (block->offset > addr
                     || block->offset + block->length <= addr)) {
        if (prev)
            prevp = &prev->next;
        prev = block;
        block = block->next;
    }
    if (!block) {
        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
        abort();
    }
    /* Move this entry to the start of the list.  */
    if (prev) {
        prev->next = block->next;
        block->next = *prevp;
        *prevp = block;
    }
    return block->host + (addr - block->offset);
}
/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
ram_addr_t qemu_ram_addr_from_host(void *ptr)
{
    RAMBlock *prev;
    RAMBlock **prevp;
    RAMBlock *block;
    uint8_t *host = ptr;

#ifdef CONFIG_KQEMU
    if (kqemu_phys_ram_base) {
        return host - kqemu_phys_ram_base;
    }
#endif
    prev = NULL;
    prevp = &ram_blocks;
    block = ram_blocks;
    while (block && (block->host > host
                     || block->host + block->length <= host)) {
        if (prev)
            prevp = &prev->next;
        prev = block;
        block = block->next;
    }
    if (!block) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }
    return block->offset + (host - block->host);
}
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC)
    do_unassigned_access(addr, 0, 0, 0, 1);
#endif
    return 0;
}

static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC)
    do_unassigned_access(addr, 0, 0, 0, 2);
#endif
    return 0;
}

static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC)
    do_unassigned_access(addr, 0, 0, 0, 4);
#endif
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC)
    do_unassigned_access(addr, 1, 0, 0, 1);
#endif
}

static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC)
    do_unassigned_access(addr, 1, 0, 0, 2);
#endif
}

static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC)
    do_unassigned_access(addr, 1, 0, 0, 4);
#endif
}

static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readw,
    unassigned_mem_readl,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writew,
    unassigned_mem_writel,
};
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stb_p(qemu_get_ram_ptr(ram_addr), val);
#ifdef CONFIG_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stw_p(qemu_get_ram_ptr(ram_addr), val);
#ifdef CONFIG_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stl_p(qemu_get_ram_ptr(ram_addr), val);
#ifdef CONFIG_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static CPUReadMemoryFunc *error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};
/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUState *env = cpu_single_env;
    target_ulong pc, cs_base;
    TranslationBlock *tb;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb = tb_find_pc(env->mem_io_pc);
                if (!tb) {
                    cpu_abort(env, "check_watchpoint: could not find TB for "
                              "pc=%p", (void *)env->mem_io_pc);
                }
                cpu_restore_state(tb, env, env->mem_io_pc, NULL);
                tb_phys_invalidate(tb, -1);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                }
                cpu_resume_from_signal(env, NULL);
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}
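
/* Editor's note (illustrative, not part of the original file): the check
 * above is reached because inserting a watchpoint forces the containing
 * page through the slow I/O path (io_mem_watch below).  A debugger front
 * end would typically arm a watchpoint roughly like this; the address
 * and length are hypothetical:
 *
 *     CPUWatchpoint *wp;
 *     cpu_watchpoint_insert(env, 0x1000, 4, BP_MEM_WRITE | BP_GDB, &wp);
 *     ...
 *     cpu_watchpoint_remove_by_ref(env, wp);
 */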
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
    return ldub_phys(addr);
}

static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
    return lduw_phys(addr);
}

static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
    return ldl_phys(addr);
}

static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
    stb_phys(addr, val);
}

static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
    stw_phys(addr, val);
}

static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
    stl_phys(addr, val);
}

static CPUReadMemoryFunc *watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};

static CPUWriteMemoryFunc *watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};
static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
                                        unsigned int len)
{
    uint32_t ret;
    unsigned int idx;

    idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif
    ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
                                       addr + mmio->region_offset[idx][0][len]);

    return ret;
}

static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
                                     uint32_t value, unsigned int len)
{
    unsigned int idx;

    idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
           mmio, len, addr, idx, value);
#endif
    (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
                                  addr + mmio->region_offset[idx][1][len],
                                  value);
}

static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 0);
}

static void subpage_writeb (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 0);
}

static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 1);
}

static void subpage_writew (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 1);
}

static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 2);
}

static void subpage_writel (void *opaque,
                            target_phys_addr_t addr, uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 2);
}

static CPUReadMemoryFunc *subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};

static CPUWriteMemoryFunc *subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset)
{
    int idx, eidx;
    unsigned int i;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    memory >>= IO_MEM_SHIFT;
    for (; idx <= eidx; idx++) {
        for (i = 0; i < 4; i++) {
            if (io_mem_read[memory][i]) {
                mmio->mem_read[idx][i] = &io_mem_read[memory][i];
                mmio->opaque[idx][0][i] = io_mem_opaque[memory];
                mmio->region_offset[idx][0][i] = region_offset;
            }
            if (io_mem_write[memory][i]) {
                mmio->mem_write[idx][i] = &io_mem_write[memory][i];
                mmio->opaque[idx][1][i] = io_mem_opaque[memory];
                mmio->region_offset[idx][1][i] = region_offset;
            }
        }
    }

    return 0;
}
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory, ram_addr_t region_offset)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = qemu_mallocz(sizeof(subpage_t));

    mmio->base = base;
    subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
    *phys = subpage_memory | IO_MEM_SUBPAGE;
    subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
                     region_offset);

    return mmio;
}
static int get_free_io_mem_idx(void)
{
    int i;

    for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
        if (!io_mem_used[i]) {
            io_mem_used[i] = 1;
            return i;
        }

    return -1;
}
/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). Functions can be omitted with a NULL function pointer.
   If io_index is non-zero, the corresponding io zone is
   modified. If it is zero, a new io zone is allocated. The return
   value can be used with cpu_register_physical_memory(). (-1) is
   returned on error. */
static int cpu_register_io_memory_fixed(int io_index,
                                        CPUReadMemoryFunc **mem_read,
                                        CPUWriteMemoryFunc **mem_write,
                                        void *opaque)
{
    int i, subwidth = 0;

    if (io_index <= 0) {
        io_index = get_free_io_mem_idx();
        if (io_index == -1)
            return io_index;
    } else {
        io_index >>= IO_MEM_SHIFT;
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0;i < 3; i++) {
        if (!mem_read[i] || !mem_write[i])
            subwidth = IO_MEM_SUBWIDTH;
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return (io_index << IO_MEM_SHIFT) | subwidth;
}
int cpu_register_io_memory(CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque);
}
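
/* Usage sketch (editor's illustration, not from the original file): a
 * device registers its MMIO callbacks here and then maps the returned
 * token with cpu_register_physical_memory().  The "mydev_*" names and
 * the opaque state pointer "s" are hypothetical.
 *
 *     static CPUReadMemoryFunc *mydev_read[3] = {
 *         mydev_readb, mydev_readw, mydev_readl,
 *     };
 *     static CPUWriteMemoryFunc *mydev_write[3] = {
 *         mydev_writeb, mydev_writew, mydev_writel,
 *     };
 *
 *     int iomemtype = cpu_register_io_memory(mydev_read, mydev_write, s);
 *     cpu_register_physical_memory(base, 0x1000, iomemtype);
 */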
void cpu_unregister_io_memory(int io_table_address)
{
    int i;
    int io_index = io_table_address >> IO_MEM_SHIFT;

    for (i=0;i < 3; i++) {
        io_mem_read[io_index][i] = unassigned_mem_read[i];
        io_mem_write[io_index][i] = unassigned_mem_write[i];
    }
    io_mem_opaque[io_index] = NULL;
    io_mem_used[io_index] = 0;
}
static void io_mem_init(void)
{
    int i;

    cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read, notdirty_mem_write, NULL);
    for (i=0; i<5; i++)
        io_mem_used[i] = 1;

    io_mem_watch = cpu_register_io_memory(watch_mem_read,
                                          watch_mem_write, NULL);
#ifdef CONFIG_KQEMU
    if (kqemu_phys_ram_base) {
        /* alloc dirty bits array */
        phys_ram_dirty = qemu_vmalloc(kqemu_phys_ram_size >> TARGET_PAGE_BITS);
        memset(phys_ram_dirty, 0xff, kqemu_phys_ram_size >> TARGET_PAGE_BITS);
    }
#endif
}

#endif /* !defined(CONFIG_USER_ONLY) */
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                target_phys_addr_t addr1 = addr;
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
                /* qemu doesn't execute guest code directly, but kvm does
                   therefore flush instruction caches */
                if (kvm_enabled())
                    flush_icache_range((unsigned long)ptr,
                                       ((unsigned long)ptr)+l);
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                target_phys_addr_t addr1 = addr;
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
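
/* Editor's sketch (not part of the original file): device emulation and
 * DMA controllers normally go through this function, usually via the
 * cpu_physical_memory_read()/cpu_physical_memory_write() wrappers.  The
 * descriptor address below is hypothetical.
 *
 *     uint8_t desc[16];
 *     cpu_physical_memory_read(desc_addr, desc, sizeof(desc));
 *     ... process the descriptor ...
 *     cpu_physical_memory_write(desc_addr, desc, sizeof(desc));
 */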
/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
typedef struct {
    void *buffer;
    target_phys_addr_t addr;
    target_phys_addr_t len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    LIST_ENTRY(MapClient) link;
} MapClient;

static LIST_HEAD(map_client_list, MapClient) map_client_list
    = LIST_HEAD_INITIALIZER(map_client_list);
void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = qemu_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    LIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    LIST_REMOVE(client, link);
    qemu_free(client);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!LIST_EMPTY(&map_client_list)) {
        client = LIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        LIST_REMOVE(client, link);
    }
}
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t done = 0;
    int l;
    uint8_t *ret = NULL;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;
    unsigned long addr1;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
            if (done || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
            }
            ptr = bounce.buffer;
        } else {
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            ptr = qemu_get_ram_ptr(addr1);
        }
        if (!done) {
            ret = ptr;
        } else if (ret + done != ptr) {
            break;
        }

        len -= l;
        addr += l;
        done += l;
    }
    *plen = done;
    return ret;
}
/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    unsigned long flush_len = (unsigned long)access_len;

    if (buffer != bounce.buffer) {
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
                addr1 += l;
                access_len -= l;
            }
            dma_flush_range((unsigned long)buffer,
                            (unsigned long)buffer + flush_len);
        }
        return;
    }
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_free(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}
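
/* Editor's sketch (not part of the original file): zero-copy DMA users
 * bracket their access with map/unmap and fall back to the map-client
 * notification when the single bounce buffer is busy.  "dma_cb" and the
 * surrounding state are hypothetical.
 *
 *     target_phys_addr_t plen = len;
 *     void *buf = cpu_physical_memory_map(addr, &plen, is_write);
 *     if (!buf) {
 *         // resources exhausted: retry when a mapping is released
 *         cpu_register_map_client(opaque, dma_cb);
 *     } else {
 *         ... access plen bytes at buf (may be less than len) ...
 *         cpu_physical_memory_unmap(buf, plen, is_write, plen);
 *     }
 */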
/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}
/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}
/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* XXX: optimize */
uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}
/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                    (0xff & ~CODE_DIRTY_FLAG);
            }
        }
    }
}
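
/* Editor's note (illustrative, not in the original file): target MMU
 * helpers use the _notdirty variants when updating page-table entries on
 * behalf of the guest, e.g. to set accessed/dirty bits, so the write is
 * not mistaken for guest-visible RAM dirtying.  PG_ACCESSED_MASK below
 * stands in for a target-specific flag; the pattern is what matters.
 *
 *     pte = ldl_phys(pte_addr);
 *     stl_phys_notdirty(pte_addr, pte | PG_ACCESSED_MASK);
 */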
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}
/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}
/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* XXX: optimize */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}
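
/* Editor's sketch (not part of the original file): these helpers give
 * device and MMU code width-specific access to guest physical memory
 * without an explicit buffer.  The addresses and values are hypothetical.
 *
 *     uint32_t magic = ldl_phys(0x1000);      // 32-bit read
 *     stw_phys(0x1004, 0xabcd);               // 16-bit write
 *     uint64_t ts = ldq_phys(0x1008);         // 64-bit read
 */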
#endif

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
#if !defined(CONFIG_USER_ONLY)
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
#endif
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
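
/* Editor's sketch (not part of the original file): the gdb stub and the
 * monitor use this entry point because it walks the guest page tables
 * via cpu_get_phys_page_debug() and may write to ROM.  The values below
 * are hypothetical.
 *
 *     uint8_t insn[4];
 *     if (cpu_memory_rw_debug(env, pc_addr, insn, sizeof(insn), 0) < 0) {
 *         // address not mapped in the guest
 *     }
 */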
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
            && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}
void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size %ld/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}
#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif