/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <sys/types.h>

#include "qemu-common.h"
#include "cache-utils.h"

#if !defined(TARGET_IA64)
#include "tcg.h"
#endif

#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#endif
//#define DEBUG_TB_INVALIDATE
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif
#define SMC_BITMAP_USE_THRESHOLD 10

#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_SPARC)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#elif defined(TARGET_ALPHA)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#define TARGET_VIRT_ADDR_SPACE_BITS 42
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_X86_64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_I386)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#elif defined(TARGET_IA64)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#else
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif
static TranslationBlock *tbs;
int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump. ARM and Sparc64
 have limited branch ranges (possibly also PPC) so place it in a
 section close to code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#elif defined(_WIN32)
/* Maximum alignment for Win32 is 16. */
#define code_gen_section                                \
    __attribute__((aligned (16)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif
uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
uint8_t *code_gen_ptr;
#if !defined(CONFIG_USER_ONLY)
uint8_t *phys_ram_dirty;
static int in_migration;

typedef struct RAMBlock {
    uint8_t *host;
    ram_addr_t offset;
    ram_addr_t length;
    struct RAMBlock *next;
} RAMBlock;

static RAMBlock *ram_blocks;
/* TODO: When we implement (and use) ram deallocation (e.g. for hotplug)
   then we can no longer assume contiguous ram offsets, and external uses
   of this variable will break.  */
ram_addr_t last_ram_offset;
#endif
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;
/* Current instruction counter.  While executing translated code this may
   include some instructions that have not yet been executed.  */
int64_t qemu_icount;
typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
    ram_addr_t region_offset;
} PhysPageDesc;
#define L2_BITS 10
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
/* XXX: this is a temporary hack for alpha target.
 *      In the future, this is to be replaced by a multi-level table
 *      to actually be able to handle the complete 64 bits address space.
 */
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
#else
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
#endif

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)
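/* Illustration, assuming a 32-bit target with 4 KiB pages (TARGET_PAGE_BITS
   == 12) and L2_BITS == 10 as defined above: L1_BITS is then 10, and both
   levels hold 1024 entries.  A virtual page index decomposes as
   bits [31:22] -> l1_map slot, bits [21:12] -> PageDesc inside the
   second-level block, bits [11:0] -> byte offset within the page. */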
unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;
/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
static PhysPageDesc **l1_phys_map;
#if !defined(CONFIG_USER_ONLY)
static void io_mem_init(void);

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static char io_mem_used[IO_MEM_NB_ENTRIES];
static int io_mem_watch;
#endif
/* log support */
static const char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    CPUReadMemoryFunc * const *mem_read[TARGET_PAGE_SIZE][4];
    CPUWriteMemoryFunc * const *mem_write[TARGET_PAGE_SIZE][4];
    void *opaque[TARGET_PAGE_SIZE][2][4];
    ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
} subpage_t;
#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));

#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
    {
        long long startaddr, endaddr;
        FILE *f;
        int n;

        last_brk = (unsigned long)sbrk(0);
        f = fopen("/proc/self/maps", "r");
        if (f) {
            do {
                n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
                if (n == 2) {
                    startaddr = MIN(startaddr,
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    endaddr = MIN(endaddr,
                                  (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    page_set_flags(startaddr & TARGET_PAGE_MASK,
                                   TARGET_PAGE_ALIGN(endaddr),
                                   PAGE_RESERVED);
                }
            } while (!feof(f));
            fclose(f);
        }
    }
#endif
}
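/* Illustration, assuming a 4 KiB target page: on a host with 64 KiB pages
   (e.g. some PPC64 hosts), page_init() leaves qemu_host_page_size == 65536,
   qemu_host_page_bits == 16 and qemu_host_page_mask == ~0xffffUL, so one
   host page covers 16 target pages.  page_unprotect() further down must
   therefore change the protection of 16 target pages at a time. */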
static inline PageDesc **page_l1_map(target_ulong index)
{
#if TARGET_LONG_BITS > 32
    /* Host memory outside guest VM.  For 32-bit targets we have already
       excluded high addresses.  */
    if (index > ((target_ulong)L2_SIZE * L1_SIZE))
        return NULL;
#endif
    return &l1_map[index >> L2_BITS];
}
static inline PageDesc *page_find_alloc(target_ulong index)
{
    PageDesc **lp, *p;
    lp = page_l1_map(index);
    if (!lp)
        return NULL;

    p = *lp;
    if (!p) {
        /* allocate if not found */
#if defined(CONFIG_USER_ONLY)
        size_t len = sizeof(PageDesc) * L2_SIZE;
        /* Don't use qemu_malloc because it may recurse.  */
        p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        *lp = p;
        if (h2g_valid(p)) {
            unsigned long addr = h2g(p);
            page_set_flags(addr & TARGET_PAGE_MASK,
                           TARGET_PAGE_ALIGN(addr + len),
                           PAGE_RESERVED);
        }
#else
        p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
        *lp = p;
#endif
    }
    return p + (index & (L2_SIZE - 1));
}
static inline PageDesc *page_find(target_ulong index)
{
    PageDesc **lp, *p;
    lp = page_l1_map(index);
    if (!lp)
        return NULL;

    p = *lp;
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;
    PhysPageDesc *pd;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    pd = *lp;
    if (!pd) {
        int i;
        /* allocate if not found */
        if (!alloc)
            return NULL;
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        *lp = pd;
        for (i = 0; i < L2_SIZE; i++) {
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
            pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
        }
    }
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
}
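/* Worked lookup, assuming L1_BITS == L2_BITS == 10 and 4 KiB pages: a
   36-bit physical address yields a 24-bit page index, so
   (index >> 20) & 1023 selects the extra top level compiled in when
   TARGET_PHYS_ADDR_SPACE_BITS > 32, (index >> 10) & 1023 selects the
   second-level table, and index & 1023 the PhysPageDesc itself.  Tables
   are materialised lazily on first touch, with every fresh leaf marked
   IO_MEM_UNASSIGNED. */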
static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}
#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif
#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc will be used */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
#endif
static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        /* in user mode, phys_ram_size is not meaningful */
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Map the buffer below 32M, so we can use direct calls and branches */
        flags |= MAP_FIXED;
        start = (void *) 0x01000000UL;
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__DragonFly__)
    {
        int flags;
        void *addr = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        code_gen_max_block_size();
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}
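/* Sizing example, assuming CODE_GEN_AVG_BLOCK_SIZE is 128 bytes: a guest
   with 512 MB RAM gets a 128 MB dynamic buffer (ram_size / 4), while the
   static user-mode buffer stays at the 32 MB default.  Either way,
   code_gen_max_blocks becomes buffer_size / 128 (about 256k TBs for
   32 MB), and code_gen_buffer_max_size reserves one maximum-sized block
   of headroom so a translation in progress can never overrun the buffer. */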
/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void cpu_exec_init_all(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    page_init();
#if !defined(CONFIG_USER_ONLY)
    io_mem_init();
#endif
}
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static void cpu_common_pre_save(const void *opaque)
{
    CPUState *env = (void *)opaque;

    cpu_synchronize_state(env);
}

static int cpu_common_pre_load(void *opaque)
{
    CPUState *env = opaque;

    cpu_synchronize_state(env);
    return 0;
}

static int cpu_common_post_load(void *opaque)
{
    CPUState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .pre_save = cpu_common_pre_save,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
#endif
CPUState *qemu_get_cpu(int cpu)
{
    CPUState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}
void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifdef _WIN32
    env->thread_id = GetCurrentProcessId();
#else
    env->thread_id = getpid();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(cpu_index, &vmstate_cpu_common, env);
    register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}
static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}
/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}
#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;

    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}
/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif
/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}
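/* Note on the pointer encoding used above: the low two bits of jmp_first
   and of each jmp_next link tag which outgoing jump slot (0 or 1) of the
   pointed-to TB the link came through; the tag value 2 marks the list
   head stored in jmp_first itself, which is why an "empty" TB is
   initialised to (long)tb | 2.  Masking with ~3 recovers the real
   pointer. */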
/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}
void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    target_phys_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
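/* Worked example: set_bits(tab, 5, 7) marks bits 5..11.  end == 12, so
   first and last bits land in different bytes: byte 0 gets mask
   0xff << 5 == 0xe0 (bits 5..7), the full-byte loop is skipped (start
   becomes 8, end1 == 8), and byte 1 gets mask ~(0xff << 4) == 0x0f
   (bits 8..11). */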
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info. */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
    return tb;
}
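/* Example of the two-page case, assuming 4 KiB pages: a TB translated
   from pc == 0x1ff8 with size 0x10 has its last guest byte at 0x2007, so
   virt_page2 == 0x2000 differs from pc's page 0x1000 and the TB is
   linked into both physical pages; a write to either page will then
   invalidate it. */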
/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env;
    target_ulong tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_io_pc, NULL);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}
/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                  cpu_single_env->mem_io_vaddr, len,
                  cpu_single_env->eip,
                  cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
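/* How the fast check works: code_bitmap holds one bit per byte of the
   page.  Because len <= 8 and start is len-aligned, the len bits of
   interest never straddle a byte of the bitmap, so a single shift and
   mask tests the whole write; e.g. a 4-byte write at page offset 0x124
   tests bits 4..7 of code_bitmap[0x24]. */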
#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_phys_addr_t addr,
                                    unsigned long pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif
/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, target_ulong page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
            page_get_flags(addr);
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}
/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}
void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single use temporary TB
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}
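/* The binary search is valid because tbs[] entries are allocated in the
   order their code is emitted, so their tc_ptr values are strictly
   increasing.  When the loop falls through without an exact hit,
   tbs[m_max] is the last block whose tc_ptr lies below the probe, i.e.
   the block containing the host PC. */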
static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure now that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}
#if defined(TARGET_HAS_ICE)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    target_ulong pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = qemu_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}
/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}
/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    qemu_free(watchpoint);
}
/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = qemu_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}
/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}
/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    qemu_free(breakpoint);
#endif
}
/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled())
            kvm_update_guest_debug(env, 0);
        else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}
/* enable or disable low levels log */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static char logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#elif !defined(_WIN32)
        /* Win32 doesn't support line-buffering and requires size >= 2 */
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        log_append = 1;
    }
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    cpu_set_log(loglevel);
}
static void cpu_unlink_tb(CPUState *env)
{
#if defined(CONFIG_USE_NPTL)
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
#else
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    tb = env->current_tb;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
        resetlock(&interrupt_lock);
    }
#endif
}
/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    int old_mask;

    old_mask = env->interrupt_request;
    env->interrupt_request |= mask;
    if (kvm_enabled() && !qemu_kvm_irqchip_in_kernel())
        kvm_update_interrupt_request(env);

#ifndef CONFIG_USER_ONLY
    /*
     * If called from iothread context, wake the target cpu in
     * case its halted.
     */
    if (!qemu_cpu_self(env)) {
        qemu_cpu_kick(env);
        return;
    }
#endif

    if (use_icount) {
        env->icount_decr.u16.high = 0xffff;
#ifndef CONFIG_USER_ONLY
        if (!can_do_io(env)
            && (mask & ~old_mask) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
#endif
    } else {
        cpu_unlink_tb(env);
    }
}
void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

void cpu_exit(CPUState *env)
{
    env->exit_request = 1;
    cpu_unlink_tb(env);
}
const CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops "
#ifdef TARGET_I386
      "before eflags optimization and "
#endif
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
    { CPU_LOG_RESET, "cpu_reset",
      "show CPU state before CPU resets" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};
static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}
/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    const CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if(cmp1(p,p1-p,"all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
#ifdef TARGET_I386
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        log_cpu_state(env, 0);
#endif
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
    abort();
}
CPUState *cpu_copy(CPUState *env)
{
    CPUState *new_env = cpu_init(env->cpu_model_str);
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    memcpy(new_env, env, sizeof(CPUState));

    /* Preserve chaining and index.  */
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}
#if !defined(CONFIG_USER_ONLY)

static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}
static CPUTLBEntry s_cputlb_empty_entry = {
    .addr_read  = -1,
    .addr_write = -1,
    .addr_code  = -1,
    .addend     = -1,
};

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        int mmu_idx;
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
        }
    }

    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

    tlb_flush_count++;
}
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        *tlb_entry = s_cputlb_empty_entry;
    }
}
void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;
    int mmu_idx;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);

    tlb_flush_jmp_cache(env, addr);
}
/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
}
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
        }
    }
}
/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i, mask, len;
    uint8_t *p;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    len = length >> TARGET_PAGE_BITS;
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    for(i = 0; i < len; i++)
        p[i] &= mask;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (unsigned long)qemu_get_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((unsigned long)qemu_get_ram_ptr(end - 1) - start1
            != (end - 1) - start) {
        abort();
    }

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        int mmu_idx;
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            for(i = 0; i < CPU_TLB_SIZE; i++)
                tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
                                      start1, length);
        }
    }
}
int cpu_physical_memory_set_dirty_tracking(int enable)
{
    if (kvm_enabled()) {
        return kvm_set_migration_log(enable);
    }
    in_migration = enable;
    return 0;
}

int cpu_physical_memory_get_dirty_tracking(void)
{
    return in_migration;
}

int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
                                   target_phys_addr_t end_addr)
{
    int ret = 0;

    if (kvm_enabled())
        ret = kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
    return ret;
}
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;
    void *p;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
            + tlb_entry->addend);
        ram_addr = qemu_ram_addr_from_host(p);
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }
}
/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
{
    int i;
    int mmu_idx;
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
    }
}
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
        tlb_entry->addr_write = vaddr;
}

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
{
    int i;
    int mmu_idx;

    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
}
/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    target_phys_addr_t addend;
    int ret;
    CPUTLBEntry *te;
    CPUWatchpoint *wp;
    target_phys_addr_t iotlb;

    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
#endif

    ret = 0;
    address = vaddr;
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
        /* IO memory case (romd handled later) */
        address |= TLB_MMIO;
    }
    addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
        /* Normal RAM.  */
        iotlb = pd & TARGET_PAGE_MASK;
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
            iotlb |= IO_MEM_NOTDIRTY;
        else
            iotlb |= IO_MEM_ROM;
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
        iotlb = (pd & ~TARGET_PAGE_MASK);
        if (p) {
            iotlb += p->region_offset;
        } else {
            iotlb += paddr;
        }
    }

    code_address = address;
    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            iotlb = io_mem_watch + paddr;
            /* TODO: The memory case can be optimized by not trapping
               reads of pages with a write breakpoint.  */
            address |= TLB_MMIO;
        }
    }

    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
    te = &env->tlb_table[mmu_idx][index];
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    } else {
        te->addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    } else {
        te->addr_code = -1;
    }
    if (prot & PAGE_WRITE) {
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
            (pd & IO_MEM_ROMD)) {
            /* Write access calls the I/O callback.  */
            te->addr_write = address | TLB_MMIO;
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                   !cpu_physical_memory_is_dirty(pd)) {
            te->addr_write = address | TLB_NOTDIRTY;
        } else {
            te->addr_write = address;
        }
    } else {
        te->addr_write = -1;
    }
    return ret;
}
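/* Address arithmetic used above, with hypothetical numbers: if guest page
   vaddr 0x40001000 is backed by host memory at 0x7f2a10001000, then
   te->addend == 0x7f2a10001000 - 0x40001000, and the softmmu fast path
   later computes host = guest_addr + addend for any byte in that page.
   The iotlb entry is biased by -vaddr the same way, so adding the
   faulting guest address back yields the handler index plus region
   offset. */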
#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
{
    return 0;
}
/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */
int walk_memory_regions(void *priv,
                        int (*fn)(void *, unsigned long, unsigned long, unsigned long))
{
    unsigned long start, end;
    PageDesc *p = NULL;
    int i, j, prot, prot1;
    int rc = 0;

    start = end = -1;
    prot = 0;

    for (i = 0; i <= L1_SIZE; i++) {
        p = (i < L1_SIZE) ? l1_map[i] : NULL;
        for (j = 0; j < L2_SIZE; j++) {
            prot1 = (p == NULL) ? 0 : p[j].flags;
            /*
             * "region" is one continuous chunk of memory
             * that has same protection flags set.
             */
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    rc = (*fn)(priv, start, end, prot);
                    /* callback can stop iteration by returning != 0 */
                    if (rc != 0)
                        return (rc);
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (p == NULL)
                break;
        }
    }
    return (rc);
}
static int dump_region(void *priv, unsigned long start,
                       unsigned long end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    (void) fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                   start, end, end - start,
                   ((prot & PAGE_READ) ? 'r' : '-'),
                   ((prot & PAGE_WRITE) ? 'w' : '-'),
                   ((prot & PAGE_EXEC) ? 'x' : '-'));

    return (0);
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    (void) fprintf(f, "%-8s %-8s %-8s %s\n",
                   "start", "end", "size", "prot");
    walk_memory_regions(f, dump_region);
}
int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}
/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is positioned automatically
   depending on PAGE_WRITE */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    PageDesc *p;
    target_ulong addr;

    /* mmap_lock should already be held.  */
    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* We may be called for host regions that are outside guest
           address space.  */
        if (!p)
            return;
        /* if the write protection is set, then we invalidate the code
           inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
}
int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    if (start + len < start)
        /* we've wrapped around */
        return -1;

    end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
    start = start & TARGET_PAGE_MASK;

    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if( !p )
            return -1;
        if( !(p->flags & PAGE_VALID) )
            return -1;

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
            return -1;
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG))
                return -1;
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL))
                    return -1;
            }
            return 0;
        }
    }
    return 0;
}
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
    mmap_lock();

    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1) {
        mmap_unlock();
        return 0;
    }
    host_end = host_start + qemu_host_page_size;
    p = p1;
    prot = 0;
    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)g2h(host_start), qemu_host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            mmap_unlock();
            return 1;
        }
    }
    mmap_unlock();
    return 0;
}
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset);
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory, ram_addr_t region_offset);
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
                      need_subpage)                                     \
    do {                                                                \
        if (addr > start_addr)                                          \
            start_addr2 = 0;                                            \
        else {                                                          \
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
            if (start_addr2 > 0)                                        \
                need_subpage = 1;                                       \
        }                                                               \
                                                                        \
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
        else {                                                          \
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
                need_subpage = 1;                                       \
        }                                                               \
    } while (0)
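/* Example, assuming 4 KiB target pages: registering a 0x100-byte device
   at physical address 0x10000234 gives start_addr2 == 0x234 (> 0) and
   end_addr2 == 0x333 (< 0xfff), so need_subpage is set at both ends and
   the page gets a subpage_t that dispatches per address within the page
   instead of one handler for the whole page. */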
/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page. The address used when calling the IO function is
   the offset from the start of the region, plus region_offset. Both
   start_addr and region_offset are rounded down to a page boundary
   before calculating this offset. This should not be a problem unless
   the low bits of start_addr and region_offset differ. */
void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
                                         ram_addr_t size,
                                         ram_addr_t phys_offset,
                                         ram_addr_t region_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;
    ram_addr_t orig_size = size;
    void *subpage;

    if (kvm_enabled())
        kvm_set_phys_mem(start_addr, size, phys_offset);

    if (phys_offset == IO_MEM_UNASSIGNED) {
        region_offset = start_addr;
    }
    region_offset &= TARGET_PAGE_MASK;
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            ram_addr_t orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                          need_subpage);

            if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory,
                                           p->region_offset);
                } else {
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                                            >> IO_MEM_SHIFT];
                }
                subpage_register(subpage, start_addr2, end_addr2, phys_offset,
                                 region_offset);
                p->region_offset = 0;
            } else {
                p->phys_offset = phys_offset;
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                    (phys_offset & IO_MEM_ROMD))
                    phys_offset += TARGET_PAGE_SIZE;
            }
        } else {
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            p->region_offset = region_offset;
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                (phys_offset & IO_MEM_ROMD)) {
                phys_offset += TARGET_PAGE_SIZE;
            } else {
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);

                if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, IO_MEM_UNASSIGNED,
                                           addr & TARGET_PAGE_MASK);
                    subpage_register(subpage, start_addr2, end_addr2,
                                     phys_offset, region_offset);
                    p->region_offset = 0;
                }
            }
        }
        region_offset += TARGET_PAGE_SIZE;
    }

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}

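/* Illustrative sketch, not part of the build: a board model would
   normally go through cpu_register_physical_memory(), the wrapper that
   calls the function above with region_offset == 0.  The function name,
   addresses, sizes and mmio_index parameter below are hypothetical. */
#if 0
static void example_board_map(ram_addr_t ram_offset, int mmio_index)
{
    /* 64MB of RAM at guest physical address 0 (IO_MEM_RAM is 0;
       or-ing it in just documents the intent) */
    cpu_register_physical_memory(0x00000000, 64 * 1024 * 1024,
                                 ram_offset | IO_MEM_RAM);
    /* one page of device MMIO; the low bits of phys_offset select the
       io-memory slot obtained from cpu_register_io_memory() */
    cpu_register_physical_memory(0x10000000, TARGET_PAGE_SIZE, mmio_index);
}
#endif
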
/* XXX: temporary until new memory mapping API */
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
{
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return IO_MEM_UNASSIGNED;
    return p->phys_offset;
}

void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_coalesce_mmio_region(addr, size);
}

void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_uncoalesce_mmio_region(addr, size);
}

#ifdef __linux__

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC       0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}

static void *file_ram_alloc(ram_addr_t memory, const char *path)
{
    char *filename;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;
    extern int mem_prealloc;

    if (!path)
        return NULL;

    hpagesize = gethugepagesize(path);
    if (!hpagesize)
        return NULL;

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks mmu notifiers, disabling --mem-path\n");
        return NULL;
    }

    if (asprintf(&filename, "%s/kvm.XXXXXX", path) == -1) {
        return NULL;
    }

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("mkstemp");
        free(filename);
        return NULL;
    }
    unlink(filename);
    free(filename);

    memory = (memory+hpagesize-1) & ~(hpagesize-1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother checking for errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE|MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ|PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ|PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("alloc_mem_area: can't mmap hugetlbfs pages");
        close(fd);
        return NULL;
    }
    return area;
}

#else /* !__linux__ */

static void *file_ram_alloc(ram_addr_t memory, const char *path)
{
    return NULL;
}

#endif /* !__linux__ */

extern const char *mem_path;

ram_addr_t qemu_ram_alloc(ram_addr_t size)
{
    RAMBlock *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = qemu_malloc(sizeof(*new_block));

    new_block->host = file_ram_alloc(size, mem_path);
    if (!new_block->host) {
        new_block->host = qemu_vmalloc(size);
    }
    new_block->offset = last_ram_offset;
    new_block->length = size;

    new_block->next = ram_blocks;
    ram_blocks = new_block;

    phys_ram_dirty = qemu_realloc(phys_ram_dirty,
        (last_ram_offset + size) >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty + (last_ram_offset >> TARGET_PAGE_BITS),
           0xff, size >> TARGET_PAGE_BITS);

    last_ram_offset += size;

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}

void qemu_ram_free(ram_addr_t addr)
{
    /* TODO: implement this.  */
}

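/* Illustrative sketch, not part of the build: qemu_ram_alloc() returns an
   offset into the global RAM space, which a board then maps into the
   guest physical address space; the 0x80000000 base is hypothetical. */
#if 0
static void example_alloc_and_map(void)
{
    ram_addr_t off = qemu_ram_alloc(16 * 1024 * 1024);

    cpu_register_physical_memory(0x80000000, 16 * 1024 * 1024,
                                 off | IO_MEM_RAM);
}
#endif
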
/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *prev;
    RAMBlock **prevp;
    RAMBlock *block;

    prev = NULL;
    prevp = &ram_blocks;
    block = ram_blocks;
    while (block && (block->offset > addr
                     || block->offset + block->length <= addr)) {
        if (prev)
            prevp = &prev->next;
        prev = block;
        block = block->next;
    }
    if (!block) {
        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
        abort();
    }
    /* Move this entry to the start of the list.  */
    if (prev) {
        prev->next = block->next;
        block->next = *prevp;
        *prevp = block;
    }
    return block->host + (addr - block->offset);
}

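/* Illustrative sketch, not part of the build: per the comment above,
   qemu_get_ram_ptr() suits device-owned memory such as video RAM, where
   the device allocated the block and stays within its length.  The
   helper name and parameters are hypothetical. */
#if 0
static void example_clear_vram(ram_addr_t vram_offset, int vram_size)
{
    uint8_t *vram = qemu_get_ram_ptr(vram_offset);

    memset(vram, 0, vram_size);   /* known to be within the block */
}
#endif
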
int do_qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *prev;
    RAMBlock **prevp;
    RAMBlock *block;
    uint8_t *host = ptr;

    prev = NULL;
    prevp = &ram_blocks;
    block = ram_blocks;
    while (block && (block->host > host
                     || block->host + block->length <= host)) {
        if (prev)
            prevp = &prev->next;
        prev = block;
        block = block->next;
    }
    if (!block)
        return 1;
    *ram_addr = block->offset + (host - block->host);
    return 0;
}

/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
ram_addr_t qemu_ram_addr_from_host(void *ptr)
{
    ram_addr_t ram_addr;

    if (do_qemu_ram_addr_from_host(ptr, &ram_addr)) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }
    return ram_addr;
}

static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 1);
#endif
    return 0;
}

static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 2);
#endif
    return 0;
}

static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 4);
#endif
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 1);
#endif
}

static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 2);
#endif
}

static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 4);
#endif
}

static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readw,
    unassigned_mem_readl,
};

static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writew,
    unassigned_mem_writel,
};

static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stb_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stw_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stl_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static CPUReadMemoryFunc * const error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};

/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUState *env = cpu_single_env;
    target_ulong pc, cs_base;
    TranslationBlock *tb;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb = tb_find_pc(env->mem_io_pc);
                if (!tb) {
                    cpu_abort(env, "check_watchpoint: could not find TB for "
                              "pc=%p", (void *)env->mem_io_pc);
                }
                cpu_restore_state(tb, env, env->mem_io_pc, NULL);
                tb_phys_invalidate(tb, -1);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                }
                cpu_resume_from_signal(env, NULL);
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}

/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
    return ldub_phys(addr);
}

static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
    return lduw_phys(addr);
}

static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
    return ldl_phys(addr);
}

static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
    stb_phys(addr, val);
}

static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
    stw_phys(addr, val);
}

static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
    stl_phys(addr, val);
}

static CPUReadMemoryFunc * const watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};

static CPUWriteMemoryFunc * const watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};

static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
                                        unsigned int len)
{
    uint32_t ret;
    unsigned int idx;

    idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif
    ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
                                       addr + mmio->region_offset[idx][0][len]);

    return ret;
}

static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
                                     uint32_t value, unsigned int len)
{
    unsigned int idx;

    idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
           mmio, len, addr, idx, value);
#endif
    (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
                                  addr + mmio->region_offset[idx][1][len],
                                  value);
}

static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 0);
}

static void subpage_writeb (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 0);
}

static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 1);
}

static void subpage_writew (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 1);
}

static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 2);
}

static void subpage_writel (void *opaque,
                            target_phys_addr_t addr, uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 2);
}

static CPUReadMemoryFunc * const subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};

static CPUWriteMemoryFunc * const subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset)
{
    int idx, eidx;
    unsigned int i;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    memory >>= IO_MEM_SHIFT;
    for (; idx <= eidx; idx++) {
        for (i = 0; i < 4; i++) {
            if (io_mem_read[memory][i]) {
                mmio->mem_read[idx][i] = &io_mem_read[memory][i];
                mmio->opaque[idx][0][i] = io_mem_opaque[memory];
                mmio->region_offset[idx][0][i] = region_offset;
            }
            if (io_mem_write[memory][i]) {
                mmio->mem_write[idx][i] = &io_mem_write[memory][i];
                mmio->opaque[idx][1][i] = io_mem_opaque[memory];
                mmio->region_offset[idx][1][i] = region_offset;
            }
        }
    }

    return 0;
}

static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory, ram_addr_t region_offset)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = qemu_mallocz(sizeof(subpage_t));

    mmio->base = base;
    subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
    *phys = subpage_memory | IO_MEM_SUBPAGE;
    subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
                     region_offset);

    return mmio;
}

static int get_free_io_mem_idx(void)
{
    int i;

    for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
        if (!io_mem_used[i]) {
            io_mem_used[i] = 1;
            return i;
        }

    return -1;
}

/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). Functions can be omitted with a NULL function pointer.
   If io_index is non zero, the corresponding io zone is
   modified. If it is zero, a new io zone is allocated. The return
   value can be used with cpu_register_physical_memory(); -1 is
   returned on error. */
static int cpu_register_io_memory_fixed(int io_index,
                                        CPUReadMemoryFunc * const *mem_read,
                                        CPUWriteMemoryFunc * const *mem_write,
                                        void *opaque)
{
    int i, subwidth = 0;

    if (io_index <= 0) {
        io_index = get_free_io_mem_idx();
        if (io_index == -1)
            return io_index;
    } else {
        io_index >>= IO_MEM_SHIFT;
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0;i < 3; i++) {
        if (!mem_read[i] || !mem_write[i])
            subwidth = IO_MEM_SUBWIDTH;
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return (io_index << IO_MEM_SHIFT) | subwidth;
}

int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
                           CPUWriteMemoryFunc * const *mem_write,
                           void *opaque)
{
    return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque);
}

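/* Illustrative sketch, not part of the build: a minimal device using the
   io-memory table.  All example_* names are hypothetical.  Leaving the
   byte/word slots NULL makes cpu_register_io_memory_fixed() tag the
   region with IO_MEM_SUBWIDTH, so narrower accesses go through the
   subpage machinery above. */
#if 0
static uint32_t example_readl(void *opaque, target_phys_addr_t addr)
{
    return 0;                      /* device register read */
}

static void example_writel(void *opaque, target_phys_addr_t addr,
                           uint32_t val)
{
    /* latch val into device state */
}

static CPUReadMemoryFunc * const example_read[3] = {
    NULL, NULL, example_readl,
};
static CPUWriteMemoryFunc * const example_write[3] = {
    NULL, NULL, example_writel,
};

static void example_device_init(target_phys_addr_t base)
{
    int io = cpu_register_io_memory(example_read, example_write, NULL);

    cpu_register_physical_memory(base, TARGET_PAGE_SIZE, io);
}
#endif
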
void cpu_unregister_io_memory(int io_table_address)
{
    int i;
    int io_index = io_table_address >> IO_MEM_SHIFT;

    for (i=0;i < 3; i++) {
        io_mem_read[io_index][i] = unassigned_mem_read[i];
        io_mem_write[io_index][i] = unassigned_mem_write[i];
    }
    io_mem_opaque[io_index] = NULL;
    io_mem_used[io_index] = 0;
}

static void io_mem_init(void)
{
    int i;

    cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read, notdirty_mem_write, NULL);
    for (i=0; i<5; i++)
        io_mem_used[i] = 1;

    io_mem_watch = cpu_register_io_memory(watch_mem_read,
                                          watch_mem_write, NULL);
}

#endif /* !defined(CONFIG_USER_ONLY) */

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                target_phys_addr_t addr1 = addr;
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
                /* qemu doesn't execute guest code directly, but kvm does
                   therefore flush instruction caches */
                if (kvm_enabled())
                    flush_icache_range((unsigned long)ptr,
                                       ((unsigned long)ptr)+l);
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                target_phys_addr_t addr1 = addr;
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

typedef struct {
    void *buffer;
    target_phys_addr_t addr;
    target_phys_addr_t len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = qemu_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    QLIST_REMOVE(client, link);
    qemu_free(client);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
    }
}

/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t done = 0;
    int l;
    uint8_t *ret = NULL;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;
    unsigned long addr1;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
            if (done || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
            }
            ptr = bounce.buffer;
        } else {
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            ptr = qemu_get_ram_ptr(addr1);
        }
        if (!done) {
            ret = ptr;
        } else if (ret + done != ptr) {
            break;
        }

        len -= l;
        addr += l;
        done += l;
    }
    *plen = done;
    return ret;
}

/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    unsigned long flush_len = (unsigned long)access_len;

    if (buffer != bounce.buffer) {
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
                addr1 += l;
                access_len -= l;
            }
            dma_flush_range((unsigned long)buffer,
                            (unsigned long)buffer + flush_len);
        }
        return;
    }
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_free(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}

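/* Illustrative sketch, not part of the build, of the map/use/unmap DMA
   pattern documented above: the mapping may be shorter than requested,
   and NULL means resources (e.g. the single bounce buffer) are busy, in
   which case a real caller would register a map client and retry.  The
   helper name is hypothetical. */
#if 0
static void example_dma_to_guest(target_phys_addr_t gpa, const uint8_t *src,
                                 target_phys_addr_t len)
{
    target_phys_addr_t plen = len;
    void *host = cpu_physical_memory_map(gpa, &plen, 1 /* is_write */);

    if (!host)
        return;                 /* retry via cpu_register_map_client() */
    memcpy(host, src, plen);    /* plen may be smaller than len */
    cpu_physical_memory_unmap(host, plen, 1, plen);
}
#endif
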
/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}

/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}

/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* XXX: optimize */
uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}

/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                    (0xff & ~CODE_DIRTY_FLAG);
            }
        }
    }
}

void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}

/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}

/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* XXX: optimize */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}

#endif

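/* Illustrative sketch, not part of the build: the ldX_phys/stX_phys
   helpers above give word-sized, target-byte-order access to guest
   physical memory; the descriptor layout and DONE bit below are
   hypothetical. */
#if 0
static void example_complete_descriptor(target_phys_addr_t desc)
{
    uint32_t status = ldl_phys(desc);   /* read 32-bit status field */

    stl_phys(desc, status | 1);         /* set a hypothetical DONE bit */
}
#endif
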
/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
#if !defined(CONFIG_USER_ONLY)
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
#endif
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

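/* Illustrative sketch, not part of the build: cpu_memory_rw_debug() is
   what the gdbstub uses for guest virtual addresses; it translates page
   by page with cpu_get_phys_page_debug() and can patch ROM.  The helper
   name is hypothetical. */
#if 0
static int example_peek_u32(CPUState *env, target_ulong va, uint32_t *out)
{
    return cpu_memory_rw_debug(env, va, (uint8_t *)out, 4, 0);
}
#endif
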
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
            && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}

void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %ld/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
#ifdef CONFIG_PROFILER
    tcg_dump_info(f, cpu_fprintf);
#endif
}

#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif