/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <sys/types.h>

#include "qemu-common.h"
#include "qemu-timer.h"
#include "exec-memory.h"
#if defined(CONFIG_USER_ONLY)
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <machine/profile.h>
#else /* !CONFIG_USER_ONLY */
#include "xen-mapcache.h"

#define WANT_EXEC_OBSOLETE
#include "exec-obsolete.h"
//#define DEBUG_TB_INVALIDATE
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */

#define SMC_BITMAP_USE_THRESHOLD 10
static TranslationBlock *tbs;
static int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump. ARM and Sparc64
 have limited branch ranges (possibly also PPC) so place it in a
 section close to code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
/* Maximum alignment for Win32 is 16. */
#define code_gen_section                                \
    __attribute__((aligned (16)))
#define code_gen_section                                \
    __attribute__((aligned (32)))

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
static uint8_t *code_gen_ptr;
#if !defined(CONFIG_USER_ONLY)
static int in_migration;

RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUState *,cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define L2_SIZE (1 << L2_BITS)

/* The bits remaining after N lower levels of page tables.  */
#define P_L1_BITS_REM \
    ((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

/* Size of the L1 page table.  Avoid silly small sizes.  */
#if P_L1_BITS_REM < 4
#define P_L1_BITS  (P_L1_BITS_REM + L2_BITS)
#else
#define P_L1_BITS  P_L1_BITS_REM
#endif

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define P_L1_SIZE  ((target_phys_addr_t)1 << P_L1_BITS)
#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS)
#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
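/* Added worked example (assumes L2_BITS == 10, as in the original tree, and
   TARGET_PAGE_BITS == 12): a 32-bit user-mode address space leaves
   32 - 12 = 20 bits of page index.  20 % 10 == 0 is below 4, so V_L1_BITS is
   widened to 10 and V_L1_SHIFT becomes 10: a lookup is one 1024-entry L1
   table followed by a single 1024-entry leaf table of PageDesc. */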
unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];
#if !defined(CONFIG_USER_ONLY)
typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
    ram_addr_t region_offset;
} PhysPageDesc;

/* This is a multi-level map on the physical address space.
   The bottom level has pointers to PhysPageDesc.  */
static void *l1_phys_map[P_L1_SIZE];

static void io_mem_init(void);
static void memory_map_init(void);
/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static char io_mem_used[IO_MEM_NB_ENTRIES];
static int io_mem_watch;

#ifdef _WIN32
static const char *logfilename = "qemu.log";
#else
static const char *logfilename = "/tmp/qemu.log";
#endif
static int log_append = 0;

#if !defined(CONFIG_USER_ONLY)
static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;
#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    SYSTEM_INFO system_info;

    GetSystemInfo(&system_info);
    qemu_real_host_page_size = system_info.dwPageSize;
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
#ifdef HAVE_KINFO_GETVMMAP
    struct kinfo_vmentry *freep;

    freep = kinfo_getvmmap(getpid(), &cnt);
    for (i = 0; i < cnt; i++) {
        unsigned long startaddr, endaddr;

        startaddr = freep[i].kve_start;
        endaddr = freep[i].kve_end;
        if (h2g_valid(startaddr)) {
            startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

            if (h2g_valid(endaddr)) {
                endaddr = h2g(endaddr);
                page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                page_set_flags(startaddr, endaddr, PAGE_RESERVED);

    last_brk = (unsigned long)sbrk(0);

    f = fopen("/compat/linux/proc/self/maps", "r");
        unsigned long startaddr, endaddr;

        n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

        if (n == 2 && h2g_valid(startaddr)) {
            startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

            if (h2g_valid(endaddr)) {
                endaddr = h2g(endaddr);
            page_set_flags(startaddr, endaddr, PAGE_RESERVED);
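/* page_find_alloc() walks the multi-level l1_map for page 'index';
   intermediate levels are allocated lazily when 'alloc' is non-zero,
   otherwise the walk stops and returns NULL at the first missing level. */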
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
#if defined(CONFIG_USER_ONLY)
    /* We can't use g_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = g_malloc0(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        ALLOC(p, sizeof(void *) * L2_SIZE);
        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    ALLOC(pd, sizeof(PageDesc) * L2_SIZE);

    return pd + (index & (L2_SIZE - 1));
}
static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}
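/* The physical-address map below mirrors l1_map but is indexed by physical
   page number; its leaves are PhysPageDesc entries holding phys_offset
   (host page offset plus io_index in the low bits) and region_offset. */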
#if !defined(CONFIG_USER_ONLY)

static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    /* Level 1.  Always allocated.  */
    lp = l1_phys_map + ((index >> P_L1_SHIFT) & (P_L1_SIZE - 1));

    for (i = P_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        *lp = p = g_malloc0(sizeof(void *) * L2_SIZE);
        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    int first_index = index & ~(L2_SIZE - 1);

    *lp = pd = g_malloc(sizeof(PhysPageDesc) * L2_SIZE);

    for (i = 0; i < L2_SIZE; i++) {
        pd[i].phys_offset = IO_MEM_UNASSIGNED;
        pd[i].region_offset = (first_index + i) << TARGET_PAGE_BITS;
    }

    return pd + (index & (L2_SIZE - 1));
}
static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}

static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);

#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc will be used */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
               __attribute__((aligned (CODE_GEN_ALIGN)));
#endif
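/* code_gen_alloc() below selects the buffer that TCG emits translated code
   into: the static buffer above in user mode, otherwise an mmap()/g_malloc()
   allocation whose size and placement are constrained per host so that
   generated jumps can reach the prologue and other translation blocks. */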
static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
    /* Cannot map more than that */
    if (code_gen_buffer_size > (800 * 1024 * 1024))
        code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
    // Map the buffer below 2G, so we can use direct calls and branches
    start = (void *) 0x60000000UL;
    if (code_gen_buffer_size > (512 * 1024 * 1024))
        code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
    /* Keep the buffer no bigger than 16MB to branch between blocks */
    if (code_gen_buffer_size > 16 * 1024 * 1024)
        code_gen_buffer_size = 16 * 1024 * 1024;
#elif defined(__s390x__)
    /* Map the buffer so that we can use direct calls and branches.  */
    /* We have a +- 4GB range on the branches; leave some slop.  */
    if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
        code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
    }
    start = (void *)0x90000000UL;
#endif
    code_gen_buffer = mmap(start, code_gen_buffer_size,
                           PROT_WRITE | PROT_READ | PROT_EXEC,
                           flags, -1, 0);
    if (code_gen_buffer == MAP_FAILED) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
        exit(1);
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__) \
    || defined(__NetBSD__)
    flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
    /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
     * 0x40000000 is free */
    addr = (void *)0x40000000;
    /* Cannot map more than that */
    if (code_gen_buffer_size > (800 * 1024 * 1024))
        code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
    // Map the buffer below 2G, so we can use direct calls and branches
    addr = (void *) 0x60000000UL;
    if (code_gen_buffer_size > (512 * 1024 * 1024)) {
        code_gen_buffer_size = (512 * 1024 * 1024);
    }
#endif
    code_gen_buffer = mmap(addr, code_gen_buffer_size,
                           PROT_WRITE | PROT_READ | PROT_EXEC,
                           flags, -1, 0);
    if (code_gen_buffer == MAP_FAILED) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
        exit(1);
    }
#else
    code_gen_buffer = g_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = g_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}
/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size)
{
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(&tcg_ctx);
#endif
}

bool tcg_enabled(void)
{
    return code_gen_buffer != NULL;
}
void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    memory_map_init();
    io_mem_init();
#endif
}

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields      = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
#endif
CPUState *qemu_get_cpu(int cpu)
{
    CPUState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}

void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    env->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}
/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single use temporary TB
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}
static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        g_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */

static void page_flush_tb_1 (int level, void **lp)
{
    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1 (level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;
    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
    }
}
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}
#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;

    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}
/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}
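/* In the lists below a TranslationBlock pointer is stored with its low two
   bits used as a tag: for page_next[] and jmp_first/jmp_next[] the tag
   encodes which of the TB's two pages or jump slots the link belongs to,
   which is why entries are masked with ~3 before being dereferenced. */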
/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}
/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}
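/* set_bits() marks bits [start, start + len) in the byte-array bitmap 'tab',
   handling the partial leading and trailing bytes separately from the full
   bytes in between. */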
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
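/* build_page_bitmap() walks the page's TB list and marks the byte ranges
   occupied by each TB; tb_invalidate_phys_page_fast() can then consult this
   bitmap and skip the invalidation path for writes that do not touch
   translated code. */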
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
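/* tb_gen_code() translates one guest block: it allocates a TB (flushing the
   whole translation cache if tb_alloc() fails), runs cpu_gen_code() into the
   code buffer, advances code_gen_ptr by the aligned amount of generated
   code, and finally links the TB into the page tables via tb_link_page(). */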
TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info.  */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}
/* invalidate all TBs which intersect with the target physical page
   starting in range [start, end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env;
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env, env->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}
/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;

    qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
             cpu_single_env->mem_io_vaddr, len,
             cpu_single_env->eip,
             cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                    unsigned long pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif
/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;
#endif

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    page_already_protected = p->first_tb != NULL;
#endif
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_page(TranslationBlock *tb,
                  tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
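/* The binary search below is valid because tbs[] is filled in allocation
   order and code_gen_ptr only grows, so tc_ptr values increase
   monotonically across the array. */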
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}

static void tb_reset_jump_recursive(TranslationBlock *tb);
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}
#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    target_ulong pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif
#endif /* TARGET_HAS_ICE */
#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *env, int mask)
{
}

int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}
/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled())
            kvm_update_guest_debug(env, 0);
        else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}
/* enable or disable low levels log */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static char logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#elif defined(_WIN32)
        /* Win32 doesn't support line-buffering, so use unbuffered output. */
        setvbuf(logfile, NULL, _IONBF, 0);
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
    }
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    cpu_set_log(loglevel);
}
static void cpu_unlink_tb(CPUState *env)
{
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    spin_lock(&interrupt_lock);
    tb = env->current_tb;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    if (tb) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
    }
    spin_unlock(&interrupt_lock);
}
#ifndef CONFIG_USER_ONLY
/* mask must never be zero, except for A20 change call */
static void tcg_handle_interrupt(CPUState *env, int mask)
{
    int old_mask;

    old_mask = env->interrupt_request;
    env->interrupt_request |= mask;

    /*
     * If called from iothread context, wake the target cpu in
     * case its halted.
     */
    if (!qemu_cpu_is_self(env)) {
        qemu_cpu_kick(env);
        return;
    }

    if (use_icount) {
        env->icount_decr.u16.high = 0xffff;
        if (!can_do_io(env)
            && (mask & ~old_mask) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
    } else {
        cpu_unlink_tb(env);
    }
}

CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;
#else /* CONFIG_USER_ONLY */

void cpu_interrupt(CPUState *env, int mask)
{
    env->interrupt_request |= mask;
    cpu_unlink_tb(env);
}

#endif /* CONFIG_USER_ONLY */

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

void cpu_exit(CPUState *env)
{
    env->exit_request = 1;
    cpu_unlink_tb(env);
}
const CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops "
      "before eflags optimization and "
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
    { CPU_LOG_RESET, "cpu_reset",
      "show CPU state before CPU resets" },
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
    { 0, NULL, NULL },
};
static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    const CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if(cmp1(p,p1-p,"all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
#ifdef TARGET_I386
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        log_cpu_state(env, 0);
#endif
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}
CPUState *cpu_copy(CPUState *env)
{
    CPUState *new_env = cpu_init(env->cpu_model_str);
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    memcpy(new_env, env, sizeof(CPUState));

    /* Preserve chaining and index. */
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}
#if !defined(CONFIG_USER_ONLY)

static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}
static CPUTLBEntry s_cputlb_empty_entry = {
    .addr_read  = -1,
    .addr_write = -1,
    .addr_code  = -1,
    .addend     = -1,
};

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        int mmu_idx;
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
        }
    }

    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

    env->tlb_flush_addr = -1;
    env->tlb_flush_mask = 0;
    tlb_flush_count++;
}
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        *tlb_entry = s_cputlb_empty_entry;
    }
}
void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;
    int mmu_idx;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* Check if we need to flush due to large pages.  */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
#if defined(DEBUG_TLB)
        printf("tlb_flush_page: forced full flush ("
               TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
               env->tlb_flush_addr, env->tlb_flush_mask);
#endif
        tlb_flush(env, 1);
        return;
    }
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);

    tlb_flush_jmp_cache(env, addr);
}
/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
}
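/* Dirty tracking: a RAM page whose dirty flag is clear is entered into the
   TLB with TLB_NOTDIRTY set on its write entry, which forces stores through
   the slow path; the slow path sets the dirty flags and then clears
   TLB_NOTDIRTY again (see tlb_set_dirty below). */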
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
        }
    }
}
/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (unsigned long)qemu_safe_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1
            != (end - 1) - start) {
        abort();
    }

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        int mmu_idx;
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            for(i = 0; i < CPU_TLB_SIZE; i++)
                tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
                                      start1, length);
        }
    }
}
int cpu_physical_memory_set_dirty_tracking(int enable)
{
    in_migration = enable;
    if (enable) {
        memory_global_dirty_log_start();
    } else {
        memory_global_dirty_log_stop();
    }
    return 0;
}

int cpu_physical_memory_get_dirty_tracking(void)
{
    return in_migration;
}
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;
    void *p;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
            + tlb_entry->addend);
        ram_addr = qemu_ram_addr_from_host_nofail(p);
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }
}
/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
{
    int i;
    int mmu_idx;
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
    }
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
        tlb_entry->addr_write = vaddr;
}

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
{
    int i;
    int mmu_idx;

    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
}
/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated.  */
static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
                               target_ulong size)
{
    target_ulong mask = ~(size - 1);

    if (env->tlb_flush_addr == (target_ulong)-1) {
        /* No previous large page.  */
        env->tlb_flush_addr = vaddr & mask;
        env->tlb_flush_mask = mask;
        return;
    }
    /* Extend the existing region to include the new page.
       This is a compromise between unnecessary flushes and the cost
       of maintaining a full variable size TLB.  */
    mask &= env->tlb_flush_mask;
    while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
        mask <<= 1;
    }
    env->tlb_flush_addr &= mask;
    env->tlb_flush_mask = mask;
}
/* Add a new TLB entry. At most one entry for a given virtual address
   is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
   supplied size is only used by tlb_flush_page.  */
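/* In the resulting CPUTLBEntry, te->addend is chosen so that
   host_address = guest_vaddr + addend for RAM-backed pages, while the
   iotlb[] entry keeps the value needed to dispatch MMIO accesses;
   TLB_MMIO / TLB_NOTDIRTY in the address fields force the slow path. */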
void tlb_set_page(CPUState *env, target_ulong vaddr,
                  target_phys_addr_t paddr, int prot,
                  int mmu_idx, target_ulong size)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    unsigned long addend;
    CPUTLBEntry *te;
    CPUWatchpoint *wp;
    target_phys_addr_t iotlb;

    assert(size >= TARGET_PAGE_SIZE);
    if (size != TARGET_PAGE_SIZE) {
        tlb_add_large_page(env, vaddr, size);
    }
    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
           " prot=%x idx=%d pd=0x%08lx\n",
           vaddr, paddr, prot, mmu_idx, pd);
#endif

    address = vaddr;
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
        /* IO memory case (romd handled later) */
        address |= TLB_MMIO;
    }
    addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
        /* Normal RAM.  */
        iotlb = pd & TARGET_PAGE_MASK;
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
            iotlb |= IO_MEM_NOTDIRTY;
        else
            iotlb |= IO_MEM_ROM;
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
        iotlb = (pd & ~TARGET_PAGE_MASK);
        if (p) {
            iotlb += p->region_offset;
        } else {
            iotlb += paddr;
        }
    }

    code_address = address;
    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = io_mem_watch + paddr;
                address |= TLB_MMIO;
                break;
            }
        }
    }

    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
    te = &env->tlb_table[mmu_idx][index];
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    } else {
        te->addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    } else {
        te->addr_code = -1;
    }
    if (prot & PAGE_WRITE) {
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
            (pd & IO_MEM_ROMD)) {
            /* Write access calls the I/O callback.  */
            te->addr_write = address | TLB_MMIO;
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                   !cpu_physical_memory_is_dirty(pd)) {
            te->addr_write = address | TLB_NOTDIRTY;
        } else {
            te->addr_write = address;
        }
    } else {
        te->addr_write = -1;
    }
}
void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}
/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */
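/* The walk visits the l1_map radix tree and reports maximal runs of pages
   that share the same protection flags as single [start, end) regions,
   emitting a region each time the flags change (see
   walk_memory_regions_end below). */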
struct walk_memory_regions_data
{
    walk_memory_regions_fn fn;
    void *priv;
    unsigned long start;
    int prot;
};

static int walk_memory_regions_end(struct walk_memory_regions_data *data,
                                   abi_ulong end, int new_prot)
{
    if (data->start != -1ul) {
        int rc = data->fn(data->priv, data->start, end, data->prot);
        if (rc != 0) {
            return rc;
        }
    }

    data->start = (new_prot ? end : -1ul);
    data->prot = new_prot;

    return 0;
}
static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 abi_ulong base, int level, void **lp)
{
    abi_ulong pa;
    int i, rc;

    if (*lp == NULL) {
        return walk_memory_regions_end(data, base, 0);
    }

    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            if (prot != data->prot) {
                rc = walk_memory_regions_end(data, pa, prot);
                if (rc != 0) {
                    return rc;
                }
            }
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pa = base | ((abi_ulong)i <<
                         (TARGET_PAGE_BITS + L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
            if (rc != 0) {
                return rc;
            }
        }
    }

    return 0;
}
int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    struct walk_memory_regions_data data;
    unsigned long i;

    data.fn = fn;
    data.priv = priv;
    data.start = -1ul;
    data.prot = 0;

    for (i = 0; i < V_L1_SIZE; i++) {
        int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
                                       V_L1_SHIFT / L2_BITS - 1, l1_map + i);
        if (rc != 0) {
            return rc;
        }
    }

    return walk_memory_regions_end(&data, 0, 0);
}
static int dump_region(void *priv, abi_ulong start,
                       abi_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
                   " "TARGET_ABI_FMT_lx" %c%c%c\n",
                   start, end, end - start,
                   ((prot & PAGE_READ) ? 'r' : '-'),
                   ((prot & PAGE_WRITE) ? 'w' : '-'),
                   ((prot & PAGE_EXEC) ? 'x' : '-'));

    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    (void) fprintf(f, "%-8s %-8s %-8s %s\n",
                   "start", "end", "size", "prot");
    walk_memory_regions(f, dump_region);
}
int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}
/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is positioned automatically depending
   on PAGE_WRITE.  The mmap_lock should already be held.  */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
    assert(start < end);

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        flags |= PAGE_WRITE_ORG;
    }

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate
           the code inside.  */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
}
int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif

    if (start + len - 1 < start) {
        /* We've wrapped around.  */
        return -1;
    }

    end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
    start = start & TARGET_PAGE_MASK;

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p)
            return -1;
        if( !(p->flags & PAGE_VALID) )
            return -1;

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
            return -1;
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG))
                return -1;
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL))
                    return -1;
            }
            return 0;
        }
    }
    return 0;
}
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int prot;
    PageDesc *p;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
    mmap_lock();

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        mmap_unlock();
        return 0;
    }

    /* if the page was really writable, then we change its
       protection back to writable */
    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
        host_start = address & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;

        prot = 0;
        for (addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
            p = page_find(addr >> TARGET_PAGE_BITS);
            p->flags |= PAGE_WRITE;
            prot |= p->flags;

            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(addr, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(addr);
#endif
        }
        mprotect((void *)g2h(host_start), qemu_host_page_size,
                 prot & PAGE_BITS);

        mmap_unlock();
        return 1;
    }
    mmap_unlock();
    return 0;
}

static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
2469 #endif /* defined(CONFIG_USER_ONLY) */
#if !defined(CONFIG_USER_ONLY)

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    ram_addr_t sub_io_index[TARGET_PAGE_SIZE];
    ram_addr_t region_offset[TARGET_PAGE_SIZE];
} subpage_t;

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset);
static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                                ram_addr_t orig_memory,
                                ram_addr_t region_offset);
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
                      need_subpage)                                     \
    do {                                                                \
        if (addr > start_addr)                                          \
            start_addr2 = 0;                                            \
        else {                                                          \
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
            if (start_addr2 > 0)                                        \
                need_subpage = 1;                                       \
        }                                                               \
                                                                        \
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
        else {                                                          \
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
                need_subpage = 1;                                       \
        }                                                               \
    } while (0)
/* register physical memory.
   For RAM, 'size' must be a multiple of the target page size.
   If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page.  The address used when calling the IO function is
   the offset from the start of the region, plus region_offset.  Both
   start_addr and region_offset are rounded down to a page boundary
   before calculating this offset.  This should not be a problem unless
   the low bits of start_addr and region_offset differ.  */
void cpu_register_physical_memory_log(target_phys_addr_t start_addr,
                                      ram_addr_t size,
                                      ram_addr_t phys_offset,
                                      ram_addr_t region_offset,
                                      bool log_dirty)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;
    ram_addr_t orig_size = size;
    subpage_t *subpage;

    assert(size);

    if (phys_offset == IO_MEM_UNASSIGNED) {
        region_offset = start_addr;
    }
    region_offset &= TARGET_PAGE_MASK;
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;

    addr = start_addr;
    do {
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            ram_addr_t orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                          need_subpage);
            if (need_subpage) {
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory,
                                           p->region_offset);
                } else {
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                                            >> IO_MEM_SHIFT];
                }
                subpage_register(subpage, start_addr2, end_addr2, phys_offset,
                                 region_offset);
                p->region_offset = 0;
            } else {
                p->phys_offset = phys_offset;
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                    (phys_offset & IO_MEM_ROMD))
                    phys_offset += TARGET_PAGE_SIZE;
            }
        } else {
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            p->region_offset = region_offset;
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                (phys_offset & IO_MEM_ROMD)) {
                phys_offset += TARGET_PAGE_SIZE;
            } else {
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);

                if (need_subpage) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, IO_MEM_UNASSIGNED,
                                           addr & TARGET_PAGE_MASK);
                    subpage_register(subpage, start_addr2, end_addr2,
                                     phys_offset, region_offset);
                    p->region_offset = 0;
                }
            }
        }
        region_offset += TARGET_PAGE_SIZE;
        addr += TARGET_PAGE_SIZE;
    } while (addr != end_addr);

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}
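
/* Illustrative sketch (not part of the original file): how a board model in
 * this pre-MemoryRegion style would register one page of RAM and one page of
 * MMIO.  "ram_offset" stands for a value returned by qemu_ram_alloc() and
 * "my_io_index" for a value returned by cpu_register_io_memory(); the guest
 * physical addresses and the trailing log_dirty=false are assumptions made
 * for the example.
 */
static void __attribute__((unused))
example_register_board_memory(ram_addr_t ram_offset, int my_io_index)
{
    /* RAM: the low bits of phys_offset are IO_MEM_RAM (0) */
    cpu_register_physical_memory_log(0x00000000, TARGET_PAGE_SIZE,
                                     ram_offset | IO_MEM_RAM, 0, false);
    /* MMIO: the low bits of phys_offset select the io_mem table entry */
    cpu_register_physical_memory_log(0x10000000, TARGET_PAGE_SIZE,
                                     my_io_index, 0, false);
}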
void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_coalesce_mmio_region(addr, size);
}

void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_uncoalesce_mmio_region(addr, size);
}

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}
#if defined(__linux__) && !defined(TARGET_S390X)

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC       0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}

static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
        return NULL;
    }

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        free(filename);
        return NULL;
    }
    unlink(filename);
    free(filename);

    memory = (memory+hpagesize-1) & ~(hpagesize-1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return NULL;
    }
    block->fd = fd;
    return area;
}
#endif
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    if (QLIST_EMPTY(&ram_list.blocks))
        return 0;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->length;

        QLIST_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}

static ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    QLIST_FOREACH(block, &ram_list.blocks, next)
        last = MAX(last, block->offset + block->length);

    return last;
}
ram_addr_t qemu_ram_alloc_from_ptr(DeviceState *dev, const char *name,
                                   ram_addr_t size, void *host,
                                   MemoryRegion *mr)
{
    RAMBlock *new_block, *block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));

    if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
        char *id = dev->parent_bus->info->get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (!strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }

    new_block->offset = find_ram_offset(size);
    if (host) {
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
    } else {
        if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
            new_block->host = file_ram_alloc(new_block, size, mem_path);
            if (!new_block->host) {
                new_block->host = qemu_vmalloc(size);
                qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
            }
#else
            fprintf(stderr, "-mem-path option unsupported\n");
            exit(1);
#endif
        } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
            /* S390 KVM requires the topmost vma of the RAM to be smaller than
               a system-defined value, which is at least 256GB. Larger systems
               have larger values. We put the guest between the end of data
               segment (system break) and this value. We use 32GB as a base to
               have enough room for the system break to grow. */
            new_block->host = mmap((void*)0x800000000, size,
                                   PROT_EXEC|PROT_READ|PROT_WRITE,
                                   MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
            if (new_block->host == MAP_FAILED) {
                fprintf(stderr, "Allocating RAM failed\n");
                abort();
            }
#else
            if (xen_enabled()) {
                xen_ram_alloc(new_block->offset, size, mr);
            } else {
                new_block->host = qemu_vmalloc(size);
            }
#endif
            qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
        }
    }
    new_block->length = size;

    QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);

    ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
                                    last_ram_offset() >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0xff, size >> TARGET_PAGE_BITS);

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}

ram_addr_t qemu_ram_alloc(DeviceState *dev, const char *name, ram_addr_t size,
                          MemoryRegion *mr)
{
    return qemu_ram_alloc_from_ptr(dev, name, size, NULL, mr);
}
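
/* Illustrative sketch (not part of the original file): a device model
 * allocating its RAM block by name.  In this tree devices normally reach
 * qemu_ram_alloc() through memory_region_init_ram(); the direct call below
 * only shows the parameters.  The name, the 8 MiB size and the NULL
 * MemoryRegion are assumptions made for the example.
 */
static ram_addr_t __attribute__((unused))
example_alloc_vram(DeviceState *dev)
{
    return qemu_ram_alloc(dev, "vga.vram", 8 * 1024 * 1024, NULL);
}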
void qemu_ram_free_from_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE(block, next);
            g_free(block);
            return;
        }
    }
}

void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE(block, next);
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
                if (block->fd) {
                    munmap(block->host, block->length);
                    close(block->fd);
                } else {
                    qemu_vfree(block->host);
                }
#else
                abort();
#endif
            } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                munmap(block->host, block->length);
#else
                if (xen_enabled()) {
                    xen_invalidate_map_cache_entry(block->host);
                } else {
                    qemu_vfree(block->host);
                }
#endif
            }
            g_free(block);
            return;
        }
    }
}
#ifndef _WIN32
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->length) {
            vaddr = block->host + offset;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else {
                flags = MAP_FIXED;
                munmap(vaddr, length);
                if (mem_path) {
#if defined(__linux__) && !defined(TARGET_S390X)
                    if (block->fd) {
#ifdef MAP_POPULATE
                        flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
                            MAP_PRIVATE;
#else
                        flags |= MAP_PRIVATE;
#endif
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, block->fd, offset);
                    } else {
                        flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, -1, 0);
                    }
#else
                    abort();
#endif
                } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                    flags |= MAP_SHARED | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
                                flags, -1, 0);
#else
                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
#endif
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
            }
            return;
        }
    }
}
#endif /* !_WIN32 */
/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            /* Move this entry to the start of the list.  */
            if (block != QLIST_FIRST(&ram_list.blocks)) {
                QLIST_REMOVE(block, next);
                QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
            }
            if (xen_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                    block->host =
                        xen_map_cache(block->offset, block->length, 1);
                }
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}

/* Return a host pointer to ram allocated with qemu_ram_alloc.
 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
 */
void *qemu_safe_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            if (xen_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                    block->host =
                        xen_map_cache(block->offset, block->length, 1);
                }
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}

/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
 * but takes a size argument */
void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
{
    if (*size == 0) {
        return NULL;
    }
    if (xen_enabled()) {
        return xen_map_cache(addr, *size, 1);
    } else {
        RAMBlock *block;

        QLIST_FOREACH(block, &ram_list.blocks, next) {
            if (addr - block->offset < block->length) {
                if (addr - block->offset + *size > block->length)
                    *size = block->length - addr + block->offset;
                return block->host + (addr - block->offset);
            }
        }

        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
        abort();
    }
}

void qemu_put_ram_ptr(void *addr)
{
    trace_qemu_put_ram_ptr(addr);
}
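
/* Illustrative sketch (not part of the original file): the intended use of
 * qemu_get_ram_ptr() per the comment above - a device touching RAM that it
 * owns (here a hypothetical frame buffer at "vram_offset"), bracketed by
 * qemu_put_ram_ptr().  Guest-driven DMA should go through
 * cpu_physical_memory_map()/cpu_physical_memory_rw() instead.
 */
static void __attribute__((unused))
example_clear_vram(ram_addr_t vram_offset, ram_addr_t vram_size)
{
    void *ptr = qemu_get_ram_ptr(vram_offset);

    memset(ptr, 0, vram_size);
    qemu_put_ram_ptr(ptr);
}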
int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_enabled()) {
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
        return 0;
    }

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        /* This case happens when the block is not mapped. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->length) {
            *ram_addr = block->offset + (host - block->host);
            return 0;
        }
    }

    return -1;
}

/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }
    return ram_addr;
}
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, 1);
#endif
    return 0;
}

static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, 2);
#endif
    return 0;
}

static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, 4);
#endif
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, 1);
#endif
}

static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, 2);
#endif
}

static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, 4);
#endif
}

static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readw,
    unassigned_mem_readl,
};

static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writew,
    unassigned_mem_writel,
};
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    stb_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    stw_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    stl_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static CPUReadMemoryFunc * const error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};
/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUState *env = cpu_single_env;
    target_ulong pc, cs_base;
    TranslationBlock *tb;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb = tb_find_pc(env->mem_io_pc);
                if (!tb) {
                    cpu_abort(env, "check_watchpoint: could not find TB for "
                              "pc=%p", (void *)env->mem_io_pc);
                }
                cpu_restore_state(tb, env, env->mem_io_pc);
                tb_phys_invalidate(tb, -1);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                }
                cpu_resume_from_signal(env, NULL);
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
    return ldub_phys(addr);
}

static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
    return lduw_phys(addr);
}

static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
    return ldl_phys(addr);
}

static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
    stb_phys(addr, val);
}

static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
    stw_phys(addr, val);
}

static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
    stl_phys(addr, val);
}

static CPUReadMemoryFunc * const watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};

static CPUWriteMemoryFunc * const watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};
static inline uint32_t subpage_readlen (subpage_t *mmio,
                                        target_phys_addr_t addr,
                                        unsigned int len)
{
    unsigned int idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif

    addr += mmio->region_offset[idx];
    idx = mmio->sub_io_index[idx];
    return io_mem_read[idx][len](io_mem_opaque[idx], addr);
}

static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
                                     uint32_t value, unsigned int len)
{
    unsigned int idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n",
           __func__, mmio, len, addr, idx, value);
#endif

    addr += mmio->region_offset[idx];
    idx = mmio->sub_io_index[idx];
    io_mem_write[idx][len](io_mem_opaque[idx], addr, value);
}

static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
{
    return subpage_readlen(opaque, addr, 0);
}

static void subpage_writeb (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
    subpage_writelen(opaque, addr, value, 0);
}

static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
{
    return subpage_readlen(opaque, addr, 1);
}

static void subpage_writew (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
    subpage_writelen(opaque, addr, value, 1);
}

static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
{
    return subpage_readlen(opaque, addr, 2);
}

static void subpage_writel (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
    subpage_writelen(opaque, addr, value, 2);
}

static CPUReadMemoryFunc * const subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};

static CPUWriteMemoryFunc * const subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};
static uint32_t subpage_ram_readb(void *opaque, target_phys_addr_t addr)
{
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    return ldub_p(ptr);
}

static void subpage_ram_writeb(void *opaque, target_phys_addr_t addr,
                               uint32_t value)
{
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    stb_p(ptr, value);
}

static uint32_t subpage_ram_readw(void *opaque, target_phys_addr_t addr)
{
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    return lduw_p(ptr);
}

static void subpage_ram_writew(void *opaque, target_phys_addr_t addr,
                               uint32_t value)
{
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    stw_p(ptr, value);
}

static uint32_t subpage_ram_readl(void *opaque, target_phys_addr_t addr)
{
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    return ldl_p(ptr);
}

static void subpage_ram_writel(void *opaque, target_phys_addr_t addr,
                               uint32_t value)
{
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    stl_p(ptr, value);
}

static CPUReadMemoryFunc * const subpage_ram_read[] = {
    &subpage_ram_readb,
    &subpage_ram_readw,
    &subpage_ram_readl,
};

static CPUWriteMemoryFunc * const subpage_ram_write[] = {
    &subpage_ram_writeb,
    &subpage_ram_writew,
    &subpage_ram_writel,
};
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    if ((memory & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        memory = IO_MEM_SUBPAGE_RAM;
    }
    memory = (memory >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
    for (; idx <= eidx; idx++) {
        mmio->sub_io_index[idx] = memory;
        mmio->region_offset[idx] = region_offset;
    }

    return 0;
}

static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                                ram_addr_t orig_memory,
                                ram_addr_t region_offset)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = g_malloc0(sizeof(subpage_t));

    mmio->base = base;
    subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio,
                                            DEVICE_NATIVE_ENDIAN);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
    *phys = subpage_memory | IO_MEM_SUBPAGE;
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, orig_memory, region_offset);

    return mmio;
}
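
/* Worked example (not part of the original file): with 4 KiB target pages,
 * SUBPAGE_IDX(0x1234) is 0x234, so an access at that offset is dispatched
 * through sub_io_index[0x234] after region_offset[0x234] has been added to
 * the address handed to the io callback.
 */
static unsigned int __attribute__((unused))
example_subpage_slot(target_phys_addr_t addr)
{
    return SUBPAGE_IDX(addr);   /* index into sub_io_index[]/region_offset[] */
}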
static int get_free_io_mem_idx(void)
{
    int i;

    for (i = 0; i < IO_MEM_NB_ENTRIES; i++)
        if (!io_mem_used[i]) {
            io_mem_used[i] = 1;
            return i;
        }
    fprintf(stderr, "Ran out of io_mem_idx, max %d !\n", IO_MEM_NB_ENTRIES);
    return -1;
}
/*
 * Usually, devices operate in little endian mode. There are devices out
 * there that operate in big endian too. Each device gets byte swapped
 * mmio if plugged onto a CPU that does the other endianness.
 *
 * CPU          Device           swap?
 *
 * little       little           no
 * little       big              yes
 * big          little           yes
 * big          big              no
 */

typedef struct SwapEndianContainer {
    CPUReadMemoryFunc *read[3];
    CPUWriteMemoryFunc *write[3];
    void *opaque;
} SwapEndianContainer;

static uint32_t swapendian_mem_readb (void *opaque, target_phys_addr_t addr)
{
    uint32_t val;
    SwapEndianContainer *c = opaque;
    val = c->read[0](c->opaque, addr);
    return val;
}

static uint32_t swapendian_mem_readw(void *opaque, target_phys_addr_t addr)
{
    uint32_t val;
    SwapEndianContainer *c = opaque;
    val = bswap16(c->read[1](c->opaque, addr));
    return val;
}

static uint32_t swapendian_mem_readl(void *opaque, target_phys_addr_t addr)
{
    uint32_t val;
    SwapEndianContainer *c = opaque;
    val = bswap32(c->read[2](c->opaque, addr));
    return val;
}

static CPUReadMemoryFunc * const swapendian_readfn[3]={
    swapendian_mem_readb,
    swapendian_mem_readw,
    swapendian_mem_readl
};

static void swapendian_mem_writeb(void *opaque, target_phys_addr_t addr,
                                  uint32_t val)
{
    SwapEndianContainer *c = opaque;
    c->write[0](c->opaque, addr, val);
}

static void swapendian_mem_writew(void *opaque, target_phys_addr_t addr,
                                  uint32_t val)
{
    SwapEndianContainer *c = opaque;
    c->write[1](c->opaque, addr, bswap16(val));
}

static void swapendian_mem_writel(void *opaque, target_phys_addr_t addr,
                                  uint32_t val)
{
    SwapEndianContainer *c = opaque;
    c->write[2](c->opaque, addr, bswap32(val));
}

static CPUWriteMemoryFunc * const swapendian_writefn[3]={
    swapendian_mem_writeb,
    swapendian_mem_writew,
    swapendian_mem_writel
};

static void swapendian_init(int io_index)
{
    SwapEndianContainer *c = g_malloc(sizeof(SwapEndianContainer));
    int i;

    /* Swap mmio for big endian targets */
    c->opaque = io_mem_opaque[io_index];
    for (i = 0; i < 3; i++) {
        c->read[i] = io_mem_read[io_index][i];
        c->write[i] = io_mem_write[io_index][i];

        io_mem_read[io_index][i] = swapendian_readfn[i];
        io_mem_write[io_index][i] = swapendian_writefn[i];
    }
    io_mem_opaque[io_index] = c;
}

static void swapendian_del(int io_index)
{
    if (io_mem_read[io_index][0] == swapendian_readfn[0]) {
        g_free(io_mem_opaque[io_index]);
    }
}
/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). Functions can be omitted with a NULL function pointer.
   If io_index is non zero, the corresponding io zone is
   modified. If it is zero, a new io zone is allocated. The return
   value can be used with cpu_register_physical_memory(). (-1) is
   returned if error. */
static int cpu_register_io_memory_fixed(int io_index,
                                        CPUReadMemoryFunc * const *mem_read,
                                        CPUWriteMemoryFunc * const *mem_write,
                                        void *opaque, enum device_endian endian)
{
    int i;

    if (io_index <= 0) {
        io_index = get_free_io_mem_idx();
        if (io_index == -1)
            return io_index;
    } else {
        io_index >>= IO_MEM_SHIFT;
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for (i = 0; i < 3; ++i) {
        io_mem_read[io_index][i]
            = (mem_read[i] ? mem_read[i] : unassigned_mem_read[i]);
    }
    for (i = 0; i < 3; ++i) {
        io_mem_write[io_index][i]
            = (mem_write[i] ? mem_write[i] : unassigned_mem_write[i]);
    }
    io_mem_opaque[io_index] = opaque;

    switch (endian) {
    case DEVICE_BIG_ENDIAN:
#ifndef TARGET_WORDS_BIGENDIAN
        swapendian_init(io_index);
#endif
        break;
    case DEVICE_LITTLE_ENDIAN:
#ifdef TARGET_WORDS_BIGENDIAN
        swapendian_init(io_index);
#endif
        break;
    case DEVICE_NATIVE_ENDIAN:
    default:
        break;
    }

    return (io_index << IO_MEM_SHIFT);
}

int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
                           CPUWriteMemoryFunc * const *mem_write,
                           void *opaque, enum device_endian endian)
{
    return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque, endian);
}

void cpu_unregister_io_memory(int io_table_address)
{
    int i;
    int io_index = io_table_address >> IO_MEM_SHIFT;

    swapendian_del(io_index);

    for (i = 0; i < 3; i++) {
        io_mem_read[io_index][i] = unassigned_mem_read[i];
        io_mem_write[io_index][i] = unassigned_mem_write[i];
    }
    io_mem_opaque[io_index] = NULL;
    io_mem_used[io_index] = 0;
}
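
/* Illustrative sketch (not part of the original file): the classic way a
 * device registers MMIO with this API - three read and three write callbacks
 * (byte/word/long), an opaque pointer, and a device endianness.  NULL slots
 * fall back to the unassigned_mem handlers, and the return value is what
 * gets passed on to cpu_register_physical_memory().  All names below are
 * hypothetical.
 */
typedef struct ExampleDev {
    uint32_t reg;
} ExampleDev;

static uint32_t example_dev_readl(void *opaque, target_phys_addr_t addr)
{
    ExampleDev *d = opaque;
    return d->reg;
}

static void example_dev_writel(void *opaque, target_phys_addr_t addr,
                               uint32_t val)
{
    ExampleDev *d = opaque;
    d->reg = val;
}

static CPUReadMemoryFunc * const example_dev_read[3] = {
    NULL,               /* byte accesses fall back to unassigned_mem_read */
    NULL,               /* word accesses too */
    example_dev_readl,
};

static CPUWriteMemoryFunc * const example_dev_write[3] = {
    NULL,
    NULL,
    example_dev_writel,
};

static int __attribute__((unused))
example_dev_register(ExampleDev *d)
{
    return cpu_register_io_memory(example_dev_read, example_dev_write, d,
                                  DEVICE_LITTLE_ENDIAN);
}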
static void io_mem_init(void)
{
    int i;

    cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read,
                                 unassigned_mem_write, NULL,
                                 DEVICE_NATIVE_ENDIAN);
    cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read,
                                 unassigned_mem_write, NULL,
                                 DEVICE_NATIVE_ENDIAN);
    cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read,
                                 notdirty_mem_write, NULL,
                                 DEVICE_NATIVE_ENDIAN);
    cpu_register_io_memory_fixed(IO_MEM_SUBPAGE_RAM, subpage_ram_read,
                                 subpage_ram_write, NULL,
                                 DEVICE_NATIVE_ENDIAN);
    for (i = 0; i < 5; i++)
        io_mem_used[i] = 1;

    io_mem_watch = cpu_register_io_memory(watch_mem_read,
                                          watch_mem_write, NULL,
                                          DEVICE_NATIVE_ENDIAN);
}
static void memory_map_init(void)
{
    system_memory = g_malloc(sizeof(*system_memory));
    memory_region_init(system_memory, "system", INT64_MAX);
    set_system_memory_map(system_memory);

    system_io = g_malloc(sizeof(*system_io));
    memory_region_init(system_io, "io", 65536);
    set_system_io_map(system_io);
}

MemoryRegion *get_system_memory(void)
{
    return system_memory;
}

MemoryRegion *get_system_io(void)
{
    return system_io;
}

#endif /* !defined(CONFIG_USER_ONLY) */
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    ram_addr_t pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                target_phys_addr_t addr1 = addr;
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
                    l = 1;
                }
            } else {
                ram_addr_t addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
                qemu_put_ram_ptr(ptr);
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                target_phys_addr_t addr1 = addr;
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
                memcpy(buf, ptr + (addr & ~TARGET_PAGE_MASK), l);
                qemu_put_ram_ptr(ptr);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
            qemu_put_ram_ptr(ptr);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
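
/* Illustrative sketch (not part of the original file): a firmware loader
 * using cpu_physical_memory_write_rom() so the copy also lands in pages that
 * were registered as ROM.  The destination address is an example value.
 */
static void __attribute__((unused))
example_load_firmware(const uint8_t *blob, int blob_size)
{
    cpu_physical_memory_write_rom(0xfffc0000, blob, blob_size);
}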
typedef struct {
    void *buffer;
    target_phys_addr_t addr;
    target_phys_addr_t len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = g_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    QLIST_REMOVE(client, link);
    g_free(client);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
    }
}
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t todo = 0;
    int l;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;
    ram_addr_t raddr = RAM_ADDR_MAX;
    ram_addr_t rlen;
    void *ret;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
            if (todo || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                cpu_physical_memory_read(addr, bounce.buffer, l);
            }

            *plen = l;
            return bounce.buffer;
        }
        if (!todo) {
            raddr = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        }

        len -= l;
        addr += l;
        todo += l;
    }
    rlen = todo;
    ret = qemu_ram_ptr_length(raddr, &rlen);
    *plen = rlen;
    return ret;
}
/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    if (buffer != bounce.buffer) {
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
                addr1 += l;
                access_len -= l;
            }
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        return;
    }
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}
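
/* Illustrative sketch (not part of the original file): the map/unmap pattern
 * a DMA-capable device is expected to follow.  If the region cannot be
 * mapped directly (MMIO, or the single bounce buffer is busy), *plen may
 * come back smaller than requested or the pointer may be NULL, in which
 * case the caller should retry later via cpu_register_map_client().  The
 * helper name is hypothetical.
 */
static int __attribute__((unused))
example_dma_write(target_phys_addr_t dma_addr, const uint8_t *data,
                  target_phys_addr_t size)
{
    target_phys_addr_t plen = size;
    void *host = cpu_physical_memory_map(dma_addr, &plen, 1 /* is_write */);

    if (!host || plen < size) {
        if (host) {
            cpu_physical_memory_unmap(host, plen, 1, 0);
        }
        return -1;   /* caller should retry later */
    }
    memcpy(host, data, size);
    cpu_physical_memory_unmap(host, plen, 1, size);
    return 0;
}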
/* warning: addr must be aligned */
static inline uint32_t ldl_phys_internal(target_phys_addr_t addr,
                                         enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t ldl_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t ldl_le_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t ldl_be_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
}
/* warning: addr must be aligned */
static inline uint64_t ldq_phys_internal(target_phys_addr_t addr,
                                         enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;

        /* XXX This is broken when device endian != cpu endian.
               Fix and add "endian" variable check */
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
    }
    return val;
}

uint64_t ldq_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint64_t ldq_le_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint64_t ldq_be_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}
/* warning: addr must be aligned */
static inline uint32_t lduw_phys_internal(target_phys_addr_t addr,
                                          enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t lduw_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t lduw_le_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t lduw_be_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
}
/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}

void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}
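
/* Illustrative sketch (not part of the original file): how target MMU code
 * can set an accessed/dirty bit in a guest page table entry without marking
 * the backing RAM page dirty, so that dirty tracking of PTE pages is not
 * defeated.  The PTE layout and helper name are hypothetical.
 */
static void __attribute__((unused))
example_set_pte_accessed(target_phys_addr_t pte_addr, uint32_t accessed_bit)
{
    uint32_t pte = ldl_phys(pte_addr);

    if (!(pte & accessed_bit)) {
        stl_phys_notdirty(pte_addr, pte | accessed_bit);
    }
}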
/* warning: addr must be aligned */
static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val,
                                     enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}

void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stl_le_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stl_be_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}
/* warning: addr must be aligned */
static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val,
                                     enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}

void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stw_le_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stw_be_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_le_phys(target_phys_addr_t addr, uint64_t val)
{
    val = cpu_to_le64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_be_phys(target_phys_addr_t addr, uint64_t val)
{
    val = cpu_to_be64(val);
    cpu_physical_memory_write(addr, &val, 8);
}
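
/* Illustrative sketch (not part of the original file): the explicit-endian
 * helpers let device code state the bus endianness instead of relying on
 * TARGET_WORDS_BIGENDIAN.  Writing with stl_le_phys() and reading back with
 * ldl_le_phys() round-trips the value regardless of the target CPU's
 * endianness; the address is an example.
 */
static uint32_t __attribute__((unused))
example_le_roundtrip(target_phys_addr_t addr, uint32_t val)
{
    stl_le_phys(addr, val);
    return ldl_le_phys(addr);   /* == val for RAM-backed addresses */
}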
/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
#endif
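
/* Illustrative sketch (not part of the original file): how a debugger stub
 * such as the gdbstub reads guest virtual memory through
 * cpu_memory_rw_debug(), which translates each page with
 * cpu_get_phys_page_debug() and tolerates writes into ROM.  The helper name
 * is hypothetical.
 */
static int __attribute__((unused))
example_debug_read(CPUState *env, target_ulong vaddr, uint8_t *buf, int size)
{
    return cpu_memory_rw_debug(env, vaddr, buf, size, 0 /* read */);
}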
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
            && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}
#if !defined(CONFIG_USER_ONLY)

void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %td/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %td bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}

#define MMUSUFFIX _cmmu
#undef GETPC
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif