 * virtual page mapping and translated block handling
 * Copyright (c) 2003 Fabrice Bellard
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
#include <sys/types.h>
#include "qemu-common.h"
#include "qemu-timer.h"
#include "exec-memory.h"
#if defined(CONFIG_USER_ONLY)
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <machine/profile.h>
#else /* !CONFIG_USER_ONLY */
#include "xen-mapcache.h"
#define WANT_EXEC_OBSOLETE
#include "exec-obsolete.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_UNASSIGNED
/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */

#define SMC_BITMAP_USE_THRESHOLD 10

static TranslationBlock *tbs;
static int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump. ARM and Sparc64
   have limited branch ranges (possibly also PPC) so place it in a
   section close to code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#elif defined(_WIN32) && !defined(_WIN64)
#define code_gen_section                                \
    __attribute__((aligned (16)))
#define code_gen_section                                \
    __attribute__((aligned (32)))

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
static uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
static int in_migration;

RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
static MemoryRegion io_mem_subpage_ram;

CPUArchState *first_cpu;
/* current CPU in the current thread. It is only valid inside
DEFINE_TLS(CPUArchState *, cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS

/* Size of the L2 (and L3, etc) page tables.  */
#define L2_SIZE (1 << L2_BITS)

#define P_L2_LEVELS \
    (((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / L2_BITS) + 1)

/* The bits remaining after N lower levels of page tables.  */
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
#define V_L1_BITS  V_L1_BITS_REM

#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)
#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
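/* Worked example of the geometry above (illustration only; it assumes
   L2_BITS is 10, TARGET_PAGE_BITS is 12 and a 47-bit user-mode address
   space): 47 - 12 = 35 page-number bits remain, 35 % 10 = 5 which is >= 4,
   so V_L1_BITS is 5, l1_map has V_L1_SIZE = 32 entries, V_L1_SHIFT is 30,
   and a lookup walks the 32-entry top level followed by three full
   10-bit levels before reaching a PageDesc. */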
uintptr_t qemu_real_host_page_size;
uintptr_t qemu_host_page_size;
uintptr_t qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];

#if !defined(CONFIG_USER_ONLY)
typedef struct PhysPageEntry PhysPageEntry;

static MemoryRegionSection *phys_sections;
static unsigned phys_sections_nb, phys_sections_nb_alloc;
static uint16_t phys_section_unassigned;
static uint16_t phys_section_notdirty;
static uint16_t phys_section_rom;
static uint16_t phys_section_watch;

struct PhysPageEntry {
    uint16_t is_leaf : 1;
    /* index into phys_sections (is_leaf) or phys_map_nodes (!is_leaf) */

/* Simple allocator for PhysPageEntry nodes */
static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;

#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)

/* This is a multi-level map on the physical address space.
   The bottom level has pointers to MemoryRegionSections.  */
static PhysPageEntry phys_map = { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };

static void io_mem_init(void);
static void memory_map_init(void);

static MemoryRegion io_mem_watch;

static int tb_flush_count;
static int tb_phys_invalidate_count;

static void map_exec(void *addr, long size)
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
static void map_exec(void *addr, long size)
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);

static void page_init(void)
    /* NOTE: we can always suppose that qemu_host_page_size >=
    SYSTEM_INFO system_info;

    GetSystemInfo(&system_info);
    qemu_real_host_page_size = system_info.dwPageSize;
    qemu_real_host_page_size = getpagesize();
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;

        freep = kinfo_getvmmap(getpid(), &cnt);
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
                unsigned long startaddr, endaddr;

                n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);
                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);

static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
#if defined(CONFIG_USER_ONLY)
    /* We can't use g_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
# define ALLOC(P, SIZE) \
    do { P = g_malloc0(SIZE); } while (0)

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
            ALLOC(p, sizeof(void *) * L2_SIZE);
        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
    return pd + (index & (L2_SIZE - 1));

static inline PageDesc *page_find(tb_page_addr_t index)
    return page_find_alloc(index, 0);
#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(unsigned nodes)
    if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
        typedef PhysPageEntry Node[L2_SIZE];
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
                                      phys_map_nodes_nb + nodes);
        phys_map_nodes = g_renew(Node, phys_map_nodes,
                                 phys_map_nodes_nb_alloc);

static uint16_t phys_map_node_alloc(void)
    ret = phys_map_nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != phys_map_nodes_nb_alloc);
    for (i = 0; i < L2_SIZE; ++i) {
        phys_map_nodes[ret][i].is_leaf = 0;
        phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;

static void phys_map_nodes_reset(void)
    phys_map_nodes_nb = 0;

static void phys_page_set_level(PhysPageEntry *lp, target_phys_addr_t *index,
                                target_phys_addr_t *nb, uint16_t leaf,
    target_phys_addr_t step = (target_phys_addr_t)1 << (level * L2_BITS);

    if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc();
        p = phys_map_nodes[lp->ptr];
            for (i = 0; i < L2_SIZE; i++) {
                p[i].ptr = phys_section_unassigned;
        p = phys_map_nodes[lp->ptr];
    lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];

    while (*nb && lp < &p[L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            phys_page_set_level(lp, index, nb, leaf, level - 1);

static void phys_page_set(target_phys_addr_t index, target_phys_addr_t nb,
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(3 * P_L2_LEVELS);

    phys_page_set_level(&phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);

MemoryRegionSection *phys_page_find(target_phys_addr_t index)
    PhysPageEntry lp = phys_map;
    uint16_t s_index = phys_section_unassigned;

    for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
        p = phys_map_nodes[lp.ptr];
        lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    return &phys_sections[s_index];
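/* Note on the lookup above: phys_map is the root of a radix tree whose
   interior nodes live in phys_map_nodes[] and whose leaves hold uint16_t
   indexes into phys_sections[].  A slot whose ptr is still PHYS_MAP_NODE_NIL
   was never populated, so the walk keeps the initial phys_section_unassigned
   index and the caller gets the unassigned-memory section back. */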
bool memory_region_is_unassigned(MemoryRegion *mr)
    return mr != &io_mem_ram && mr != &io_mem_rom
        && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;

#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc is used. */
#define USE_STATIC_CODE_GEN_BUFFER

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
               __attribute__((aligned (CODE_GEN_ALIGN)));

static void code_gen_alloc(unsigned long tb_size)
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Keep the buffer no bigger than 16MB to branch between blocks */
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#elif defined(__s390x__)
        /* Map the buffer so that we can use direct calls and branches.  */
        /* We have a +- 4GB range on the branches; leave some slop.  */
        if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
            code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
        start = (void *)0x90000000UL;
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__) \
    || defined(__NetBSD__)
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        addr = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024)) {
            code_gen_buffer_size = (512 * 1024 * 1024);
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
    code_gen_buffer = g_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = g_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
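/* Sizing note for the allocation above: code_gen_buffer_max_size stops new
   translations roughly one worst-case TB (TCG_MAX_OP_SIZE * OPC_BUF_SIZE
   bytes) before the end of the buffer, and tbs[] gets one TranslationBlock
   descriptor per CODE_GEN_AVG_BLOCK_SIZE bytes of buffer, so either limit
   can be the one that makes tb_alloc() fail and force a tb_flush(). */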
/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
void tcg_exec_init(unsigned long tb_size)
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    tcg_register_jit(code_gen_buffer, code_gen_buffer_size);
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now. */
    tcg_prologue_init(&tcg_ctx);

bool tcg_enabled(void)
    return code_gen_buffer != NULL;

void cpu_exec_init_all(void)
#if !defined(CONFIG_USER_ONLY)

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
    CPUArchState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields      = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUArchState),
        VMSTATE_UINT32(interrupt_request, CPUArchState),
        VMSTATE_END_OF_LIST()

CPUArchState *qemu_get_cpu(int cpu)
    CPUArchState *env = first_cpu;

        if (env->cpu_index == cpu)

void cpu_exec_init(CPUArchState *env)
#if defined(CONFIG_USER_ONLY)
    env->next_cpu = NULL;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
    env->cpu_index = cpu_index;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    env->thread_id = qemu_get_thread_id();
#if defined(CONFIG_USER_ONLY)
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)

void tb_free(TranslationBlock *tb)
    /* In practice this is mostly used for single use temporary TB
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;

static inline void invalidate_page_bitmap(PageDesc *p)
    if (p->code_bitmap) {
        g_free(p->code_bitmap);
        p->code_bitmap = NULL;
    p->code_write_count = 0;

/* Set to NULL all the 'first_tb' fields in all PageDescs. */
static void page_flush_tb_1 (int level, void **lp)
        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1 (level - 1, pp + i);

static void page_flush_tb(void)
    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUArchState *env1)
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
    TranslationBlock *tb;

    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
    TranslationBlock *tb1;

            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
    TranslationBlock *tb1;

        n1 = (uintptr_t)tb1 & 3;
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
            *ptb = tb1->page_next[n1];
        ptb = &tb1->page_next[n1];
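/* Note on the "& 3" / "& ~3" dance above: each PageDesc::first_tb list links
   TranslationBlocks through tb->page_next[0] or tb->page_next[1] depending on
   whether the page is the first or second page covered by the TB, and that
   selector is stored in the low two bits of the pointer itself (TBs are
   aligned, so those bits are free).  Masking with ~3 recovers the real
   pointer; masking with 3 recovers which page_next[] slot to follow. */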
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
    TranslationBlock *tb1, **ptb;

    ptb = &tb->jmp_next[n];
        /* find tb(n) in circular list */
            n1 = (uintptr_t)tb1 & 3;
            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                ptb = &tb1->jmp_first;
                ptb = &tb1->jmp_next[n1];
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

/* reset the jump entry 'n' of a TB so that it is not chained to
static inline void tb_reset_jump(TranslationBlock *tb, int n)
    tb_set_jmp_target(tb, n, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[n]));

void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
        n1 = (uintptr_t)tb1 & 3;
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
    tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
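/* The (tb | 2) assignment above restores the "empty circular list" state:
   jmp_first normally points at the first TB that jumps into this one, with
   the jump slot (0 or 1) encoded in the pointer's low bits, and the value
   ((uintptr_t)tb | 2) marks a list that contains only the TB itself -- the
   same encoding tb_link_page() uses when it first links a block. */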
static inline void set_bits(uint8_t *tab, int start, int len)
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
            mask &= ~(0xff << (end & 7));
        start = (start + 8) & ~7;
        while (start < end1) {
            mask = ~(0xff << (end & 7));

static void build_page_bitmap(PageDesc *p)
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);

    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
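/* The bitmap built above holds one bit per byte of the guest page
   (TARGET_PAGE_SIZE / 8 bytes of storage): every byte covered by some TB on
   the page gets its bit set.  tb_invalidate_phys_page_fast() can then test
   just the bits for a small write and skip the expensive range invalidation
   when the write does not touch translated code. */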
TranslationBlock *tb_gen_code(CPUArchState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
    TranslationBlock *tb;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;

    phys_pc = get_page_addr_code(env, pc);
        /* flush must be done */
        /* cannot fail at this point */
        /* Don't forget to invalidate previous TB info.  */
        tb_invalidated_flag = 1;
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((uintptr_t)code_gen_ptr + code_gen_size +
                             CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    tb_link_page(tb, phys_pc, phys_page2);

 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[. NOTE: start and end may refer to *different* physical pages.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end,
                              int is_cpu_write_access)
    while (start < end) {
        tb_invalidate_phys_page_range(start, end, is_cpu_write_access);
        start &= TARGET_PAGE_MASK;
        start += TARGET_PAGE_SIZE;

 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[. NOTE: start and end must refer to the *same* physical page.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUArchState *env = cpu_single_env;
    tb_page_addr_t tb_start, tb_end;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */
                current_tb_modified = 1;
                cpu_restore_state(current_tb, env, env->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            tb_phys_invalidate(tb, -1);
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
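/* Sketch of the TARGET_HAS_PRECISE_SMC path above: when the faulting write
   comes from the TB that is currently executing, that TB is about to be
   invalidated under its own feet, so cpu_restore_state() first rolls the CPU
   state back to the guest instruction doing the write; after the loop, a
   single-instruction TB (cflags count of 1) is regenerated for that PC and
   execution resumes via cpu_resume_from_signal(), so the write completes
   before the stale translated code can run again. */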
/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
    qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
              cpu_single_env->mem_io_vaddr, len,
              cpu_single_env->eip,
              cpu_single_env->eip +
              (intptr_t)cpu_single_env->segs[R_CS].base);

    p = page_find(start >> TARGET_PAGE_BITS);
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
        tb_invalidate_phys_page_range(start, start + len, 1);

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                    uintptr_t pc, void *puc)
    TranslationBlock *tb;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUArchState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */
            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    page_already_protected = p->first_tb != NULL;
    p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1
#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {
            p2 = page_find (addr >> TARGET_PAGE_BITS);
            p2->flags &= ~PAGE_WRITE;
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);
#endif /* TARGET_HAS_SMC */

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_page(TranslationBlock *tb,
                  tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);
#ifdef DEBUG_TB_CHECK

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
    int m_min, m_max, m;
    TranslationBlock *tb;

    if (tc_ptr < (uintptr_t)code_gen_buffer ||
        tc_ptr >= (uintptr_t)code_gen_ptr) {
    /* binary search (cf Knuth) */
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        v = (uintptr_t)tb->tc_ptr;
        else if (tc_ptr < v) {

static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
    TranslationBlock *tb1, *tb_next, **ptb;

    tb1 = tb->jmp_next[n];
        /* find head of list */
            n1 = (uintptr_t)tb1 & 3;
            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
            tb1 = tb1->jmp_next[n1];
        /* we are now sure that tb jumps to tb1 */
        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
            n1 = (uintptr_t)tb1 & 3;
            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
            if (n1 == n && tb1 == tb)
            ptb = &tb1->jmp_next[n1];
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);

static void tb_reset_jump_recursive(TranslationBlock *tb)
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
    tb_invalidate_phys_page_range(pc, pc + 1, 0);

void tb_invalidate_phys_addr(target_phys_addr_t addr)
    ram_addr_t ram_addr;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!(memory_region_is_ram(section->mr)
          || (section->mr->rom_device && section->mr->readable))) {
    ram_addr = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
        + memory_region_section_addr(section, addr);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);

static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
    tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc) |
            (pc & ~TARGET_PAGE_MASK));
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)

int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)

/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
    target_ulong len_mask = ~(len - 1);

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len & (len - 1)) || (addr & ~len_mask) ||
            len == 0 || len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
    wp = g_malloc(sizeof(*wp));
    wp->len_mask = len_mask;

    /* keep all GDB-injected watchpoints in front */
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
    target_ulong len_mask = ~(len - 1);

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
#if defined(TARGET_HAS_ICE)
    bp = g_malloc(sizeof(*bp));

    /* keep all GDB-injected breakpoints in front */
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUArchState *env, int enabled)
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
            kvm_update_guest_debug(env, 0);
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */

static void cpu_unlink_tb(CPUArchState *env)
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    spin_lock(&interrupt_lock);
    tb = env->current_tb;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
    spin_unlock(&interrupt_lock);

#ifndef CONFIG_USER_ONLY
/* mask must never be zero, except for A20 change call */
static void tcg_handle_interrupt(CPUArchState *env, int mask)
    old_mask = env->interrupt_request;
    env->interrupt_request |= mask;

     * If called from iothread context, wake the target cpu in
    if (!qemu_cpu_is_self(env)) {
        env->icount_decr.u16.high = 0xffff;
            && (mask & ~old_mask) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");

CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;

#else /* CONFIG_USER_ONLY */

void cpu_interrupt(CPUArchState *env, int mask)
    env->interrupt_request |= mask;

#endif /* CONFIG_USER_ONLY */

void cpu_reset_interrupt(CPUArchState *env, int mask)
    env->interrupt_request &= ~mask;

void cpu_exit(CPUArchState *env)
    env->exit_request = 1;

void cpu_abort(CPUArchState *env, const char *fmt, ...)
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
    cpu_dump_state(env, stderr, fprintf, 0);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
        log_cpu_state(env, 0);
#if defined(CONFIG_USER_ONLY)
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);

CPUArchState *cpu_copy(CPUArchState *env)
    CPUArchState *new_env = cpu_init(env->cpu_model_str);
    CPUArchState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
#if defined(TARGET_HAS_ICE)
    memcpy(new_env, env, sizeof(CPUArchState));

    /* Preserve chaining and index.  */
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,

#if !defined(CONFIG_USER_ONLY)
void tb_flush_jmp_cache(CPUArchState *env, target_ulong addr)
    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (uintptr_t)qemu_safe_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
            != (end - 1) - start) {
    cpu_tlb_reset_dirty_all(start1, length);

/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    if (tcg_enabled()) {
        tlb_reset_dirty_range_all(start, end, length);

int cpu_physical_memory_set_dirty_tracking(int enable)
    in_migration = enable;

target_phys_addr_t memory_region_section_get_iotlb(CPUArchState *env,
                                                   MemoryRegionSection *section,
                                                   target_phys_addr_t paddr,
                                                   target_ulong *address)
    target_phys_addr_t iotlb;

    if (memory_region_is_ram(section->mr)) {
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, paddr);
        if (!section->readonly) {
            iotlb |= phys_section_notdirty;
            iotlb |= phys_section_rom;
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
        iotlb = section - phys_sections;
        iotlb += memory_region_section_addr(section, paddr);

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = phys_section_watch + paddr;
                *address |= TLB_MMIO;
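/* The iotlb value computed above is overloaded: for RAM-backed sections it
   is the ram_addr of the page (tagged with the notdirty or rom section so
   writes can still be intercepted), while for MMIO it is the section's index
   in phys_sections[] plus the offset within the region.  Pages with a
   watchpoint are redirected to phys_section_watch so every access traps. */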
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
struct walk_memory_regions_data
    walk_memory_regions_fn fn;

static int walk_memory_regions_end(struct walk_memory_regions_data *data,
                                   abi_ulong end, int new_prot)
    if (data->start != -1ul) {
        int rc = data->fn(data->priv, data->start, end, data->prot);

    data->start = (new_prot ? end : -1ul);
    data->prot = new_prot;

static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 abi_ulong base, int level, void **lp)
        return walk_memory_regions_end(data, base, 0);

        for (i = 0; i < L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            if (prot != data->prot) {
                rc = walk_memory_regions_end(data, pa, prot);
        for (i = 0; i < L2_SIZE; ++i) {
            pa = base | ((abi_ulong)i <<
                (TARGET_PAGE_BITS + L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);

int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
    struct walk_memory_regions_data data;

    for (i = 0; i < V_L1_SIZE; i++) {
        int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
                                       V_L1_SHIFT / L2_BITS - 1, l1_map + i);

    return walk_memory_regions_end(&data, 0, 0);

static int dump_region(void *priv, abi_ulong start,
    abi_ulong end, unsigned long prot)
    FILE *f = (FILE *)priv;

    (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
        " "TARGET_ABI_FMT_lx" %c%c%c\n",
        start, end, end - start,
        ((prot & PAGE_READ) ? 'r' : '-'),
        ((prot & PAGE_WRITE) ? 'w' : '-'),
        ((prot & PAGE_EXEC) ? 'x' : '-'));

/* dump memory mappings */
void page_dump(FILE *f)
    (void) fprintf(f, "%-8s %-8s %-8s %s\n",
        "start", "end", "size", "prot");
    walk_memory_regions(f, dump_region);

int page_get_flags(target_ulong address)
    p = page_find(address >> TARGET_PAGE_BITS);

/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is positioned automatically depending
   on PAGE_WRITE.  The mmap_lock should already be held. */
void page_set_flags(target_ulong start, target_ulong end, int flags)
    target_ulong addr, len;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
    assert(start < end);

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        flags |= PAGE_WRITE_ORG;
    for (addr = start, len = end - start;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            tb_invalidate_phys_page(addr, 0, NULL);

int page_check_range(target_ulong start, target_ulong len, int flags)
    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));

    if (start + len - 1 < start) {
        /* We've wrapped around.  */
    end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
    start = start & TARGET_PAGE_MASK;

    for (addr = start, len = end - start;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if( !(p->flags & PAGE_VALID) )
        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG))
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL))

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, uintptr_t pc, void *puc)
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
    p = page_find(address >> TARGET_PAGE_BITS);

    /* if the page was really writable, then we change its
       protection back to writable */
    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
        host_start = address & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;

        for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
            p = page_find(addr >> TARGET_PAGE_BITS);
            p->flags |= PAGE_WRITE;

            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(addr, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(addr);
        mprotect((void *)g2h(host_start), qemu_host_page_size,
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    uint16_t sub_section[TARGET_PAGE_SIZE];

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
static subpage_t *subpage_init(target_phys_addr_t base);
static void destroy_page_desc(uint16_t section_index)
    MemoryRegionSection *section = &phys_sections[section_index];
    MemoryRegion *mr = section->mr;

        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        memory_region_destroy(&subpage->iomem);

static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
    if (lp->ptr == PHYS_MAP_NODE_NIL) {
    p = phys_map_nodes[lp->ptr];
    for (i = 0; i < L2_SIZE; ++i) {
        if (!p[i].is_leaf) {
            destroy_l2_mapping(&p[i], level - 1);
            destroy_page_desc(p[i].ptr);
    lp->ptr = PHYS_MAP_NODE_NIL;

static void destroy_all_mappings(void)
    destroy_l2_mapping(&phys_map, P_L2_LEVELS - 1);
    phys_map_nodes_reset();

static uint16_t phys_section_add(MemoryRegionSection *section)
    if (phys_sections_nb == phys_sections_nb_alloc) {
        phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
        phys_sections = g_renew(MemoryRegionSection, phys_sections,
                                phys_sections_nb_alloc);
    phys_sections[phys_sections_nb] = *section;
    return phys_sections_nb++;

static void phys_sections_clear(void)
    phys_sections_nb = 0;

static void register_subpage(MemoryRegionSection *section)
    target_phys_addr_t base = section->offset_within_address_space
    MemoryRegionSection *existing = phys_page_find(base >> TARGET_PAGE_BITS);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = TARGET_PAGE_SIZE,
    target_phys_addr_t start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(base);
        subsection.mr = &subpage->iomem;
        phys_page_set(base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&subsection));
        subpage = container_of(existing->mr, subpage_t, iomem);
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + section->size - 1;
    subpage_register(subpage, start, end, phys_section_add(section));

static void register_multipage(MemoryRegionSection *section)
    target_phys_addr_t start_addr = section->offset_within_address_space;
    ram_addr_t size = section->size;
    target_phys_addr_t addr;
    uint16_t section_index = phys_section_add(section);

    phys_page_set(addr >> TARGET_PAGE_BITS, size >> TARGET_PAGE_BITS,

void cpu_register_physical_memory_log(MemoryRegionSection *section,
    MemoryRegionSection now = *section, remain = *section;

    if ((now.offset_within_address_space & ~TARGET_PAGE_MASK)
        || (now.size < TARGET_PAGE_SIZE)) {
        now.size = MIN(TARGET_PAGE_ALIGN(now.offset_within_address_space)
                       - now.offset_within_address_space,
        register_subpage(&now);
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
    while (remain.size >= TARGET_PAGE_SIZE) {
        if (remain.offset_within_region & ~TARGET_PAGE_MASK) {
            now.size = TARGET_PAGE_SIZE;
            register_subpage(&now);
            now.size &= TARGET_PAGE_MASK;
            register_multipage(&now);
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
    register_subpage(&now);
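/* The loop above carves a MemoryRegionSection into three parts: a leading
   fragment up to the first page boundary (registered through a subpage), a
   run of whole pages (registered with register_multipage), and a trailing
   fragment (again a subpage).  Pages whose offset within the region is not
   page aligned must stay subpage-backed even when they are full pages, which
   is what the offset_within_region test in the loop handles. */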
void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
        kvm_coalesce_mmio_region(addr, size);

void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
        kvm_uncoalesce_mmio_region(addr, size);

void qemu_flush_coalesced_mmio_buffer(void)
        kvm_flush_coalesced_mmio_buffer();

#if defined(__linux__) && !defined(TARGET_S390X)

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC       0x958458f6

static long gethugepagesize(const char *path)
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

static void *file_ram_alloc(RAMBlock *block,
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);

    if (memory < hpagesize) {
    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
    if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
    fd = mkstemp(filename);
        perror("unable to create backing store for hugepages");
    memory = (memory+hpagesize-1) & ~(hpagesize-1);

     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
    if (ftruncate(fd, memory))
        perror("ftruncate");

    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");

static ram_addr_t find_ram_offset(ram_addr_t size)
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    if (QLIST_EMPTY(&ram_list.blocks))

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->length;

        QLIST_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
        if (next - end >= size && next - end < mingap) {
            mingap = next - end;
    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
2467 static ram_addr_t
last_ram_offset(void)
2470 ram_addr_t last
= 0;
2472 QLIST_FOREACH(block
, &ram_list
.blocks
, next
)
2473 last
= MAX(last
, block
->offset
+ block
->length
);
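
/* Worked example for find_ram_offset() above (illustrative, not part of the
 * original code): with existing blocks covering [0x0, 0x8000) and
 * [0x10000, 0x14000), a request for size 0x4000 sees two candidate gaps -
 * [0x8000, 0x10000) of length 0x8000 and the open-ended gap after 0x14000.
 * Both fit, but the bounded 0x8000 gap is the smaller one, so the new block
 * is placed at offset 0x8000.
 */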
void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
    RAMBlock *new_block, *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (block->offset == addr) {

    assert(!new_block->idstr[0]);

        char *id = qdev_get_dev_path(dev);
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
    RAMBlock *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));

    new_block->offset = find_ram_offset(size);
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
#if defined (__linux__) && !defined(TARGET_S390X)
            new_block->host = file_ram_alloc(new_block, size, mem_path);
            if (!new_block->host) {
                new_block->host = qemu_vmalloc(size);
                qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
            fprintf(stderr, "-mem-path option unsupported\n");
            if (xen_enabled()) {
                xen_ram_alloc(new_block->offset, size, mr);
            } else if (kvm_enabled()) {
                /* some s390/kvm configurations have special constraints */
                new_block->host = kvm_vmalloc(size);
                new_block->host = qemu_vmalloc(size);
            qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
    new_block->length = size;

    QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);

    ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
                                    last_ram_offset() >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0, size >> TARGET_PAGE_BITS);
    cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);

        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;

ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
    return qemu_ram_alloc_from_ptr(size, NULL, mr);

void qemu_ram_free_from_ptr(ram_addr_t addr)
    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE(block, next);

void qemu_ram_free(ram_addr_t addr)
    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE(block, next);
            if (block->flags & RAM_PREALLOC_MASK) {
            } else if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
                    munmap(block->host, block->length);
                    qemu_vfree(block->host);
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                munmap(block->host, block->length);
                if (xen_enabled()) {
                    xen_invalidate_map_cache_entry(block->host);
                    qemu_vfree(block->host);
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
    QLIST_FOREACH(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->length) {
            vaddr = block->host + offset;
            if (block->flags & RAM_PREALLOC_MASK) {
                munmap(vaddr, length);
#if defined(__linux__) && !defined(TARGET_S390X)
                    flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
                    flags |= MAP_PRIVATE;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, block->fd, offset);
                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                flags |= MAP_SHARED | MAP_ANONYMOUS;
                area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
                flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
            if (area != vaddr) {
                fprintf(stderr, "Could not remap addr: "
                        RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
            qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);

#endif /* !_WIN32 */
/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            /* Move this entry to the start of the list. */
            if (block != QLIST_FIRST(&ram_list.blocks)) {
                QLIST_REMOVE(block, next);
                QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
            if (xen_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                        xen_map_cache(block->offset, block->length, 1);
            return block->host + (addr - block->offset);

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
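
/* Illustrative usage sketch (not part of the original code): a device that
 * owns a block of RAM, e.g. video memory, might do the following at init
 * time.  "vram_size" and "vram_mr" are hypothetical names:
 *
 *     ram_addr_t vram = qemu_ram_alloc(vram_size, &vram_mr);
 *     uint8_t *p = qemu_get_ram_ptr(vram);   // host pointer, device-local use
 *     memset(p, 0, vram_size);
 *     qemu_put_ram_ptr(p);
 *
 * As the comment above says, general purpose DMA should go through
 * cpu_physical_memory_map()/cpu_physical_memory_rw() instead.
 */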
/* Return a host pointer to ram allocated with qemu_ram_alloc.
 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
 */
void *qemu_safe_ram_ptr(ram_addr_t addr)
    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            if (xen_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                        xen_map_cache(block->offset, block->length, 1);
            return block->host + (addr - block->offset);

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);

/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
 * but takes a size argument */
void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
    if (xen_enabled()) {
        return xen_map_cache(addr, *size, 1);

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            if (addr - block->offset + *size > block->length)
                *size = block->length - addr + block->offset;
            return block->host + (addr - block->offset);

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);

void qemu_put_ram_ptr(void *addr)
    trace_qemu_put_ram_ptr(addr);
int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
    uint8_t *host = ptr;

    if (xen_enabled()) {
        *ram_addr = xen_ram_addr_from_mapcache(ptr);

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        /* This case appears when the block is not mapped. */
        if (block->host == NULL) {
        if (host - block->host < block->length) {
            *ram_addr = block->offset + (host - block->host);
/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset. */
ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
    ram_addr_t ram_addr;

    if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);

static uint64_t unassigned_mem_read(void *opaque, target_phys_addr_t addr,
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);

static void unassigned_mem_write(void *opaque, target_phys_addr_t addr,
                                 uint64_t val, unsigned size)
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);

static const MemoryRegionOps unassigned_mem_ops = {
    .read = unassigned_mem_read,
    .write = unassigned_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,

static uint64_t error_mem_read(void *opaque, target_phys_addr_t addr,

static void error_mem_write(void *opaque, target_phys_addr_t addr,
                            uint64_t value, unsigned size)

static const MemoryRegionOps error_mem_ops = {
    .read = error_mem_read,
    .write = error_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,

static const MemoryRegionOps rom_mem_ops = {
    .read = error_mem_read,
    .write = unassigned_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,

static void notdirty_mem_write(void *opaque, target_phys_addr_t ram_addr,
                               uint64_t val, unsigned size)
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, size);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
        stb_p(qemu_get_ram_ptr(ram_addr), val);
        stw_p(qemu_get_ram_ptr(ram_addr), val);
        stl_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);

static const MemoryRegionOps notdirty_mem_ops = {
    .read = error_mem_read,
    .write = notdirty_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
/* Generate a debug exception if a watchpoint has been hit. */
static void check_watchpoint(int offset, int len_mask, int flags)
    CPUArchState *env = cpu_single_env;
    target_ulong pc, cs_base;
    TranslationBlock *tb;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);

    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb = tb_find_pc(env->mem_io_pc);
                    cpu_abort(env, "check_watchpoint: could not find TB for "
                              "pc=%p", (void *)env->mem_io_pc);
                cpu_restore_state(tb, env, env->mem_io_pc);
                tb_phys_invalidate(tb, -1);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                    cpu_resume_from_signal(env, NULL);
            wp->flags &= ~BP_WATCHPOINT_HIT;
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines. */
static uint64_t watch_mem_read(void *opaque, target_phys_addr_t addr,
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
    case 1: return ldub_phys(addr);
    case 2: return lduw_phys(addr);
    case 4: return ldl_phys(addr);

static void watch_mem_write(void *opaque, target_phys_addr_t addr,
                            uint64_t val, unsigned size)
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
        stb_phys(addr, val);
        stw_phys(addr, val);
        stl_phys(addr, val);

static const MemoryRegionOps watch_mem_ops = {
    .read = watch_mem_read,
    .write = watch_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,

static uint64_t subpage_read(void *opaque, target_phys_addr_t addr,
    subpage_t *mmio = opaque;
    unsigned int idx = SUBPAGE_IDX(addr);
    MemoryRegionSection *section;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);

    section = &phys_sections[mmio->sub_section[idx]];
    addr -= section->offset_within_address_space;
    addr += section->offset_within_region;
    return io_mem_read(section->mr, addr, len);

static void subpage_write(void *opaque, target_phys_addr_t addr,
                          uint64_t value, unsigned len)
    subpage_t *mmio = opaque;
    unsigned int idx = SUBPAGE_IDX(addr);
    MemoryRegionSection *section;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx
           " idx %d value %"PRIx64"\n",
           __func__, mmio, len, addr, idx, value);

    section = &phys_sections[mmio->sub_section[idx]];
    addr -= section->offset_within_address_space;
    addr += section->offset_within_region;
    io_mem_write(section->mr, addr, value, len);

static const MemoryRegionOps subpage_ops = {
    .read = subpage_read,
    .write = subpage_write,
    .endianness = DEVICE_NATIVE_ENDIAN,

static uint64_t subpage_ram_read(void *opaque, target_phys_addr_t addr,
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    case 1: return ldub_p(ptr);
    case 2: return lduw_p(ptr);
    case 4: return ldl_p(ptr);

static void subpage_ram_write(void *opaque, target_phys_addr_t addr,
                              uint64_t value, unsigned size)
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    case 1: return stb_p(ptr, value);
    case 2: return stw_p(ptr, value);
    case 4: return stl_p(ptr, value);

static const MemoryRegionOps subpage_ram_ops = {
    .read = subpage_ram_read,
    .write = subpage_ram_write,
    .endianness = DEVICE_NATIVE_ENDIAN,

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
           mmio, start, end, idx, eidx, memory);
    if (memory_region_is_ram(phys_sections[section].mr)) {
        MemoryRegionSection new_section = phys_sections[section];
        new_section.mr = &io_mem_subpage_ram;
        section = phys_section_add(&new_section);
    for (; idx <= eidx; idx++) {
        mmio->sub_section[idx] = section;

static subpage_t *subpage_init(target_phys_addr_t base)
    mmio = g_malloc0(sizeof(subpage_t));

    memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
                          "subpage", TARGET_PAGE_SIZE);
    mmio->iomem.subpage = true;
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned);

static uint16_t dummy_section(MemoryRegion *mr)
    MemoryRegionSection section = {
        .offset_within_address_space = 0,
        .offset_within_region = 0,

    return phys_section_add(&section);

MemoryRegion *iotlb_to_region(target_phys_addr_t index)
    return phys_sections[index & ~TARGET_PAGE_MASK].mr;

static void io_mem_init(void)
    memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
    memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
    memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
                          "unassigned", UINT64_MAX);
    memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
                          "notdirty", UINT64_MAX);
    memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
                          "subpage-ram", UINT64_MAX);
    memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
                          "watch", UINT64_MAX);
static void core_begin(MemoryListener *listener)
    destroy_all_mappings();
    phys_sections_clear();
    phys_map.ptr = PHYS_MAP_NODE_NIL;
    phys_section_unassigned = dummy_section(&io_mem_unassigned);
    phys_section_notdirty = dummy_section(&io_mem_notdirty);
    phys_section_rom = dummy_section(&io_mem_rom);
    phys_section_watch = dummy_section(&io_mem_watch);

static void core_commit(MemoryListener *listener)
    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    for (env = first_cpu; env != NULL; env = env->next_cpu) {

static void core_region_add(MemoryListener *listener,
                            MemoryRegionSection *section)
    cpu_register_physical_memory_log(section, section->readonly);

static void core_region_del(MemoryListener *listener,
                            MemoryRegionSection *section)

static void core_region_nop(MemoryListener *listener,
                            MemoryRegionSection *section)
    cpu_register_physical_memory_log(section, section->readonly);

static void core_log_start(MemoryListener *listener,
                           MemoryRegionSection *section)

static void core_log_stop(MemoryListener *listener,
                          MemoryRegionSection *section)

static void core_log_sync(MemoryListener *listener,
                          MemoryRegionSection *section)

static void core_log_global_start(MemoryListener *listener)
    cpu_physical_memory_set_dirty_tracking(1);

static void core_log_global_stop(MemoryListener *listener)
    cpu_physical_memory_set_dirty_tracking(0);

static void core_eventfd_add(MemoryListener *listener,
                             MemoryRegionSection *section,
                             bool match_data, uint64_t data, EventNotifier *e)

static void core_eventfd_del(MemoryListener *listener,
                             MemoryRegionSection *section,
                             bool match_data, uint64_t data, EventNotifier *e)

static void io_begin(MemoryListener *listener)

static void io_commit(MemoryListener *listener)

static void io_region_add(MemoryListener *listener,
                          MemoryRegionSection *section)
    MemoryRegionIORange *mrio = g_new(MemoryRegionIORange, 1);

    mrio->mr = section->mr;
    mrio->offset = section->offset_within_region;
    iorange_init(&mrio->iorange, &memory_region_iorange_ops,
                 section->offset_within_address_space, section->size);
    ioport_register(&mrio->iorange);

static void io_region_del(MemoryListener *listener,
                          MemoryRegionSection *section)
    isa_unassign_ioport(section->offset_within_address_space, section->size);

static void io_region_nop(MemoryListener *listener,
                          MemoryRegionSection *section)

static void io_log_start(MemoryListener *listener,
                         MemoryRegionSection *section)

static void io_log_stop(MemoryListener *listener,
                        MemoryRegionSection *section)

static void io_log_sync(MemoryListener *listener,
                        MemoryRegionSection *section)

static void io_log_global_start(MemoryListener *listener)

static void io_log_global_stop(MemoryListener *listener)

static void io_eventfd_add(MemoryListener *listener,
                           MemoryRegionSection *section,
                           bool match_data, uint64_t data, EventNotifier *e)

static void io_eventfd_del(MemoryListener *listener,
                           MemoryRegionSection *section,
                           bool match_data, uint64_t data, EventNotifier *e)

static MemoryListener core_memory_listener = {
    .begin = core_begin,
    .commit = core_commit,
    .region_add = core_region_add,
    .region_del = core_region_del,
    .region_nop = core_region_nop,
    .log_start = core_log_start,
    .log_stop = core_log_stop,
    .log_sync = core_log_sync,
    .log_global_start = core_log_global_start,
    .log_global_stop = core_log_global_stop,
    .eventfd_add = core_eventfd_add,
    .eventfd_del = core_eventfd_del,

static MemoryListener io_memory_listener = {
    .commit = io_commit,
    .region_add = io_region_add,
    .region_del = io_region_del,
    .region_nop = io_region_nop,
    .log_start = io_log_start,
    .log_stop = io_log_stop,
    .log_sync = io_log_sync,
    .log_global_start = io_log_global_start,
    .log_global_stop = io_log_global_stop,
    .eventfd_add = io_eventfd_add,
    .eventfd_del = io_eventfd_del,
static void memory_map_init(void)
    system_memory = g_malloc(sizeof(*system_memory));
    memory_region_init(system_memory, "system", INT64_MAX);
    set_system_memory_map(system_memory);

    system_io = g_malloc(sizeof(*system_io));
    memory_region_init(system_io, "io", 65536);
    set_system_io_map(system_io);

    memory_listener_register(&core_memory_listener, system_memory);
    memory_listener_register(&io_memory_listener, system_io);
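
/* Illustrative note (not part of the original code): a MemoryListener client
 * mirrors the pattern above - define the callback set (the listeners in this
 * file fill in every hook, even the empty ones) and attach it to an address
 * space root with memory_listener_register(), e.g.
 *
 *     memory_listener_register(&my_listener, system_memory);
 *
 * where "my_listener" is a hypothetical listener defined like
 * core_memory_listener above.
 */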
MemoryRegion *get_system_memory(void)
    return system_memory;

MemoryRegion *get_system_io(void)

#endif /* !defined(CONFIG_USER_ONLY) */
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            if (!(flags & PAGE_WRITE))
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
            unlock_user(p, addr, l);
            if (!(flags & PAGE_READ))
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
            unlock_user(p, addr, 0);

void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
    target_phys_addr_t page;
    MemoryRegionSection *section;

        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        section = phys_page_find(page >> TARGET_PAGE_BITS);

            if (!memory_region_is_ram(section->mr)) {
                target_phys_addr_t addr1;
                addr1 = memory_region_section_addr(section, addr);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    io_mem_write(section->mr, addr1, val, 4);
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    io_mem_write(section->mr, addr1, val, 2);
                    /* 8 bit write access */
                    io_mem_write(section->mr, addr1, val, 1);
            } else if (!section->readonly) {
                addr1 = memory_region_get_ram_addr(section->mr)
                    + memory_region_section_addr(section, addr);
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                qemu_put_ram_ptr(ptr);

            if (!(memory_region_is_ram(section->mr) ||
                  memory_region_is_romd(section->mr))) {
                target_phys_addr_t addr1;
                addr1 = memory_region_section_addr(section, addr);
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read(section->mr, addr1, 4);
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read(section->mr, addr1, 2);
                    /* 8 bit read access */
                    val = io_mem_read(section->mr, addr1, 1);
                ptr = qemu_get_ram_ptr(section->mr->ram_addr
                                       + memory_region_section_addr(section,
                memcpy(buf, ptr, l);
                qemu_put_ram_ptr(ptr);

/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
    target_phys_addr_t page;
    MemoryRegionSection *section;

        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        section = phys_page_find(page >> TARGET_PAGE_BITS);

        if (!(memory_region_is_ram(section->mr) ||
              memory_region_is_romd(section->mr))) {
            unsigned long addr1;
            addr1 = memory_region_get_ram_addr(section->mr)
                + memory_region_section_addr(section, addr);
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
            qemu_put_ram_ptr(ptr);
    target_phys_addr_t addr;
    target_phys_addr_t len;
static BounceBuffer bounce;

typedef struct MapClient {
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
    MapClient *client = g_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    QLIST_INSERT_HEAD(&map_client_list, client, link);

void cpu_unregister_map_client(void *_client)
    MapClient *client = (MapClient *)_client;

    QLIST_REMOVE(client, link);

static void cpu_notify_map_clients(void)
    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);

/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
    target_phys_addr_t len = *plen;
    target_phys_addr_t todo = 0;
    target_phys_addr_t page;
    MemoryRegionSection *section;
    ram_addr_t raddr = RAM_ADDR_MAX;

        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        section = phys_page_find(page >> TARGET_PAGE_BITS);

        if (!(memory_region_is_ram(section->mr) && !section->readonly)) {
            if (todo || bounce.buffer) {
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
                cpu_physical_memory_read(addr, bounce.buffer, l);
            return bounce.buffer;
        raddr = memory_region_get_ram_addr(section->mr)
            + memory_region_section_addr(section, addr);

    ret = qemu_ram_ptr_length(raddr, &rlen);

/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
    if (buffer != bounce.buffer) {
            ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
            while (access_len) {
                l = TARGET_PAGE_SIZE;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
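
/* Illustrative usage sketch (not part of the original code) for a device
 * reading "len" bytes at guest physical address "gpa"; both names are
 * hypothetical and error handling is elided.  The trailing argument of
 * cpu_physical_memory_map() is assumed to be the is_write flag:
 *
 *     target_phys_addr_t plen = len;
 *     void *p = cpu_physical_memory_map(gpa, &plen, 0);
 *     if (p) {
 *         // only plen bytes are mapped; this may be less than len
 *         ... consume plen bytes at p ...
 *         cpu_physical_memory_unmap(p, plen, 0, plen);
 *     } else {
 *         // resources exhausted: fall back to cpu_physical_memory_rw(),
 *         // or retry later via cpu_register_map_client()
 *     }
 */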
/* warning: addr must be aligned */
static inline uint32_t ldl_phys_internal(target_phys_addr_t addr,
                                         enum device_endian endian)
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!(memory_region_is_ram(section->mr) ||
          memory_region_is_romd(section->mr))) {
        addr = memory_region_section_addr(section, addr);
        val = io_mem_read(section->mr, addr, 4);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
        if (endian == DEVICE_BIG_ENDIAN) {
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                               + memory_region_section_addr(section, addr));
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);

uint32_t ldl_phys(target_phys_addr_t addr)
    return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);

uint32_t ldl_le_phys(target_phys_addr_t addr)
    return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);

uint32_t ldl_be_phys(target_phys_addr_t addr)
    return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);

/* warning: addr must be aligned */
static inline uint64_t ldq_phys_internal(target_phys_addr_t addr,
                                         enum device_endian endian)
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!(memory_region_is_ram(section->mr) ||
          memory_region_is_romd(section->mr))) {
        addr = memory_region_section_addr(section, addr);

        /* XXX This is broken when device endian != cpu endian.
               Fix and add "endian" variable check */
#ifdef TARGET_WORDS_BIGENDIAN
        val = io_mem_read(section->mr, addr, 4) << 32;
        val |= io_mem_read(section->mr, addr + 4, 4);
        val = io_mem_read(section->mr, addr, 4);
        val |= io_mem_read(section->mr, addr + 4, 4) << 32;
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                               + memory_region_section_addr(section, addr));
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);

uint64_t ldq_phys(target_phys_addr_t addr)
    return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);

uint64_t ldq_le_phys(target_phys_addr_t addr)
    return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);

uint64_t ldq_be_phys(target_phys_addr_t addr)
    return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);

uint32_t ldub_phys(target_phys_addr_t addr)
    cpu_physical_memory_read(addr, &val, 1);

/* warning: addr must be aligned */
static inline uint32_t lduw_phys_internal(target_phys_addr_t addr,
                                          enum device_endian endian)
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!(memory_region_is_ram(section->mr) ||
          memory_region_is_romd(section->mr))) {
        addr = memory_region_section_addr(section, addr);
        val = io_mem_read(section->mr, addr, 2);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
        if (endian == DEVICE_BIG_ENDIAN) {
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                               + memory_region_section_addr(section, addr));
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);

uint32_t lduw_phys(target_phys_addr_t addr)
    return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);

uint32_t lduw_le_phys(target_phys_addr_t addr)
    return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);

uint32_t lduw_be_phys(target_phys_addr_t addr)
    return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
/* warning: addr must be aligned.  The ram page is not marked as dirty
   and the code inside is not invalidated.  It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        io_mem_write(section->mr, addr, val, 4);
        unsigned long addr1 = (memory_region_get_ram_addr(section->mr)
            + memory_region_section_addr(section, addr);
        ptr = qemu_get_ram_ptr(addr1);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write(section->mr, addr, val >> 32, 4);
        io_mem_write(section->mr, addr + 4, (uint32_t)val, 4);
        io_mem_write(section->mr, addr, (uint32_t)val, 4);
        io_mem_write(section->mr, addr + 4, val >> 32, 4);
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                               + memory_region_section_addr(section, addr));

/* warning: addr must be aligned */
static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val,
                                     enum device_endian endian)
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
        if (endian == DEVICE_BIG_ENDIAN) {
        io_mem_write(section->mr, addr, val, 4);
        unsigned long addr1;
        addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, addr);
        ptr = qemu_get_ram_ptr(addr1);
        case DEVICE_LITTLE_ENDIAN:
        case DEVICE_BIG_ENDIAN:
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));

void stl_phys(target_phys_addr_t addr, uint32_t val)
    stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);

void stl_le_phys(target_phys_addr_t addr, uint32_t val)
    stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);

void stl_be_phys(target_phys_addr_t addr, uint32_t val)
    stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);

void stb_phys(target_phys_addr_t addr, uint32_t val)
    cpu_physical_memory_write(addr, &v, 1);

/* warning: addr must be aligned */
static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val,
                                     enum device_endian endian)
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
        if (endian == DEVICE_BIG_ENDIAN) {
        io_mem_write(section->mr, addr, val, 2);
        unsigned long addr1;
        addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, addr);
        ptr = qemu_get_ram_ptr(addr1);
        case DEVICE_LITTLE_ENDIAN:
        case DEVICE_BIG_ENDIAN:
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));

void stw_phys(target_phys_addr_t addr, uint32_t val)
    stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);

void stw_le_phys(target_phys_addr_t addr, uint32_t val)
    stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);

void stw_be_phys(target_phys_addr_t addr, uint32_t val)
    stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);

void stq_phys(target_phys_addr_t addr, uint64_t val)
    cpu_physical_memory_write(addr, &val, 8);

void stq_le_phys(target_phys_addr_t addr, uint64_t val)
    val = cpu_to_le64(val);
    cpu_physical_memory_write(addr, &val, 8);

void stq_be_phys(target_phys_addr_t addr, uint64_t val)
    val = cpu_to_be64(val);
    cpu_physical_memory_write(addr, &val, 8);
/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
    target_phys_addr_t phys_addr;

        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
        l = (page + TARGET_PAGE_SIZE) - addr;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
            cpu_physical_memory_write_rom(phys_addr, buf, l);
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUArchState *env, uintptr_t retaddr)
    TranslationBlock *tb;
    target_ulong pc, cs_base;

    tb = tb_find_pc(retaddr);
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, retaddr);
    /* Calculate how many instructions had been executed before the fault
       occurred. */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn. */
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch. */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    /* This should never happen. */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    cs_base = tb->cs_base;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok. */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB. */
    cpu_resume_from_signal(env, NULL);
#if !defined(CONFIG_USER_ONLY)

void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for (i = 0; i < nb_tbs; i++) {
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
        if (tb->tb_next_offset[0] != 0xffff) {
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size %td/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool virtio_is_big_endian(void);
bool virtio_is_big_endian(void)
#if defined(TARGET_WORDS_BIGENDIAN)

#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(target_phys_addr_t phys_addr)
    MemoryRegionSection *section;

    section = phys_page_find(phys_addr >> TARGET_PAGE_BITS);

    return !(memory_region_is_ram(section->mr) ||
             memory_region_is_romd(section->mr));