4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 #include <sys/types.h>
25 #include "qemu-common.h"
29 #if !defined(CONFIG_USER_ONLY)
30 #include "hw/boards.h"
33 #include "qemu/osdep.h"
34 #include "sysemu/kvm.h"
35 #include "sysemu/sysemu.h"
36 #include "hw/xen/xen.h"
37 #include "qemu/timer.h"
38 #include "qemu/config-file.h"
39 #include "qemu/error-report.h"
40 #include "exec/memory.h"
41 #include "sysemu/dma.h"
42 #include "exec/address-spaces.h"
43 #if defined(CONFIG_USER_ONLY)
45 #else /* !CONFIG_USER_ONLY */
46 #include "sysemu/xen-mapcache.h"
49 #include "exec/cpu-all.h"
50 #include "qemu/rcu_queue.h"
51 #include "qemu/main-loop.h"
52 #include "exec/cputlb.h"
53 #include "translate-all.h"
55 #include "exec/memory-internal.h"
56 #include "exec/ram_addr.h"
58 #include "qemu/range.h"
60 //#define DEBUG_SUBPAGE
62 #if !defined(CONFIG_USER_ONLY)
63 /* ram_list is read under rcu_read_lock()/rcu_read_unlock(). Writes
64 * are protected by the ramlist lock.
66 RAMList ram_list
= { .blocks
= QLIST_HEAD_INITIALIZER(ram_list
.blocks
) };
68 static MemoryRegion
*system_memory
;
69 static MemoryRegion
*system_io
;
71 AddressSpace address_space_io
;
72 AddressSpace address_space_memory
;
74 MemoryRegion io_mem_rom
, io_mem_notdirty
;
75 static MemoryRegion io_mem_unassigned
;
77 /* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
78 #define RAM_PREALLOC (1 << 0)
80 /* RAM is mmap-ed with MAP_SHARED */
81 #define RAM_SHARED (1 << 1)
83 /* Only a portion of RAM (used_length) is actually used, and migrated.
84 * This used_length size can change across reboots.
86 #define RAM_RESIZEABLE (1 << 2)
90 struct CPUTailQ cpus
= QTAILQ_HEAD_INITIALIZER(cpus
);
91 /* current CPU in the current thread. It is only valid inside
93 DEFINE_TLS(CPUState
*, current_cpu
);
94 /* 0 = Do not count executed instructions.
95 1 = Precise instruction counting.
96 2 = Adaptive rate instruction counting. */
99 #if !defined(CONFIG_USER_ONLY)
101 typedef struct PhysPageEntry PhysPageEntry
;
103 struct PhysPageEntry
{
104 /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */
106 /* index into phys_sections (!skip) or phys_map_nodes (skip) */
110 #define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)
112 /* Size of the L2 (and L3, etc) page tables. */
113 #define ADDR_SPACE_BITS 64
116 #define P_L2_SIZE (1 << P_L2_BITS)
118 #define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
120 typedef PhysPageEntry Node
[P_L2_SIZE
];
122 typedef struct PhysPageMap
{
125 unsigned sections_nb
;
126 unsigned sections_nb_alloc
;
128 unsigned nodes_nb_alloc
;
130 MemoryRegionSection
*sections
;
133 struct AddressSpaceDispatch
{
136 /* This is a multi-level map on the physical address space.
137 * The bottom level has pointers to MemoryRegionSections.
139 PhysPageEntry phys_map
;
144 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
145 typedef struct subpage_t
{
149 uint16_t sub_section
[TARGET_PAGE_SIZE
];
152 #define PHYS_SECTION_UNASSIGNED 0
153 #define PHYS_SECTION_NOTDIRTY 1
154 #define PHYS_SECTION_ROM 2
155 #define PHYS_SECTION_WATCH 3
157 static void io_mem_init(void);
158 static void memory_map_init(void);
159 static void tcg_commit(MemoryListener
*listener
);
161 static MemoryRegion io_mem_watch
;
164 #if !defined(CONFIG_USER_ONLY)
166 static void phys_map_node_reserve(PhysPageMap
*map
, unsigned nodes
)
168 if (map
->nodes_nb
+ nodes
> map
->nodes_nb_alloc
) {
169 map
->nodes_nb_alloc
= MAX(map
->nodes_nb_alloc
* 2, 16);
170 map
->nodes_nb_alloc
= MAX(map
->nodes_nb_alloc
, map
->nodes_nb
+ nodes
);
171 map
->nodes
= g_renew(Node
, map
->nodes
, map
->nodes_nb_alloc
);
175 static uint32_t phys_map_node_alloc(PhysPageMap
*map
, bool leaf
)
182 ret
= map
->nodes_nb
++;
184 assert(ret
!= PHYS_MAP_NODE_NIL
);
185 assert(ret
!= map
->nodes_nb_alloc
);
187 e
.skip
= leaf
? 0 : 1;
188 e
.ptr
= leaf
? PHYS_SECTION_UNASSIGNED
: PHYS_MAP_NODE_NIL
;
189 for (i
= 0; i
< P_L2_SIZE
; ++i
) {
190 memcpy(&p
[i
], &e
, sizeof(e
));
195 static void phys_page_set_level(PhysPageMap
*map
, PhysPageEntry
*lp
,
196 hwaddr
*index
, hwaddr
*nb
, uint16_t leaf
,
200 hwaddr step
= (hwaddr
)1 << (level
* P_L2_BITS
);
202 if (lp
->skip
&& lp
->ptr
== PHYS_MAP_NODE_NIL
) {
203 lp
->ptr
= phys_map_node_alloc(map
, level
== 0);
205 p
= map
->nodes
[lp
->ptr
];
206 lp
= &p
[(*index
>> (level
* P_L2_BITS
)) & (P_L2_SIZE
- 1)];
208 while (*nb
&& lp
< &p
[P_L2_SIZE
]) {
209 if ((*index
& (step
- 1)) == 0 && *nb
>= step
) {
215 phys_page_set_level(map
, lp
, index
, nb
, leaf
, level
- 1);
221 static void phys_page_set(AddressSpaceDispatch
*d
,
222 hwaddr index
, hwaddr nb
,
225 /* Wildly overreserve - it doesn't matter much. */
226 phys_map_node_reserve(&d
->map
, 3 * P_L2_LEVELS
);
228 phys_page_set_level(&d
->map
, &d
->phys_map
, &index
, &nb
, leaf
, P_L2_LEVELS
- 1);
231 /* Compact a non leaf page entry. Simply detect that the entry has a single child,
232 * and update our entry so we can skip it and go directly to the destination.
234 static void phys_page_compact(PhysPageEntry
*lp
, Node
*nodes
, unsigned long *compacted
)
236 unsigned valid_ptr
= P_L2_SIZE
;
241 if (lp
->ptr
== PHYS_MAP_NODE_NIL
) {
246 for (i
= 0; i
< P_L2_SIZE
; i
++) {
247 if (p
[i
].ptr
== PHYS_MAP_NODE_NIL
) {
254 phys_page_compact(&p
[i
], nodes
, compacted
);
258 /* We can only compress if there's only one child. */
263 assert(valid_ptr
< P_L2_SIZE
);
265 /* Don't compress if it won't fit in the # of bits we have. */
266 if (lp
->skip
+ p
[valid_ptr
].skip
>= (1 << 3)) {
270 lp
->ptr
= p
[valid_ptr
].ptr
;
271 if (!p
[valid_ptr
].skip
) {
272 /* If our only child is a leaf, make this a leaf. */
273 /* By design, we should have made this node a leaf to begin with so we
274 * should never reach here.
275 * But since it's so simple to handle this, let's do it just in case we
280 lp
->skip
+= p
[valid_ptr
].skip
;
284 static void phys_page_compact_all(AddressSpaceDispatch
*d
, int nodes_nb
)
286 DECLARE_BITMAP(compacted
, nodes_nb
);
288 if (d
->phys_map
.skip
) {
289 phys_page_compact(&d
->phys_map
, d
->map
.nodes
, compacted
);
293 static MemoryRegionSection
*phys_page_find(PhysPageEntry lp
, hwaddr addr
,
294 Node
*nodes
, MemoryRegionSection
*sections
)
297 hwaddr index
= addr
>> TARGET_PAGE_BITS
;
300 for (i
= P_L2_LEVELS
; lp
.skip
&& (i
-= lp
.skip
) >= 0;) {
301 if (lp
.ptr
== PHYS_MAP_NODE_NIL
) {
302 return §ions
[PHYS_SECTION_UNASSIGNED
];
305 lp
= p
[(index
>> (i
* P_L2_BITS
)) & (P_L2_SIZE
- 1)];
308 if (sections
[lp
.ptr
].size
.hi
||
309 range_covers_byte(sections
[lp
.ptr
].offset_within_address_space
,
310 sections
[lp
.ptr
].size
.lo
, addr
)) {
311 return §ions
[lp
.ptr
];
313 return §ions
[PHYS_SECTION_UNASSIGNED
];
317 bool memory_region_is_unassigned(MemoryRegion
*mr
)
319 return mr
!= &io_mem_rom
&& mr
!= &io_mem_notdirty
&& !mr
->rom_device
320 && mr
!= &io_mem_watch
;
323 /* Called from RCU critical section */
324 static MemoryRegionSection
*address_space_lookup_region(AddressSpaceDispatch
*d
,
326 bool resolve_subpage
)
328 MemoryRegionSection
*section
;
331 section
= phys_page_find(d
->phys_map
, addr
, d
->map
.nodes
, d
->map
.sections
);
332 if (resolve_subpage
&& section
->mr
->subpage
) {
333 subpage
= container_of(section
->mr
, subpage_t
, iomem
);
334 section
= &d
->map
.sections
[subpage
->sub_section
[SUBPAGE_IDX(addr
)]];
339 /* Called from RCU critical section */
340 static MemoryRegionSection
*
341 address_space_translate_internal(AddressSpaceDispatch
*d
, hwaddr addr
, hwaddr
*xlat
,
342 hwaddr
*plen
, bool resolve_subpage
)
344 MemoryRegionSection
*section
;
348 section
= address_space_lookup_region(d
, addr
, resolve_subpage
);
349 /* Compute offset within MemoryRegionSection */
350 addr
-= section
->offset_within_address_space
;
352 /* Compute offset within MemoryRegion */
353 *xlat
= addr
+ section
->offset_within_region
;
357 /* MMIO registers can be expected to perform full-width accesses based only
358 * on their address, without considering adjacent registers that could
359 * decode to completely different MemoryRegions. When such registers
360 * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
361 * regions overlap wildly. For this reason we cannot clamp the accesses
364 * If the length is small (as is the case for address_space_ldl/stl),
365 * everything works fine. If the incoming length is large, however,
366 * the caller really has to do the clamping through memory_access_size.
368 if (memory_region_is_ram(mr
)) {
369 diff
= int128_sub(section
->size
, int128_make64(addr
));
370 *plen
= int128_get64(int128_min(diff
, int128_make64(*plen
)));
375 static inline bool memory_access_is_direct(MemoryRegion
*mr
, bool is_write
)
377 if (memory_region_is_ram(mr
)) {
378 return !(is_write
&& mr
->readonly
);
380 if (memory_region_is_romd(mr
)) {
387 /* Called from RCU critical section */
388 MemoryRegion
*address_space_translate(AddressSpace
*as
, hwaddr addr
,
389 hwaddr
*xlat
, hwaddr
*plen
,
393 MemoryRegionSection
*section
;
397 AddressSpaceDispatch
*d
= atomic_rcu_read(&as
->dispatch
);
398 section
= address_space_translate_internal(d
, addr
, &addr
, plen
, true);
401 if (!mr
->iommu_ops
) {
405 iotlb
= mr
->iommu_ops
->translate(mr
, addr
, is_write
);
406 addr
= ((iotlb
.translated_addr
& ~iotlb
.addr_mask
)
407 | (addr
& iotlb
.addr_mask
));
408 *plen
= MIN(*plen
, (addr
| iotlb
.addr_mask
) - addr
+ 1);
409 if (!(iotlb
.perm
& (1 << is_write
))) {
410 mr
= &io_mem_unassigned
;
414 as
= iotlb
.target_as
;
417 if (xen_enabled() && memory_access_is_direct(mr
, is_write
)) {
418 hwaddr page
= ((addr
& TARGET_PAGE_MASK
) + TARGET_PAGE_SIZE
) - addr
;
419 *plen
= MIN(page
, *plen
);
426 /* Called from RCU critical section */
427 MemoryRegionSection
*
428 address_space_translate_for_iotlb(CPUState
*cpu
, hwaddr addr
,
429 hwaddr
*xlat
, hwaddr
*plen
)
431 MemoryRegionSection
*section
;
432 section
= address_space_translate_internal(cpu
->memory_dispatch
,
433 addr
, xlat
, plen
, false);
435 assert(!section
->mr
->iommu_ops
);
440 #if !defined(CONFIG_USER_ONLY)
442 static int cpu_common_post_load(void *opaque
, int version_id
)
444 CPUState
*cpu
= opaque
;
446 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
447 version_id is increased. */
448 cpu
->interrupt_request
&= ~0x01;
454 static int cpu_common_pre_load(void *opaque
)
456 CPUState
*cpu
= opaque
;
458 cpu
->exception_index
= -1;
463 static bool cpu_common_exception_index_needed(void *opaque
)
465 CPUState
*cpu
= opaque
;
467 return tcg_enabled() && cpu
->exception_index
!= -1;
470 static const VMStateDescription vmstate_cpu_common_exception_index
= {
471 .name
= "cpu_common/exception_index",
473 .minimum_version_id
= 1,
474 .needed
= cpu_common_exception_index_needed
,
475 .fields
= (VMStateField
[]) {
476 VMSTATE_INT32(exception_index
, CPUState
),
477 VMSTATE_END_OF_LIST()
481 const VMStateDescription vmstate_cpu_common
= {
482 .name
= "cpu_common",
484 .minimum_version_id
= 1,
485 .pre_load
= cpu_common_pre_load
,
486 .post_load
= cpu_common_post_load
,
487 .fields
= (VMStateField
[]) {
488 VMSTATE_UINT32(halted
, CPUState
),
489 VMSTATE_UINT32(interrupt_request
, CPUState
),
490 VMSTATE_END_OF_LIST()
492 .subsections
= (const VMStateDescription
*[]) {
493 &vmstate_cpu_common_exception_index
,
500 CPUState
*qemu_get_cpu(int index
)
505 if (cpu
->cpu_index
== index
) {
513 #if !defined(CONFIG_USER_ONLY)
514 void tcg_cpu_address_space_init(CPUState
*cpu
, AddressSpace
*as
)
516 /* We only support one address space per cpu at the moment. */
517 assert(cpu
->as
== as
);
519 if (cpu
->tcg_as_listener
) {
520 memory_listener_unregister(cpu
->tcg_as_listener
);
522 cpu
->tcg_as_listener
= g_new0(MemoryListener
, 1);
524 cpu
->tcg_as_listener
->commit
= tcg_commit
;
525 memory_listener_register(cpu
->tcg_as_listener
, as
);
529 #ifndef CONFIG_USER_ONLY
530 static DECLARE_BITMAP(cpu_index_map
, MAX_CPUMASK_BITS
);
532 static int cpu_get_free_index(Error
**errp
)
534 int cpu
= find_first_zero_bit(cpu_index_map
, MAX_CPUMASK_BITS
);
536 if (cpu
>= MAX_CPUMASK_BITS
) {
537 error_setg(errp
, "Trying to use more CPUs than max of %d",
542 bitmap_set(cpu_index_map
, cpu
, 1);
546 void cpu_exec_exit(CPUState
*cpu
)
548 if (cpu
->cpu_index
== -1) {
549 /* cpu_index was never allocated by this @cpu or was already freed. */
553 bitmap_clear(cpu_index_map
, cpu
->cpu_index
, 1);
558 static int cpu_get_free_index(Error
**errp
)
563 CPU_FOREACH(some_cpu
) {
569 void cpu_exec_exit(CPUState
*cpu
)
574 void cpu_exec_init(CPUState
*cpu
, Error
**errp
)
576 CPUClass
*cc
= CPU_GET_CLASS(cpu
);
578 Error
*local_err
= NULL
;
580 #ifdef TARGET_WORDS_BIGENDIAN
581 cpu
->bigendian
= true;
583 cpu
->bigendian
= false;
586 #ifndef CONFIG_USER_ONLY
587 cpu
->as
= &address_space_memory
;
588 cpu
->thread_id
= qemu_get_thread_id();
589 cpu_reload_memory_map(cpu
);
592 #if defined(CONFIG_USER_ONLY)
595 cpu_index
= cpu
->cpu_index
= cpu_get_free_index(&local_err
);
597 error_propagate(errp
, local_err
);
598 #if defined(CONFIG_USER_ONLY)
603 QTAILQ_INSERT_TAIL(&cpus
, cpu
, node
);
604 #if defined(CONFIG_USER_ONLY)
607 if (qdev_get_vmsd(DEVICE(cpu
)) == NULL
) {
608 vmstate_register(NULL
, cpu_index
, &vmstate_cpu_common
, cpu
);
610 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
611 register_savevm(NULL
, "cpu", cpu_index
, CPU_SAVE_VERSION
,
612 cpu_save
, cpu_load
, cpu
->env_ptr
);
613 assert(cc
->vmsd
== NULL
);
614 assert(qdev_get_vmsd(DEVICE(cpu
)) == NULL
);
616 if (cc
->vmsd
!= NULL
) {
617 vmstate_register(NULL
, cpu_index
, cc
->vmsd
, cpu
);
621 #if defined(CONFIG_USER_ONLY)
622 static void breakpoint_invalidate(CPUState
*cpu
, target_ulong pc
)
624 tb_invalidate_phys_page_range(pc
, pc
+ 1, 0);
627 static void breakpoint_invalidate(CPUState
*cpu
, target_ulong pc
)
629 hwaddr phys
= cpu_get_phys_page_debug(cpu
, pc
);
631 tb_invalidate_phys_addr(cpu
->as
,
632 phys
| (pc
& ~TARGET_PAGE_MASK
));
637 #if defined(CONFIG_USER_ONLY)
638 void cpu_watchpoint_remove_all(CPUState
*cpu
, int mask
)
643 int cpu_watchpoint_remove(CPUState
*cpu
, vaddr addr
, vaddr len
,
649 void cpu_watchpoint_remove_by_ref(CPUState
*cpu
, CPUWatchpoint
*watchpoint
)
653 int cpu_watchpoint_insert(CPUState
*cpu
, vaddr addr
, vaddr len
,
654 int flags
, CPUWatchpoint
**watchpoint
)
659 /* Add a watchpoint. */
660 int cpu_watchpoint_insert(CPUState
*cpu
, vaddr addr
, vaddr len
,
661 int flags
, CPUWatchpoint
**watchpoint
)
665 /* forbid ranges which are empty or run off the end of the address space */
666 if (len
== 0 || (addr
+ len
- 1) < addr
) {
667 error_report("tried to set invalid watchpoint at %"
668 VADDR_PRIx
", len=%" VADDR_PRIu
, addr
, len
);
671 wp
= g_malloc(sizeof(*wp
));
677 /* keep all GDB-injected watchpoints in front */
678 if (flags
& BP_GDB
) {
679 QTAILQ_INSERT_HEAD(&cpu
->watchpoints
, wp
, entry
);
681 QTAILQ_INSERT_TAIL(&cpu
->watchpoints
, wp
, entry
);
684 tlb_flush_page(cpu
, addr
);
691 /* Remove a specific watchpoint. */
692 int cpu_watchpoint_remove(CPUState
*cpu
, vaddr addr
, vaddr len
,
697 QTAILQ_FOREACH(wp
, &cpu
->watchpoints
, entry
) {
698 if (addr
== wp
->vaddr
&& len
== wp
->len
699 && flags
== (wp
->flags
& ~BP_WATCHPOINT_HIT
)) {
700 cpu_watchpoint_remove_by_ref(cpu
, wp
);
707 /* Remove a specific watchpoint by reference. */
708 void cpu_watchpoint_remove_by_ref(CPUState
*cpu
, CPUWatchpoint
*watchpoint
)
710 QTAILQ_REMOVE(&cpu
->watchpoints
, watchpoint
, entry
);
712 tlb_flush_page(cpu
, watchpoint
->vaddr
);
717 /* Remove all matching watchpoints. */
718 void cpu_watchpoint_remove_all(CPUState
*cpu
, int mask
)
720 CPUWatchpoint
*wp
, *next
;
722 QTAILQ_FOREACH_SAFE(wp
, &cpu
->watchpoints
, entry
, next
) {
723 if (wp
->flags
& mask
) {
724 cpu_watchpoint_remove_by_ref(cpu
, wp
);
729 /* Return true if this watchpoint address matches the specified
730 * access (ie the address range covered by the watchpoint overlaps
731 * partially or completely with the address range covered by the
734 static inline bool cpu_watchpoint_address_matches(CPUWatchpoint
*wp
,
738 /* We know the lengths are non-zero, but a little caution is
739 * required to avoid errors in the case where the range ends
740 * exactly at the top of the address space and so addr + len
741 * wraps round to zero.
743 vaddr wpend
= wp
->vaddr
+ wp
->len
- 1;
744 vaddr addrend
= addr
+ len
- 1;
746 return !(addr
> wpend
|| wp
->vaddr
> addrend
);
751 /* Add a breakpoint. */
752 int cpu_breakpoint_insert(CPUState
*cpu
, vaddr pc
, int flags
,
753 CPUBreakpoint
**breakpoint
)
757 bp
= g_malloc(sizeof(*bp
));
762 /* keep all GDB-injected breakpoints in front */
763 if (flags
& BP_GDB
) {
764 QTAILQ_INSERT_HEAD(&cpu
->breakpoints
, bp
, entry
);
766 QTAILQ_INSERT_TAIL(&cpu
->breakpoints
, bp
, entry
);
769 breakpoint_invalidate(cpu
, pc
);
777 /* Remove a specific breakpoint. */
778 int cpu_breakpoint_remove(CPUState
*cpu
, vaddr pc
, int flags
)
782 QTAILQ_FOREACH(bp
, &cpu
->breakpoints
, entry
) {
783 if (bp
->pc
== pc
&& bp
->flags
== flags
) {
784 cpu_breakpoint_remove_by_ref(cpu
, bp
);
791 /* Remove a specific breakpoint by reference. */
792 void cpu_breakpoint_remove_by_ref(CPUState
*cpu
, CPUBreakpoint
*breakpoint
)
794 QTAILQ_REMOVE(&cpu
->breakpoints
, breakpoint
, entry
);
796 breakpoint_invalidate(cpu
, breakpoint
->pc
);
801 /* Remove all matching breakpoints. */
802 void cpu_breakpoint_remove_all(CPUState
*cpu
, int mask
)
804 CPUBreakpoint
*bp
, *next
;
806 QTAILQ_FOREACH_SAFE(bp
, &cpu
->breakpoints
, entry
, next
) {
807 if (bp
->flags
& mask
) {
808 cpu_breakpoint_remove_by_ref(cpu
, bp
);
813 /* enable or disable single step mode. EXCP_DEBUG is returned by the
814 CPU loop after each instruction */
815 void cpu_single_step(CPUState
*cpu
, int enabled
)
817 if (cpu
->singlestep_enabled
!= enabled
) {
818 cpu
->singlestep_enabled
= enabled
;
820 kvm_update_guest_debug(cpu
, 0);
822 /* must flush all the translated code to avoid inconsistencies */
823 /* XXX: only flush what is necessary */
829 void QEMU_NORETURN
cpu_abort(CPUState
*cpu
, const char *fmt
, ...)
836 fprintf(stderr
, "qemu: fatal: ");
837 vfprintf(stderr
, fmt
, ap
);
838 fprintf(stderr
, "\n");
839 cpu_dump_state(cpu
, stderr
, fprintf
, CPU_DUMP_FPU
| CPU_DUMP_CCOP
);
840 if (qemu_log_enabled()) {
841 qemu_log("qemu: fatal: ");
842 qemu_log_vprintf(fmt
, ap2
);
844 log_cpu_state(cpu
, CPU_DUMP_FPU
| CPU_DUMP_CCOP
);
850 #if defined(CONFIG_USER_ONLY)
852 struct sigaction act
;
853 sigfillset(&act
.sa_mask
);
854 act
.sa_handler
= SIG_DFL
;
855 sigaction(SIGABRT
, &act
, NULL
);
861 #if !defined(CONFIG_USER_ONLY)
862 /* Called from RCU critical section */
863 static RAMBlock
*qemu_get_ram_block(ram_addr_t addr
)
867 block
= atomic_rcu_read(&ram_list
.mru_block
);
868 if (block
&& addr
- block
->offset
< block
->max_length
) {
871 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
872 if (addr
- block
->offset
< block
->max_length
) {
877 fprintf(stderr
, "Bad ram offset %" PRIx64
"\n", (uint64_t)addr
);
881 /* It is safe to write mru_block outside the iothread lock. This
886 * xxx removed from list
890 * call_rcu(reclaim_ramblock, xxx);
893 * atomic_rcu_set is not needed here. The block was already published
894 * when it was placed into the list. Here we're just making an extra
895 * copy of the pointer.
897 ram_list
.mru_block
= block
;
901 static void tlb_reset_dirty_range_all(ram_addr_t start
, ram_addr_t length
)
907 end
= TARGET_PAGE_ALIGN(start
+ length
);
908 start
&= TARGET_PAGE_MASK
;
911 block
= qemu_get_ram_block(start
);
912 assert(block
== qemu_get_ram_block(end
- 1));
913 start1
= (uintptr_t)ramblock_ptr(block
, start
- block
->offset
);
914 cpu_tlb_reset_dirty_all(start1
, length
);
918 /* Note: start and end must be within the same ram block. */
919 bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start
,
923 unsigned long end
, page
;
930 end
= TARGET_PAGE_ALIGN(start
+ length
) >> TARGET_PAGE_BITS
;
931 page
= start
>> TARGET_PAGE_BITS
;
932 dirty
= bitmap_test_and_clear_atomic(ram_list
.dirty_memory
[client
],
935 if (dirty
&& tcg_enabled()) {
936 tlb_reset_dirty_range_all(start
, length
);
942 /* Called from RCU critical section */
943 hwaddr
memory_region_section_get_iotlb(CPUState
*cpu
,
944 MemoryRegionSection
*section
,
946 hwaddr paddr
, hwaddr xlat
,
948 target_ulong
*address
)
953 if (memory_region_is_ram(section
->mr
)) {
955 iotlb
= (memory_region_get_ram_addr(section
->mr
) & TARGET_PAGE_MASK
)
957 if (!section
->readonly
) {
958 iotlb
|= PHYS_SECTION_NOTDIRTY
;
960 iotlb
|= PHYS_SECTION_ROM
;
963 iotlb
= section
- section
->address_space
->dispatch
->map
.sections
;
967 /* Make accesses to pages with watchpoints go via the
968 watchpoint trap routines. */
969 QTAILQ_FOREACH(wp
, &cpu
->watchpoints
, entry
) {
970 if (cpu_watchpoint_address_matches(wp
, vaddr
, TARGET_PAGE_SIZE
)) {
971 /* Avoid trapping reads of pages with a write breakpoint. */
972 if ((prot
& PAGE_WRITE
) || (wp
->flags
& BP_MEM_READ
)) {
973 iotlb
= PHYS_SECTION_WATCH
+ paddr
;
974 *address
|= TLB_MMIO
;
982 #endif /* defined(CONFIG_USER_ONLY) */
984 #if !defined(CONFIG_USER_ONLY)
986 static int subpage_register (subpage_t
*mmio
, uint32_t start
, uint32_t end
,
988 static subpage_t
*subpage_init(AddressSpace
*as
, hwaddr base
);
990 static void *(*phys_mem_alloc
)(size_t size
, uint64_t *align
) =
994 * Set a custom physical guest memory allocator.
995 * Accelerators with unusual needs may need this. Hopefully, we can
996 * get rid of it eventually.
998 void phys_mem_set_alloc(void *(*alloc
)(size_t, uint64_t *align
))
1000 phys_mem_alloc
= alloc
;
1003 static uint16_t phys_section_add(PhysPageMap
*map
,
1004 MemoryRegionSection
*section
)
1006 /* The physical section number is ORed with a page-aligned
1007 * pointer to produce the iotlb entries. Thus it should
1008 * never overflow into the page-aligned value.
1010 assert(map
->sections_nb
< TARGET_PAGE_SIZE
);
1012 if (map
->sections_nb
== map
->sections_nb_alloc
) {
1013 map
->sections_nb_alloc
= MAX(map
->sections_nb_alloc
* 2, 16);
1014 map
->sections
= g_renew(MemoryRegionSection
, map
->sections
,
1015 map
->sections_nb_alloc
);
1017 map
->sections
[map
->sections_nb
] = *section
;
1018 memory_region_ref(section
->mr
);
1019 return map
->sections_nb
++;
1022 static void phys_section_destroy(MemoryRegion
*mr
)
1024 memory_region_unref(mr
);
1027 subpage_t
*subpage
= container_of(mr
, subpage_t
, iomem
);
1028 object_unref(OBJECT(&subpage
->iomem
));
1033 static void phys_sections_free(PhysPageMap
*map
)
1035 while (map
->sections_nb
> 0) {
1036 MemoryRegionSection
*section
= &map
->sections
[--map
->sections_nb
];
1037 phys_section_destroy(section
->mr
);
1039 g_free(map
->sections
);
1043 static void register_subpage(AddressSpaceDispatch
*d
, MemoryRegionSection
*section
)
1046 hwaddr base
= section
->offset_within_address_space
1048 MemoryRegionSection
*existing
= phys_page_find(d
->phys_map
, base
,
1049 d
->map
.nodes
, d
->map
.sections
);
1050 MemoryRegionSection subsection
= {
1051 .offset_within_address_space
= base
,
1052 .size
= int128_make64(TARGET_PAGE_SIZE
),
1056 assert(existing
->mr
->subpage
|| existing
->mr
== &io_mem_unassigned
);
1058 if (!(existing
->mr
->subpage
)) {
1059 subpage
= subpage_init(d
->as
, base
);
1060 subsection
.address_space
= d
->as
;
1061 subsection
.mr
= &subpage
->iomem
;
1062 phys_page_set(d
, base
>> TARGET_PAGE_BITS
, 1,
1063 phys_section_add(&d
->map
, &subsection
));
1065 subpage
= container_of(existing
->mr
, subpage_t
, iomem
);
1067 start
= section
->offset_within_address_space
& ~TARGET_PAGE_MASK
;
1068 end
= start
+ int128_get64(section
->size
) - 1;
1069 subpage_register(subpage
, start
, end
,
1070 phys_section_add(&d
->map
, section
));
1074 static void register_multipage(AddressSpaceDispatch
*d
,
1075 MemoryRegionSection
*section
)
1077 hwaddr start_addr
= section
->offset_within_address_space
;
1078 uint16_t section_index
= phys_section_add(&d
->map
, section
);
1079 uint64_t num_pages
= int128_get64(int128_rshift(section
->size
,
1083 phys_page_set(d
, start_addr
>> TARGET_PAGE_BITS
, num_pages
, section_index
);
1086 static void mem_add(MemoryListener
*listener
, MemoryRegionSection
*section
)
1088 AddressSpace
*as
= container_of(listener
, AddressSpace
, dispatch_listener
);
1089 AddressSpaceDispatch
*d
= as
->next_dispatch
;
1090 MemoryRegionSection now
= *section
, remain
= *section
;
1091 Int128 page_size
= int128_make64(TARGET_PAGE_SIZE
);
1093 if (now
.offset_within_address_space
& ~TARGET_PAGE_MASK
) {
1094 uint64_t left
= TARGET_PAGE_ALIGN(now
.offset_within_address_space
)
1095 - now
.offset_within_address_space
;
1097 now
.size
= int128_min(int128_make64(left
), now
.size
);
1098 register_subpage(d
, &now
);
1100 now
.size
= int128_zero();
1102 while (int128_ne(remain
.size
, now
.size
)) {
1103 remain
.size
= int128_sub(remain
.size
, now
.size
);
1104 remain
.offset_within_address_space
+= int128_get64(now
.size
);
1105 remain
.offset_within_region
+= int128_get64(now
.size
);
1107 if (int128_lt(remain
.size
, page_size
)) {
1108 register_subpage(d
, &now
);
1109 } else if (remain
.offset_within_address_space
& ~TARGET_PAGE_MASK
) {
1110 now
.size
= page_size
;
1111 register_subpage(d
, &now
);
1113 now
.size
= int128_and(now
.size
, int128_neg(page_size
));
1114 register_multipage(d
, &now
);
/* Drain KVM's coalesced-MMIO ring buffer, when KVM is in use. */
void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled()) {
        kvm_flush_coalesced_mmio_buffer();
    }
}
1125 void qemu_mutex_lock_ramlist(void)
1127 qemu_mutex_lock(&ram_list
.mutex
);
1130 void qemu_mutex_unlock_ramlist(void)
1132 qemu_mutex_unlock(&ram_list
.mutex
);
1137 #include <sys/vfs.h>
1139 #define HUGETLBFS_MAGIC 0x958458f6
1141 static long gethugepagesize(const char *path
, Error
**errp
)
1147 ret
= statfs(path
, &fs
);
1148 } while (ret
!= 0 && errno
== EINTR
);
1151 error_setg_errno(errp
, errno
, "failed to get page size of file %s",
1156 if (fs
.f_type
!= HUGETLBFS_MAGIC
)
1157 fprintf(stderr
, "Warning: path not on HugeTLBFS: %s\n", path
);
1162 static void *file_ram_alloc(RAMBlock
*block
,
1168 char *sanitized_name
;
1170 void * volatile area
= NULL
;
1173 Error
*local_err
= NULL
;
1175 hpagesize
= gethugepagesize(path
, &local_err
);
1177 error_propagate(errp
, local_err
);
1180 block
->mr
->align
= hpagesize
;
1182 if (memory
< hpagesize
) {
1183 error_setg(errp
, "memory size 0x" RAM_ADDR_FMT
" must be equal to "
1184 "or larger than huge page size 0x%" PRIx64
,
1189 if (kvm_enabled() && !kvm_has_sync_mmu()) {
1191 "host lacks kvm mmu notifiers, -mem-path unsupported");
1195 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
1196 sanitized_name
= g_strdup(memory_region_name(block
->mr
));
1197 for (c
= sanitized_name
; *c
!= '\0'; c
++) {
1202 filename
= g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path
,
1204 g_free(sanitized_name
);
1206 fd
= mkstemp(filename
);
1208 error_setg_errno(errp
, errno
,
1209 "unable to create backing store for hugepages");
1216 memory
= (memory
+hpagesize
-1) & ~(hpagesize
-1);
1219 * ftruncate is not supported by hugetlbfs in older
1220 * hosts, so don't bother bailing out on errors.
1221 * If anything goes wrong with it under other filesystems,
1224 if (ftruncate(fd
, memory
)) {
1225 perror("ftruncate");
1228 area
= mmap(0, memory
, PROT_READ
| PROT_WRITE
,
1229 (block
->flags
& RAM_SHARED
? MAP_SHARED
: MAP_PRIVATE
),
1231 if (area
== MAP_FAILED
) {
1232 error_setg_errno(errp
, errno
,
1233 "unable to map backing store for hugepages");
1239 os_mem_prealloc(fd
, area
, memory
);
1247 error_report("%s", error_get_pretty(*errp
));
1254 /* Called with the ramlist lock held. */
1255 static ram_addr_t
find_ram_offset(ram_addr_t size
)
1257 RAMBlock
*block
, *next_block
;
1258 ram_addr_t offset
= RAM_ADDR_MAX
, mingap
= RAM_ADDR_MAX
;
1260 assert(size
!= 0); /* it would hand out same offset multiple times */
1262 if (QLIST_EMPTY_RCU(&ram_list
.blocks
)) {
1266 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1267 ram_addr_t end
, next
= RAM_ADDR_MAX
;
1269 end
= block
->offset
+ block
->max_length
;
1271 QLIST_FOREACH_RCU(next_block
, &ram_list
.blocks
, next
) {
1272 if (next_block
->offset
>= end
) {
1273 next
= MIN(next
, next_block
->offset
);
1276 if (next
- end
>= size
&& next
- end
< mingap
) {
1278 mingap
= next
- end
;
1282 if (offset
== RAM_ADDR_MAX
) {
1283 fprintf(stderr
, "Failed to find gap of requested size: %" PRIu64
"\n",
1291 ram_addr_t
last_ram_offset(void)
1294 ram_addr_t last
= 0;
1297 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1298 last
= MAX(last
, block
->offset
+ block
->max_length
);
1304 static void qemu_ram_setup_dump(void *addr
, ram_addr_t size
)
1308 /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
1309 if (!machine_dump_guest_core(current_machine
)) {
1310 ret
= qemu_madvise(addr
, size
, QEMU_MADV_DONTDUMP
);
1312 perror("qemu_madvise");
1313 fprintf(stderr
, "madvise doesn't support MADV_DONTDUMP, "
1314 "but dump_guest_core=off specified\n");
1319 /* Called within an RCU critical section, or while the ramlist lock
1322 static RAMBlock
*find_ram_block(ram_addr_t addr
)
1326 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1327 if (block
->offset
== addr
) {
1335 /* Called with iothread lock held. */
1336 void qemu_ram_set_idstr(ram_addr_t addr
, const char *name
, DeviceState
*dev
)
1338 RAMBlock
*new_block
, *block
;
1341 new_block
= find_ram_block(addr
);
1343 assert(!new_block
->idstr
[0]);
1346 char *id
= qdev_get_dev_path(dev
);
1348 snprintf(new_block
->idstr
, sizeof(new_block
->idstr
), "%s/", id
);
1352 pstrcat(new_block
->idstr
, sizeof(new_block
->idstr
), name
);
1354 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1355 if (block
!= new_block
&& !strcmp(block
->idstr
, new_block
->idstr
)) {
1356 fprintf(stderr
, "RAMBlock \"%s\" already registered, abort!\n",
1364 /* Called with iothread lock held. */
1365 void qemu_ram_unset_idstr(ram_addr_t addr
)
1369 /* FIXME: arch_init.c assumes that this is not called throughout
1370 * migration. Ignore the problem since hot-unplug during migration
1371 * does not work anyway.
1375 block
= find_ram_block(addr
);
1377 memset(block
->idstr
, 0, sizeof(block
->idstr
));
1382 static int memory_try_enable_merging(void *addr
, size_t len
)
1384 if (!machine_mem_merge(current_machine
)) {
1385 /* disabled by the user */
1389 return qemu_madvise(addr
, len
, QEMU_MADV_MERGEABLE
);
1392 /* Only legal before guest might have detected the memory size: e.g. on
1393 * incoming migration, or right after reset.
1395 * As memory core doesn't know how is memory accessed, it is up to
1396 * resize callback to update device state and/or add assertions to detect
1397 * misuse, if necessary.
1399 int qemu_ram_resize(ram_addr_t base
, ram_addr_t newsize
, Error
**errp
)
1401 RAMBlock
*block
= find_ram_block(base
);
1405 newsize
= TARGET_PAGE_ALIGN(newsize
);
1407 if (block
->used_length
== newsize
) {
1411 if (!(block
->flags
& RAM_RESIZEABLE
)) {
1412 error_setg_errno(errp
, EINVAL
,
1413 "Length mismatch: %s: 0x" RAM_ADDR_FMT
1414 " in != 0x" RAM_ADDR_FMT
, block
->idstr
,
1415 newsize
, block
->used_length
);
1419 if (block
->max_length
< newsize
) {
1420 error_setg_errno(errp
, EINVAL
,
1421 "Length too large: %s: 0x" RAM_ADDR_FMT
1422 " > 0x" RAM_ADDR_FMT
, block
->idstr
,
1423 newsize
, block
->max_length
);
1427 cpu_physical_memory_clear_dirty_range(block
->offset
, block
->used_length
);
1428 block
->used_length
= newsize
;
1429 cpu_physical_memory_set_dirty_range(block
->offset
, block
->used_length
,
1431 memory_region_set_size(block
->mr
, newsize
);
1432 if (block
->resized
) {
1433 block
->resized(block
->idstr
, newsize
, block
->host
);
1438 static ram_addr_t
ram_block_add(RAMBlock
*new_block
, Error
**errp
)
1441 RAMBlock
*last_block
= NULL
;
1442 ram_addr_t old_ram_size
, new_ram_size
;
1444 old_ram_size
= last_ram_offset() >> TARGET_PAGE_BITS
;
1446 qemu_mutex_lock_ramlist();
1447 new_block
->offset
= find_ram_offset(new_block
->max_length
);
1449 if (!new_block
->host
) {
1450 if (xen_enabled()) {
1451 xen_ram_alloc(new_block
->offset
, new_block
->max_length
,
1454 new_block
->host
= phys_mem_alloc(new_block
->max_length
,
1455 &new_block
->mr
->align
);
1456 if (!new_block
->host
) {
1457 error_setg_errno(errp
, errno
,
1458 "cannot set up guest memory '%s'",
1459 memory_region_name(new_block
->mr
));
1460 qemu_mutex_unlock_ramlist();
1463 memory_try_enable_merging(new_block
->host
, new_block
->max_length
);
1467 new_ram_size
= MAX(old_ram_size
,
1468 (new_block
->offset
+ new_block
->max_length
) >> TARGET_PAGE_BITS
);
1469 if (new_ram_size
> old_ram_size
) {
1470 migration_bitmap_extend(old_ram_size
, new_ram_size
);
1472 /* Keep the list sorted from biggest to smallest block. Unlike QTAILQ,
1473 * QLIST (which has an RCU-friendly variant) does not have insertion at
1474 * tail, so save the last element in last_block.
1476 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1478 if (block
->max_length
< new_block
->max_length
) {
1483 QLIST_INSERT_BEFORE_RCU(block
, new_block
, next
);
1484 } else if (last_block
) {
1485 QLIST_INSERT_AFTER_RCU(last_block
, new_block
, next
);
1486 } else { /* list is empty */
1487 QLIST_INSERT_HEAD_RCU(&ram_list
.blocks
, new_block
, next
);
1489 ram_list
.mru_block
= NULL
;
1491 /* Write list before version */
1494 qemu_mutex_unlock_ramlist();
1496 new_ram_size
= last_ram_offset() >> TARGET_PAGE_BITS
;
1498 if (new_ram_size
> old_ram_size
) {
1501 /* ram_list.dirty_memory[] is protected by the iothread lock. */
1502 for (i
= 0; i
< DIRTY_MEMORY_NUM
; i
++) {
1503 ram_list
.dirty_memory
[i
] =
1504 bitmap_zero_extend(ram_list
.dirty_memory
[i
],
1505 old_ram_size
, new_ram_size
);
1508 cpu_physical_memory_set_dirty_range(new_block
->offset
,
1509 new_block
->used_length
,
1512 if (new_block
->host
) {
1513 qemu_ram_setup_dump(new_block
->host
, new_block
->max_length
);
1514 qemu_madvise(new_block
->host
, new_block
->max_length
, QEMU_MADV_HUGEPAGE
);
1515 qemu_madvise(new_block
->host
, new_block
->max_length
, QEMU_MADV_DONTFORK
);
1516 if (kvm_enabled()) {
1517 kvm_setup_guest_memory(new_block
->host
, new_block
->max_length
);
1521 return new_block
->offset
;
1525 ram_addr_t
qemu_ram_alloc_from_file(ram_addr_t size
, MemoryRegion
*mr
,
1526 bool share
, const char *mem_path
,
1529 RAMBlock
*new_block
;
1531 Error
*local_err
= NULL
;
1533 if (xen_enabled()) {
1534 error_setg(errp
, "-mem-path not supported with Xen");
1538 if (phys_mem_alloc
!= qemu_anon_ram_alloc
) {
1540 * file_ram_alloc() needs to allocate just like
1541 * phys_mem_alloc, but we haven't bothered to provide
1545 "-mem-path not supported with this accelerator");
1549 size
= TARGET_PAGE_ALIGN(size
);
1550 new_block
= g_malloc0(sizeof(*new_block
));
1552 new_block
->used_length
= size
;
1553 new_block
->max_length
= size
;
1554 new_block
->flags
= share
? RAM_SHARED
: 0;
1555 new_block
->host
= file_ram_alloc(new_block
, size
,
1557 if (!new_block
->host
) {
1562 addr
= ram_block_add(new_block
, &local_err
);
1565 error_propagate(errp
, local_err
);
1573 ram_addr_t
qemu_ram_alloc_internal(ram_addr_t size
, ram_addr_t max_size
,
1574 void (*resized
)(const char*,
1577 void *host
, bool resizeable
,
1578 MemoryRegion
*mr
, Error
**errp
)
1580 RAMBlock
*new_block
;
1582 Error
*local_err
= NULL
;
1584 size
= TARGET_PAGE_ALIGN(size
);
1585 max_size
= TARGET_PAGE_ALIGN(max_size
);
1586 new_block
= g_malloc0(sizeof(*new_block
));
1588 new_block
->resized
= resized
;
1589 new_block
->used_length
= size
;
1590 new_block
->max_length
= max_size
;
1591 assert(max_size
>= size
);
1593 new_block
->host
= host
;
1595 new_block
->flags
|= RAM_PREALLOC
;
1598 new_block
->flags
|= RAM_RESIZEABLE
;
1600 addr
= ram_block_add(new_block
, &local_err
);
1603 error_propagate(errp
, local_err
);
1609 ram_addr_t
qemu_ram_alloc_from_ptr(ram_addr_t size
, void *host
,
1610 MemoryRegion
*mr
, Error
**errp
)
1612 return qemu_ram_alloc_internal(size
, size
, NULL
, host
, false, mr
, errp
);
1615 ram_addr_t
qemu_ram_alloc(ram_addr_t size
, MemoryRegion
*mr
, Error
**errp
)
1617 return qemu_ram_alloc_internal(size
, size
, NULL
, NULL
, false, mr
, errp
);
1620 ram_addr_t
qemu_ram_alloc_resizeable(ram_addr_t size
, ram_addr_t maxsz
,
1621 void (*resized
)(const char*,
1624 MemoryRegion
*mr
, Error
**errp
)
1626 return qemu_ram_alloc_internal(size
, maxsz
, resized
, NULL
, true, mr
, errp
);
1629 void qemu_ram_free_from_ptr(ram_addr_t addr
)
1633 qemu_mutex_lock_ramlist();
1634 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1635 if (addr
== block
->offset
) {
1636 QLIST_REMOVE_RCU(block
, next
);
1637 ram_list
.mru_block
= NULL
;
1638 /* Write list before version */
1641 g_free_rcu(block
, rcu
);
1645 qemu_mutex_unlock_ramlist();
1648 static void reclaim_ramblock(RAMBlock
*block
)
1650 if (block
->flags
& RAM_PREALLOC
) {
1652 } else if (xen_enabled()) {
1653 xen_invalidate_map_cache_entry(block
->host
);
1655 } else if (block
->fd
>= 0) {
1656 munmap(block
->host
, block
->max_length
);
1660 qemu_anon_ram_free(block
->host
, block
->max_length
);
1665 void qemu_ram_free(ram_addr_t addr
)
1669 qemu_mutex_lock_ramlist();
1670 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1671 if (addr
== block
->offset
) {
1672 QLIST_REMOVE_RCU(block
, next
);
1673 ram_list
.mru_block
= NULL
;
1674 /* Write list before version */
1677 call_rcu(block
, reclaim_ramblock
, rcu
);
1681 qemu_mutex_unlock_ramlist();
1685 void qemu_ram_remap(ram_addr_t addr
, ram_addr_t length
)
1692 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1693 offset
= addr
- block
->offset
;
1694 if (offset
< block
->max_length
) {
1695 vaddr
= ramblock_ptr(block
, offset
);
1696 if (block
->flags
& RAM_PREALLOC
) {
1698 } else if (xen_enabled()) {
1702 if (block
->fd
>= 0) {
1703 flags
|= (block
->flags
& RAM_SHARED
?
1704 MAP_SHARED
: MAP_PRIVATE
);
1705 area
= mmap(vaddr
, length
, PROT_READ
| PROT_WRITE
,
1706 flags
, block
->fd
, offset
);
1709 * Remap needs to match alloc. Accelerators that
1710 * set phys_mem_alloc never remap. If they did,
1711 * we'd need a remap hook here.
1713 assert(phys_mem_alloc
== qemu_anon_ram_alloc
);
1715 flags
|= MAP_PRIVATE
| MAP_ANONYMOUS
;
1716 area
= mmap(vaddr
, length
, PROT_READ
| PROT_WRITE
,
1719 if (area
!= vaddr
) {
1720 fprintf(stderr
, "Could not remap addr: "
1721 RAM_ADDR_FMT
"@" RAM_ADDR_FMT
"\n",
1725 memory_try_enable_merging(vaddr
, length
);
1726 qemu_ram_setup_dump(vaddr
, length
);
1731 #endif /* !_WIN32 */
1733 int qemu_get_ram_fd(ram_addr_t addr
)
1739 block
= qemu_get_ram_block(addr
);
1745 void *qemu_get_ram_block_host_ptr(ram_addr_t addr
)
1751 block
= qemu_get_ram_block(addr
);
1752 ptr
= ramblock_ptr(block
, 0);
1757 /* Return a host pointer to ram allocated with qemu_ram_alloc.
1758 * This should not be used for general purpose DMA. Use address_space_map
1759 * or address_space_rw instead. For local memory (e.g. video ram) that the
1760 * device owns, use memory_region_get_ram_ptr.
1762 * By the time this function returns, the returned pointer is not protected
1763 * by RCU anymore. If the caller is not within an RCU critical section and
1764 * does not hold the iothread lock, it must have other means of protecting the
1765 * pointer, such as a reference to the region that includes the incoming
1768 void *qemu_get_ram_ptr(ram_addr_t addr
)
1774 block
= qemu_get_ram_block(addr
);
1776 if (xen_enabled() && block
->host
== NULL
) {
1777 /* We need to check if the requested address is in the RAM
1778 * because we don't want to map the entire memory in QEMU.
1779 * In that case just map until the end of the page.
1781 if (block
->offset
== 0) {
1782 ptr
= xen_map_cache(addr
, 0, 0);
1786 block
->host
= xen_map_cache(block
->offset
, block
->max_length
, 1);
1788 ptr
= ramblock_ptr(block
, addr
- block
->offset
);
1795 /* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
1796 * but takes a size argument.
1798 * By the time this function returns, the returned pointer is not protected
1799 * by RCU anymore. If the caller is not within an RCU critical section and
1800 * does not hold the iothread lock, it must have other means of protecting the
1801 * pointer, such as a reference to the region that includes the incoming
1804 static void *qemu_ram_ptr_length(ram_addr_t addr
, hwaddr
*size
)
1810 if (xen_enabled()) {
1811 return xen_map_cache(addr
, *size
, 1);
1815 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1816 if (addr
- block
->offset
< block
->max_length
) {
1817 if (addr
- block
->offset
+ *size
> block
->max_length
)
1818 *size
= block
->max_length
- addr
+ block
->offset
;
1819 ptr
= ramblock_ptr(block
, addr
- block
->offset
);
1825 fprintf(stderr
, "Bad ram offset %" PRIx64
"\n", (uint64_t)addr
);
1830 /* Some of the softmmu routines need to translate from a host pointer
1831 * (typically a TLB entry) back to a ram offset.
1833 * By the time this function returns, the returned pointer is not protected
1834 * by RCU anymore. If the caller is not within an RCU critical section and
1835 * does not hold the iothread lock, it must have other means of protecting the
1836 * pointer, such as a reference to the region that includes the incoming
1839 MemoryRegion
*qemu_ram_addr_from_host(void *ptr
, ram_addr_t
*ram_addr
)
1842 uint8_t *host
= ptr
;
1845 if (xen_enabled()) {
1847 *ram_addr
= xen_ram_addr_from_mapcache(ptr
);
1848 mr
= qemu_get_ram_block(*ram_addr
)->mr
;
1854 block
= atomic_rcu_read(&ram_list
.mru_block
);
1855 if (block
&& block
->host
&& host
- block
->host
< block
->max_length
) {
1859 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1860 /* This case append when the block is not mapped. */
1861 if (block
->host
== NULL
) {
1864 if (host
- block
->host
< block
->max_length
) {
1873 *ram_addr
= block
->offset
+ (host
- block
->host
);
1879 static void notdirty_mem_write(void *opaque
, hwaddr ram_addr
,
1880 uint64_t val
, unsigned size
)
1882 if (!cpu_physical_memory_get_dirty_flag(ram_addr
, DIRTY_MEMORY_CODE
)) {
1883 tb_invalidate_phys_page_fast(ram_addr
, size
);
1887 stb_p(qemu_get_ram_ptr(ram_addr
), val
);
1890 stw_p(qemu_get_ram_ptr(ram_addr
), val
);
1893 stl_p(qemu_get_ram_ptr(ram_addr
), val
);
1898 /* Set both VGA and migration bits for simplicity and to remove
1899 * the notdirty callback faster.
1901 cpu_physical_memory_set_dirty_range(ram_addr
, size
,
1902 DIRTY_CLIENTS_NOCODE
);
1903 /* we remove the notdirty callback only if the code has been
1905 if (!cpu_physical_memory_is_clean(ram_addr
)) {
1906 CPUArchState
*env
= current_cpu
->env_ptr
;
1907 tlb_set_dirty(env
, current_cpu
->mem_io_vaddr
);
1911 static bool notdirty_mem_accepts(void *opaque
, hwaddr addr
,
1912 unsigned size
, bool is_write
)
1917 static const MemoryRegionOps notdirty_mem_ops
= {
1918 .write
= notdirty_mem_write
,
1919 .valid
.accepts
= notdirty_mem_accepts
,
1920 .endianness
= DEVICE_NATIVE_ENDIAN
,
1923 /* Generate a debug exception if a watchpoint has been hit. */
1924 static void check_watchpoint(int offset
, int len
, MemTxAttrs attrs
, int flags
)
1926 CPUState
*cpu
= current_cpu
;
1927 CPUArchState
*env
= cpu
->env_ptr
;
1928 target_ulong pc
, cs_base
;
1933 if (cpu
->watchpoint_hit
) {
1934 /* We re-entered the check after replacing the TB. Now raise
1935 * the debug interrupt so that is will trigger after the
1936 * current instruction. */
1937 cpu_interrupt(cpu
, CPU_INTERRUPT_DEBUG
);
1940 vaddr
= (cpu
->mem_io_vaddr
& TARGET_PAGE_MASK
) + offset
;
1941 QTAILQ_FOREACH(wp
, &cpu
->watchpoints
, entry
) {
1942 if (cpu_watchpoint_address_matches(wp
, vaddr
, len
)
1943 && (wp
->flags
& flags
)) {
1944 if (flags
== BP_MEM_READ
) {
1945 wp
->flags
|= BP_WATCHPOINT_HIT_READ
;
1947 wp
->flags
|= BP_WATCHPOINT_HIT_WRITE
;
1949 wp
->hitaddr
= vaddr
;
1950 wp
->hitattrs
= attrs
;
1951 if (!cpu
->watchpoint_hit
) {
1952 cpu
->watchpoint_hit
= wp
;
1953 tb_check_watchpoint(cpu
);
1954 if (wp
->flags
& BP_STOP_BEFORE_ACCESS
) {
1955 cpu
->exception_index
= EXCP_DEBUG
;
1958 cpu_get_tb_cpu_state(env
, &pc
, &cs_base
, &cpu_flags
);
1959 tb_gen_code(cpu
, pc
, cs_base
, cpu_flags
, 1);
1960 cpu_resume_from_signal(cpu
, NULL
);
1964 wp
->flags
&= ~BP_WATCHPOINT_HIT
;
1969 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
1970 so these check for a hit then pass through to the normal out-of-line
1972 static MemTxResult
watch_mem_read(void *opaque
, hwaddr addr
, uint64_t *pdata
,
1973 unsigned size
, MemTxAttrs attrs
)
1978 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, size
, attrs
, BP_MEM_READ
);
1981 data
= address_space_ldub(&address_space_memory
, addr
, attrs
, &res
);
1984 data
= address_space_lduw(&address_space_memory
, addr
, attrs
, &res
);
1987 data
= address_space_ldl(&address_space_memory
, addr
, attrs
, &res
);
1995 static MemTxResult
watch_mem_write(void *opaque
, hwaddr addr
,
1996 uint64_t val
, unsigned size
,
2001 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, size
, attrs
, BP_MEM_WRITE
);
2004 address_space_stb(&address_space_memory
, addr
, val
, attrs
, &res
);
2007 address_space_stw(&address_space_memory
, addr
, val
, attrs
, &res
);
2010 address_space_stl(&address_space_memory
, addr
, val
, attrs
, &res
);
2017 static const MemoryRegionOps watch_mem_ops
= {
2018 .read_with_attrs
= watch_mem_read
,
2019 .write_with_attrs
= watch_mem_write
,
2020 .endianness
= DEVICE_NATIVE_ENDIAN
,
2023 static MemTxResult
subpage_read(void *opaque
, hwaddr addr
, uint64_t *data
,
2024 unsigned len
, MemTxAttrs attrs
)
2026 subpage_t
*subpage
= opaque
;
2030 #if defined(DEBUG_SUBPAGE)
2031 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
"\n", __func__
,
2032 subpage
, len
, addr
);
2034 res
= address_space_read(subpage
->as
, addr
+ subpage
->base
,
2041 *data
= ldub_p(buf
);
2044 *data
= lduw_p(buf
);
2057 static MemTxResult
subpage_write(void *opaque
, hwaddr addr
,
2058 uint64_t value
, unsigned len
, MemTxAttrs attrs
)
2060 subpage_t
*subpage
= opaque
;
2063 #if defined(DEBUG_SUBPAGE)
2064 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
2065 " value %"PRIx64
"\n",
2066 __func__
, subpage
, len
, addr
, value
);
2084 return address_space_write(subpage
->as
, addr
+ subpage
->base
,
2088 static bool subpage_accepts(void *opaque
, hwaddr addr
,
2089 unsigned len
, bool is_write
)
2091 subpage_t
*subpage
= opaque
;
2092 #if defined(DEBUG_SUBPAGE)
2093 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx
"\n",
2094 __func__
, subpage
, is_write
? 'w' : 'r', len
, addr
);
2097 return address_space_access_valid(subpage
->as
, addr
+ subpage
->base
,
2101 static const MemoryRegionOps subpage_ops
= {
2102 .read_with_attrs
= subpage_read
,
2103 .write_with_attrs
= subpage_write
,
2104 .impl
.min_access_size
= 1,
2105 .impl
.max_access_size
= 8,
2106 .valid
.min_access_size
= 1,
2107 .valid
.max_access_size
= 8,
2108 .valid
.accepts
= subpage_accepts
,
2109 .endianness
= DEVICE_NATIVE_ENDIAN
,
2112 static int subpage_register (subpage_t
*mmio
, uint32_t start
, uint32_t end
,
2117 if (start
>= TARGET_PAGE_SIZE
|| end
>= TARGET_PAGE_SIZE
)
2119 idx
= SUBPAGE_IDX(start
);
2120 eidx
= SUBPAGE_IDX(end
);
2121 #if defined(DEBUG_SUBPAGE)
2122 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
2123 __func__
, mmio
, start
, end
, idx
, eidx
, section
);
2125 for (; idx
<= eidx
; idx
++) {
2126 mmio
->sub_section
[idx
] = section
;
2132 static subpage_t
*subpage_init(AddressSpace
*as
, hwaddr base
)
2136 mmio
= g_malloc0(sizeof(subpage_t
));
2140 memory_region_init_io(&mmio
->iomem
, NULL
, &subpage_ops
, mmio
,
2141 NULL
, TARGET_PAGE_SIZE
);
2142 mmio
->iomem
.subpage
= true;
2143 #if defined(DEBUG_SUBPAGE)
2144 printf("%s: %p base " TARGET_FMT_plx
" len %08x\n", __func__
,
2145 mmio
, base
, TARGET_PAGE_SIZE
);
2147 subpage_register(mmio
, 0, TARGET_PAGE_SIZE
-1, PHYS_SECTION_UNASSIGNED
);
2152 static uint16_t dummy_section(PhysPageMap
*map
, AddressSpace
*as
,
2156 MemoryRegionSection section
= {
2157 .address_space
= as
,
2159 .offset_within_address_space
= 0,
2160 .offset_within_region
= 0,
2161 .size
= int128_2_64(),
2164 return phys_section_add(map
, §ion
);
2167 MemoryRegion
*iotlb_to_region(CPUState
*cpu
, hwaddr index
)
2169 AddressSpaceDispatch
*d
= atomic_rcu_read(&cpu
->memory_dispatch
);
2170 MemoryRegionSection
*sections
= d
->map
.sections
;
2172 return sections
[index
& ~TARGET_PAGE_MASK
].mr
;
2175 static void io_mem_init(void)
2177 memory_region_init_io(&io_mem_rom
, NULL
, &unassigned_mem_ops
, NULL
, NULL
, UINT64_MAX
);
2178 memory_region_init_io(&io_mem_unassigned
, NULL
, &unassigned_mem_ops
, NULL
,
2180 memory_region_init_io(&io_mem_notdirty
, NULL
, ¬dirty_mem_ops
, NULL
,
2182 memory_region_init_io(&io_mem_watch
, NULL
, &watch_mem_ops
, NULL
,
2186 static void mem_begin(MemoryListener
*listener
)
2188 AddressSpace
*as
= container_of(listener
, AddressSpace
, dispatch_listener
);
2189 AddressSpaceDispatch
*d
= g_new0(AddressSpaceDispatch
, 1);
2192 n
= dummy_section(&d
->map
, as
, &io_mem_unassigned
);
2193 assert(n
== PHYS_SECTION_UNASSIGNED
);
2194 n
= dummy_section(&d
->map
, as
, &io_mem_notdirty
);
2195 assert(n
== PHYS_SECTION_NOTDIRTY
);
2196 n
= dummy_section(&d
->map
, as
, &io_mem_rom
);
2197 assert(n
== PHYS_SECTION_ROM
);
2198 n
= dummy_section(&d
->map
, as
, &io_mem_watch
);
2199 assert(n
== PHYS_SECTION_WATCH
);
2201 d
->phys_map
= (PhysPageEntry
) { .ptr
= PHYS_MAP_NODE_NIL
, .skip
= 1 };
2203 as
->next_dispatch
= d
;
2206 static void address_space_dispatch_free(AddressSpaceDispatch
*d
)
2208 phys_sections_free(&d
->map
);
2212 static void mem_commit(MemoryListener
*listener
)
2214 AddressSpace
*as
= container_of(listener
, AddressSpace
, dispatch_listener
);
2215 AddressSpaceDispatch
*cur
= as
->dispatch
;
2216 AddressSpaceDispatch
*next
= as
->next_dispatch
;
2218 phys_page_compact_all(next
, next
->map
.nodes_nb
);
2220 atomic_rcu_set(&as
->dispatch
, next
);
2222 call_rcu(cur
, address_space_dispatch_free
, rcu
);
2226 static void tcg_commit(MemoryListener
*listener
)
2230 /* since each CPU stores ram addresses in its TLB cache, we must
2231 reset the modified entries */
2234 /* FIXME: Disentangle the cpu.h circular files deps so we can
2235 directly get the right CPU from listener. */
2236 if (cpu
->tcg_as_listener
!= listener
) {
2239 cpu_reload_memory_map(cpu
);
2243 void address_space_init_dispatch(AddressSpace
*as
)
2245 as
->dispatch
= NULL
;
2246 as
->dispatch_listener
= (MemoryListener
) {
2248 .commit
= mem_commit
,
2249 .region_add
= mem_add
,
2250 .region_nop
= mem_add
,
2253 memory_listener_register(&as
->dispatch_listener
, as
);
2256 void address_space_unregister(AddressSpace
*as
)
2258 memory_listener_unregister(&as
->dispatch_listener
);
2261 void address_space_destroy_dispatch(AddressSpace
*as
)
2263 AddressSpaceDispatch
*d
= as
->dispatch
;
2265 atomic_rcu_set(&as
->dispatch
, NULL
);
2267 call_rcu(d
, address_space_dispatch_free
, rcu
);
2271 static void memory_map_init(void)
2273 system_memory
= g_malloc(sizeof(*system_memory
));
2275 memory_region_init(system_memory
, NULL
, "system", UINT64_MAX
);
2276 address_space_init(&address_space_memory
, system_memory
, "memory");
2278 system_io
= g_malloc(sizeof(*system_io
));
2279 memory_region_init_io(system_io
, NULL
, &unassigned_io_ops
, NULL
, "io",
2281 address_space_init(&address_space_io
, system_io
, "I/O");
2284 MemoryRegion
*get_system_memory(void)
2286 return system_memory
;
2289 MemoryRegion
*get_system_io(void)
2294 #endif /* !defined(CONFIG_USER_ONLY) */
2296 /* physical memory access (slow version, mainly for debug) */
2297 #if defined(CONFIG_USER_ONLY)
2298 int cpu_memory_rw_debug(CPUState
*cpu
, target_ulong addr
,
2299 uint8_t *buf
, int len
, int is_write
)
2306 page
= addr
& TARGET_PAGE_MASK
;
2307 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
2310 flags
= page_get_flags(page
);
2311 if (!(flags
& PAGE_VALID
))
2314 if (!(flags
& PAGE_WRITE
))
2316 /* XXX: this code should not depend on lock_user */
2317 if (!(p
= lock_user(VERIFY_WRITE
, addr
, l
, 0)))
2320 unlock_user(p
, addr
, l
);
2322 if (!(flags
& PAGE_READ
))
2324 /* XXX: this code should not depend on lock_user */
2325 if (!(p
= lock_user(VERIFY_READ
, addr
, l
, 1)))
2328 unlock_user(p
, addr
, 0);
2339 static void invalidate_and_set_dirty(MemoryRegion
*mr
, hwaddr addr
,
2342 uint8_t dirty_log_mask
= memory_region_get_dirty_log_mask(mr
);
2343 /* No early return if dirty_log_mask is or becomes 0, because
2344 * cpu_physical_memory_set_dirty_range will still call
2345 * xen_modified_memory.
2347 if (dirty_log_mask
) {
2349 cpu_physical_memory_range_includes_clean(addr
, length
, dirty_log_mask
);
2351 if (dirty_log_mask
& (1 << DIRTY_MEMORY_CODE
)) {
2352 tb_invalidate_phys_range(addr
, addr
+ length
);
2353 dirty_log_mask
&= ~(1 << DIRTY_MEMORY_CODE
);
2355 cpu_physical_memory_set_dirty_range(addr
, length
, dirty_log_mask
);
2358 static int memory_access_size(MemoryRegion
*mr
, unsigned l
, hwaddr addr
)
2360 unsigned access_size_max
= mr
->ops
->valid
.max_access_size
;
2362 /* Regions are assumed to support 1-4 byte accesses unless
2363 otherwise specified. */
2364 if (access_size_max
== 0) {
2365 access_size_max
= 4;
2368 /* Bound the maximum access by the alignment of the address. */
2369 if (!mr
->ops
->impl
.unaligned
) {
2370 unsigned align_size_max
= addr
& -addr
;
2371 if (align_size_max
!= 0 && align_size_max
< access_size_max
) {
2372 access_size_max
= align_size_max
;
2376 /* Don't attempt accesses larger than the maximum. */
2377 if (l
> access_size_max
) {
2378 l
= access_size_max
;
2381 l
= 1 << (qemu_fls(l
) - 1);
2387 static bool prepare_mmio_access(MemoryRegion
*mr
)
2389 bool unlocked
= !qemu_mutex_iothread_locked();
2390 bool release_lock
= false;
2392 if (unlocked
&& mr
->global_locking
) {
2393 qemu_mutex_lock_iothread();
2395 release_lock
= true;
2397 if (mr
->flush_coalesced_mmio
) {
2399 qemu_mutex_lock_iothread();
2401 qemu_flush_coalesced_mmio_buffer();
2403 qemu_mutex_unlock_iothread();
2407 return release_lock
;
2410 MemTxResult
address_space_rw(AddressSpace
*as
, hwaddr addr
, MemTxAttrs attrs
,
2411 uint8_t *buf
, int len
, bool is_write
)
2418 MemTxResult result
= MEMTX_OK
;
2419 bool release_lock
= false;
2424 mr
= address_space_translate(as
, addr
, &addr1
, &l
, is_write
);
2427 if (!memory_access_is_direct(mr
, is_write
)) {
2428 release_lock
|= prepare_mmio_access(mr
);
2429 l
= memory_access_size(mr
, l
, addr1
);
2430 /* XXX: could force current_cpu to NULL to avoid
2434 /* 64 bit write access */
2436 result
|= memory_region_dispatch_write(mr
, addr1
, val
, 8,
2440 /* 32 bit write access */
2442 result
|= memory_region_dispatch_write(mr
, addr1
, val
, 4,
2446 /* 16 bit write access */
2448 result
|= memory_region_dispatch_write(mr
, addr1
, val
, 2,
2452 /* 8 bit write access */
2454 result
|= memory_region_dispatch_write(mr
, addr1
, val
, 1,
2461 addr1
+= memory_region_get_ram_addr(mr
);
2463 ptr
= qemu_get_ram_ptr(addr1
);
2464 memcpy(ptr
, buf
, l
);
2465 invalidate_and_set_dirty(mr
, addr1
, l
);
2468 if (!memory_access_is_direct(mr
, is_write
)) {
2470 release_lock
|= prepare_mmio_access(mr
);
2471 l
= memory_access_size(mr
, l
, addr1
);
2474 /* 64 bit read access */
2475 result
|= memory_region_dispatch_read(mr
, addr1
, &val
, 8,
2480 /* 32 bit read access */
2481 result
|= memory_region_dispatch_read(mr
, addr1
, &val
, 4,
2486 /* 16 bit read access */
2487 result
|= memory_region_dispatch_read(mr
, addr1
, &val
, 2,
2492 /* 8 bit read access */
2493 result
|= memory_region_dispatch_read(mr
, addr1
, &val
, 1,
2502 ptr
= qemu_get_ram_ptr(mr
->ram_addr
+ addr1
);
2503 memcpy(buf
, ptr
, l
);
2508 qemu_mutex_unlock_iothread();
2509 release_lock
= false;
2521 MemTxResult
address_space_write(AddressSpace
*as
, hwaddr addr
, MemTxAttrs attrs
,
2522 const uint8_t *buf
, int len
)
2524 return address_space_rw(as
, addr
, attrs
, (uint8_t *)buf
, len
, true);
2527 MemTxResult
address_space_read(AddressSpace
*as
, hwaddr addr
, MemTxAttrs attrs
,
2528 uint8_t *buf
, int len
)
2530 return address_space_rw(as
, addr
, attrs
, buf
, len
, false);
2534 void cpu_physical_memory_rw(hwaddr addr
, uint8_t *buf
,
2535 int len
, int is_write
)
2537 address_space_rw(&address_space_memory
, addr
, MEMTXATTRS_UNSPECIFIED
,
2538 buf
, len
, is_write
);
2541 enum write_rom_type
{
2546 static inline void cpu_physical_memory_write_rom_internal(AddressSpace
*as
,
2547 hwaddr addr
, const uint8_t *buf
, int len
, enum write_rom_type type
)
2557 mr
= address_space_translate(as
, addr
, &addr1
, &l
, true);
2559 if (!(memory_region_is_ram(mr
) ||
2560 memory_region_is_romd(mr
))) {
2561 l
= memory_access_size(mr
, l
, addr1
);
2563 addr1
+= memory_region_get_ram_addr(mr
);
2565 ptr
= qemu_get_ram_ptr(addr1
);
2568 memcpy(ptr
, buf
, l
);
2569 invalidate_and_set_dirty(mr
, addr1
, l
);
2572 flush_icache_range((uintptr_t)ptr
, (uintptr_t)ptr
+ l
);
2583 /* used for ROM loading : can write in RAM and ROM */
2584 void cpu_physical_memory_write_rom(AddressSpace
*as
, hwaddr addr
,
2585 const uint8_t *buf
, int len
)
2587 cpu_physical_memory_write_rom_internal(as
, addr
, buf
, len
, WRITE_DATA
);
2590 void cpu_flush_icache_range(hwaddr start
, int len
)
2593 * This function should do the same thing as an icache flush that was
2594 * triggered from within the guest. For TCG we are always cache coherent,
2595 * so there is no need to flush anything. For KVM / Xen we need to flush
2596 * the host's instruction cache at least.
2598 if (tcg_enabled()) {
2602 cpu_physical_memory_write_rom_internal(&address_space_memory
,
2603 start
, NULL
, len
, FLUSH_CACHE
);
2614 static BounceBuffer bounce
;
2616 typedef struct MapClient
{
2618 QLIST_ENTRY(MapClient
) link
;
2621 QemuMutex map_client_list_lock
;
2622 static QLIST_HEAD(map_client_list
, MapClient
) map_client_list
2623 = QLIST_HEAD_INITIALIZER(map_client_list
);
2625 static void cpu_unregister_map_client_do(MapClient
*client
)
2627 QLIST_REMOVE(client
, link
);
2631 static void cpu_notify_map_clients_locked(void)
2635 while (!QLIST_EMPTY(&map_client_list
)) {
2636 client
= QLIST_FIRST(&map_client_list
);
2637 qemu_bh_schedule(client
->bh
);
2638 cpu_unregister_map_client_do(client
);
2642 void cpu_register_map_client(QEMUBH
*bh
)
2644 MapClient
*client
= g_malloc(sizeof(*client
));
2646 qemu_mutex_lock(&map_client_list_lock
);
2648 QLIST_INSERT_HEAD(&map_client_list
, client
, link
);
2649 if (!atomic_read(&bounce
.in_use
)) {
2650 cpu_notify_map_clients_locked();
2652 qemu_mutex_unlock(&map_client_list_lock
);
2655 void cpu_exec_init_all(void)
2657 qemu_mutex_init(&ram_list
.mutex
);
2660 qemu_mutex_init(&map_client_list_lock
);
2663 void cpu_unregister_map_client(QEMUBH
*bh
)
2667 qemu_mutex_lock(&map_client_list_lock
);
2668 QLIST_FOREACH(client
, &map_client_list
, link
) {
2669 if (client
->bh
== bh
) {
2670 cpu_unregister_map_client_do(client
);
2674 qemu_mutex_unlock(&map_client_list_lock
);
2677 static void cpu_notify_map_clients(void)
2679 qemu_mutex_lock(&map_client_list_lock
);
2680 cpu_notify_map_clients_locked();
2681 qemu_mutex_unlock(&map_client_list_lock
);
2684 bool address_space_access_valid(AddressSpace
*as
, hwaddr addr
, int len
, bool is_write
)
2692 mr
= address_space_translate(as
, addr
, &xlat
, &l
, is_write
);
2693 if (!memory_access_is_direct(mr
, is_write
)) {
2694 l
= memory_access_size(mr
, l
, addr
);
2695 if (!memory_region_access_valid(mr
, xlat
, l
, is_write
)) {
2707 /* Map a physical memory region into a host virtual address.
2708 * May map a subset of the requested range, given by and returned in *plen.
2709 * May return NULL if resources needed to perform the mapping are exhausted.
2710 * Use only for reads OR writes - not for read-modify-write operations.
2711 * Use cpu_register_map_client() to know when retrying the map operation is
2712 * likely to succeed.
2714 void *address_space_map(AddressSpace
*as
,
2721 hwaddr l
, xlat
, base
;
2722 MemoryRegion
*mr
, *this_mr
;
2731 mr
= address_space_translate(as
, addr
, &xlat
, &l
, is_write
);
2733 if (!memory_access_is_direct(mr
, is_write
)) {
2734 if (atomic_xchg(&bounce
.in_use
, true)) {
2738 /* Avoid unbounded allocations */
2739 l
= MIN(l
, TARGET_PAGE_SIZE
);
2740 bounce
.buffer
= qemu_memalign(TARGET_PAGE_SIZE
, l
);
2744 memory_region_ref(mr
);
2747 address_space_read(as
, addr
, MEMTXATTRS_UNSPECIFIED
,
2753 return bounce
.buffer
;
2757 raddr
= memory_region_get_ram_addr(mr
);
2768 this_mr
= address_space_translate(as
, addr
, &xlat
, &l
, is_write
);
2769 if (this_mr
!= mr
|| xlat
!= base
+ done
) {
2774 memory_region_ref(mr
);
2777 return qemu_ram_ptr_length(raddr
+ base
, plen
);
2780 /* Unmaps a memory region previously mapped by address_space_map().
2781 * Will also mark the memory as dirty if is_write == 1. access_len gives
2782 * the amount of memory that was actually read or written by the caller.
2784 void address_space_unmap(AddressSpace
*as
, void *buffer
, hwaddr len
,
2785 int is_write
, hwaddr access_len
)
2787 if (buffer
!= bounce
.buffer
) {
2791 mr
= qemu_ram_addr_from_host(buffer
, &addr1
);
2794 invalidate_and_set_dirty(mr
, addr1
, access_len
);
2796 if (xen_enabled()) {
2797 xen_invalidate_map_cache_entry(buffer
);
2799 memory_region_unref(mr
);
2803 address_space_write(as
, bounce
.addr
, MEMTXATTRS_UNSPECIFIED
,
2804 bounce
.buffer
, access_len
);
2806 qemu_vfree(bounce
.buffer
);
2807 bounce
.buffer
= NULL
;
2808 memory_region_unref(bounce
.mr
);
2809 atomic_mb_set(&bounce
.in_use
, false);
2810 cpu_notify_map_clients();
2813 void *cpu_physical_memory_map(hwaddr addr
,
2817 return address_space_map(&address_space_memory
, addr
, plen
, is_write
);
2820 void cpu_physical_memory_unmap(void *buffer
, hwaddr len
,
2821 int is_write
, hwaddr access_len
)
2823 return address_space_unmap(&address_space_memory
, buffer
, len
, is_write
, access_len
);
2826 /* warning: addr must be aligned */
2827 static inline uint32_t address_space_ldl_internal(AddressSpace
*as
, hwaddr addr
,
2829 MemTxResult
*result
,
2830 enum device_endian endian
)
2838 bool release_lock
= false;
2841 mr
= address_space_translate(as
, addr
, &addr1
, &l
, false);
2842 if (l
< 4 || !memory_access_is_direct(mr
, false)) {
2843 release_lock
|= prepare_mmio_access(mr
);
2846 r
= memory_region_dispatch_read(mr
, addr1
, &val
, 4, attrs
);
2847 #if defined(TARGET_WORDS_BIGENDIAN)
2848 if (endian
== DEVICE_LITTLE_ENDIAN
) {
2852 if (endian
== DEVICE_BIG_ENDIAN
) {
2858 ptr
= qemu_get_ram_ptr((memory_region_get_ram_addr(mr
)
2862 case DEVICE_LITTLE_ENDIAN
:
2863 val
= ldl_le_p(ptr
);
2865 case DEVICE_BIG_ENDIAN
:
2866 val
= ldl_be_p(ptr
);
2878 qemu_mutex_unlock_iothread();
2884 uint32_t address_space_ldl(AddressSpace
*as
, hwaddr addr
,
2885 MemTxAttrs attrs
, MemTxResult
*result
)
2887 return address_space_ldl_internal(as
, addr
, attrs
, result
,
2888 DEVICE_NATIVE_ENDIAN
);
2891 uint32_t address_space_ldl_le(AddressSpace
*as
, hwaddr addr
,
2892 MemTxAttrs attrs
, MemTxResult
*result
)
2894 return address_space_ldl_internal(as
, addr
, attrs
, result
,
2895 DEVICE_LITTLE_ENDIAN
);
2898 uint32_t address_space_ldl_be(AddressSpace
*as
, hwaddr addr
,
2899 MemTxAttrs attrs
, MemTxResult
*result
)
2901 return address_space_ldl_internal(as
, addr
, attrs
, result
,
2905 uint32_t ldl_phys(AddressSpace
*as
, hwaddr addr
)
2907 return address_space_ldl(as
, addr
, MEMTXATTRS_UNSPECIFIED
, NULL
);
2910 uint32_t ldl_le_phys(AddressSpace
*as
, hwaddr addr
)
2912 return address_space_ldl_le(as
, addr
, MEMTXATTRS_UNSPECIFIED
, NULL
);
2915 uint32_t ldl_be_phys(AddressSpace
*as
, hwaddr addr
)
2917 return address_space_ldl_be(as
, addr
, MEMTXATTRS_UNSPECIFIED
, NULL
);
2920 /* warning: addr must be aligned */
2921 static inline uint64_t address_space_ldq_internal(AddressSpace
*as
, hwaddr addr
,
2923 MemTxResult
*result
,
2924 enum device_endian endian
)
2932 bool release_lock
= false;
2935 mr
= address_space_translate(as
, addr
, &addr1
, &l
,
2937 if (l
< 8 || !memory_access_is_direct(mr
, false)) {
2938 release_lock
|= prepare_mmio_access(mr
);
2941 r
= memory_region_dispatch_read(mr
, addr1
, &val
, 8, attrs
);
2942 #if defined(TARGET_WORDS_BIGENDIAN)
2943 if (endian
== DEVICE_LITTLE_ENDIAN
) {
2947 if (endian
== DEVICE_BIG_ENDIAN
) {
2953 ptr
= qemu_get_ram_ptr((memory_region_get_ram_addr(mr
)
2957 case DEVICE_LITTLE_ENDIAN
:
2958 val
= ldq_le_p(ptr
);
2960 case DEVICE_BIG_ENDIAN
:
2961 val
= ldq_be_p(ptr
);
2973 qemu_mutex_unlock_iothread();
2979 uint64_t address_space_ldq(AddressSpace
*as
, hwaddr addr
,
2980 MemTxAttrs attrs
, MemTxResult
*result
)
2982 return address_space_ldq_internal(as
, addr
, attrs
, result
,
2983 DEVICE_NATIVE_ENDIAN
);
2986 uint64_t address_space_ldq_le(AddressSpace
*as
, hwaddr addr
,
2987 MemTxAttrs attrs
, MemTxResult
*result
)
2989 return address_space_ldq_internal(as
, addr
, attrs
, result
,
2990 DEVICE_LITTLE_ENDIAN
);
2993 uint64_t address_space_ldq_be(AddressSpace
*as
, hwaddr addr
,
2994 MemTxAttrs attrs
, MemTxResult
*result
)
2996 return address_space_ldq_internal(as
, addr
, attrs
, result
,
3000 uint64_t ldq_phys(AddressSpace
*as
, hwaddr addr
)
3002 return address_space_ldq(as
, addr
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3005 uint64_t ldq_le_phys(AddressSpace
*as
, hwaddr addr
)
3007 return address_space_ldq_le(as
, addr
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3010 uint64_t ldq_be_phys(AddressSpace
*as
, hwaddr addr
)
3012 return address_space_ldq_be(as
, addr
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3016 uint32_t address_space_ldub(AddressSpace
*as
, hwaddr addr
,
3017 MemTxAttrs attrs
, MemTxResult
*result
)
3022 r
= address_space_rw(as
, addr
, attrs
, &val
, 1, 0);
3029 uint32_t ldub_phys(AddressSpace
*as
, hwaddr addr
)
3031 return address_space_ldub(as
, addr
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3034 /* warning: addr must be aligned */
3035 static inline uint32_t address_space_lduw_internal(AddressSpace
*as
,
3038 MemTxResult
*result
,
3039 enum device_endian endian
)
3047 bool release_lock
= false;
3050 mr
= address_space_translate(as
, addr
, &addr1
, &l
,
3052 if (l
< 2 || !memory_access_is_direct(mr
, false)) {
3053 release_lock
|= prepare_mmio_access(mr
);
3056 r
= memory_region_dispatch_read(mr
, addr1
, &val
, 2, attrs
);
3057 #if defined(TARGET_WORDS_BIGENDIAN)
3058 if (endian
== DEVICE_LITTLE_ENDIAN
) {
3062 if (endian
== DEVICE_BIG_ENDIAN
) {
3068 ptr
= qemu_get_ram_ptr((memory_region_get_ram_addr(mr
)
3072 case DEVICE_LITTLE_ENDIAN
:
3073 val
= lduw_le_p(ptr
);
3075 case DEVICE_BIG_ENDIAN
:
3076 val
= lduw_be_p(ptr
);
3088 qemu_mutex_unlock_iothread();
3094 uint32_t address_space_lduw(AddressSpace
*as
, hwaddr addr
,
3095 MemTxAttrs attrs
, MemTxResult
*result
)
3097 return address_space_lduw_internal(as
, addr
, attrs
, result
,
3098 DEVICE_NATIVE_ENDIAN
);
3101 uint32_t address_space_lduw_le(AddressSpace
*as
, hwaddr addr
,
3102 MemTxAttrs attrs
, MemTxResult
*result
)
3104 return address_space_lduw_internal(as
, addr
, attrs
, result
,
3105 DEVICE_LITTLE_ENDIAN
);
3108 uint32_t address_space_lduw_be(AddressSpace
*as
, hwaddr addr
,
3109 MemTxAttrs attrs
, MemTxResult
*result
)
3111 return address_space_lduw_internal(as
, addr
, attrs
, result
,
3115 uint32_t lduw_phys(AddressSpace
*as
, hwaddr addr
)
3117 return address_space_lduw(as
, addr
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3120 uint32_t lduw_le_phys(AddressSpace
*as
, hwaddr addr
)
3122 return address_space_lduw_le(as
, addr
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3125 uint32_t lduw_be_phys(AddressSpace
*as
, hwaddr addr
)
3127 return address_space_lduw_be(as
, addr
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3130 /* warning: addr must be aligned. The ram page is not masked as dirty
3131 and the code inside is not invalidated. It is useful if the dirty
3132 bits are used to track modified PTEs */
3133 void address_space_stl_notdirty(AddressSpace
*as
, hwaddr addr
, uint32_t val
,
3134 MemTxAttrs attrs
, MemTxResult
*result
)
3141 uint8_t dirty_log_mask
;
3142 bool release_lock
= false;
3145 mr
= address_space_translate(as
, addr
, &addr1
, &l
,
3147 if (l
< 4 || !memory_access_is_direct(mr
, true)) {
3148 release_lock
|= prepare_mmio_access(mr
);
3150 r
= memory_region_dispatch_write(mr
, addr1
, val
, 4, attrs
);
3152 addr1
+= memory_region_get_ram_addr(mr
) & TARGET_PAGE_MASK
;
3153 ptr
= qemu_get_ram_ptr(addr1
);
3156 dirty_log_mask
= memory_region_get_dirty_log_mask(mr
);
3157 dirty_log_mask
&= ~(1 << DIRTY_MEMORY_CODE
);
3158 cpu_physical_memory_set_dirty_range(addr1
, 4, dirty_log_mask
);
3165 qemu_mutex_unlock_iothread();
3170 void stl_phys_notdirty(AddressSpace
*as
, hwaddr addr
, uint32_t val
)
3172 address_space_stl_notdirty(as
, addr
, val
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3175 /* warning: addr must be aligned */
3176 static inline void address_space_stl_internal(AddressSpace
*as
,
3177 hwaddr addr
, uint32_t val
,
3179 MemTxResult
*result
,
3180 enum device_endian endian
)
3187 bool release_lock
= false;
3190 mr
= address_space_translate(as
, addr
, &addr1
, &l
,
3192 if (l
< 4 || !memory_access_is_direct(mr
, true)) {
3193 release_lock
|= prepare_mmio_access(mr
);
3195 #if defined(TARGET_WORDS_BIGENDIAN)
3196 if (endian
== DEVICE_LITTLE_ENDIAN
) {
3200 if (endian
== DEVICE_BIG_ENDIAN
) {
3204 r
= memory_region_dispatch_write(mr
, addr1
, val
, 4, attrs
);
3207 addr1
+= memory_region_get_ram_addr(mr
) & TARGET_PAGE_MASK
;
3208 ptr
= qemu_get_ram_ptr(addr1
);
3210 case DEVICE_LITTLE_ENDIAN
:
3213 case DEVICE_BIG_ENDIAN
:
3220 invalidate_and_set_dirty(mr
, addr1
, 4);
3227 qemu_mutex_unlock_iothread();
3232 void address_space_stl(AddressSpace
*as
, hwaddr addr
, uint32_t val
,
3233 MemTxAttrs attrs
, MemTxResult
*result
)
3235 address_space_stl_internal(as
, addr
, val
, attrs
, result
,
3236 DEVICE_NATIVE_ENDIAN
);
3239 void address_space_stl_le(AddressSpace
*as
, hwaddr addr
, uint32_t val
,
3240 MemTxAttrs attrs
, MemTxResult
*result
)
3242 address_space_stl_internal(as
, addr
, val
, attrs
, result
,
3243 DEVICE_LITTLE_ENDIAN
);
3246 void address_space_stl_be(AddressSpace
*as
, hwaddr addr
, uint32_t val
,
3247 MemTxAttrs attrs
, MemTxResult
*result
)
3249 address_space_stl_internal(as
, addr
, val
, attrs
, result
,
3253 void stl_phys(AddressSpace
*as
, hwaddr addr
, uint32_t val
)
3255 address_space_stl(as
, addr
, val
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3258 void stl_le_phys(AddressSpace
*as
, hwaddr addr
, uint32_t val
)
3260 address_space_stl_le(as
, addr
, val
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3263 void stl_be_phys(AddressSpace
*as
, hwaddr addr
, uint32_t val
)
3265 address_space_stl_be(as
, addr
, val
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3269 void address_space_stb(AddressSpace
*as
, hwaddr addr
, uint32_t val
,
3270 MemTxAttrs attrs
, MemTxResult
*result
)
3275 r
= address_space_rw(as
, addr
, attrs
, &v
, 1, 1);
3281 void stb_phys(AddressSpace
*as
, hwaddr addr
, uint32_t val
)
3283 address_space_stb(as
, addr
, val
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3286 /* warning: addr must be aligned */
3287 static inline void address_space_stw_internal(AddressSpace
*as
,
3288 hwaddr addr
, uint32_t val
,
3290 MemTxResult
*result
,
3291 enum device_endian endian
)
3298 bool release_lock
= false;
3301 mr
= address_space_translate(as
, addr
, &addr1
, &l
, true);
3302 if (l
< 2 || !memory_access_is_direct(mr
, true)) {
3303 release_lock
|= prepare_mmio_access(mr
);
3305 #if defined(TARGET_WORDS_BIGENDIAN)
3306 if (endian
== DEVICE_LITTLE_ENDIAN
) {
3310 if (endian
== DEVICE_BIG_ENDIAN
) {
3314 r
= memory_region_dispatch_write(mr
, addr1
, val
, 2, attrs
);
3317 addr1
+= memory_region_get_ram_addr(mr
) & TARGET_PAGE_MASK
;
3318 ptr
= qemu_get_ram_ptr(addr1
);
3320 case DEVICE_LITTLE_ENDIAN
:
3323 case DEVICE_BIG_ENDIAN
:
3330 invalidate_and_set_dirty(mr
, addr1
, 2);
3337 qemu_mutex_unlock_iothread();
3342 void address_space_stw(AddressSpace
*as
, hwaddr addr
, uint32_t val
,
3343 MemTxAttrs attrs
, MemTxResult
*result
)
3345 address_space_stw_internal(as
, addr
, val
, attrs
, result
,
3346 DEVICE_NATIVE_ENDIAN
);
3349 void address_space_stw_le(AddressSpace
*as
, hwaddr addr
, uint32_t val
,
3350 MemTxAttrs attrs
, MemTxResult
*result
)
3352 address_space_stw_internal(as
, addr
, val
, attrs
, result
,
3353 DEVICE_LITTLE_ENDIAN
);
3356 void address_space_stw_be(AddressSpace
*as
, hwaddr addr
, uint32_t val
,
3357 MemTxAttrs attrs
, MemTxResult
*result
)
3359 address_space_stw_internal(as
, addr
, val
, attrs
, result
,
3363 void stw_phys(AddressSpace
*as
, hwaddr addr
, uint32_t val
)
3365 address_space_stw(as
, addr
, val
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3368 void stw_le_phys(AddressSpace
*as
, hwaddr addr
, uint32_t val
)
3370 address_space_stw_le(as
, addr
, val
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3373 void stw_be_phys(AddressSpace
*as
, hwaddr addr
, uint32_t val
)
3375 address_space_stw_be(as
, addr
, val
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3379 void address_space_stq(AddressSpace
*as
, hwaddr addr
, uint64_t val
,
3380 MemTxAttrs attrs
, MemTxResult
*result
)
3384 r
= address_space_rw(as
, addr
, attrs
, (void *) &val
, 8, 1);
3390 void address_space_stq_le(AddressSpace
*as
, hwaddr addr
, uint64_t val
,
3391 MemTxAttrs attrs
, MemTxResult
*result
)
3394 val
= cpu_to_le64(val
);
3395 r
= address_space_rw(as
, addr
, attrs
, (void *) &val
, 8, 1);
3400 void address_space_stq_be(AddressSpace
*as
, hwaddr addr
, uint64_t val
,
3401 MemTxAttrs attrs
, MemTxResult
*result
)
3404 val
= cpu_to_be64(val
);
3405 r
= address_space_rw(as
, addr
, attrs
, (void *) &val
, 8, 1);
3411 void stq_phys(AddressSpace
*as
, hwaddr addr
, uint64_t val
)
3413 address_space_stq(as
, addr
, val
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3416 void stq_le_phys(AddressSpace
*as
, hwaddr addr
, uint64_t val
)
3418 address_space_stq_le(as
, addr
, val
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3421 void stq_be_phys(AddressSpace
*as
, hwaddr addr
, uint64_t val
)
3423 address_space_stq_be(as
, addr
, val
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3426 /* virtual memory access for debug (includes writing to ROM) */
3427 int cpu_memory_rw_debug(CPUState
*cpu
, target_ulong addr
,
3428 uint8_t *buf
, int len
, int is_write
)
3435 page
= addr
& TARGET_PAGE_MASK
;
3436 phys_addr
= cpu_get_phys_page_debug(cpu
, page
);
3437 /* if no physical page mapped, return an error */
3438 if (phys_addr
== -1)
3440 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
3443 phys_addr
+= (addr
& ~TARGET_PAGE_MASK
);
3445 cpu_physical_memory_write_rom(cpu
->as
, phys_addr
, buf
, l
);
3447 address_space_rw(cpu
->as
, phys_addr
, MEMTXATTRS_UNSPECIFIED
,
/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool target_words_bigendian(void);
bool target_words_bigendian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}
3472 #ifndef CONFIG_USER_ONLY
3473 bool cpu_physical_memory_is_io(hwaddr phys_addr
)
3480 mr
= address_space_translate(&address_space_memory
,
3481 phys_addr
, &phys_addr
, &l
, false);
3483 res
= !(memory_region_is_ram(mr
) || memory_region_is_romd(mr
));
3488 int qemu_ram_foreach_block(RAMBlockIterFunc func
, void *opaque
)
3494 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
3495 ret
= func(block
->idstr
, block
->host
, block
->offset
,
3496 block
->used_length
, opaque
);