4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 #include <sys/types.h>
25 #include "qemu-common.h"
29 #if !defined(CONFIG_USER_ONLY)
30 #include "hw/boards.h"
33 #include "qemu/osdep.h"
34 #include "sysemu/kvm.h"
35 #include "sysemu/sysemu.h"
36 #include "hw/xen/xen.h"
37 #include "qemu/timer.h"
38 #include "qemu/config-file.h"
39 #include "qemu/error-report.h"
40 #include "exec/memory.h"
41 #include "sysemu/dma.h"
42 #include "exec/address-spaces.h"
43 #if defined(CONFIG_USER_ONLY)
45 #else /* !CONFIG_USER_ONLY */
46 #include "sysemu/xen-mapcache.h"
49 #include "exec/cpu-all.h"
50 #include "qemu/rcu_queue.h"
51 #include "qemu/main-loop.h"
52 #include "exec/cputlb.h"
53 #include "translate-all.h"
55 #include "exec/memory-internal.h"
56 #include "exec/ram_addr.h"
58 #include "qemu/range.h"
60 //#define DEBUG_SUBPAGE
62 #if !defined(CONFIG_USER_ONLY)
63 /* ram_list is read under rcu_read_lock()/rcu_read_unlock(). Writes
64 * are protected by the ramlist lock.
66 RAMList ram_list
= { .blocks
= QLIST_HEAD_INITIALIZER(ram_list
.blocks
) };
68 static MemoryRegion
*system_memory
;
69 static MemoryRegion
*system_io
;
71 AddressSpace address_space_io
;
72 AddressSpace address_space_memory
;
74 MemoryRegion io_mem_rom
, io_mem_notdirty
;
75 static MemoryRegion io_mem_unassigned
;
77 /* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
78 #define RAM_PREALLOC (1 << 0)
80 /* RAM is mmap-ed with MAP_SHARED */
81 #define RAM_SHARED (1 << 1)
83 /* Only a portion of RAM (used_length) is actually used, and migrated.
84 * This used_length size can change across reboots.
86 #define RAM_RESIZEABLE (1 << 2)
90 struct CPUTailQ cpus
= QTAILQ_HEAD_INITIALIZER(cpus
);
91 /* current CPU in the current thread. It is only valid inside
93 DEFINE_TLS(CPUState
*, current_cpu
);
94 /* 0 = Do not count executed instructions.
95 1 = Precise instruction counting.
96 2 = Adaptive rate instruction counting. */
99 #if !defined(CONFIG_USER_ONLY)
101 typedef struct PhysPageEntry PhysPageEntry
;
103 struct PhysPageEntry
{
104 /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */
106 /* index into phys_sections (!skip) or phys_map_nodes (skip) */
110 #define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)
112 /* Size of the L2 (and L3, etc) page tables. */
113 #define ADDR_SPACE_BITS 64
116 #define P_L2_SIZE (1 << P_L2_BITS)
118 #define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
120 typedef PhysPageEntry Node
[P_L2_SIZE
];
122 typedef struct PhysPageMap
{
125 unsigned sections_nb
;
126 unsigned sections_nb_alloc
;
128 unsigned nodes_nb_alloc
;
130 MemoryRegionSection
*sections
;
133 struct AddressSpaceDispatch
{
136 /* This is a multi-level map on the physical address space.
137 * The bottom level has pointers to MemoryRegionSections.
139 PhysPageEntry phys_map
;
144 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
145 typedef struct subpage_t
{
149 uint16_t sub_section
[TARGET_PAGE_SIZE
];
152 #define PHYS_SECTION_UNASSIGNED 0
153 #define PHYS_SECTION_NOTDIRTY 1
154 #define PHYS_SECTION_ROM 2
155 #define PHYS_SECTION_WATCH 3
157 static void io_mem_init(void);
158 static void memory_map_init(void);
159 static void tcg_commit(MemoryListener
*listener
);
161 static MemoryRegion io_mem_watch
;
164 #if !defined(CONFIG_USER_ONLY)
166 static void phys_map_node_reserve(PhysPageMap
*map
, unsigned nodes
)
168 if (map
->nodes_nb
+ nodes
> map
->nodes_nb_alloc
) {
169 map
->nodes_nb_alloc
= MAX(map
->nodes_nb_alloc
* 2, 16);
170 map
->nodes_nb_alloc
= MAX(map
->nodes_nb_alloc
, map
->nodes_nb
+ nodes
);
171 map
->nodes
= g_renew(Node
, map
->nodes
, map
->nodes_nb_alloc
);
175 static uint32_t phys_map_node_alloc(PhysPageMap
*map
, bool leaf
)
182 ret
= map
->nodes_nb
++;
184 assert(ret
!= PHYS_MAP_NODE_NIL
);
185 assert(ret
!= map
->nodes_nb_alloc
);
187 e
.skip
= leaf
? 0 : 1;
188 e
.ptr
= leaf
? PHYS_SECTION_UNASSIGNED
: PHYS_MAP_NODE_NIL
;
189 for (i
= 0; i
< P_L2_SIZE
; ++i
) {
190 memcpy(&p
[i
], &e
, sizeof(e
));
195 static void phys_page_set_level(PhysPageMap
*map
, PhysPageEntry
*lp
,
196 hwaddr
*index
, hwaddr
*nb
, uint16_t leaf
,
200 hwaddr step
= (hwaddr
)1 << (level
* P_L2_BITS
);
202 if (lp
->skip
&& lp
->ptr
== PHYS_MAP_NODE_NIL
) {
203 lp
->ptr
= phys_map_node_alloc(map
, level
== 0);
205 p
= map
->nodes
[lp
->ptr
];
206 lp
= &p
[(*index
>> (level
* P_L2_BITS
)) & (P_L2_SIZE
- 1)];
208 while (*nb
&& lp
< &p
[P_L2_SIZE
]) {
209 if ((*index
& (step
- 1)) == 0 && *nb
>= step
) {
215 phys_page_set_level(map
, lp
, index
, nb
, leaf
, level
- 1);
221 static void phys_page_set(AddressSpaceDispatch
*d
,
222 hwaddr index
, hwaddr nb
,
225 /* Wildly overreserve - it doesn't matter much. */
226 phys_map_node_reserve(&d
->map
, 3 * P_L2_LEVELS
);
228 phys_page_set_level(&d
->map
, &d
->phys_map
, &index
, &nb
, leaf
, P_L2_LEVELS
- 1);
231 /* Compact a non leaf page entry. Simply detect that the entry has a single child,
232 * and update our entry so we can skip it and go directly to the destination.
234 static void phys_page_compact(PhysPageEntry
*lp
, Node
*nodes
, unsigned long *compacted
)
236 unsigned valid_ptr
= P_L2_SIZE
;
241 if (lp
->ptr
== PHYS_MAP_NODE_NIL
) {
246 for (i
= 0; i
< P_L2_SIZE
; i
++) {
247 if (p
[i
].ptr
== PHYS_MAP_NODE_NIL
) {
254 phys_page_compact(&p
[i
], nodes
, compacted
);
258 /* We can only compress if there's only one child. */
263 assert(valid_ptr
< P_L2_SIZE
);
265 /* Don't compress if it won't fit in the # of bits we have. */
266 if (lp
->skip
+ p
[valid_ptr
].skip
>= (1 << 3)) {
270 lp
->ptr
= p
[valid_ptr
].ptr
;
271 if (!p
[valid_ptr
].skip
) {
272 /* If our only child is a leaf, make this a leaf. */
273 /* By design, we should have made this node a leaf to begin with so we
274 * should never reach here.
275 * But since it's so simple to handle this, let's do it just in case we
280 lp
->skip
+= p
[valid_ptr
].skip
;
284 static void phys_page_compact_all(AddressSpaceDispatch
*d
, int nodes_nb
)
286 DECLARE_BITMAP(compacted
, nodes_nb
);
288 if (d
->phys_map
.skip
) {
289 phys_page_compact(&d
->phys_map
, d
->map
.nodes
, compacted
);
293 static MemoryRegionSection
*phys_page_find(PhysPageEntry lp
, hwaddr addr
,
294 Node
*nodes
, MemoryRegionSection
*sections
)
297 hwaddr index
= addr
>> TARGET_PAGE_BITS
;
300 for (i
= P_L2_LEVELS
; lp
.skip
&& (i
-= lp
.skip
) >= 0;) {
301 if (lp
.ptr
== PHYS_MAP_NODE_NIL
) {
302 return §ions
[PHYS_SECTION_UNASSIGNED
];
305 lp
= p
[(index
>> (i
* P_L2_BITS
)) & (P_L2_SIZE
- 1)];
308 if (sections
[lp
.ptr
].size
.hi
||
309 range_covers_byte(sections
[lp
.ptr
].offset_within_address_space
,
310 sections
[lp
.ptr
].size
.lo
, addr
)) {
311 return §ions
[lp
.ptr
];
313 return §ions
[PHYS_SECTION_UNASSIGNED
];
317 bool memory_region_is_unassigned(MemoryRegion
*mr
)
319 return mr
!= &io_mem_rom
&& mr
!= &io_mem_notdirty
&& !mr
->rom_device
320 && mr
!= &io_mem_watch
;
323 /* Called from RCU critical section */
324 static MemoryRegionSection
*address_space_lookup_region(AddressSpaceDispatch
*d
,
326 bool resolve_subpage
)
328 MemoryRegionSection
*section
;
331 section
= phys_page_find(d
->phys_map
, addr
, d
->map
.nodes
, d
->map
.sections
);
332 if (resolve_subpage
&& section
->mr
->subpage
) {
333 subpage
= container_of(section
->mr
, subpage_t
, iomem
);
334 section
= &d
->map
.sections
[subpage
->sub_section
[SUBPAGE_IDX(addr
)]];
339 /* Called from RCU critical section */
340 static MemoryRegionSection
*
341 address_space_translate_internal(AddressSpaceDispatch
*d
, hwaddr addr
, hwaddr
*xlat
,
342 hwaddr
*plen
, bool resolve_subpage
)
344 MemoryRegionSection
*section
;
348 section
= address_space_lookup_region(d
, addr
, resolve_subpage
);
349 /* Compute offset within MemoryRegionSection */
350 addr
-= section
->offset_within_address_space
;
352 /* Compute offset within MemoryRegion */
353 *xlat
= addr
+ section
->offset_within_region
;
357 /* MMIO registers can be expected to perform full-width accesses based only
358 * on their address, without considering adjacent registers that could
359 * decode to completely different MemoryRegions. When such registers
360 * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
361 * regions overlap wildly. For this reason we cannot clamp the accesses
364 * If the length is small (as is the case for address_space_ldl/stl),
365 * everything works fine. If the incoming length is large, however,
366 * the caller really has to do the clamping through memory_access_size.
368 if (memory_region_is_ram(mr
)) {
369 diff
= int128_sub(section
->size
, int128_make64(addr
));
370 *plen
= int128_get64(int128_min(diff
, int128_make64(*plen
)));
375 static inline bool memory_access_is_direct(MemoryRegion
*mr
, bool is_write
)
377 if (memory_region_is_ram(mr
)) {
378 return !(is_write
&& mr
->readonly
);
380 if (memory_region_is_romd(mr
)) {
387 /* Called from RCU critical section */
388 MemoryRegion
*address_space_translate(AddressSpace
*as
, hwaddr addr
,
389 hwaddr
*xlat
, hwaddr
*plen
,
393 MemoryRegionSection
*section
;
397 AddressSpaceDispatch
*d
= atomic_rcu_read(&as
->dispatch
);
398 section
= address_space_translate_internal(d
, addr
, &addr
, plen
, true);
401 if (!mr
->iommu_ops
) {
405 iotlb
= mr
->iommu_ops
->translate(mr
, addr
, is_write
);
406 addr
= ((iotlb
.translated_addr
& ~iotlb
.addr_mask
)
407 | (addr
& iotlb
.addr_mask
));
408 *plen
= MIN(*plen
, (addr
| iotlb
.addr_mask
) - addr
+ 1);
409 if (!(iotlb
.perm
& (1 << is_write
))) {
410 mr
= &io_mem_unassigned
;
414 as
= iotlb
.target_as
;
417 if (xen_enabled() && memory_access_is_direct(mr
, is_write
)) {
418 hwaddr page
= ((addr
& TARGET_PAGE_MASK
) + TARGET_PAGE_SIZE
) - addr
;
419 *plen
= MIN(page
, *plen
);
426 /* Called from RCU critical section */
427 MemoryRegionSection
*
428 address_space_translate_for_iotlb(CPUState
*cpu
, hwaddr addr
,
429 hwaddr
*xlat
, hwaddr
*plen
)
431 MemoryRegionSection
*section
;
432 section
= address_space_translate_internal(cpu
->memory_dispatch
,
433 addr
, xlat
, plen
, false);
435 assert(!section
->mr
->iommu_ops
);
440 #if !defined(CONFIG_USER_ONLY)
442 static int cpu_common_post_load(void *opaque
, int version_id
)
444 CPUState
*cpu
= opaque
;
446 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
447 version_id is increased. */
448 cpu
->interrupt_request
&= ~0x01;
454 static int cpu_common_pre_load(void *opaque
)
456 CPUState
*cpu
= opaque
;
458 cpu
->exception_index
= -1;
463 static bool cpu_common_exception_index_needed(void *opaque
)
465 CPUState
*cpu
= opaque
;
467 return tcg_enabled() && cpu
->exception_index
!= -1;
470 static const VMStateDescription vmstate_cpu_common_exception_index
= {
471 .name
= "cpu_common/exception_index",
473 .minimum_version_id
= 1,
474 .needed
= cpu_common_exception_index_needed
,
475 .fields
= (VMStateField
[]) {
476 VMSTATE_INT32(exception_index
, CPUState
),
477 VMSTATE_END_OF_LIST()
481 const VMStateDescription vmstate_cpu_common
= {
482 .name
= "cpu_common",
484 .minimum_version_id
= 1,
485 .pre_load
= cpu_common_pre_load
,
486 .post_load
= cpu_common_post_load
,
487 .fields
= (VMStateField
[]) {
488 VMSTATE_UINT32(halted
, CPUState
),
489 VMSTATE_UINT32(interrupt_request
, CPUState
),
490 VMSTATE_END_OF_LIST()
492 .subsections
= (const VMStateDescription
*[]) {
493 &vmstate_cpu_common_exception_index
,
500 CPUState
*qemu_get_cpu(int index
)
505 if (cpu
->cpu_index
== index
) {
513 #if !defined(CONFIG_USER_ONLY)
514 void tcg_cpu_address_space_init(CPUState
*cpu
, AddressSpace
*as
)
516 /* We only support one address space per cpu at the moment. */
517 assert(cpu
->as
== as
);
519 if (cpu
->tcg_as_listener
) {
520 memory_listener_unregister(cpu
->tcg_as_listener
);
522 cpu
->tcg_as_listener
= g_new0(MemoryListener
, 1);
524 cpu
->tcg_as_listener
->commit
= tcg_commit
;
525 memory_listener_register(cpu
->tcg_as_listener
, as
);
529 #ifndef CONFIG_USER_ONLY
530 static DECLARE_BITMAP(cpu_index_map
, MAX_CPUMASK_BITS
);
532 static int cpu_get_free_index(Error
**errp
)
534 int cpu
= find_first_zero_bit(cpu_index_map
, MAX_CPUMASK_BITS
);
536 if (cpu
>= MAX_CPUMASK_BITS
) {
537 error_setg(errp
, "Trying to use more CPUs than max of %d",
542 bitmap_set(cpu_index_map
, cpu
, 1);
546 void cpu_exec_exit(CPUState
*cpu
)
548 if (cpu
->cpu_index
== -1) {
549 /* cpu_index was never allocated by this @cpu or was already freed. */
553 bitmap_clear(cpu_index_map
, cpu
->cpu_index
, 1);
558 static int cpu_get_free_index(Error
**errp
)
563 CPU_FOREACH(some_cpu
) {
569 void cpu_exec_exit(CPUState
*cpu
)
574 void cpu_exec_init(CPUState
*cpu
, Error
**errp
)
576 CPUClass
*cc
= CPU_GET_CLASS(cpu
);
578 Error
*local_err
= NULL
;
580 #ifdef TARGET_WORDS_BIGENDIAN
581 cpu
->bigendian
= true;
583 cpu
->bigendian
= false;
586 #ifndef CONFIG_USER_ONLY
587 cpu
->as
= &address_space_memory
;
588 cpu
->thread_id
= qemu_get_thread_id();
589 cpu_reload_memory_map(cpu
);
592 #if defined(CONFIG_USER_ONLY)
595 cpu_index
= cpu
->cpu_index
= cpu_get_free_index(&local_err
);
597 error_propagate(errp
, local_err
);
598 #if defined(CONFIG_USER_ONLY)
603 QTAILQ_INSERT_TAIL(&cpus
, cpu
, node
);
604 #if defined(CONFIG_USER_ONLY)
607 if (qdev_get_vmsd(DEVICE(cpu
)) == NULL
) {
608 vmstate_register(NULL
, cpu_index
, &vmstate_cpu_common
, cpu
);
610 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
611 register_savevm(NULL
, "cpu", cpu_index
, CPU_SAVE_VERSION
,
612 cpu_save
, cpu_load
, cpu
->env_ptr
);
613 assert(cc
->vmsd
== NULL
);
614 assert(qdev_get_vmsd(DEVICE(cpu
)) == NULL
);
616 if (cc
->vmsd
!= NULL
) {
617 vmstate_register(NULL
, cpu_index
, cc
->vmsd
, cpu
);
621 #if defined(CONFIG_USER_ONLY)
622 static void breakpoint_invalidate(CPUState
*cpu
, target_ulong pc
)
624 tb_invalidate_phys_page_range(pc
, pc
+ 1, 0);
627 static void breakpoint_invalidate(CPUState
*cpu
, target_ulong pc
)
629 hwaddr phys
= cpu_get_phys_page_debug(cpu
, pc
);
631 tb_invalidate_phys_addr(cpu
->as
,
632 phys
| (pc
& ~TARGET_PAGE_MASK
));
637 #if defined(CONFIG_USER_ONLY)
638 void cpu_watchpoint_remove_all(CPUState
*cpu
, int mask
)
643 int cpu_watchpoint_remove(CPUState
*cpu
, vaddr addr
, vaddr len
,
649 void cpu_watchpoint_remove_by_ref(CPUState
*cpu
, CPUWatchpoint
*watchpoint
)
653 int cpu_watchpoint_insert(CPUState
*cpu
, vaddr addr
, vaddr len
,
654 int flags
, CPUWatchpoint
**watchpoint
)
659 /* Add a watchpoint. */
660 int cpu_watchpoint_insert(CPUState
*cpu
, vaddr addr
, vaddr len
,
661 int flags
, CPUWatchpoint
**watchpoint
)
665 /* forbid ranges which are empty or run off the end of the address space */
666 if (len
== 0 || (addr
+ len
- 1) < addr
) {
667 error_report("tried to set invalid watchpoint at %"
668 VADDR_PRIx
", len=%" VADDR_PRIu
, addr
, len
);
671 wp
= g_malloc(sizeof(*wp
));
677 /* keep all GDB-injected watchpoints in front */
678 if (flags
& BP_GDB
) {
679 QTAILQ_INSERT_HEAD(&cpu
->watchpoints
, wp
, entry
);
681 QTAILQ_INSERT_TAIL(&cpu
->watchpoints
, wp
, entry
);
684 tlb_flush_page(cpu
, addr
);
691 /* Remove a specific watchpoint. */
692 int cpu_watchpoint_remove(CPUState
*cpu
, vaddr addr
, vaddr len
,
697 QTAILQ_FOREACH(wp
, &cpu
->watchpoints
, entry
) {
698 if (addr
== wp
->vaddr
&& len
== wp
->len
699 && flags
== (wp
->flags
& ~BP_WATCHPOINT_HIT
)) {
700 cpu_watchpoint_remove_by_ref(cpu
, wp
);
707 /* Remove a specific watchpoint by reference. */
708 void cpu_watchpoint_remove_by_ref(CPUState
*cpu
, CPUWatchpoint
*watchpoint
)
710 QTAILQ_REMOVE(&cpu
->watchpoints
, watchpoint
, entry
);
712 tlb_flush_page(cpu
, watchpoint
->vaddr
);
717 /* Remove all matching watchpoints. */
718 void cpu_watchpoint_remove_all(CPUState
*cpu
, int mask
)
720 CPUWatchpoint
*wp
, *next
;
722 QTAILQ_FOREACH_SAFE(wp
, &cpu
->watchpoints
, entry
, next
) {
723 if (wp
->flags
& mask
) {
724 cpu_watchpoint_remove_by_ref(cpu
, wp
);
729 /* Return true if this watchpoint address matches the specified
730 * access (ie the address range covered by the watchpoint overlaps
731 * partially or completely with the address range covered by the
734 static inline bool cpu_watchpoint_address_matches(CPUWatchpoint
*wp
,
738 /* We know the lengths are non-zero, but a little caution is
739 * required to avoid errors in the case where the range ends
740 * exactly at the top of the address space and so addr + len
741 * wraps round to zero.
743 vaddr wpend
= wp
->vaddr
+ wp
->len
- 1;
744 vaddr addrend
= addr
+ len
- 1;
746 return !(addr
> wpend
|| wp
->vaddr
> addrend
);
751 /* Add a breakpoint. */
752 int cpu_breakpoint_insert(CPUState
*cpu
, vaddr pc
, int flags
,
753 CPUBreakpoint
**breakpoint
)
757 bp
= g_malloc(sizeof(*bp
));
762 /* keep all GDB-injected breakpoints in front */
763 if (flags
& BP_GDB
) {
764 QTAILQ_INSERT_HEAD(&cpu
->breakpoints
, bp
, entry
);
766 QTAILQ_INSERT_TAIL(&cpu
->breakpoints
, bp
, entry
);
769 breakpoint_invalidate(cpu
, pc
);
777 /* Remove a specific breakpoint. */
778 int cpu_breakpoint_remove(CPUState
*cpu
, vaddr pc
, int flags
)
782 QTAILQ_FOREACH(bp
, &cpu
->breakpoints
, entry
) {
783 if (bp
->pc
== pc
&& bp
->flags
== flags
) {
784 cpu_breakpoint_remove_by_ref(cpu
, bp
);
791 /* Remove a specific breakpoint by reference. */
792 void cpu_breakpoint_remove_by_ref(CPUState
*cpu
, CPUBreakpoint
*breakpoint
)
794 QTAILQ_REMOVE(&cpu
->breakpoints
, breakpoint
, entry
);
796 breakpoint_invalidate(cpu
, breakpoint
->pc
);
801 /* Remove all matching breakpoints. */
802 void cpu_breakpoint_remove_all(CPUState
*cpu
, int mask
)
804 CPUBreakpoint
*bp
, *next
;
806 QTAILQ_FOREACH_SAFE(bp
, &cpu
->breakpoints
, entry
, next
) {
807 if (bp
->flags
& mask
) {
808 cpu_breakpoint_remove_by_ref(cpu
, bp
);
813 /* enable or disable single step mode. EXCP_DEBUG is returned by the
814 CPU loop after each instruction */
815 void cpu_single_step(CPUState
*cpu
, int enabled
)
817 if (cpu
->singlestep_enabled
!= enabled
) {
818 cpu
->singlestep_enabled
= enabled
;
820 kvm_update_guest_debug(cpu
, 0);
822 /* must flush all the translated code to avoid inconsistencies */
823 /* XXX: only flush what is necessary */
829 void QEMU_NORETURN
cpu_abort(CPUState
*cpu
, const char *fmt
, ...)
836 fprintf(stderr
, "qemu: fatal: ");
837 vfprintf(stderr
, fmt
, ap
);
838 fprintf(stderr
, "\n");
839 cpu_dump_state(cpu
, stderr
, fprintf
, CPU_DUMP_FPU
| CPU_DUMP_CCOP
);
840 if (qemu_log_enabled()) {
841 qemu_log("qemu: fatal: ");
842 qemu_log_vprintf(fmt
, ap2
);
844 log_cpu_state(cpu
, CPU_DUMP_FPU
| CPU_DUMP_CCOP
);
850 #if defined(CONFIG_USER_ONLY)
852 struct sigaction act
;
853 sigfillset(&act
.sa_mask
);
854 act
.sa_handler
= SIG_DFL
;
855 sigaction(SIGABRT
, &act
, NULL
);
861 #if !defined(CONFIG_USER_ONLY)
862 /* Called from RCU critical section */
863 static RAMBlock
*qemu_get_ram_block(ram_addr_t addr
)
867 block
= atomic_rcu_read(&ram_list
.mru_block
);
868 if (block
&& addr
- block
->offset
< block
->max_length
) {
871 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
872 if (addr
- block
->offset
< block
->max_length
) {
877 fprintf(stderr
, "Bad ram offset %" PRIx64
"\n", (uint64_t)addr
);
881 /* It is safe to write mru_block outside the iothread lock. This
886 * xxx removed from list
890 * call_rcu(reclaim_ramblock, xxx);
893 * atomic_rcu_set is not needed here. The block was already published
894 * when it was placed into the list. Here we're just making an extra
895 * copy of the pointer.
897 ram_list
.mru_block
= block
;
901 static void tlb_reset_dirty_range_all(ram_addr_t start
, ram_addr_t length
)
907 end
= TARGET_PAGE_ALIGN(start
+ length
);
908 start
&= TARGET_PAGE_MASK
;
911 block
= qemu_get_ram_block(start
);
912 assert(block
== qemu_get_ram_block(end
- 1));
913 start1
= (uintptr_t)ramblock_ptr(block
, start
- block
->offset
);
914 cpu_tlb_reset_dirty_all(start1
, length
);
918 /* Note: start and end must be within the same ram block. */
919 bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start
,
923 unsigned long end
, page
;
930 end
= TARGET_PAGE_ALIGN(start
+ length
) >> TARGET_PAGE_BITS
;
931 page
= start
>> TARGET_PAGE_BITS
;
932 dirty
= bitmap_test_and_clear_atomic(ram_list
.dirty_memory
[client
],
935 if (dirty
&& tcg_enabled()) {
936 tlb_reset_dirty_range_all(start
, length
);
942 /* Called from RCU critical section */
943 hwaddr
memory_region_section_get_iotlb(CPUState
*cpu
,
944 MemoryRegionSection
*section
,
946 hwaddr paddr
, hwaddr xlat
,
948 target_ulong
*address
)
953 if (memory_region_is_ram(section
->mr
)) {
955 iotlb
= (memory_region_get_ram_addr(section
->mr
) & TARGET_PAGE_MASK
)
957 if (!section
->readonly
) {
958 iotlb
|= PHYS_SECTION_NOTDIRTY
;
960 iotlb
|= PHYS_SECTION_ROM
;
963 AddressSpaceDispatch
*d
;
965 d
= atomic_rcu_read(§ion
->address_space
->dispatch
);
966 iotlb
= section
- d
->map
.sections
;
970 /* Make accesses to pages with watchpoints go via the
971 watchpoint trap routines. */
972 QTAILQ_FOREACH(wp
, &cpu
->watchpoints
, entry
) {
973 if (cpu_watchpoint_address_matches(wp
, vaddr
, TARGET_PAGE_SIZE
)) {
974 /* Avoid trapping reads of pages with a write breakpoint. */
975 if ((prot
& PAGE_WRITE
) || (wp
->flags
& BP_MEM_READ
)) {
976 iotlb
= PHYS_SECTION_WATCH
+ paddr
;
977 *address
|= TLB_MMIO
;
985 #endif /* defined(CONFIG_USER_ONLY) */
987 #if !defined(CONFIG_USER_ONLY)
989 static int subpage_register (subpage_t
*mmio
, uint32_t start
, uint32_t end
,
991 static subpage_t
*subpage_init(AddressSpace
*as
, hwaddr base
);
993 static void *(*phys_mem_alloc
)(size_t size
, uint64_t *align
) =
997  * Set a custom physical guest memory allocator.
998 * Accelerators with unusual needs may need this. Hopefully, we can
999 * get rid of it eventually.
1001 void phys_mem_set_alloc(void *(*alloc
)(size_t, uint64_t *align
))
1003 phys_mem_alloc
= alloc
;
1006 static uint16_t phys_section_add(PhysPageMap
*map
,
1007 MemoryRegionSection
*section
)
1009 /* The physical section number is ORed with a page-aligned
1010 * pointer to produce the iotlb entries. Thus it should
1011 * never overflow into the page-aligned value.
1013 assert(map
->sections_nb
< TARGET_PAGE_SIZE
);
1015 if (map
->sections_nb
== map
->sections_nb_alloc
) {
1016 map
->sections_nb_alloc
= MAX(map
->sections_nb_alloc
* 2, 16);
1017 map
->sections
= g_renew(MemoryRegionSection
, map
->sections
,
1018 map
->sections_nb_alloc
);
1020 map
->sections
[map
->sections_nb
] = *section
;
1021 memory_region_ref(section
->mr
);
1022 return map
->sections_nb
++;
1025 static void phys_section_destroy(MemoryRegion
*mr
)
1027 memory_region_unref(mr
);
1030 subpage_t
*subpage
= container_of(mr
, subpage_t
, iomem
);
1031 object_unref(OBJECT(&subpage
->iomem
));
1036 static void phys_sections_free(PhysPageMap
*map
)
1038 while (map
->sections_nb
> 0) {
1039 MemoryRegionSection
*section
= &map
->sections
[--map
->sections_nb
];
1040 phys_section_destroy(section
->mr
);
1042 g_free(map
->sections
);
1046 static void register_subpage(AddressSpaceDispatch
*d
, MemoryRegionSection
*section
)
1049 hwaddr base
= section
->offset_within_address_space
1051 MemoryRegionSection
*existing
= phys_page_find(d
->phys_map
, base
,
1052 d
->map
.nodes
, d
->map
.sections
);
1053 MemoryRegionSection subsection
= {
1054 .offset_within_address_space
= base
,
1055 .size
= int128_make64(TARGET_PAGE_SIZE
),
1059 assert(existing
->mr
->subpage
|| existing
->mr
== &io_mem_unassigned
);
1061 if (!(existing
->mr
->subpage
)) {
1062 subpage
= subpage_init(d
->as
, base
);
1063 subsection
.address_space
= d
->as
;
1064 subsection
.mr
= &subpage
->iomem
;
1065 phys_page_set(d
, base
>> TARGET_PAGE_BITS
, 1,
1066 phys_section_add(&d
->map
, &subsection
));
1068 subpage
= container_of(existing
->mr
, subpage_t
, iomem
);
1070 start
= section
->offset_within_address_space
& ~TARGET_PAGE_MASK
;
1071 end
= start
+ int128_get64(section
->size
) - 1;
1072 subpage_register(subpage
, start
, end
,
1073 phys_section_add(&d
->map
, section
));
1077 static void register_multipage(AddressSpaceDispatch
*d
,
1078 MemoryRegionSection
*section
)
1080 hwaddr start_addr
= section
->offset_within_address_space
;
1081 uint16_t section_index
= phys_section_add(&d
->map
, section
);
1082 uint64_t num_pages
= int128_get64(int128_rshift(section
->size
,
1086 phys_page_set(d
, start_addr
>> TARGET_PAGE_BITS
, num_pages
, section_index
);
1089 static void mem_add(MemoryListener
*listener
, MemoryRegionSection
*section
)
1091 AddressSpace
*as
= container_of(listener
, AddressSpace
, dispatch_listener
);
1092 AddressSpaceDispatch
*d
= as
->next_dispatch
;
1093 MemoryRegionSection now
= *section
, remain
= *section
;
1094 Int128 page_size
= int128_make64(TARGET_PAGE_SIZE
);
1096 if (now
.offset_within_address_space
& ~TARGET_PAGE_MASK
) {
1097 uint64_t left
= TARGET_PAGE_ALIGN(now
.offset_within_address_space
)
1098 - now
.offset_within_address_space
;
1100 now
.size
= int128_min(int128_make64(left
), now
.size
);
1101 register_subpage(d
, &now
);
1103 now
.size
= int128_zero();
1105 while (int128_ne(remain
.size
, now
.size
)) {
1106 remain
.size
= int128_sub(remain
.size
, now
.size
);
1107 remain
.offset_within_address_space
+= int128_get64(now
.size
);
1108 remain
.offset_within_region
+= int128_get64(now
.size
);
1110 if (int128_lt(remain
.size
, page_size
)) {
1111 register_subpage(d
, &now
);
1112 } else if (remain
.offset_within_address_space
& ~TARGET_PAGE_MASK
) {
1113 now
.size
= page_size
;
1114 register_subpage(d
, &now
);
1116 now
.size
= int128_and(now
.size
, int128_neg(page_size
));
1117 register_multipage(d
, &now
);
1122 void qemu_flush_coalesced_mmio_buffer(void)
1125 kvm_flush_coalesced_mmio_buffer();
1128 void qemu_mutex_lock_ramlist(void)
1130 qemu_mutex_lock(&ram_list
.mutex
);
1133 void qemu_mutex_unlock_ramlist(void)
1135 qemu_mutex_unlock(&ram_list
.mutex
);
1140 #include <sys/vfs.h>
1142 #define HUGETLBFS_MAGIC 0x958458f6
1144 static long gethugepagesize(const char *path
, Error
**errp
)
1150 ret
= statfs(path
, &fs
);
1151 } while (ret
!= 0 && errno
== EINTR
);
1154 error_setg_errno(errp
, errno
, "failed to get page size of file %s",
1159 if (fs
.f_type
!= HUGETLBFS_MAGIC
)
1160 fprintf(stderr
, "Warning: path not on HugeTLBFS: %s\n", path
);
1165 static void *file_ram_alloc(RAMBlock
*block
,
1171 char *sanitized_name
;
1173 void * volatile area
= NULL
;
1176 Error
*local_err
= NULL
;
1178 hpagesize
= gethugepagesize(path
, &local_err
);
1180 error_propagate(errp
, local_err
);
1183 block
->mr
->align
= hpagesize
;
1185 if (memory
< hpagesize
) {
1186 error_setg(errp
, "memory size 0x" RAM_ADDR_FMT
" must be equal to "
1187 "or larger than huge page size 0x%" PRIx64
,
1192 if (kvm_enabled() && !kvm_has_sync_mmu()) {
1194 "host lacks kvm mmu notifiers, -mem-path unsupported");
1198 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
1199 sanitized_name
= g_strdup(memory_region_name(block
->mr
));
1200 for (c
= sanitized_name
; *c
!= '\0'; c
++) {
1205 filename
= g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path
,
1207 g_free(sanitized_name
);
1209 fd
= mkstemp(filename
);
1211 error_setg_errno(errp
, errno
,
1212 "unable to create backing store for hugepages");
1219 memory
= (memory
+hpagesize
-1) & ~(hpagesize
-1);
1222 * ftruncate is not supported by hugetlbfs in older
1223 * hosts, so don't bother bailing out on errors.
1224 * If anything goes wrong with it under other filesystems,
1227 if (ftruncate(fd
, memory
)) {
1228 perror("ftruncate");
1231 area
= mmap(0, memory
, PROT_READ
| PROT_WRITE
,
1232 (block
->flags
& RAM_SHARED
? MAP_SHARED
: MAP_PRIVATE
),
1234 if (area
== MAP_FAILED
) {
1235 error_setg_errno(errp
, errno
,
1236 "unable to map backing store for hugepages");
1242 os_mem_prealloc(fd
, area
, memory
);
1250 error_report("%s", error_get_pretty(*errp
));
1257 /* Called with the ramlist lock held. */
1258 static ram_addr_t
find_ram_offset(ram_addr_t size
)
1260 RAMBlock
*block
, *next_block
;
1261 ram_addr_t offset
= RAM_ADDR_MAX
, mingap
= RAM_ADDR_MAX
;
1263 assert(size
!= 0); /* it would hand out same offset multiple times */
1265 if (QLIST_EMPTY_RCU(&ram_list
.blocks
)) {
1269 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1270 ram_addr_t end
, next
= RAM_ADDR_MAX
;
1272 end
= block
->offset
+ block
->max_length
;
1274 QLIST_FOREACH_RCU(next_block
, &ram_list
.blocks
, next
) {
1275 if (next_block
->offset
>= end
) {
1276 next
= MIN(next
, next_block
->offset
);
1279 if (next
- end
>= size
&& next
- end
< mingap
) {
1281 mingap
= next
- end
;
1285 if (offset
== RAM_ADDR_MAX
) {
1286 fprintf(stderr
, "Failed to find gap of requested size: %" PRIu64
"\n",
1294 ram_addr_t
last_ram_offset(void)
1297 ram_addr_t last
= 0;
1300 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1301 last
= MAX(last
, block
->offset
+ block
->max_length
);
1307 static void qemu_ram_setup_dump(void *addr
, ram_addr_t size
)
1311 /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
1312 if (!machine_dump_guest_core(current_machine
)) {
1313 ret
= qemu_madvise(addr
, size
, QEMU_MADV_DONTDUMP
);
1315 perror("qemu_madvise");
1316 fprintf(stderr
, "madvise doesn't support MADV_DONTDUMP, "
1317 "but dump_guest_core=off specified\n");
1322 /* Called within an RCU critical section, or while the ramlist lock
1325 static RAMBlock
*find_ram_block(ram_addr_t addr
)
1329 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1330 if (block
->offset
== addr
) {
1338 /* Called with iothread lock held. */
1339 void qemu_ram_set_idstr(ram_addr_t addr
, const char *name
, DeviceState
*dev
)
1341 RAMBlock
*new_block
, *block
;
1344 new_block
= find_ram_block(addr
);
1346 assert(!new_block
->idstr
[0]);
1349 char *id
= qdev_get_dev_path(dev
);
1351 snprintf(new_block
->idstr
, sizeof(new_block
->idstr
), "%s/", id
);
1355 pstrcat(new_block
->idstr
, sizeof(new_block
->idstr
), name
);
1357 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1358 if (block
!= new_block
&& !strcmp(block
->idstr
, new_block
->idstr
)) {
1359 fprintf(stderr
, "RAMBlock \"%s\" already registered, abort!\n",
1367 /* Called with iothread lock held. */
1368 void qemu_ram_unset_idstr(ram_addr_t addr
)
1372 /* FIXME: arch_init.c assumes that this is not called throughout
1373 * migration. Ignore the problem since hot-unplug during migration
1374 * does not work anyway.
1378 block
= find_ram_block(addr
);
1380 memset(block
->idstr
, 0, sizeof(block
->idstr
));
1385 static int memory_try_enable_merging(void *addr
, size_t len
)
1387 if (!machine_mem_merge(current_machine
)) {
1388 /* disabled by the user */
1392 return qemu_madvise(addr
, len
, QEMU_MADV_MERGEABLE
);
1395 /* Only legal before guest might have detected the memory size: e.g. on
1396 * incoming migration, or right after reset.
1398 * As memory core doesn't know how is memory accessed, it is up to
1399 * resize callback to update device state and/or add assertions to detect
1400 * misuse, if necessary.
1402 int qemu_ram_resize(ram_addr_t base
, ram_addr_t newsize
, Error
**errp
)
1404 RAMBlock
*block
= find_ram_block(base
);
1408 newsize
= TARGET_PAGE_ALIGN(newsize
);
1410 if (block
->used_length
== newsize
) {
1414 if (!(block
->flags
& RAM_RESIZEABLE
)) {
1415 error_setg_errno(errp
, EINVAL
,
1416 "Length mismatch: %s: 0x" RAM_ADDR_FMT
1417 " in != 0x" RAM_ADDR_FMT
, block
->idstr
,
1418 newsize
, block
->used_length
);
1422 if (block
->max_length
< newsize
) {
1423 error_setg_errno(errp
, EINVAL
,
1424 "Length too large: %s: 0x" RAM_ADDR_FMT
1425 " > 0x" RAM_ADDR_FMT
, block
->idstr
,
1426 newsize
, block
->max_length
);
1430 cpu_physical_memory_clear_dirty_range(block
->offset
, block
->used_length
);
1431 block
->used_length
= newsize
;
1432 cpu_physical_memory_set_dirty_range(block
->offset
, block
->used_length
,
1434 memory_region_set_size(block
->mr
, newsize
);
1435 if (block
->resized
) {
1436 block
->resized(block
->idstr
, newsize
, block
->host
);
1441 static ram_addr_t
ram_block_add(RAMBlock
*new_block
, Error
**errp
)
1444 RAMBlock
*last_block
= NULL
;
1445 ram_addr_t old_ram_size
, new_ram_size
;
1447 old_ram_size
= last_ram_offset() >> TARGET_PAGE_BITS
;
1449 qemu_mutex_lock_ramlist();
1450 new_block
->offset
= find_ram_offset(new_block
->max_length
);
1452 if (!new_block
->host
) {
1453 if (xen_enabled()) {
1454 xen_ram_alloc(new_block
->offset
, new_block
->max_length
,
1457 new_block
->host
= phys_mem_alloc(new_block
->max_length
,
1458 &new_block
->mr
->align
);
1459 if (!new_block
->host
) {
1460 error_setg_errno(errp
, errno
,
1461 "cannot set up guest memory '%s'",
1462 memory_region_name(new_block
->mr
));
1463 qemu_mutex_unlock_ramlist();
1466 memory_try_enable_merging(new_block
->host
, new_block
->max_length
);
1470 new_ram_size
= MAX(old_ram_size
,
1471 (new_block
->offset
+ new_block
->max_length
) >> TARGET_PAGE_BITS
);
1472 if (new_ram_size
> old_ram_size
) {
1473 migration_bitmap_extend(old_ram_size
, new_ram_size
);
1475 /* Keep the list sorted from biggest to smallest block. Unlike QTAILQ,
1476 * QLIST (which has an RCU-friendly variant) does not have insertion at
1477 * tail, so save the last element in last_block.
1479 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1481 if (block
->max_length
< new_block
->max_length
) {
1486 QLIST_INSERT_BEFORE_RCU(block
, new_block
, next
);
1487 } else if (last_block
) {
1488 QLIST_INSERT_AFTER_RCU(last_block
, new_block
, next
);
1489 } else { /* list is empty */
1490 QLIST_INSERT_HEAD_RCU(&ram_list
.blocks
, new_block
, next
);
1492 ram_list
.mru_block
= NULL
;
1494 /* Write list before version */
1497 qemu_mutex_unlock_ramlist();
1499 new_ram_size
= last_ram_offset() >> TARGET_PAGE_BITS
;
1501 if (new_ram_size
> old_ram_size
) {
1504 /* ram_list.dirty_memory[] is protected by the iothread lock. */
1505 for (i
= 0; i
< DIRTY_MEMORY_NUM
; i
++) {
1506 ram_list
.dirty_memory
[i
] =
1507 bitmap_zero_extend(ram_list
.dirty_memory
[i
],
1508 old_ram_size
, new_ram_size
);
1511 cpu_physical_memory_set_dirty_range(new_block
->offset
,
1512 new_block
->used_length
,
1515 if (new_block
->host
) {
1516 qemu_ram_setup_dump(new_block
->host
, new_block
->max_length
);
1517 qemu_madvise(new_block
->host
, new_block
->max_length
, QEMU_MADV_HUGEPAGE
);
1518 qemu_madvise(new_block
->host
, new_block
->max_length
, QEMU_MADV_DONTFORK
);
1519 if (kvm_enabled()) {
1520 kvm_setup_guest_memory(new_block
->host
, new_block
->max_length
);
1524 return new_block
->offset
;
1528 ram_addr_t
qemu_ram_alloc_from_file(ram_addr_t size
, MemoryRegion
*mr
,
1529 bool share
, const char *mem_path
,
1532 RAMBlock
*new_block
;
1534 Error
*local_err
= NULL
;
1536 if (xen_enabled()) {
1537 error_setg(errp
, "-mem-path not supported with Xen");
1541 if (phys_mem_alloc
!= qemu_anon_ram_alloc
) {
1543 * file_ram_alloc() needs to allocate just like
1544 * phys_mem_alloc, but we haven't bothered to provide
1548 "-mem-path not supported with this accelerator");
1552 size
= TARGET_PAGE_ALIGN(size
);
1553 new_block
= g_malloc0(sizeof(*new_block
));
1555 new_block
->used_length
= size
;
1556 new_block
->max_length
= size
;
1557 new_block
->flags
= share
? RAM_SHARED
: 0;
1558 new_block
->host
= file_ram_alloc(new_block
, size
,
1560 if (!new_block
->host
) {
1565 addr
= ram_block_add(new_block
, &local_err
);
1568 error_propagate(errp
, local_err
);
1576 ram_addr_t
qemu_ram_alloc_internal(ram_addr_t size
, ram_addr_t max_size
,
1577 void (*resized
)(const char*,
1580 void *host
, bool resizeable
,
1581 MemoryRegion
*mr
, Error
**errp
)
1583 RAMBlock
*new_block
;
1585 Error
*local_err
= NULL
;
1587 size
= TARGET_PAGE_ALIGN(size
);
1588 max_size
= TARGET_PAGE_ALIGN(max_size
);
1589 new_block
= g_malloc0(sizeof(*new_block
));
1591 new_block
->resized
= resized
;
1592 new_block
->used_length
= size
;
1593 new_block
->max_length
= max_size
;
1594 assert(max_size
>= size
);
1596 new_block
->host
= host
;
1598 new_block
->flags
|= RAM_PREALLOC
;
1601 new_block
->flags
|= RAM_RESIZEABLE
;
1603 addr
= ram_block_add(new_block
, &local_err
);
1606 error_propagate(errp
, local_err
);
1612 ram_addr_t
qemu_ram_alloc_from_ptr(ram_addr_t size
, void *host
,
1613 MemoryRegion
*mr
, Error
**errp
)
1615 return qemu_ram_alloc_internal(size
, size
, NULL
, host
, false, mr
, errp
);
1618 ram_addr_t
qemu_ram_alloc(ram_addr_t size
, MemoryRegion
*mr
, Error
**errp
)
1620 return qemu_ram_alloc_internal(size
, size
, NULL
, NULL
, false, mr
, errp
);
1623 ram_addr_t
qemu_ram_alloc_resizeable(ram_addr_t size
, ram_addr_t maxsz
,
1624 void (*resized
)(const char*,
1627 MemoryRegion
*mr
, Error
**errp
)
1629 return qemu_ram_alloc_internal(size
, maxsz
, resized
, NULL
, true, mr
, errp
);
1632 void qemu_ram_free_from_ptr(ram_addr_t addr
)
1636 qemu_mutex_lock_ramlist();
1637 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1638 if (addr
== block
->offset
) {
1639 QLIST_REMOVE_RCU(block
, next
);
1640 ram_list
.mru_block
= NULL
;
1641 /* Write list before version */
1644 g_free_rcu(block
, rcu
);
1648 qemu_mutex_unlock_ramlist();
1651 static void reclaim_ramblock(RAMBlock
*block
)
1653 if (block
->flags
& RAM_PREALLOC
) {
1655 } else if (xen_enabled()) {
1656 xen_invalidate_map_cache_entry(block
->host
);
1658 } else if (block
->fd
>= 0) {
1659 munmap(block
->host
, block
->max_length
);
1663 qemu_anon_ram_free(block
->host
, block
->max_length
);
1668 void qemu_ram_free(ram_addr_t addr
)
1672 qemu_mutex_lock_ramlist();
1673 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1674 if (addr
== block
->offset
) {
1675 QLIST_REMOVE_RCU(block
, next
);
1676 ram_list
.mru_block
= NULL
;
1677 /* Write list before version */
1680 call_rcu(block
, reclaim_ramblock
, rcu
);
1684 qemu_mutex_unlock_ramlist();
1688 void qemu_ram_remap(ram_addr_t addr
, ram_addr_t length
)
1695 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1696 offset
= addr
- block
->offset
;
1697 if (offset
< block
->max_length
) {
1698 vaddr
= ramblock_ptr(block
, offset
);
1699 if (block
->flags
& RAM_PREALLOC
) {
1701 } else if (xen_enabled()) {
1705 if (block
->fd
>= 0) {
1706 flags
|= (block
->flags
& RAM_SHARED
?
1707 MAP_SHARED
: MAP_PRIVATE
);
1708 area
= mmap(vaddr
, length
, PROT_READ
| PROT_WRITE
,
1709 flags
, block
->fd
, offset
);
1712 * Remap needs to match alloc. Accelerators that
1713 * set phys_mem_alloc never remap. If they did,
1714 * we'd need a remap hook here.
1716 assert(phys_mem_alloc
== qemu_anon_ram_alloc
);
1718 flags
|= MAP_PRIVATE
| MAP_ANONYMOUS
;
1719 area
= mmap(vaddr
, length
, PROT_READ
| PROT_WRITE
,
1722 if (area
!= vaddr
) {
1723 fprintf(stderr
, "Could not remap addr: "
1724 RAM_ADDR_FMT
"@" RAM_ADDR_FMT
"\n",
1728 memory_try_enable_merging(vaddr
, length
);
1729 qemu_ram_setup_dump(vaddr
, length
);
1734 #endif /* !_WIN32 */
1736 int qemu_get_ram_fd(ram_addr_t addr
)
1742 block
= qemu_get_ram_block(addr
);
1748 void *qemu_get_ram_block_host_ptr(ram_addr_t addr
)
1754 block
= qemu_get_ram_block(addr
);
1755 ptr
= ramblock_ptr(block
, 0);
1760 /* Return a host pointer to ram allocated with qemu_ram_alloc.
1761 * This should not be used for general purpose DMA. Use address_space_map
1762 * or address_space_rw instead. For local memory (e.g. video ram) that the
1763 * device owns, use memory_region_get_ram_ptr.
1765 * By the time this function returns, the returned pointer is not protected
1766 * by RCU anymore. If the caller is not within an RCU critical section and
1767 * does not hold the iothread lock, it must have other means of protecting the
1768 * pointer, such as a reference to the region that includes the incoming
1771 void *qemu_get_ram_ptr(ram_addr_t addr
)
1777 block
= qemu_get_ram_block(addr
);
1779 if (xen_enabled() && block
->host
== NULL
) {
1780 /* We need to check if the requested address is in the RAM
1781 * because we don't want to map the entire memory in QEMU.
1782 * In that case just map until the end of the page.
1784 if (block
->offset
== 0) {
1785 ptr
= xen_map_cache(addr
, 0, 0);
1789 block
->host
= xen_map_cache(block
->offset
, block
->max_length
, 1);
1791 ptr
= ramblock_ptr(block
, addr
- block
->offset
);
1798 /* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
1799 * but takes a size argument.
1801 * By the time this function returns, the returned pointer is not protected
1802 * by RCU anymore. If the caller is not within an RCU critical section and
1803 * does not hold the iothread lock, it must have other means of protecting the
1804 * pointer, such as a reference to the region that includes the incoming
1807 static void *qemu_ram_ptr_length(ram_addr_t addr
, hwaddr
*size
)
1813 if (xen_enabled()) {
1814 return xen_map_cache(addr
, *size
, 1);
1818 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1819 if (addr
- block
->offset
< block
->max_length
) {
1820 if (addr
- block
->offset
+ *size
> block
->max_length
)
1821 *size
= block
->max_length
- addr
+ block
->offset
;
1822 ptr
= ramblock_ptr(block
, addr
- block
->offset
);
1828 fprintf(stderr
, "Bad ram offset %" PRIx64
"\n", (uint64_t)addr
);
1833 /* Some of the softmmu routines need to translate from a host pointer
1834 * (typically a TLB entry) back to a ram offset.
1836 * By the time this function returns, the returned pointer is not protected
1837 * by RCU anymore. If the caller is not within an RCU critical section and
1838 * does not hold the iothread lock, it must have other means of protecting the
1839 * pointer, such as a reference to the region that includes the incoming
1842 MemoryRegion
*qemu_ram_addr_from_host(void *ptr
, ram_addr_t
*ram_addr
)
1845 uint8_t *host
= ptr
;
1848 if (xen_enabled()) {
1850 *ram_addr
= xen_ram_addr_from_mapcache(ptr
);
1851 mr
= qemu_get_ram_block(*ram_addr
)->mr
;
1857 block
= atomic_rcu_read(&ram_list
.mru_block
);
1858 if (block
&& block
->host
&& host
- block
->host
< block
->max_length
) {
1862 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1863 /* This case append when the block is not mapped. */
1864 if (block
->host
== NULL
) {
1867 if (host
- block
->host
< block
->max_length
) {
1876 *ram_addr
= block
->offset
+ (host
- block
->host
);
1882 static void notdirty_mem_write(void *opaque
, hwaddr ram_addr
,
1883 uint64_t val
, unsigned size
)
1885 if (!cpu_physical_memory_get_dirty_flag(ram_addr
, DIRTY_MEMORY_CODE
)) {
1886 tb_invalidate_phys_page_fast(ram_addr
, size
);
1890 stb_p(qemu_get_ram_ptr(ram_addr
), val
);
1893 stw_p(qemu_get_ram_ptr(ram_addr
), val
);
1896 stl_p(qemu_get_ram_ptr(ram_addr
), val
);
1901 /* Set both VGA and migration bits for simplicity and to remove
1902 * the notdirty callback faster.
1904 cpu_physical_memory_set_dirty_range(ram_addr
, size
,
1905 DIRTY_CLIENTS_NOCODE
);
1906 /* we remove the notdirty callback only if the code has been
1908 if (!cpu_physical_memory_is_clean(ram_addr
)) {
1909 CPUArchState
*env
= current_cpu
->env_ptr
;
1910 tlb_set_dirty(env
, current_cpu
->mem_io_vaddr
);
1914 static bool notdirty_mem_accepts(void *opaque
, hwaddr addr
,
1915 unsigned size
, bool is_write
)
1920 static const MemoryRegionOps notdirty_mem_ops
= {
1921 .write
= notdirty_mem_write
,
1922 .valid
.accepts
= notdirty_mem_accepts
,
1923 .endianness
= DEVICE_NATIVE_ENDIAN
,
1926 /* Generate a debug exception if a watchpoint has been hit. */
1927 static void check_watchpoint(int offset
, int len
, MemTxAttrs attrs
, int flags
)
1929 CPUState
*cpu
= current_cpu
;
1930 CPUArchState
*env
= cpu
->env_ptr
;
1931 target_ulong pc
, cs_base
;
1936 if (cpu
->watchpoint_hit
) {
1937 /* We re-entered the check after replacing the TB. Now raise
1938 * the debug interrupt so that is will trigger after the
1939 * current instruction. */
1940 cpu_interrupt(cpu
, CPU_INTERRUPT_DEBUG
);
1943 vaddr
= (cpu
->mem_io_vaddr
& TARGET_PAGE_MASK
) + offset
;
1944 QTAILQ_FOREACH(wp
, &cpu
->watchpoints
, entry
) {
1945 if (cpu_watchpoint_address_matches(wp
, vaddr
, len
)
1946 && (wp
->flags
& flags
)) {
1947 if (flags
== BP_MEM_READ
) {
1948 wp
->flags
|= BP_WATCHPOINT_HIT_READ
;
1950 wp
->flags
|= BP_WATCHPOINT_HIT_WRITE
;
1952 wp
->hitaddr
= vaddr
;
1953 wp
->hitattrs
= attrs
;
1954 if (!cpu
->watchpoint_hit
) {
1955 cpu
->watchpoint_hit
= wp
;
1956 tb_check_watchpoint(cpu
);
1957 if (wp
->flags
& BP_STOP_BEFORE_ACCESS
) {
1958 cpu
->exception_index
= EXCP_DEBUG
;
1961 cpu_get_tb_cpu_state(env
, &pc
, &cs_base
, &cpu_flags
);
1962 tb_gen_code(cpu
, pc
, cs_base
, cpu_flags
, 1);
1963 cpu_resume_from_signal(cpu
, NULL
);
1967 wp
->flags
&= ~BP_WATCHPOINT_HIT
;
1972 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
1973 so these check for a hit then pass through to the normal out-of-line
1975 static MemTxResult
watch_mem_read(void *opaque
, hwaddr addr
, uint64_t *pdata
,
1976 unsigned size
, MemTxAttrs attrs
)
1981 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, size
, attrs
, BP_MEM_READ
);
1984 data
= address_space_ldub(&address_space_memory
, addr
, attrs
, &res
);
1987 data
= address_space_lduw(&address_space_memory
, addr
, attrs
, &res
);
1990 data
= address_space_ldl(&address_space_memory
, addr
, attrs
, &res
);
1998 static MemTxResult
watch_mem_write(void *opaque
, hwaddr addr
,
1999 uint64_t val
, unsigned size
,
2004 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, size
, attrs
, BP_MEM_WRITE
);
2007 address_space_stb(&address_space_memory
, addr
, val
, attrs
, &res
);
2010 address_space_stw(&address_space_memory
, addr
, val
, attrs
, &res
);
2013 address_space_stl(&address_space_memory
, addr
, val
, attrs
, &res
);
2020 static const MemoryRegionOps watch_mem_ops
= {
2021 .read_with_attrs
= watch_mem_read
,
2022 .write_with_attrs
= watch_mem_write
,
2023 .endianness
= DEVICE_NATIVE_ENDIAN
,
2026 static MemTxResult
subpage_read(void *opaque
, hwaddr addr
, uint64_t *data
,
2027 unsigned len
, MemTxAttrs attrs
)
2029 subpage_t
*subpage
= opaque
;
2033 #if defined(DEBUG_SUBPAGE)
2034 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
"\n", __func__
,
2035 subpage
, len
, addr
);
2037 res
= address_space_read(subpage
->as
, addr
+ subpage
->base
,
2044 *data
= ldub_p(buf
);
2047 *data
= lduw_p(buf
);
2060 static MemTxResult
subpage_write(void *opaque
, hwaddr addr
,
2061 uint64_t value
, unsigned len
, MemTxAttrs attrs
)
2063 subpage_t
*subpage
= opaque
;
2066 #if defined(DEBUG_SUBPAGE)
2067 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
2068 " value %"PRIx64
"\n",
2069 __func__
, subpage
, len
, addr
, value
);
2087 return address_space_write(subpage
->as
, addr
+ subpage
->base
,
2091 static bool subpage_accepts(void *opaque
, hwaddr addr
,
2092 unsigned len
, bool is_write
)
2094 subpage_t
*subpage
= opaque
;
2095 #if defined(DEBUG_SUBPAGE)
2096 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx
"\n",
2097 __func__
, subpage
, is_write
? 'w' : 'r', len
, addr
);
2100 return address_space_access_valid(subpage
->as
, addr
+ subpage
->base
,
2104 static const MemoryRegionOps subpage_ops
= {
2105 .read_with_attrs
= subpage_read
,
2106 .write_with_attrs
= subpage_write
,
2107 .impl
.min_access_size
= 1,
2108 .impl
.max_access_size
= 8,
2109 .valid
.min_access_size
= 1,
2110 .valid
.max_access_size
= 8,
2111 .valid
.accepts
= subpage_accepts
,
2112 .endianness
= DEVICE_NATIVE_ENDIAN
,
2115 static int subpage_register (subpage_t
*mmio
, uint32_t start
, uint32_t end
,
2120 if (start
>= TARGET_PAGE_SIZE
|| end
>= TARGET_PAGE_SIZE
)
2122 idx
= SUBPAGE_IDX(start
);
2123 eidx
= SUBPAGE_IDX(end
);
2124 #if defined(DEBUG_SUBPAGE)
2125 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
2126 __func__
, mmio
, start
, end
, idx
, eidx
, section
);
2128 for (; idx
<= eidx
; idx
++) {
2129 mmio
->sub_section
[idx
] = section
;
2135 static subpage_t
*subpage_init(AddressSpace
*as
, hwaddr base
)
2139 mmio
= g_malloc0(sizeof(subpage_t
));
2143 memory_region_init_io(&mmio
->iomem
, NULL
, &subpage_ops
, mmio
,
2144 NULL
, TARGET_PAGE_SIZE
);
2145 mmio
->iomem
.subpage
= true;
2146 #if defined(DEBUG_SUBPAGE)
2147 printf("%s: %p base " TARGET_FMT_plx
" len %08x\n", __func__
,
2148 mmio
, base
, TARGET_PAGE_SIZE
);
2150 subpage_register(mmio
, 0, TARGET_PAGE_SIZE
-1, PHYS_SECTION_UNASSIGNED
);
2155 static uint16_t dummy_section(PhysPageMap
*map
, AddressSpace
*as
,
2159 MemoryRegionSection section
= {
2160 .address_space
= as
,
2162 .offset_within_address_space
= 0,
2163 .offset_within_region
= 0,
2164 .size
= int128_2_64(),
2167 return phys_section_add(map
, §ion
);
2170 MemoryRegion
*iotlb_to_region(CPUState
*cpu
, hwaddr index
)
2172 AddressSpaceDispatch
*d
= atomic_rcu_read(&cpu
->memory_dispatch
);
2173 MemoryRegionSection
*sections
= d
->map
.sections
;
2175 return sections
[index
& ~TARGET_PAGE_MASK
].mr
;
2178 static void io_mem_init(void)
2180 memory_region_init_io(&io_mem_rom
, NULL
, &unassigned_mem_ops
, NULL
, NULL
, UINT64_MAX
);
2181 memory_region_init_io(&io_mem_unassigned
, NULL
, &unassigned_mem_ops
, NULL
,
2183 memory_region_init_io(&io_mem_notdirty
, NULL
, ¬dirty_mem_ops
, NULL
,
2185 memory_region_init_io(&io_mem_watch
, NULL
, &watch_mem_ops
, NULL
,
2189 static void mem_begin(MemoryListener
*listener
)
2191 AddressSpace
*as
= container_of(listener
, AddressSpace
, dispatch_listener
);
2192 AddressSpaceDispatch
*d
= g_new0(AddressSpaceDispatch
, 1);
2195 n
= dummy_section(&d
->map
, as
, &io_mem_unassigned
);
2196 assert(n
== PHYS_SECTION_UNASSIGNED
);
2197 n
= dummy_section(&d
->map
, as
, &io_mem_notdirty
);
2198 assert(n
== PHYS_SECTION_NOTDIRTY
);
2199 n
= dummy_section(&d
->map
, as
, &io_mem_rom
);
2200 assert(n
== PHYS_SECTION_ROM
);
2201 n
= dummy_section(&d
->map
, as
, &io_mem_watch
);
2202 assert(n
== PHYS_SECTION_WATCH
);
2204 d
->phys_map
= (PhysPageEntry
) { .ptr
= PHYS_MAP_NODE_NIL
, .skip
= 1 };
2206 as
->next_dispatch
= d
;
2209 static void address_space_dispatch_free(AddressSpaceDispatch
*d
)
2211 phys_sections_free(&d
->map
);
2215 static void mem_commit(MemoryListener
*listener
)
2217 AddressSpace
*as
= container_of(listener
, AddressSpace
, dispatch_listener
);
2218 AddressSpaceDispatch
*cur
= as
->dispatch
;
2219 AddressSpaceDispatch
*next
= as
->next_dispatch
;
2221 phys_page_compact_all(next
, next
->map
.nodes_nb
);
2223 atomic_rcu_set(&as
->dispatch
, next
);
2225 call_rcu(cur
, address_space_dispatch_free
, rcu
);
2229 static void tcg_commit(MemoryListener
*listener
)
2233 /* since each CPU stores ram addresses in its TLB cache, we must
2234 reset the modified entries */
2237 /* FIXME: Disentangle the cpu.h circular files deps so we can
2238 directly get the right CPU from listener. */
2239 if (cpu
->tcg_as_listener
!= listener
) {
2242 cpu_reload_memory_map(cpu
);
2246 void address_space_init_dispatch(AddressSpace
*as
)
2248 as
->dispatch
= NULL
;
2249 as
->dispatch_listener
= (MemoryListener
) {
2251 .commit
= mem_commit
,
2252 .region_add
= mem_add
,
2253 .region_nop
= mem_add
,
2256 memory_listener_register(&as
->dispatch_listener
, as
);
2259 void address_space_unregister(AddressSpace
*as
)
2261 memory_listener_unregister(&as
->dispatch_listener
);
2264 void address_space_destroy_dispatch(AddressSpace
*as
)
2266 AddressSpaceDispatch
*d
= as
->dispatch
;
2268 atomic_rcu_set(&as
->dispatch
, NULL
);
2270 call_rcu(d
, address_space_dispatch_free
, rcu
);
2274 static void memory_map_init(void)
2276 system_memory
= g_malloc(sizeof(*system_memory
));
2278 memory_region_init(system_memory
, NULL
, "system", UINT64_MAX
);
2279 address_space_init(&address_space_memory
, system_memory
, "memory");
2281 system_io
= g_malloc(sizeof(*system_io
));
2282 memory_region_init_io(system_io
, NULL
, &unassigned_io_ops
, NULL
, "io",
2284 address_space_init(&address_space_io
, system_io
, "I/O");
2287 MemoryRegion
*get_system_memory(void)
2289 return system_memory
;
2292 MemoryRegion
*get_system_io(void)
2297 #endif /* !defined(CONFIG_USER_ONLY) */
2299 /* physical memory access (slow version, mainly for debug) */
2300 #if defined(CONFIG_USER_ONLY)
2301 int cpu_memory_rw_debug(CPUState
*cpu
, target_ulong addr
,
2302 uint8_t *buf
, int len
, int is_write
)
2309 page
= addr
& TARGET_PAGE_MASK
;
2310 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
2313 flags
= page_get_flags(page
);
2314 if (!(flags
& PAGE_VALID
))
2317 if (!(flags
& PAGE_WRITE
))
2319 /* XXX: this code should not depend on lock_user */
2320 if (!(p
= lock_user(VERIFY_WRITE
, addr
, l
, 0)))
2323 unlock_user(p
, addr
, l
);
2325 if (!(flags
& PAGE_READ
))
2327 /* XXX: this code should not depend on lock_user */
2328 if (!(p
= lock_user(VERIFY_READ
, addr
, l
, 1)))
2331 unlock_user(p
, addr
, 0);
2342 static void invalidate_and_set_dirty(MemoryRegion
*mr
, hwaddr addr
,
2345 uint8_t dirty_log_mask
= memory_region_get_dirty_log_mask(mr
);
2346 /* No early return if dirty_log_mask is or becomes 0, because
2347 * cpu_physical_memory_set_dirty_range will still call
2348 * xen_modified_memory.
2350 if (dirty_log_mask
) {
2352 cpu_physical_memory_range_includes_clean(addr
, length
, dirty_log_mask
);
2354 if (dirty_log_mask
& (1 << DIRTY_MEMORY_CODE
)) {
2355 tb_invalidate_phys_range(addr
, addr
+ length
);
2356 dirty_log_mask
&= ~(1 << DIRTY_MEMORY_CODE
);
2358 cpu_physical_memory_set_dirty_range(addr
, length
, dirty_log_mask
);
2361 static int memory_access_size(MemoryRegion
*mr
, unsigned l
, hwaddr addr
)
2363 unsigned access_size_max
= mr
->ops
->valid
.max_access_size
;
2365 /* Regions are assumed to support 1-4 byte accesses unless
2366 otherwise specified. */
2367 if (access_size_max
== 0) {
2368 access_size_max
= 4;
2371 /* Bound the maximum access by the alignment of the address. */
2372 if (!mr
->ops
->impl
.unaligned
) {
2373 unsigned align_size_max
= addr
& -addr
;
2374 if (align_size_max
!= 0 && align_size_max
< access_size_max
) {
2375 access_size_max
= align_size_max
;
2379 /* Don't attempt accesses larger than the maximum. */
2380 if (l
> access_size_max
) {
2381 l
= access_size_max
;
2384 l
= 1 << (qemu_fls(l
) - 1);
2390 static bool prepare_mmio_access(MemoryRegion
*mr
)
2392 bool unlocked
= !qemu_mutex_iothread_locked();
2393 bool release_lock
= false;
2395 if (unlocked
&& mr
->global_locking
) {
2396 qemu_mutex_lock_iothread();
2398 release_lock
= true;
2400 if (mr
->flush_coalesced_mmio
) {
2402 qemu_mutex_lock_iothread();
2404 qemu_flush_coalesced_mmio_buffer();
2406 qemu_mutex_unlock_iothread();
2410 return release_lock
;
2413 MemTxResult
address_space_rw(AddressSpace
*as
, hwaddr addr
, MemTxAttrs attrs
,
2414 uint8_t *buf
, int len
, bool is_write
)
2421 MemTxResult result
= MEMTX_OK
;
2422 bool release_lock
= false;
2427 mr
= address_space_translate(as
, addr
, &addr1
, &l
, is_write
);
2430 if (!memory_access_is_direct(mr
, is_write
)) {
2431 release_lock
|= prepare_mmio_access(mr
);
2432 l
= memory_access_size(mr
, l
, addr1
);
2433 /* XXX: could force current_cpu to NULL to avoid
2437 /* 64 bit write access */
2439 result
|= memory_region_dispatch_write(mr
, addr1
, val
, 8,
2443 /* 32 bit write access */
2445 result
|= memory_region_dispatch_write(mr
, addr1
, val
, 4,
2449 /* 16 bit write access */
2451 result
|= memory_region_dispatch_write(mr
, addr1
, val
, 2,
2455 /* 8 bit write access */
2457 result
|= memory_region_dispatch_write(mr
, addr1
, val
, 1,
2464 addr1
+= memory_region_get_ram_addr(mr
);
2466 ptr
= qemu_get_ram_ptr(addr1
);
2467 memcpy(ptr
, buf
, l
);
2468 invalidate_and_set_dirty(mr
, addr1
, l
);
2471 if (!memory_access_is_direct(mr
, is_write
)) {
2473 release_lock
|= prepare_mmio_access(mr
);
2474 l
= memory_access_size(mr
, l
, addr1
);
2477 /* 64 bit read access */
2478 result
|= memory_region_dispatch_read(mr
, addr1
, &val
, 8,
2483 /* 32 bit read access */
2484 result
|= memory_region_dispatch_read(mr
, addr1
, &val
, 4,
2489 /* 16 bit read access */
2490 result
|= memory_region_dispatch_read(mr
, addr1
, &val
, 2,
2495 /* 8 bit read access */
2496 result
|= memory_region_dispatch_read(mr
, addr1
, &val
, 1,
2505 ptr
= qemu_get_ram_ptr(mr
->ram_addr
+ addr1
);
2506 memcpy(buf
, ptr
, l
);
2511 qemu_mutex_unlock_iothread();
2512 release_lock
= false;
2524 MemTxResult
address_space_write(AddressSpace
*as
, hwaddr addr
, MemTxAttrs attrs
,
2525 const uint8_t *buf
, int len
)
2527 return address_space_rw(as
, addr
, attrs
, (uint8_t *)buf
, len
, true);
2530 MemTxResult
address_space_read(AddressSpace
*as
, hwaddr addr
, MemTxAttrs attrs
,
2531 uint8_t *buf
, int len
)
2533 return address_space_rw(as
, addr
, attrs
, buf
, len
, false);
2537 void cpu_physical_memory_rw(hwaddr addr
, uint8_t *buf
,
2538 int len
, int is_write
)
2540 address_space_rw(&address_space_memory
, addr
, MEMTXATTRS_UNSPECIFIED
,
2541 buf
, len
, is_write
);
2544 enum write_rom_type
{
2549 static inline void cpu_physical_memory_write_rom_internal(AddressSpace
*as
,
2550 hwaddr addr
, const uint8_t *buf
, int len
, enum write_rom_type type
)
2560 mr
= address_space_translate(as
, addr
, &addr1
, &l
, true);
2562 if (!(memory_region_is_ram(mr
) ||
2563 memory_region_is_romd(mr
))) {
2564 l
= memory_access_size(mr
, l
, addr1
);
2566 addr1
+= memory_region_get_ram_addr(mr
);
2568 ptr
= qemu_get_ram_ptr(addr1
);
2571 memcpy(ptr
, buf
, l
);
2572 invalidate_and_set_dirty(mr
, addr1
, l
);
2575 flush_icache_range((uintptr_t)ptr
, (uintptr_t)ptr
+ l
);
2586 /* used for ROM loading : can write in RAM and ROM */
2587 void cpu_physical_memory_write_rom(AddressSpace
*as
, hwaddr addr
,
2588 const uint8_t *buf
, int len
)
2590 cpu_physical_memory_write_rom_internal(as
, addr
, buf
, len
, WRITE_DATA
);
2593 void cpu_flush_icache_range(hwaddr start
, int len
)
2596 * This function should do the same thing as an icache flush that was
2597 * triggered from within the guest. For TCG we are always cache coherent,
2598 * so there is no need to flush anything. For KVM / Xen we need to flush
2599 * the host's instruction cache at least.
2601 if (tcg_enabled()) {
2605 cpu_physical_memory_write_rom_internal(&address_space_memory
,
2606 start
, NULL
, len
, FLUSH_CACHE
);
2617 static BounceBuffer bounce
;
2619 typedef struct MapClient
{
2621 QLIST_ENTRY(MapClient
) link
;
2624 QemuMutex map_client_list_lock
;
2625 static QLIST_HEAD(map_client_list
, MapClient
) map_client_list
2626 = QLIST_HEAD_INITIALIZER(map_client_list
);
2628 static void cpu_unregister_map_client_do(MapClient
*client
)
2630 QLIST_REMOVE(client
, link
);
2634 static void cpu_notify_map_clients_locked(void)
2638 while (!QLIST_EMPTY(&map_client_list
)) {
2639 client
= QLIST_FIRST(&map_client_list
);
2640 qemu_bh_schedule(client
->bh
);
2641 cpu_unregister_map_client_do(client
);
2645 void cpu_register_map_client(QEMUBH
*bh
)
2647 MapClient
*client
= g_malloc(sizeof(*client
));
2649 qemu_mutex_lock(&map_client_list_lock
);
2651 QLIST_INSERT_HEAD(&map_client_list
, client
, link
);
2652 if (!atomic_read(&bounce
.in_use
)) {
2653 cpu_notify_map_clients_locked();
2655 qemu_mutex_unlock(&map_client_list_lock
);
2658 void cpu_exec_init_all(void)
2660 qemu_mutex_init(&ram_list
.mutex
);
2663 qemu_mutex_init(&map_client_list_lock
);
2666 void cpu_unregister_map_client(QEMUBH
*bh
)
2670 qemu_mutex_lock(&map_client_list_lock
);
2671 QLIST_FOREACH(client
, &map_client_list
, link
) {
2672 if (client
->bh
== bh
) {
2673 cpu_unregister_map_client_do(client
);
2677 qemu_mutex_unlock(&map_client_list_lock
);
2680 static void cpu_notify_map_clients(void)
2682 qemu_mutex_lock(&map_client_list_lock
);
2683 cpu_notify_map_clients_locked();
2684 qemu_mutex_unlock(&map_client_list_lock
);
2687 bool address_space_access_valid(AddressSpace
*as
, hwaddr addr
, int len
, bool is_write
)
2695 mr
= address_space_translate(as
, addr
, &xlat
, &l
, is_write
);
2696 if (!memory_access_is_direct(mr
, is_write
)) {
2697 l
= memory_access_size(mr
, l
, addr
);
2698 if (!memory_region_access_valid(mr
, xlat
, l
, is_write
)) {
2710 /* Map a physical memory region into a host virtual address.
2711 * May map a subset of the requested range, given by and returned in *plen.
2712 * May return NULL if resources needed to perform the mapping are exhausted.
2713 * Use only for reads OR writes - not for read-modify-write operations.
2714 * Use cpu_register_map_client() to know when retrying the map operation is
2715 * likely to succeed.
2717 void *address_space_map(AddressSpace
*as
,
2724 hwaddr l
, xlat
, base
;
2725 MemoryRegion
*mr
, *this_mr
;
2734 mr
= address_space_translate(as
, addr
, &xlat
, &l
, is_write
);
2736 if (!memory_access_is_direct(mr
, is_write
)) {
2737 if (atomic_xchg(&bounce
.in_use
, true)) {
2741 /* Avoid unbounded allocations */
2742 l
= MIN(l
, TARGET_PAGE_SIZE
);
2743 bounce
.buffer
= qemu_memalign(TARGET_PAGE_SIZE
, l
);
2747 memory_region_ref(mr
);
2750 address_space_read(as
, addr
, MEMTXATTRS_UNSPECIFIED
,
2756 return bounce
.buffer
;
2760 raddr
= memory_region_get_ram_addr(mr
);
2771 this_mr
= address_space_translate(as
, addr
, &xlat
, &l
, is_write
);
2772 if (this_mr
!= mr
|| xlat
!= base
+ done
) {
2777 memory_region_ref(mr
);
2780 return qemu_ram_ptr_length(raddr
+ base
, plen
);
2783 /* Unmaps a memory region previously mapped by address_space_map().
2784 * Will also mark the memory as dirty if is_write == 1. access_len gives
2785 * the amount of memory that was actually read or written by the caller.
2787 void address_space_unmap(AddressSpace
*as
, void *buffer
, hwaddr len
,
2788 int is_write
, hwaddr access_len
)
2790 if (buffer
!= bounce
.buffer
) {
2794 mr
= qemu_ram_addr_from_host(buffer
, &addr1
);
2797 invalidate_and_set_dirty(mr
, addr1
, access_len
);
2799 if (xen_enabled()) {
2800 xen_invalidate_map_cache_entry(buffer
);
2802 memory_region_unref(mr
);
2806 address_space_write(as
, bounce
.addr
, MEMTXATTRS_UNSPECIFIED
,
2807 bounce
.buffer
, access_len
);
2809 qemu_vfree(bounce
.buffer
);
2810 bounce
.buffer
= NULL
;
2811 memory_region_unref(bounce
.mr
);
2812 atomic_mb_set(&bounce
.in_use
, false);
2813 cpu_notify_map_clients();
2816 void *cpu_physical_memory_map(hwaddr addr
,
2820 return address_space_map(&address_space_memory
, addr
, plen
, is_write
);
2823 void cpu_physical_memory_unmap(void *buffer
, hwaddr len
,
2824 int is_write
, hwaddr access_len
)
2826 return address_space_unmap(&address_space_memory
, buffer
, len
, is_write
, access_len
);
2829 /* warning: addr must be aligned */
2830 static inline uint32_t address_space_ldl_internal(AddressSpace
*as
, hwaddr addr
,
2832 MemTxResult
*result
,
2833 enum device_endian endian
)
2841 bool release_lock
= false;
2844 mr
= address_space_translate(as
, addr
, &addr1
, &l
, false);
2845 if (l
< 4 || !memory_access_is_direct(mr
, false)) {
2846 release_lock
|= prepare_mmio_access(mr
);
2849 r
= memory_region_dispatch_read(mr
, addr1
, &val
, 4, attrs
);
2850 #if defined(TARGET_WORDS_BIGENDIAN)
2851 if (endian
== DEVICE_LITTLE_ENDIAN
) {
2855 if (endian
== DEVICE_BIG_ENDIAN
) {
2861 ptr
= qemu_get_ram_ptr((memory_region_get_ram_addr(mr
)
2865 case DEVICE_LITTLE_ENDIAN
:
2866 val
= ldl_le_p(ptr
);
2868 case DEVICE_BIG_ENDIAN
:
2869 val
= ldl_be_p(ptr
);
2881 qemu_mutex_unlock_iothread();
2887 uint32_t address_space_ldl(AddressSpace
*as
, hwaddr addr
,
2888 MemTxAttrs attrs
, MemTxResult
*result
)
2890 return address_space_ldl_internal(as
, addr
, attrs
, result
,
2891 DEVICE_NATIVE_ENDIAN
);
2894 uint32_t address_space_ldl_le(AddressSpace
*as
, hwaddr addr
,
2895 MemTxAttrs attrs
, MemTxResult
*result
)
2897 return address_space_ldl_internal(as
, addr
, attrs
, result
,
2898 DEVICE_LITTLE_ENDIAN
);
2901 uint32_t address_space_ldl_be(AddressSpace
*as
, hwaddr addr
,
2902 MemTxAttrs attrs
, MemTxResult
*result
)
2904 return address_space_ldl_internal(as
, addr
, attrs
, result
,
2908 uint32_t ldl_phys(AddressSpace
*as
, hwaddr addr
)
2910 return address_space_ldl(as
, addr
, MEMTXATTRS_UNSPECIFIED
, NULL
);
2913 uint32_t ldl_le_phys(AddressSpace
*as
, hwaddr addr
)
2915 return address_space_ldl_le(as
, addr
, MEMTXATTRS_UNSPECIFIED
, NULL
);
2918 uint32_t ldl_be_phys(AddressSpace
*as
, hwaddr addr
)
2920 return address_space_ldl_be(as
, addr
, MEMTXATTRS_UNSPECIFIED
, NULL
);
2923 /* warning: addr must be aligned */
2924 static inline uint64_t address_space_ldq_internal(AddressSpace
*as
, hwaddr addr
,
2926 MemTxResult
*result
,
2927 enum device_endian endian
)
2935 bool release_lock
= false;
2938 mr
= address_space_translate(as
, addr
, &addr1
, &l
,
2940 if (l
< 8 || !memory_access_is_direct(mr
, false)) {
2941 release_lock
|= prepare_mmio_access(mr
);
2944 r
= memory_region_dispatch_read(mr
, addr1
, &val
, 8, attrs
);
2945 #if defined(TARGET_WORDS_BIGENDIAN)
2946 if (endian
== DEVICE_LITTLE_ENDIAN
) {
2950 if (endian
== DEVICE_BIG_ENDIAN
) {
2956 ptr
= qemu_get_ram_ptr((memory_region_get_ram_addr(mr
)
2960 case DEVICE_LITTLE_ENDIAN
:
2961 val
= ldq_le_p(ptr
);
2963 case DEVICE_BIG_ENDIAN
:
2964 val
= ldq_be_p(ptr
);
2976 qemu_mutex_unlock_iothread();
2982 uint64_t address_space_ldq(AddressSpace
*as
, hwaddr addr
,
2983 MemTxAttrs attrs
, MemTxResult
*result
)
2985 return address_space_ldq_internal(as
, addr
, attrs
, result
,
2986 DEVICE_NATIVE_ENDIAN
);
2989 uint64_t address_space_ldq_le(AddressSpace
*as
, hwaddr addr
,
2990 MemTxAttrs attrs
, MemTxResult
*result
)
2992 return address_space_ldq_internal(as
, addr
, attrs
, result
,
2993 DEVICE_LITTLE_ENDIAN
);
2996 uint64_t address_space_ldq_be(AddressSpace
*as
, hwaddr addr
,
2997 MemTxAttrs attrs
, MemTxResult
*result
)
2999 return address_space_ldq_internal(as
, addr
, attrs
, result
,
3003 uint64_t ldq_phys(AddressSpace
*as
, hwaddr addr
)
3005 return address_space_ldq(as
, addr
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3008 uint64_t ldq_le_phys(AddressSpace
*as
, hwaddr addr
)
3010 return address_space_ldq_le(as
, addr
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3013 uint64_t ldq_be_phys(AddressSpace
*as
, hwaddr addr
)
3015 return address_space_ldq_be(as
, addr
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3019 uint32_t address_space_ldub(AddressSpace
*as
, hwaddr addr
,
3020 MemTxAttrs attrs
, MemTxResult
*result
)
3025 r
= address_space_rw(as
, addr
, attrs
, &val
, 1, 0);
3032 uint32_t ldub_phys(AddressSpace
*as
, hwaddr addr
)
3034 return address_space_ldub(as
, addr
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3037 /* warning: addr must be aligned */
3038 static inline uint32_t address_space_lduw_internal(AddressSpace
*as
,
3041 MemTxResult
*result
,
3042 enum device_endian endian
)
3050 bool release_lock
= false;
3053 mr
= address_space_translate(as
, addr
, &addr1
, &l
,
3055 if (l
< 2 || !memory_access_is_direct(mr
, false)) {
3056 release_lock
|= prepare_mmio_access(mr
);
3059 r
= memory_region_dispatch_read(mr
, addr1
, &val
, 2, attrs
);
3060 #if defined(TARGET_WORDS_BIGENDIAN)
3061 if (endian
== DEVICE_LITTLE_ENDIAN
) {
3065 if (endian
== DEVICE_BIG_ENDIAN
) {
3071 ptr
= qemu_get_ram_ptr((memory_region_get_ram_addr(mr
)
3075 case DEVICE_LITTLE_ENDIAN
:
3076 val
= lduw_le_p(ptr
);
3078 case DEVICE_BIG_ENDIAN
:
3079 val
= lduw_be_p(ptr
);
3091 qemu_mutex_unlock_iothread();
3097 uint32_t address_space_lduw(AddressSpace
*as
, hwaddr addr
,
3098 MemTxAttrs attrs
, MemTxResult
*result
)
3100 return address_space_lduw_internal(as
, addr
, attrs
, result
,
3101 DEVICE_NATIVE_ENDIAN
);
3104 uint32_t address_space_lduw_le(AddressSpace
*as
, hwaddr addr
,
3105 MemTxAttrs attrs
, MemTxResult
*result
)
3107 return address_space_lduw_internal(as
, addr
, attrs
, result
,
3108 DEVICE_LITTLE_ENDIAN
);
3111 uint32_t address_space_lduw_be(AddressSpace
*as
, hwaddr addr
,
3112 MemTxAttrs attrs
, MemTxResult
*result
)
3114 return address_space_lduw_internal(as
, addr
, attrs
, result
,
3118 uint32_t lduw_phys(AddressSpace
*as
, hwaddr addr
)
3120 return address_space_lduw(as
, addr
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3123 uint32_t lduw_le_phys(AddressSpace
*as
, hwaddr addr
)
3125 return address_space_lduw_le(as
, addr
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3128 uint32_t lduw_be_phys(AddressSpace
*as
, hwaddr addr
)
3130 return address_space_lduw_be(as
, addr
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3133 /* warning: addr must be aligned. The ram page is not masked as dirty
3134 and the code inside is not invalidated. It is useful if the dirty
3135 bits are used to track modified PTEs */
3136 void address_space_stl_notdirty(AddressSpace
*as
, hwaddr addr
, uint32_t val
,
3137 MemTxAttrs attrs
, MemTxResult
*result
)
3144 uint8_t dirty_log_mask
;
3145 bool release_lock
= false;
3148 mr
= address_space_translate(as
, addr
, &addr1
, &l
,
3150 if (l
< 4 || !memory_access_is_direct(mr
, true)) {
3151 release_lock
|= prepare_mmio_access(mr
);
3153 r
= memory_region_dispatch_write(mr
, addr1
, val
, 4, attrs
);
3155 addr1
+= memory_region_get_ram_addr(mr
) & TARGET_PAGE_MASK
;
3156 ptr
= qemu_get_ram_ptr(addr1
);
3159 dirty_log_mask
= memory_region_get_dirty_log_mask(mr
);
3160 dirty_log_mask
&= ~(1 << DIRTY_MEMORY_CODE
);
3161 cpu_physical_memory_set_dirty_range(addr1
, 4, dirty_log_mask
);
3168 qemu_mutex_unlock_iothread();
3173 void stl_phys_notdirty(AddressSpace
*as
, hwaddr addr
, uint32_t val
)
3175 address_space_stl_notdirty(as
, addr
, val
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3178 /* warning: addr must be aligned */
3179 static inline void address_space_stl_internal(AddressSpace
*as
,
3180 hwaddr addr
, uint32_t val
,
3182 MemTxResult
*result
,
3183 enum device_endian endian
)
3190 bool release_lock
= false;
3193 mr
= address_space_translate(as
, addr
, &addr1
, &l
,
3195 if (l
< 4 || !memory_access_is_direct(mr
, true)) {
3196 release_lock
|= prepare_mmio_access(mr
);
3198 #if defined(TARGET_WORDS_BIGENDIAN)
3199 if (endian
== DEVICE_LITTLE_ENDIAN
) {
3203 if (endian
== DEVICE_BIG_ENDIAN
) {
3207 r
= memory_region_dispatch_write(mr
, addr1
, val
, 4, attrs
);
3210 addr1
+= memory_region_get_ram_addr(mr
) & TARGET_PAGE_MASK
;
3211 ptr
= qemu_get_ram_ptr(addr1
);
3213 case DEVICE_LITTLE_ENDIAN
:
3216 case DEVICE_BIG_ENDIAN
:
3223 invalidate_and_set_dirty(mr
, addr1
, 4);
3230 qemu_mutex_unlock_iothread();
3235 void address_space_stl(AddressSpace
*as
, hwaddr addr
, uint32_t val
,
3236 MemTxAttrs attrs
, MemTxResult
*result
)
3238 address_space_stl_internal(as
, addr
, val
, attrs
, result
,
3239 DEVICE_NATIVE_ENDIAN
);
3242 void address_space_stl_le(AddressSpace
*as
, hwaddr addr
, uint32_t val
,
3243 MemTxAttrs attrs
, MemTxResult
*result
)
3245 address_space_stl_internal(as
, addr
, val
, attrs
, result
,
3246 DEVICE_LITTLE_ENDIAN
);
3249 void address_space_stl_be(AddressSpace
*as
, hwaddr addr
, uint32_t val
,
3250 MemTxAttrs attrs
, MemTxResult
*result
)
3252 address_space_stl_internal(as
, addr
, val
, attrs
, result
,
3256 void stl_phys(AddressSpace
*as
, hwaddr addr
, uint32_t val
)
3258 address_space_stl(as
, addr
, val
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3261 void stl_le_phys(AddressSpace
*as
, hwaddr addr
, uint32_t val
)
3263 address_space_stl_le(as
, addr
, val
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3266 void stl_be_phys(AddressSpace
*as
, hwaddr addr
, uint32_t val
)
3268 address_space_stl_be(as
, addr
, val
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3272 void address_space_stb(AddressSpace
*as
, hwaddr addr
, uint32_t val
,
3273 MemTxAttrs attrs
, MemTxResult
*result
)
3278 r
= address_space_rw(as
, addr
, attrs
, &v
, 1, 1);
3284 void stb_phys(AddressSpace
*as
, hwaddr addr
, uint32_t val
)
3286 address_space_stb(as
, addr
, val
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3289 /* warning: addr must be aligned */
3290 static inline void address_space_stw_internal(AddressSpace
*as
,
3291 hwaddr addr
, uint32_t val
,
3293 MemTxResult
*result
,
3294 enum device_endian endian
)
3301 bool release_lock
= false;
3304 mr
= address_space_translate(as
, addr
, &addr1
, &l
, true);
3305 if (l
< 2 || !memory_access_is_direct(mr
, true)) {
3306 release_lock
|= prepare_mmio_access(mr
);
3308 #if defined(TARGET_WORDS_BIGENDIAN)
3309 if (endian
== DEVICE_LITTLE_ENDIAN
) {
3313 if (endian
== DEVICE_BIG_ENDIAN
) {
3317 r
= memory_region_dispatch_write(mr
, addr1
, val
, 2, attrs
);
3320 addr1
+= memory_region_get_ram_addr(mr
) & TARGET_PAGE_MASK
;
3321 ptr
= qemu_get_ram_ptr(addr1
);
3323 case DEVICE_LITTLE_ENDIAN
:
3326 case DEVICE_BIG_ENDIAN
:
3333 invalidate_and_set_dirty(mr
, addr1
, 2);
3340 qemu_mutex_unlock_iothread();
3345 void address_space_stw(AddressSpace
*as
, hwaddr addr
, uint32_t val
,
3346 MemTxAttrs attrs
, MemTxResult
*result
)
3348 address_space_stw_internal(as
, addr
, val
, attrs
, result
,
3349 DEVICE_NATIVE_ENDIAN
);
3352 void address_space_stw_le(AddressSpace
*as
, hwaddr addr
, uint32_t val
,
3353 MemTxAttrs attrs
, MemTxResult
*result
)
3355 address_space_stw_internal(as
, addr
, val
, attrs
, result
,
3356 DEVICE_LITTLE_ENDIAN
);
3359 void address_space_stw_be(AddressSpace
*as
, hwaddr addr
, uint32_t val
,
3360 MemTxAttrs attrs
, MemTxResult
*result
)
3362 address_space_stw_internal(as
, addr
, val
, attrs
, result
,
3366 void stw_phys(AddressSpace
*as
, hwaddr addr
, uint32_t val
)
3368 address_space_stw(as
, addr
, val
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3371 void stw_le_phys(AddressSpace
*as
, hwaddr addr
, uint32_t val
)
3373 address_space_stw_le(as
, addr
, val
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3376 void stw_be_phys(AddressSpace
*as
, hwaddr addr
, uint32_t val
)
3378 address_space_stw_be(as
, addr
, val
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3382 void address_space_stq(AddressSpace
*as
, hwaddr addr
, uint64_t val
,
3383 MemTxAttrs attrs
, MemTxResult
*result
)
3387 r
= address_space_rw(as
, addr
, attrs
, (void *) &val
, 8, 1);
3393 void address_space_stq_le(AddressSpace
*as
, hwaddr addr
, uint64_t val
,
3394 MemTxAttrs attrs
, MemTxResult
*result
)
3397 val
= cpu_to_le64(val
);
3398 r
= address_space_rw(as
, addr
, attrs
, (void *) &val
, 8, 1);
3403 void address_space_stq_be(AddressSpace
*as
, hwaddr addr
, uint64_t val
,
3404 MemTxAttrs attrs
, MemTxResult
*result
)
3407 val
= cpu_to_be64(val
);
3408 r
= address_space_rw(as
, addr
, attrs
, (void *) &val
, 8, 1);
3414 void stq_phys(AddressSpace
*as
, hwaddr addr
, uint64_t val
)
3416 address_space_stq(as
, addr
, val
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3419 void stq_le_phys(AddressSpace
*as
, hwaddr addr
, uint64_t val
)
3421 address_space_stq_le(as
, addr
, val
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3424 void stq_be_phys(AddressSpace
*as
, hwaddr addr
, uint64_t val
)
3426 address_space_stq_be(as
, addr
, val
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3429 /* virtual memory access for debug (includes writing to ROM) */
3430 int cpu_memory_rw_debug(CPUState
*cpu
, target_ulong addr
,
3431 uint8_t *buf
, int len
, int is_write
)
3438 page
= addr
& TARGET_PAGE_MASK
;
3439 phys_addr
= cpu_get_phys_page_debug(cpu
, page
);
3440 /* if no physical page mapped, return an error */
3441 if (phys_addr
== -1)
3443 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
3446 phys_addr
+= (addr
& ~TARGET_PAGE_MASK
);
3448 cpu_physical_memory_write_rom(cpu
->as
, phys_addr
, buf
, l
);
3450 address_space_rw(cpu
->as
, phys_addr
, MEMTXATTRS_UNSPECIFIED
,
3462 * A helper function for the _utterly broken_ virtio device model to find out if
3463 * it's running on a big endian machine. Don't do this at home kids!
3465 bool target_words_bigendian(void);
3466 bool target_words_bigendian(void)
3468 #if defined(TARGET_WORDS_BIGENDIAN)
3475 #ifndef CONFIG_USER_ONLY
3476 bool cpu_physical_memory_is_io(hwaddr phys_addr
)
3483 mr
= address_space_translate(&address_space_memory
,
3484 phys_addr
, &phys_addr
, &l
, false);
3486 res
= !(memory_region_is_ram(mr
) || memory_region_is_romd(mr
));
3491 int qemu_ram_foreach_block(RAMBlockIterFunc func
, void *opaque
)
3497 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
3498 ret
= func(block
->idstr
, block
->host
, block
->offset
,
3499 block
->used_length
, opaque
);