 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <sys/types.h>
#include "qemu-common.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/boards.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "exec/cpu-all.h"
#include "qemu/rcu_queue.h"
#include "qemu/main-loop.h"
#include "translate-all.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"

#include "qemu/range.h"

//#define DEBUG_SUBPAGE
#if !defined(CONFIG_USER_ONLY)
/* ram_list is read under rcu_read_lock()/rcu_read_unlock().  Writes
 * are protected by the ramlist lock.
 */
RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };
static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;
/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC   (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED     (1 << 1)

/* Only a portion of RAM (used_length) is actually used, and migrated.
 * This used_length size can change across reboots.
 */
#define RAM_RESIZEABLE (1 << 2)
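
/* Illustrative note (added for clarity, not in the original source): these
 * RAM_* values are bit flags kept in RAMBlock::flags; code below tests them
 * with expressions such as (block->flags & RAM_SHARED) and combines them
 * with |= when a block is preallocated or resizeable.
 */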
struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);

/* current CPU in the current thread. It is only valid inside cpu_exec(). */
__thread CPUState *current_cpu;

/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables.  */
#define ADDR_SPACE_BITS 64

#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)

typedef PhysPageEntry Node[P_L2_SIZE];
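
/* Worked example (added for clarity, not part of the original source):
 * assuming the usual P_L2_BITS value of 9 and 4 KiB target pages
 * (TARGET_PAGE_BITS == 12), each level indexes P_L2_SIZE == 512 entries and
 * P_L2_LEVELS == ((64 - 12 - 1) / 9) + 1 == 6, i.e. the 64-bit physical
 * address space is covered by a six-level radix tree of Node arrays.
 */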
typedef struct PhysPageMap {
    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb_alloc;
    MemoryRegionSection *sections;
struct AddressSpaceDispatch {
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    uint16_t sub_section[TARGET_PAGE_SIZE];

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3
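
/* Note added for clarity (not part of the original source): these four
 * constants are fixed slots in PhysPageMap.sections; mem_begin() below
 * registers a dummy section for io_mem_unassigned, io_mem_notdirty,
 * io_mem_rom and io_mem_watch in exactly this order and asserts that the
 * returned indexes match.
 */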
static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;
#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
    ret = map->nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);
    e.skip = leaf ? 0 : 1;
    e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
    for (i = 0; i < P_L2_SIZE; ++i) {
        memcpy(&p[i], &e, sizeof(e));
static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map, level == 0);
    p = map->nodes[lp->ptr];
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
/* Compact a non leaf page entry. Simply detect that the entry has a single child,
 * and update our entry so we can skip it and go directly to the destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
    unsigned valid_ptr = P_L2_SIZE;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            phys_page_compact(&p[i], nodes, compacted);

    /* We can only compress if there's only one child.  */
    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
        lp->skip += p[valid_ptr].skip;
static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
    DECLARE_BITMAP(compacted, nodes_nb);

    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes, compacted);
static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
    hwaddr index = addr >> TARGET_PAGE_BITS;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];

    if (sections[lp.ptr].size.hi ||
        range_covers_byte(sections[lp.ptr].offset_within_address_space,
                          sections[lp.ptr].size.lo, addr)) {
        return &sections[lp.ptr];
        return &sections[PHYS_SECTION_UNASSIGNED];
bool memory_region_is_unassigned(MemoryRegion *mr)
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
/* Called from RCU critical section */
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                         bool resolve_subpage)
    MemoryRegionSection *section;

    section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
/* Called from RCU critical section */
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
    MemoryRegionSection *section;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    /* MMIO registers can be expected to perform full-width accesses based only
     * on their address, without considering adjacent registers that could
     * decode to completely different MemoryRegions.  When such registers
     * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
     * regions overlap wildly.  For this reason we cannot clamp the accesses
     *
     * If the length is small (as is the case for address_space_ldl/stl),
     * everything works fine.  If the incoming length is large, however,
     * the caller really has to do the clamping through memory_access_size.
     */
    if (memory_region_is_ram(mr)) {
        diff = int128_sub(section->size, int128_make64(addr));
        *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
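
/* Note added for clarity (not part of the original source): "direct" access
 * below means the bytes can be copied straight to or from the RAMBlock host
 * buffer (writable RAM, or ROM/ROM-device reads); everything else is routed
 * through memory_region_dispatch_read()/write() in address_space_rw().
 */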
static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
    if (memory_region_is_ram(mr)) {
        return !(is_write && mr->readonly);
    if (memory_region_is_romd(mr)) {
/* Called from RCU critical section */
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
    MemoryRegionSection *section;

        AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
        section = address_space_translate_internal(d, addr, &addr, plen, true);

        if (!mr->iommu_ops) {
        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
        as = iotlb.target_as;

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        *plen = MIN(page, *plen);
/* Called from RCU critical section */
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen)
    MemoryRegionSection *section;
    section = address_space_translate_internal(cpu->memory_dispatch,
                                               addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
static int cpu_common_pre_load(void *opaque)
    CPUState *cpu = opaque;

    cpu->exception_index = -1;

static bool cpu_common_exception_index_needed(void *opaque)
    CPUState *cpu = opaque;

    return tcg_enabled() && cpu->exception_index != -1;
static const VMStateDescription vmstate_cpu_common_exception_index = {
    .name = "cpu_common/exception_index",
    .minimum_version_id = 1,
    .needed = cpu_common_exception_index_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(exception_index, CPUState),
        VMSTATE_END_OF_LIST()
static bool cpu_common_crash_occurred_needed(void *opaque)
    CPUState *cpu = opaque;

    return cpu->crash_occurred;

static const VMStateDescription vmstate_cpu_common_crash_occurred = {
    .name = "cpu_common/crash_occurred",
    .minimum_version_id = 1,
    .needed = cpu_common_crash_occurred_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(crash_occurred, CPUState),
        VMSTATE_END_OF_LIST()
const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .minimum_version_id = 1,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    .subsections = (const VMStateDescription *[]) {
        &vmstate_cpu_common_exception_index,
        &vmstate_cpu_common_crash_occurred,
CPUState *qemu_get_cpu(int index)
        if (cpu->cpu_index == index) {
#if !defined(CONFIG_USER_ONLY)
void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as)
    /* We only support one address space per cpu at the moment.  */
    assert(cpu->as == as);

    if (cpu->tcg_as_listener) {
        memory_listener_unregister(cpu->tcg_as_listener);
        cpu->tcg_as_listener = g_new0(MemoryListener, 1);
    cpu->tcg_as_listener->commit = tcg_commit;
    memory_listener_register(cpu->tcg_as_listener, as);
#ifndef CONFIG_USER_ONLY
static DECLARE_BITMAP(cpu_index_map, MAX_CPUMASK_BITS);

static int cpu_get_free_index(Error **errp)
    int cpu = find_first_zero_bit(cpu_index_map, MAX_CPUMASK_BITS);

    if (cpu >= MAX_CPUMASK_BITS) {
        error_setg(errp, "Trying to use more CPUs than max of %d",
    bitmap_set(cpu_index_map, cpu, 1);

void cpu_exec_exit(CPUState *cpu)
    if (cpu->cpu_index == -1) {
        /* cpu_index was never allocated by this @cpu or was already freed. */
    bitmap_clear(cpu_index_map, cpu->cpu_index, 1);
static int cpu_get_free_index(Error **errp)
    CPU_FOREACH(some_cpu) {

void cpu_exec_exit(CPUState *cpu)
void cpu_exec_init(CPUState *cpu, Error **errp)
    CPUClass *cc = CPU_GET_CLASS(cpu);
    Error *local_err = NULL;

#ifdef TARGET_WORDS_BIGENDIAN
    cpu->bigendian = true;
    cpu->bigendian = false;
#ifndef CONFIG_USER_ONLY
    cpu->as = &address_space_memory;
    cpu->thread_id = qemu_get_thread_id();
    cpu_reload_memory_map(cpu);
#if defined(CONFIG_USER_ONLY)
    cpu_index = cpu->cpu_index = cpu_get_free_index(&local_err);
        error_propagate(errp, local_err);
#if defined(CONFIG_USER_ONLY)
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, cpu->env_ptr);
    assert(cc->vmsd == NULL);
    assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
    tb_invalidate_phys_page_range(pc, pc + 1, 0);

static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
    hwaddr phys = cpu_get_phys_page_debug(cpu, pc);
        tb_invalidate_phys_addr(cpu->as,
                                phys | (pc & ~TARGET_PAGE_MASK));
#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)

int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,

void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
    /* forbid ranges which are empty or run off the end of the address space */
    if (len == 0 || (addr + len - 1) < addr) {
        error_report("tried to set invalid watchpoint at %"
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
    wp = g_malloc(sizeof(*wp));

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
    tlb_flush_page(cpu, addr);
/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (addr == wp->vaddr && len == wp->len
            && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
    QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);

    tlb_flush_page(cpu, watchpoint->vaddr);
/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
/* Return true if this watchpoint address matches the specified
 * access (ie the address range covered by the watchpoint overlaps
 * partially or completely with the address range covered by the
 * access).
 */
static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
    /* We know the lengths are non-zero, but a little caution is
     * required to avoid errors in the case where the range ends
     * exactly at the top of the address space and so addr + len
     * wraps round to zero.
     */
    vaddr wpend = wp->vaddr + wp->len - 1;
    vaddr addrend = addr + len - 1;

    return !(addr > wpend || wp->vaddr > addrend);
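
/* Worked example (added for clarity, not from the original source): a 4-byte
 * watchpoint at wp->vaddr == 0x1000 gives wpend == 0x1003; a 2-byte access at
 * addr == 0x1002 gives addrend == 0x1003, so neither addr > wpend nor
 * wp->vaddr > addrend holds and the ranges overlap.  Using inclusive end
 * addresses keeps the check correct even when a range ends at the very top
 * of the address space.
 */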
/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
    bp = g_malloc(sizeof(*bp));

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
    breakpoint_invalidate(cpu, pc);
/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
    QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);

    breakpoint_invalidate(cpu, breakpoint->pc);
/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
            kvm_update_guest_debug(cpu, 0);
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
void QEMU_NORETURN cpu_abort(CPUState *cpu, const char *fmt, ...)
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
#if defined(CONFIG_USER_ONLY)
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
#if !defined(CONFIG_USER_ONLY)
/* Called from RCU critical section */
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && addr - block->offset < block->max_length) {
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->max_length) {
    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);

    /* It is safe to write mru_block outside the iothread lock.  This
     *     xxx removed from list
     *         call_rcu(reclaim_ramblock, xxx);
     *
     * atomic_rcu_set is not needed here.  The block was already published
     * when it was placed into the list.  Here we're just making an extra
     * copy of the pointer.
     */
    ram_list.mru_block = block;
static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
        tlb_reset_dirty(cpu, start1, length);
/* Note: start and end must be within the same ram block.  */
bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
    unsigned long end, page;

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;
    dirty = bitmap_test_and_clear_atomic(ram_list.dirty_memory[client],

    if (dirty && tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
/* Called from RCU critical section */
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       hwaddr paddr, hwaddr xlat,
                                       target_ulong *address)
    if (memory_region_is_ram(section->mr)) {
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
            iotlb |= PHYS_SECTION_ROM;
        AddressSpaceDispatch *d;

        d = atomic_rcu_read(&section->address_space->dispatch);
        iotlb = section - d->map.sections;

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;

#endif /* defined(CONFIG_USER_ONLY) */
#if !defined(CONFIG_USER_ONLY)

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
    qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this.  Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
    phys_mem_alloc = alloc;
static uint16_t phys_section_add(PhysPageMap *map,
                                 MemoryRegionSection *section)
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(map->sections_nb < TARGET_PAGE_SIZE);

    if (map->sections_nb == map->sections_nb_alloc) {
        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
        map->sections = g_renew(MemoryRegionSection, map->sections,
                                map->sections_nb_alloc);
    map->sections[map->sections_nb] = *section;
    memory_region_ref(section->mr);
    return map->sections_nb++;
static void phys_section_destroy(MemoryRegion *mr)
    memory_region_unref(mr);

        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        object_unref(OBJECT(&subpage->iomem));
static void phys_sections_free(PhysPageMap *map)
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    g_free(map->sections);
static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
    hwaddr base = section->offset_within_address_space
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
                                                   d->map.nodes, d->map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.address_space = d->as;
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&d->map, &subsection));
        subpage = container_of(existing->mr, subpage_t, iomem);
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end,
                     phys_section_add(&d->map, section));
static void register_multipage(AddressSpaceDispatch *d,
                               MemoryRegionSection *section)
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(&d->map, section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,

    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = as->next_dispatch;
    MemoryRegionSection now = *section, remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
                        - now.offset_within_address_space;
        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(d, &now);
        now.size = int128_zero();
    while (int128_ne(remain.size, now.size)) {
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);
        if (int128_lt(remain.size, page_size)) {
            register_subpage(d, &now);
        } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
            now.size = page_size;
            register_subpage(d, &now);
            now.size = int128_and(now.size, int128_neg(page_size));
            register_multipage(d, &now);
void qemu_flush_coalesced_mmio_buffer(void)
        kvm_flush_coalesced_mmio_buffer();

void qemu_mutex_lock_ramlist(void)
    qemu_mutex_lock(&ram_list.mutex);

void qemu_mutex_unlock_ramlist(void)
    qemu_mutex_unlock(&ram_list.mutex);
#include <sys/vfs.h>

#define HUGETLBFS_MAGIC       0x958458f6

static long gethugepagesize(const char *path, Error **errp)
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

        error_setg_errno(errp, errno, "failed to get page size of file %s",
    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
static void *file_ram_alloc(RAMBlock *block,
    char *sanitized_name;
    void * volatile area = NULL;
    Error *local_err = NULL;

    hpagesize = gethugepagesize(path, &local_err);
        error_propagate(errp, local_err);
    block->mr->align = hpagesize;

    if (memory < hpagesize) {
        error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
                   "or larger than huge page size 0x%" PRIx64,

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
                   "host lacks kvm mmu notifiers, -mem-path unsupported");

    /* Make name safe to use with mkstemp by replacing '/' with '_'. */
    sanitized_name = g_strdup(memory_region_name(block->mr));
    for (c = sanitized_name; *c != '\0'; c++) {

    filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
    g_free(sanitized_name);

    fd = mkstemp(filename);
        error_setg_errno(errp, errno,
                         "unable to create backing store for hugepages");

    memory = ROUND_UP(memory, hpagesize);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     */
    if (ftruncate(fd, memory)) {
        perror("ftruncate");

    area = mmap(0, memory, PROT_READ | PROT_WRITE,
                (block->flags & RAM_SHARED ? MAP_SHARED : MAP_PRIVATE),
    if (area == MAP_FAILED) {
        error_setg_errno(errp, errno,
                         "unable to map backing store for hugepages");

    os_mem_prealloc(fd, area, memory);

    error_report("%s", error_get_pretty(*errp));
/* Called with the ramlist lock held.  */
static ram_addr_t find_ram_offset(ram_addr_t size)
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    assert(size != 0); /* it would hand out same offset multiple times */

    if (QLIST_EMPTY_RCU(&ram_list.blocks)) {

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->max_length;

        QLIST_FOREACH_RCU(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);

        if (next - end >= size && next - end < mingap) {
            mingap = next - end;

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
ram_addr_t last_ram_offset(void)
    ram_addr_t last = 0;

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        last = MAX(last, block->offset + block->max_length);
static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
    /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
    if (!machine_dump_guest_core(current_machine)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
            perror("qemu_madvise");
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                    "but dump_guest_core=off specified\n");
/* Called within an RCU critical section, or while the ramlist lock
 * is held.
 */
static RAMBlock *find_ram_block(ram_addr_t addr)
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
/* Called with iothread lock held.  */
void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
    RAMBlock *new_block, *block;

    new_block = find_ram_block(addr);
    assert(!new_block->idstr[0]);

        char *id = qdev_get_dev_path(dev);
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
/* Called with iothread lock held.  */
void qemu_ram_unset_idstr(ram_addr_t addr)
    /* FIXME: arch_init.c assumes that this is not called throughout
     * migration.  Ignore the problem since hot-unplug during migration
     * does not work anyway.
     */
    block = find_ram_block(addr);
        memset(block->idstr, 0, sizeof(block->idstr));
static int memory_try_enable_merging(void *addr, size_t len)
    if (!machine_mem_merge(current_machine)) {
        /* disabled by the user */

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
/* Only legal before guest might have detected the memory size: e.g. on
 * incoming migration, or right after reset.
 *
 * As the memory core doesn't know how memory is accessed, it is up to the
 * resize callback to update device state and/or add assertions to detect
 * misuse, if necessary.
 */
int qemu_ram_resize(ram_addr_t base, ram_addr_t newsize, Error **errp)
    RAMBlock *block = find_ram_block(base);

    newsize = TARGET_PAGE_ALIGN(newsize);

    if (block->used_length == newsize) {

    if (!(block->flags & RAM_RESIZEABLE)) {
        error_setg_errno(errp, EINVAL,
                         "Length mismatch: %s: 0x" RAM_ADDR_FMT
                         " in != 0x" RAM_ADDR_FMT, block->idstr,
                         newsize, block->used_length);

    if (block->max_length < newsize) {
        error_setg_errno(errp, EINVAL,
                         "Length too large: %s: 0x" RAM_ADDR_FMT
                         " > 0x" RAM_ADDR_FMT, block->idstr,
                         newsize, block->max_length);

    cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
    block->used_length = newsize;
    cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
    memory_region_set_size(block->mr, newsize);
    if (block->resized) {
        block->resized(block->idstr, newsize, block->host);
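
/* Usage sketch (added for illustration, not part of the original source):
 * a device that wants a growable block typically allocates it with
 *
 *     qemu_ram_alloc_resizeable(size, max_size, resized_cb, mr, &err);
 *
 * and later, e.g. on incoming migration, adjusts the used length with
 *
 *     qemu_ram_resize(block_offset, new_size, &err);
 *
 * Only blocks created with RAM_RESIZEABLE accept the resize; anything else
 * fails with EINVAL as checked above.
 */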
static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp)
    RAMBlock *last_block = NULL;
    ram_addr_t old_ram_size, new_ram_size;

    old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;

    qemu_mutex_lock_ramlist();
    new_block->offset = find_ram_offset(new_block->max_length);

    if (!new_block->host) {
        if (xen_enabled()) {
            xen_ram_alloc(new_block->offset, new_block->max_length,
            new_block->host = phys_mem_alloc(new_block->max_length,
                                             &new_block->mr->align);
            if (!new_block->host) {
                error_setg_errno(errp, errno,
                                 "cannot set up guest memory '%s'",
                                 memory_region_name(new_block->mr));
                qemu_mutex_unlock_ramlist();
            memory_try_enable_merging(new_block->host, new_block->max_length);

    new_ram_size = MAX(old_ram_size,
                       (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS);
    if (new_ram_size > old_ram_size) {
        migration_bitmap_extend(old_ram_size, new_ram_size);
    /* Keep the list sorted from biggest to smallest block.  Unlike QTAILQ,
     * QLIST (which has an RCU-friendly variant) does not have insertion at
     * tail, so save the last element in last_block.
     */
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (block->max_length < new_block->max_length) {
            QLIST_INSERT_BEFORE_RCU(block, new_block, next);
    } else if (last_block) {
        QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
    } else { /* list is empty */
        QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
    ram_list.mru_block = NULL;

    /* Write list before version */
    qemu_mutex_unlock_ramlist();

    new_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;

    if (new_ram_size > old_ram_size) {
        /* ram_list.dirty_memory[] is protected by the iothread lock.  */
        for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
            ram_list.dirty_memory[i] =
                bitmap_zero_extend(ram_list.dirty_memory[i],
                                   old_ram_size, new_ram_size);
    cpu_physical_memory_set_dirty_range(new_block->offset,
                                        new_block->used_length,
    if (new_block->host) {
        qemu_ram_setup_dump(new_block->host, new_block->max_length);
        qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
        qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
        if (kvm_enabled()) {
            kvm_setup_guest_memory(new_block->host, new_block->max_length);

    return new_block->offset;
ram_addr_t qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
                                    bool share, const char *mem_path,
    RAMBlock *new_block;
    Error *local_err = NULL;

    if (xen_enabled()) {
        error_setg(errp, "-mem-path not supported with Xen");

    if (phys_mem_alloc != qemu_anon_ram_alloc) {
        /*
         * file_ram_alloc() needs to allocate just like
         * phys_mem_alloc, but we haven't bothered to provide
         */
                   "-mem-path not supported with this accelerator");

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));
    new_block->used_length = size;
    new_block->max_length = size;
    new_block->flags = share ? RAM_SHARED : 0;
    new_block->host = file_ram_alloc(new_block, size,
    if (!new_block->host) {

    addr = ram_block_add(new_block, &local_err);
        error_propagate(errp, local_err);
ram_addr_t qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
                                   void (*resized)(const char*,
                                   void *host, bool resizeable,
                                   MemoryRegion *mr, Error **errp)
    RAMBlock *new_block;
    Error *local_err = NULL;

    size = TARGET_PAGE_ALIGN(size);
    max_size = TARGET_PAGE_ALIGN(max_size);
    new_block = g_malloc0(sizeof(*new_block));
    new_block->resized = resized;
    new_block->used_length = size;
    new_block->max_length = max_size;
    assert(max_size >= size);
    new_block->host = host;
        new_block->flags |= RAM_PREALLOC;
        new_block->flags |= RAM_RESIZEABLE;
    addr = ram_block_add(new_block, &local_err);
        error_propagate(errp, local_err);

ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr, Error **errp)
    return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);

ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
    return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);

ram_addr_t qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
                                     void (*resized)(const char*,
                                     MemoryRegion *mr, Error **errp)
    return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
void qemu_ram_free_from_ptr(ram_addr_t addr)
    qemu_mutex_lock_ramlist();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE_RCU(block, next);
            ram_list.mru_block = NULL;
            /* Write list before version */
            g_free_rcu(block, rcu);
    qemu_mutex_unlock_ramlist();
static void reclaim_ramblock(RAMBlock *block)
    if (block->flags & RAM_PREALLOC) {
    } else if (xen_enabled()) {
        xen_invalidate_map_cache_entry(block->host);
    } else if (block->fd >= 0) {
        munmap(block->host, block->max_length);
        qemu_anon_ram_free(block->host, block->max_length);

void qemu_ram_free(ram_addr_t addr)
    qemu_mutex_lock_ramlist();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE_RCU(block, next);
            ram_list.mru_block = NULL;
            /* Write list before version */
            call_rcu(block, reclaim_ramblock, rcu);
    qemu_mutex_unlock_ramlist();
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->max_length) {
            vaddr = ramblock_ptr(block, offset);
            if (block->flags & RAM_PREALLOC) {
            } else if (xen_enabled()) {
                if (block->fd >= 0) {
                    flags |= (block->flags & RAM_SHARED ?
                              MAP_SHARED : MAP_PRIVATE);
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, block->fd, offset);
                    /*
                     * Remap needs to match alloc.  Accelerators that
                     * set phys_mem_alloc never remap.  If they did,
                     * we'd need a remap hook here.
                     */
                    assert(phys_mem_alloc == qemu_anon_ram_alloc);
                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                memory_try_enable_merging(vaddr, length);
                qemu_ram_setup_dump(vaddr, length);

#endif /* !_WIN32 */
int qemu_get_ram_fd(ram_addr_t addr)
    block = qemu_get_ram_block(addr);

void *qemu_get_ram_block_host_ptr(ram_addr_t addr)
    block = qemu_get_ram_block(addr);
    ptr = ramblock_ptr(block, 0);
/* Return a host pointer to ram allocated with qemu_ram_alloc.
 * This should not be used for general purpose DMA.  Use address_space_map
 * or address_space_rw instead. For local memory (e.g. video ram) that the
 * device owns, use memory_region_get_ram_ptr.
 *
 * By the time this function returns, the returned pointer is not protected
 * by RCU anymore.  If the caller is not within an RCU critical section and
 * does not hold the iothread lock, it must have other means of protecting the
 * pointer, such as a reference to the region that includes the incoming
 * ram_addr.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
    block = qemu_get_ram_block(addr);

    if (xen_enabled() && block->host == NULL) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map until the end of the page.
         */
        if (block->offset == 0) {
            ptr = xen_map_cache(addr, 0, 0);

        block->host = xen_map_cache(block->offset, block->max_length, 1);
    ptr = ramblock_ptr(block, addr - block->offset);
/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
 * but takes a size argument.
 *
 * By the time this function returns, the returned pointer is not protected
 * by RCU anymore.  If the caller is not within an RCU critical section and
 * does not hold the iothread lock, it must have other means of protecting the
 * pointer, such as a reference to the region that includes the incoming
 * ram_addr.
 */
static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
    if (xen_enabled()) {
        return xen_map_cache(addr, *size, 1);

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->max_length) {
            if (addr - block->offset + *size > block->max_length)
                *size = block->max_length - addr + block->offset;
            ptr = ramblock_ptr(block, addr - block->offset);

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
/* Some of the softmmu routines need to translate from a host pointer
 * (typically a TLB entry) back to a ram offset.
 *
 * By the time this function returns, the returned pointer is not protected
 * by RCU anymore.  If the caller is not within an RCU critical section and
 * does not hold the iothread lock, it must have other means of protecting the
 * pointer, such as a reference to the region that includes the incoming
 * ram_addr.
 */
MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
    uint8_t *host = ptr;

    if (xen_enabled()) {
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
        mr = qemu_get_ram_block(*ram_addr)->mr;

    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && block->host && host - block->host < block->max_length) {

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        /* This case appears when the block is not mapped. */
        if (block->host == NULL) {
            if (host - block->host < block->max_length) {

    *ram_addr = block->offset + (host - block->host);
static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
                               uint64_t val, unsigned size)
    if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
        tb_invalidate_phys_page_fast(ram_addr, size);
        stb_p(qemu_get_ram_ptr(ram_addr), val);
        stw_p(qemu_get_ram_ptr(ram_addr), val);
        stl_p(qemu_get_ram_ptr(ram_addr), val);
    /* Set both VGA and migration bits for simplicity and to remove
     * the notdirty callback faster.
     */
    cpu_physical_memory_set_dirty_range(ram_addr, size,
                                        DIRTY_CLIENTS_NOCODE);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (!cpu_physical_memory_is_clean(ram_addr)) {
        tlb_set_dirty(current_cpu, current_cpu->mem_io_vaddr);

static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
                                 unsigned size, bool is_write)

static const MemoryRegionOps notdirty_mem_ops = {
    .write = notdirty_mem_write,
    .valid.accepts = notdirty_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
    CPUState *cpu = current_cpu;
    CPUArchState *env = cpu->env_ptr;
    target_ulong pc, cs_base;

    if (cpu->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);

    vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, len)
            && (wp->flags & flags)) {
            if (flags == BP_MEM_READ) {
                wp->flags |= BP_WATCHPOINT_HIT_READ;
                wp->flags |= BP_WATCHPOINT_HIT_WRITE;
            wp->hitaddr = vaddr;
            wp->hitattrs = attrs;
            if (!cpu->watchpoint_hit) {
                cpu->watchpoint_hit = wp;
                tb_check_watchpoint(cpu);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    cpu->exception_index = EXCP_DEBUG;
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
                    cpu_resume_from_signal(cpu, NULL);
            wp->flags &= ~BP_WATCHPOINT_HIT;
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   load/store functions.  */
static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata,
                                  unsigned size, MemTxAttrs attrs)
    check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ);
        data = address_space_ldub(&address_space_memory, addr, attrs, &res);
        data = address_space_lduw(&address_space_memory, addr, attrs, &res);
        data = address_space_ldl(&address_space_memory, addr, attrs, &res);

static MemTxResult watch_mem_write(void *opaque, hwaddr addr,
                                   uint64_t val, unsigned size,
    check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE);
        address_space_stb(&address_space_memory, addr, val, attrs, &res);
        address_space_stw(&address_space_memory, addr, val, attrs, &res);
        address_space_stl(&address_space_memory, addr, val, attrs, &res);

static const MemoryRegionOps watch_mem_ops = {
    .read_with_attrs = watch_mem_read,
    .write_with_attrs = watch_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
                                unsigned len, MemTxAttrs attrs)
    subpage_t *subpage = opaque;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
           subpage, len, addr);
    res = address_space_read(subpage->as, addr + subpage->base,
        *data = ldub_p(buf);
        *data = lduw_p(buf);

static MemTxResult subpage_write(void *opaque, hwaddr addr,
                                 uint64_t value, unsigned len, MemTxAttrs attrs)
    subpage_t *subpage = opaque;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx
           " value %"PRIx64 "\n",
           __func__, subpage, len, addr, value);
    return address_space_write(subpage->as, addr + subpage->base,

static bool subpage_accepts(void *opaque, hwaddr addr,
                            unsigned len, bool is_write)
    subpage_t *subpage = opaque;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
           __func__, subpage, is_write ? 'w' : 'r', len, addr);
    return address_space_access_valid(subpage->as, addr + subpage->base,

static const MemoryRegionOps subpage_ops = {
    .read_with_attrs = subpage_read,
    .write_with_attrs = subpage_write,
    .impl.min_access_size = 1,
    .impl.max_access_size = 8,
    .valid.min_access_size = 1,
    .valid.max_access_size = 8,
    .valid.accepts = subpage_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
           __func__, mmio, start, end, idx, eidx, section);
    for (; idx <= eidx; idx++) {
        mmio->sub_section[idx] = section;

static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
    mmio = g_malloc0(sizeof(subpage_t));

    memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
                          NULL, TARGET_PAGE_SIZE);
    mmio->iomem.subpage = true;
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
           mmio, base, TARGET_PAGE_SIZE);
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);

static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
    MemoryRegionSection section = {
        .address_space = as,
        .offset_within_address_space = 0,
        .offset_within_region = 0,
        .size = int128_2_64(),

    return phys_section_add(map, &section);
MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index)
    AddressSpaceDispatch *d = atomic_rcu_read(&cpu->memory_dispatch);
    MemoryRegionSection *sections = d->map.sections;

    return sections[index & ~TARGET_PAGE_MASK].mr;

static void io_mem_init(void)
    memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
    memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
    memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
    memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
static void mem_begin(MemoryListener *listener)
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);

    n = dummy_section(&d->map, as, &io_mem_unassigned);
    assert(n == PHYS_SECTION_UNASSIGNED);
    n = dummy_section(&d->map, as, &io_mem_notdirty);
    assert(n == PHYS_SECTION_NOTDIRTY);
    n = dummy_section(&d->map, as, &io_mem_rom);
    assert(n == PHYS_SECTION_ROM);
    n = dummy_section(&d->map, as, &io_mem_watch);
    assert(n == PHYS_SECTION_WATCH);

    d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
    as->next_dispatch = d;
static void address_space_dispatch_free(AddressSpaceDispatch *d)
    phys_sections_free(&d->map);

static void mem_commit(MemoryListener *listener)
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *cur = as->dispatch;
    AddressSpaceDispatch *next = as->next_dispatch;

    phys_page_compact_all(next, next->map.nodes_nb);

    atomic_rcu_set(&as->dispatch, next);
        call_rcu(cur, address_space_dispatch_free, rcu);

static void tcg_commit(MemoryListener *listener)
    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
        /* FIXME: Disentangle the cpu.h circular files deps so we can
           directly get the right CPU from listener.  */
        if (cpu->tcg_as_listener != listener) {
        cpu_reload_memory_map(cpu);
void address_space_init_dispatch(AddressSpace *as)
    as->dispatch = NULL;
    as->dispatch_listener = (MemoryListener) {
        .commit = mem_commit,
        .region_add = mem_add,
        .region_nop = mem_add,
    memory_listener_register(&as->dispatch_listener, as);

void address_space_unregister(AddressSpace *as)
    memory_listener_unregister(&as->dispatch_listener);

void address_space_destroy_dispatch(AddressSpace *as)
    AddressSpaceDispatch *d = as->dispatch;

    atomic_rcu_set(&as->dispatch, NULL);
    call_rcu(d, address_space_dispatch_free, rcu);

static void memory_map_init(void)
    system_memory = g_malloc(sizeof(*system_memory));

    memory_region_init(system_memory, NULL, "system", UINT64_MAX);
    address_space_init(&address_space_memory, system_memory, "memory");

    system_io = g_malloc(sizeof(*system_io));
    memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
    address_space_init(&address_space_io, system_io, "I/O");

MemoryRegion *get_system_memory(void)
    return system_memory;

MemoryRegion *get_system_io(void)

#endif /* !defined(CONFIG_USER_ONLY) */
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            if (!(flags & PAGE_WRITE))
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
            unlock_user(p, addr, l);
            if (!(flags & PAGE_READ))
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
            unlock_user(p, addr, 0);
static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
    uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);

    /* No early return if dirty_log_mask is or becomes 0, because
     * cpu_physical_memory_set_dirty_range will still call
     * xen_modified_memory.
     */
    if (dirty_log_mask) {
            cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
    if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
        tb_invalidate_phys_range(addr, addr + length);
        dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
    cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
    unsigned access_size_max = mr->ops->valid.max_access_size;

    /* Regions are assumed to support 1-4 byte accesses unless
       otherwise specified.  */
    if (access_size_max == 0) {
        access_size_max = 4;

    /* Bound the maximum access by the alignment of the address.  */
    if (!mr->ops->impl.unaligned) {
        unsigned align_size_max = addr & -addr;
        if (align_size_max != 0 && align_size_max < access_size_max) {
            access_size_max = align_size_max;

    /* Don't attempt accesses larger than the maximum.  */
    if (l > access_size_max) {
        l = access_size_max;
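
/* Worked example (added for clarity, not from the original source): for a
 * region that does not tolerate unaligned accesses, addr = 0x1006 gives
 * addr & -addr = 2, so an 8-byte request is clamped to a 2-byte access; at
 * addr = 0x1000 the alignment bound is 0x1000 and only valid.max_access_size
 * (or the default of 4) limits the access.
 */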
static bool prepare_mmio_access(MemoryRegion *mr)
    bool unlocked = !qemu_mutex_iothread_locked();
    bool release_lock = false;

    if (unlocked && mr->global_locking) {
        qemu_mutex_lock_iothread();
        release_lock = true;
    if (mr->flush_coalesced_mmio) {
            qemu_mutex_lock_iothread();
        qemu_flush_coalesced_mmio_buffer();
            qemu_mutex_unlock_iothread();

    return release_lock;
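
/* Note added for clarity (not part of the original source): a true return
 * value means this helper took the iothread ("big QEMU") lock on behalf of
 * the caller, so the caller must call qemu_mutex_unlock_iothread() once the
 * MMIO dispatch is done, as address_space_rw() does below.
 */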
MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
                             uint8_t *buf, int len, bool is_write)
    MemTxResult result = MEMTX_OK;
    bool release_lock = false;

        mr = address_space_translate(as, addr, &addr1, &l, is_write);

            if (!memory_access_is_direct(mr, is_write)) {
                release_lock |= prepare_mmio_access(mr);
                l = memory_access_size(mr, l, addr1);
                /* XXX: could force current_cpu to NULL to avoid
                    /* 64 bit write access */
                    result |= memory_region_dispatch_write(mr, addr1, val, 8,
                    /* 32 bit write access */
                    result |= memory_region_dispatch_write(mr, addr1, val, 4,
                    /* 16 bit write access */
                    result |= memory_region_dispatch_write(mr, addr1, val, 2,
                    /* 8 bit write access */
                    result |= memory_region_dispatch_write(mr, addr1, val, 1,
                addr1 += memory_region_get_ram_addr(mr);
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(mr, addr1, l);
            if (!memory_access_is_direct(mr, is_write)) {
                release_lock |= prepare_mmio_access(mr);
                l = memory_access_size(mr, l, addr1);
                    /* 64 bit read access */
                    result |= memory_region_dispatch_read(mr, addr1, &val, 8,
                    /* 32 bit read access */
                    result |= memory_region_dispatch_read(mr, addr1, &val, 4,
                    /* 16 bit read access */
                    result |= memory_region_dispatch_read(mr, addr1, &val, 2,
                    /* 8 bit read access */
                    result |= memory_region_dispatch_read(mr, addr1, &val, 1,
                ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
                memcpy(buf, ptr, l);

            qemu_mutex_unlock_iothread();
            release_lock = false;
MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
                                const uint8_t *buf, int len)
{
    return address_space_rw(as, addr, attrs, (uint8_t *)buf, len, true);
}

MemTxResult address_space_read(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
                               uint8_t *buf, int len)
{
    return address_space_rw(as, addr, attrs, buf, len, false);
}


void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
                            int len, int is_write)
{
    address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
                     buf, len, is_write);
}
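/*
 * Illustrative sketch (editor's addition, not part of the original source):
 * a device model would typically access guest memory through the helpers
 * above, e.g. reading a descriptor and writing back a status byte.  The
 * function name and the guest-physical addresses "desc_pa"/"status_pa" are
 * hypothetical.
 */
#if 0
static void example_process_descriptor(AddressSpace *as, hwaddr desc_pa,
                                        hwaddr status_pa)
{
    uint8_t desc[16];
    uint8_t status = 1;

    /* Fetch the descriptor from guest memory. */
    address_space_read(as, desc_pa, MEMTXATTRS_UNSPECIFIED, desc, sizeof(desc));

    /* ... act on the descriptor ... */

    /* Post a completion status back to the guest. */
    address_space_write(as, status_pa, MEMTXATTRS_UNSPECIFIED, &status, 1);
}
#endif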
enum write_rom_type {
    WRITE_DATA,
    FLUSH_CACHE,
};

static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
    hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
{
    hwaddr l;
    uint8_t *ptr;
    hwaddr addr1;
    MemoryRegion *mr;

    rcu_read_lock();
    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);

        if (!(memory_region_is_ram(mr) ||
              memory_region_is_romd(mr))) {
            l = memory_access_size(mr, l, addr1);
        } else {
            addr1 += memory_region_get_ram_addr(mr);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            switch (type) {
            case WRITE_DATA:
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(mr, addr1, l);
                break;
            case FLUSH_CACHE:
                flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
                break;
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
    rcu_read_unlock();
}
/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
                                   const uint8_t *buf, int len)
{
    cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
}
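/*
 * Illustrative sketch (editor's addition): firmware/ROM loaders go through
 * cpu_physical_memory_write_rom() because it writes into ROM and ROM-device
 * regions that a normal address_space_write() would not modify.  "blob",
 * "size" and "base" are hypothetical.
 */
#if 0
static void example_load_firmware(const uint8_t *blob, int size, hwaddr base)
{
    cpu_physical_memory_write_rom(&address_space_memory, base, blob, size);
}
#endif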
void cpu_flush_icache_range(hwaddr start, int len)
{
    /*
     * This function should do the same thing as an icache flush that was
     * triggered from within the guest. For TCG we are always cache coherent,
     * so there is no need to flush anything. For KVM / Xen we need to flush
     * the host's instruction cache at least.
     */
    if (tcg_enabled()) {
        return;
    }

    cpu_physical_memory_write_rom_internal(&address_space_memory,
                                           start, NULL, len, FLUSH_CACHE);
}
typedef struct {
    MemoryRegion *mr;
    void *buffer;
    hwaddr addr;
    hwaddr len;
    bool in_use;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    QEMUBH *bh;
    QLIST_ENTRY(MapClient) link;
} MapClient;

QemuMutex map_client_list_lock;
static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);
static void cpu_unregister_map_client_do(MapClient *client)
{
    QLIST_REMOVE(client, link);
    g_free(client);
}

static void cpu_notify_map_clients_locked(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        qemu_bh_schedule(client->bh);
        cpu_unregister_map_client_do(client);
    }
}
void cpu_register_map_client(QEMUBH *bh)
{
    MapClient *client = g_malloc(sizeof(*client));

    qemu_mutex_lock(&map_client_list_lock);
    client->bh = bh;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    if (!atomic_read(&bounce.in_use)) {
        cpu_notify_map_clients_locked();
    }
    qemu_mutex_unlock(&map_client_list_lock);
}
void cpu_exec_init_all(void)
{
    qemu_mutex_init(&ram_list.mutex);
    memory_map_init();
    io_mem_init();
    qemu_mutex_init(&map_client_list_lock);
}
void cpu_unregister_map_client(QEMUBH *bh)
{
    MapClient *client;

    qemu_mutex_lock(&map_client_list_lock);
    QLIST_FOREACH(client, &map_client_list, link) {
        if (client->bh == bh) {
            cpu_unregister_map_client_do(client);
            break;
        }
    }
    qemu_mutex_unlock(&map_client_list_lock);
}

static void cpu_notify_map_clients(void)
{
    qemu_mutex_lock(&map_client_list_lock);
    cpu_notify_map_clients_locked();
    qemu_mutex_unlock(&map_client_list_lock);
}
bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
{
    MemoryRegion *mr;
    hwaddr l, xlat;

    rcu_read_lock();
    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (!memory_access_is_direct(mr, is_write)) {
            l = memory_access_size(mr, l, addr);
            if (!memory_region_access_valid(mr, xlat, l, is_write)) {
                return false;
            }
        }

        len -= l;
        addr += l;
    }
    rcu_read_unlock();
    return true;
}
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *address_space_map(AddressSpace *as,
                        hwaddr addr,
                        hwaddr *plen,
                        bool is_write)
{
    hwaddr len = *plen;
    hwaddr done = 0;
    hwaddr l, xlat, base;
    MemoryRegion *mr, *this_mr;
    ram_addr_t raddr;

    if (len == 0) {
        return NULL;
    }

    l = len;
    rcu_read_lock();
    mr = address_space_translate(as, addr, &xlat, &l, is_write);

    if (!memory_access_is_direct(mr, is_write)) {
        if (atomic_xchg(&bounce.in_use, true)) {
            rcu_read_unlock();
            return NULL;
        }
        /* Avoid unbounded allocations */
        l = MIN(l, TARGET_PAGE_SIZE);
        bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
        bounce.addr = addr;
        bounce.len = l;

        memory_region_ref(mr);
        bounce.mr = mr;
        if (!is_write) {
            address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
                               bounce.buffer, l);
        }

        rcu_read_unlock();
        *plen = l;
        return bounce.buffer;
    }

    base = xlat;
    raddr = memory_region_get_ram_addr(mr);

    for (;;) {
        len -= l;
        addr += l;
        done += l;
        if (len == 0) {
            break;
        }

        l = len;
        this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (this_mr != mr || xlat != base + done) {
            break;
        }
    }

    memory_region_ref(mr);
    rcu_read_unlock();
    *plen = done;
    return qemu_ram_ptr_length(raddr + base, plen);
}
/* Unmaps a memory region previously mapped by address_space_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         int is_write, hwaddr access_len)
{
    if (buffer != bounce.buffer) {
        MemoryRegion *mr;
        ram_addr_t addr1;

        mr = qemu_ram_addr_from_host(buffer, &addr1);
        assert(mr != NULL);
        if (is_write) {
            invalidate_and_set_dirty(mr, addr1, access_len);
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        memory_region_unref(mr);
        return;
    }
    if (is_write) {
        address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
                            bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    memory_region_unref(bounce.mr);
    atomic_mb_set(&bounce.in_use, false);
    cpu_notify_map_clients();
}
void *cpu_physical_memory_map(hwaddr addr,
                              hwaddr *plen,
                              int is_write)
{
    return address_space_map(&address_space_memory, addr, plen, is_write);
}

void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                               int is_write, hwaddr access_len)
{
    return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
}
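/*
 * Illustrative sketch (editor's addition): the usual zero-copy pattern built
 * on address_space_map()/address_space_unmap().  If the mapping fails (for
 * example because the single bounce buffer is already in use), a caller
 * registers a QEMUBH with cpu_register_map_client() and retries from that
 * callback.  "example_dma_fill" and "retry_bh" are hypothetical.
 */
#if 0
static void example_dma_fill(AddressSpace *as, hwaddr pa, hwaddr len,
                             QEMUBH *retry_bh)
{
    hwaddr plen = len;
    void *host = address_space_map(as, pa, &plen, true);

    if (!host) {
        /* Resources exhausted: ask to be notified when a retry may succeed. */
        cpu_register_map_client(retry_bh);
        return;
    }

    memset(host, 0, plen);               /* produce the data directly in place */
    address_space_unmap(as, host, plen, true, plen);
}
#endif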
/* warning: addr must be aligned */
static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
                                                  MemTxAttrs attrs,
                                                  MemTxResult *result,
                                                  enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, false);
    if (l < 4 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}
uint32_t address_space_ldl(AddressSpace *as, hwaddr addr,
                           MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_NATIVE_ENDIAN);
}

uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_LITTLE_ENDIAN);
}

uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_BIG_ENDIAN);
}

uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}
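/*
 * Illustrative sketch (editor's addition): device code that parses a
 * little-endian structure in guest RAM uses the explicit-endian accessor
 * regardless of the target's native byte order.  "table_pa" is hypothetical.
 */
#if 0
static uint32_t example_read_table_entry(AddressSpace *as, hwaddr table_pa,
                                         int index)
{
    return ldl_le_phys(as, table_pa + index * 4);
}
#endif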
/* warning: addr must be aligned */
static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
                                                  MemTxAttrs attrs,
                                                  MemTxResult *result,
                                                  enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 8;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 false);
    if (l < 8 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap64(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap64(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}
uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
                           MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_NATIVE_ENDIAN);
}

uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_LITTLE_ENDIAN);
}

uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_BIG_ENDIAN);
}

uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}
uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
                            MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t val;
    MemTxResult r;

    r = address_space_rw(as, addr, attrs, &val, 1, 0);
    if (result) {
        *result = r;
    }
    return val;
}

uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}
/* warning: addr must be aligned */
static inline uint32_t address_space_lduw_internal(AddressSpace *as,
                                                   hwaddr addr,
                                                   MemTxAttrs attrs,
                                                   MemTxResult *result,
                                                   enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 false);
    if (l < 2 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}
uint32_t address_space_lduw(AddressSpace *as, hwaddr addr,
                            MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_NATIVE_ENDIAN);
}

uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
                               MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_LITTLE_ENDIAN);
}

uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
                               MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_BIG_ENDIAN);
}

uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}
/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
                                MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    uint8_t dirty_log_mask;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

        r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
    } else {
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        dirty_log_mask = memory_region_get_dirty_log_mask(mr);
        dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
        cpu_physical_memory_set_dirty_range(addr1, 4, dirty_log_mask);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}

void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_notdirty(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
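/*
 * Illustrative sketch (editor's addition): a target's page-table walker can
 * set accessed/dirty bits with stl_phys_notdirty() so that the update does
 * not itself dirty the page or invalidate translated code.  PTE_DIRTY and
 * "pte_pa" are hypothetical.
 */
#if 0
#define PTE_DIRTY 0x40

static void example_mark_pte_dirty(AddressSpace *as, hwaddr pte_pa)
{
    uint32_t pte = ldl_phys(as, pte_pa);

    stl_phys_notdirty(as, pte_pa, pte | PTE_DIRTY);
}
#endif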
/* warning: addr must be aligned */
static inline void address_space_stl_internal(AddressSpace *as,
                                              hwaddr addr, uint32_t val,
                                              MemTxAttrs attrs,
                                              MemTxResult *result,
                                              enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(mr, addr1, 4);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}
void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stl_internal(as, addr, val, attrs, result,
                               DEVICE_NATIVE_ENDIAN);
}

void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stl_internal(as, addr, val, attrs, result,
                               DEVICE_LITTLE_ENDIAN);
}

void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stl_internal(as, addr, val, attrs, result,
                               DEVICE_BIG_ENDIAN);
}

void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t v = val;
    MemTxResult r;

    r = address_space_rw(as, addr, attrs, &v, 1, 1);
    if (result) {
        *result = r;
    }
}

void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
/* warning: addr must be aligned */
static inline void address_space_stw_internal(AddressSpace *as,
                                              hwaddr addr, uint32_t val,
                                              MemTxAttrs attrs,
                                              MemTxResult *result,
                                              enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, true);
    if (l < 2 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(mr, addr1, 2);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}
void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_NATIVE_ENDIAN);
}

void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_LITTLE_ENDIAN);
}

void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_BIG_ENDIAN);
}

void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;

    val = tswap64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;

    val = cpu_to_le64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;

    val = cpu_to_be64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}
void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(cpu, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write) {
            cpu_physical_memory_write_rom(cpu->as, phys_addr, buf, l);
        } else {
            address_space_rw(cpu->as, phys_addr, MEMTXATTRS_UNSPECIFIED,
                             buf, l, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
#endif
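/*
 * Illustrative sketch (editor's addition): debugger-style accessors (such as
 * the gdbstub) go through cpu_memory_rw_debug() because it translates guest
 * virtual addresses via the given CPU and can also patch ROM.
 * "example_peek_u32" is hypothetical.
 */
#if 0
static int example_peek_u32(CPUState *cpu, target_ulong va, uint32_t *out)
{
    return cpu_memory_rw_debug(cpu, va, (uint8_t *)out, sizeof(*out), 0);
}
#endif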
/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool target_words_bigendian(void);
bool target_words_bigendian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}
#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegion *mr;
    hwaddr l = 1;
    bool res;

    rcu_read_lock();
    mr = address_space_translate(&address_space_memory,
                                 phys_addr, &phys_addr, &l, false);

    res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
    rcu_read_unlock();
    return res;
}
int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
{
    RAMBlock *block;
    int ret = 0;

    rcu_read_lock();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        ret = func(block->idstr, block->host, block->offset,
                   block->used_length, opaque);