/*
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <sys/types.h>
#include "qemu-common.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#endif
#include "exec/cpu-all.h"
#include "exec/cputlb.h"
#include "translate-all.h"
#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "qemu/range.h"

//#define DEBUG_SUBPAGE
#if !defined(CONFIG_USER_ONLY)
static bool in_migration;

RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;
/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC   (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED     (1 << 1)

/* Only a portion of RAM (used_length) is actually used, and migrated.
 * This used_length size can change across reboots.
 */
#define RAM_RESIZEABLE (1 << 2)
struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);

/* current CPU in the current thread. It is only valid inside cpu_exec() */
DEFINE_TLS(CPUState *, current_cpu);

/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables. */
#define ADDR_SPACE_BITS 64

#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)

typedef PhysPageEntry Node[P_L2_SIZE];
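/* Illustrative note (configuration values assumed, not taken from this build):
 * the physical address space is tracked with a radix tree of PhysPageEntry
 * nodes.  Each level consumes P_L2_BITS of the page-frame number, so with
 * TARGET_PAGE_BITS == 12 and P_L2_BITS == 10 a 64-bit address is resolved in
 * ((64 - 12 - 1) / 10) + 1 = 6 levels.  Leaf entries (skip == 0) index
 * phys_sections, interior entries index phys_map_nodes, and skip > 1 lets a
 * chain of single-child nodes be collapsed (see phys_page_compact below). */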
typedef struct PhysPageMap {
    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;
};
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;
#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
    }
}
static uint32_t phys_map_node_alloc(PhysPageMap *map)
{
    unsigned i;
    uint32_t ret;

    ret = map->nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);
    for (i = 0; i < P_L2_SIZE; ++i) {
        map->nodes[ret][i].skip = 1;
        map->nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}
static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map);
        p = map->nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < P_L2_SIZE; i++) {
                p[i].skip = 0;
                p[i].ptr = PHYS_SECTION_UNASSIGNED;
            }
        }
    } else {
        p = map->nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}
static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}
/* Compact a non-leaf page entry. Simply detect that the entry has a single child,
 * and update our entry so we can skip it and go directly to the destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
{
    unsigned valid_ptr = P_L2_SIZE;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }
        phys_page_compact(&p[i], nodes, compacted);
    }

    /* We can only compress if there's only one child. */
    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}
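/* Worked example for the compaction above (tree shape assumed for
 * illustration): when a lookup path root -> A -> B -> leaf passes through
 * interior nodes A and B that each have exactly one valid child, root is
 * rewired to point directly at the leaf and root->skip accumulates the
 * skipped levels, so phys_page_find() can subtract skip from its level
 * counter and resolve the address in one step instead of three. */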
static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    DECLARE_BITMAP(compacted, nodes_nb);

    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes, compacted);
    }
}
static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (sections[lp.ptr].size.hi ||
        range_covers_byte(sections[lp.ptr].offset_within_address_space,
                          sections[lp.ptr].size.lo, addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}
bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section;
    subpage_t *subpage;

    section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    return section;
}
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    diff = int128_sub(section->mr->size, int128_make64(addr));
    *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    return section;
}
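/* Example of the *plen clipping above (numbers illustrative only): for a
 * section whose MemoryRegion is 0x1000 bytes long, a translation at offset
 * 0xe00 with *plen == 0x400 comes back with *plen == 0x200; the caller may
 * only touch what is left of this section and must re-translate to reach
 * the remainder. */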
static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
{
    if (memory_region_is_ram(mr)) {
        return !(is_write && mr->readonly);
    }
    if (memory_region_is_romd(mr)) {
        return !is_write;
    }

    return false;
}
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;
    hwaddr len = *plen;

    for (;;) {
        section = address_space_translate_internal(as->dispatch, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        len = MIN(len, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        len = MIN(page, len);
    }

    *plen = len;
    *xlat = addr;
    return mr;
}
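/* Note on the IOMMU loop above (values illustrative): each pass maps addr
 * through one IOMMU and clamps len to the translated window, e.g. an IOTLB
 * entry with addr_mask == 0xfff leaves at most (addr | 0xfff) - addr + 1
 * bytes reachable before another translation is needed; a permission miss
 * redirects the access to io_mem_unassigned instead. */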
MemoryRegionSection *
address_space_translate_for_iotlb(AddressSpace *as, hwaddr addr, hwaddr *xlat,
                                  hwaddr *plen)
{
    MemoryRegionSection *section;
    section = address_space_translate_internal(as->dispatch, addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    qemu_mutex_init(&ram_list.mutex);
    memory_map_init();
    io_mem_init();
#endif
}
#if !defined(CONFIG_USER_ONLY)
static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;

    return 0;
}

static int cpu_common_pre_load(void *opaque)
{
    CPUState *cpu = opaque;

    cpu->exception_index = 0;

    return 0;
}

static bool cpu_common_exception_index_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return cpu->exception_index != 0;
}

static const VMStateDescription vmstate_cpu_common_exception_index = {
    .name = "cpu_common/exception_index",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(exception_index, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (VMStateSubsection[]) {
        {
            .vmsd = &vmstate_cpu_common_exception_index,
            .needed = cpu_common_exception_index_needed,
        }
    }
};
CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}
#if !defined(CONFIG_USER_ONLY)
void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as)
{
    /* We only support one address space per cpu at the moment.  */
    assert(cpu->as == as);

    if (cpu->tcg_as_listener) {
        memory_listener_unregister(cpu->tcg_as_listener);
    } else {
        cpu->tcg_as_listener = g_new0(MemoryListener, 1);
    }
    cpu->tcg_as_listener->commit = tcg_commit;
    memory_listener_register(cpu->tcg_as_listener, as);
}
#endif
void cpu_exec_init(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUState *some_cpu;
    int cpu_index = 0;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    CPU_FOREACH(some_cpu) {
        cpu_index++;
    }
    cpu->cpu_index = cpu_index;
    QTAILQ_INIT(&cpu->breakpoints);
    QTAILQ_INIT(&cpu->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->as = &address_space_memory;
    cpu->thread_id = qemu_get_thread_id();
#endif
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
    }
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
    assert(cc->vmsd == NULL);
    assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}
#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    hwaddr phys = cpu_get_phys_page_debug(cpu, pc);
    if (phys != -1) {
        tb_invalidate_phys_addr(cpu->as,
                                phys | (pc & ~TARGET_PAGE_MASK));
    }
}
#endif
#endif /* TARGET_HAS_ICE */
#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
}

int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    return -ENOSYS;
}

void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
}

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    CPUWatchpoint *wp;

    /* forbid ranges which are empty or run off the end of the address space */
    if (len == 0 || (addr + len - 1) < addr) {
        error_report("tried to set invalid watchpoint at %"
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
    }

    tlb_flush_page(cpu, addr);
/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (addr == wp->vaddr && len == wp->len
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);

    tlb_flush_page(cpu, watchpoint->vaddr);
}
/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
        }
    }
}
/* Return true if this watchpoint address matches the specified
 * access (ie the address range covered by the watchpoint overlaps
 * partially or completely with the address range covered by the
 * access).
 */
static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
                                                  vaddr addr,
                                                  vaddr len)
{
    /* We know the lengths are non-zero, but a little caution is
     * required to avoid errors in the case where the range ends
     * exactly at the top of the address space and so addr + len
     * wraps round to zero.
     */
    vaddr wpend = wp->vaddr + wp->len - 1;
    vaddr addrend = addr + len - 1;

    return !(addr > wpend || wp->vaddr > addrend);
}
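/* Example of the overflow-safe overlap test above (illustrative 16-bit
 * values): a watchpoint at 0xfff0 with len 0x10 gives wpend == 0xffff
 * instead of wrapping to 0, and an access at 0xfffc of length 4 gives
 * addrend == 0xffff, so the two ranges are correctly reported as
 * overlapping even though addr + len itself would wrap to zero. */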
/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
    }

    breakpoint_invalidate(cpu, pc);

    return 0;
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
            return 0;
        }
    }
    return -ENOENT;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);

    breakpoint_invalidate(cpu, breakpoint->pc);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
        }
    }
#endif
}
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            CPUArchState *env = cpu->env_ptr;
            tb_flush(env);
        }
    }
#endif
}
void cpu_abort(CPUState *cpu, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}
#if !defined(CONFIG_USER_ONLY)
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    /* The list is protected by the iothread lock here.  */
    block = ram_list.mru_block;
    if (block && addr - block->offset < block->max_length) {
        goto found;
    }
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->max_length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    ram_list.mru_block = block;
    return block;
}

static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t end;
    uintptr_t start1;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
    cpu_tlb_reset_dirty_all(start1, length);
}
/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t length,
                                     unsigned client)
{
    cpu_physical_memory_clear_dirty_range_type(start, length, client);

    if (tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
    }
}

static void cpu_physical_memory_set_dirty_tracking(bool enable)
{
    in_migration = enable;
}
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        iotlb = section - section->address_space->dispatch->map.sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
                               qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this.  Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
{
    phys_mem_alloc = alloc;
}
static uint16_t phys_section_add(PhysPageMap *map,
                                 MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(map->sections_nb < TARGET_PAGE_SIZE);

    if (map->sections_nb == map->sections_nb_alloc) {
        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
        map->sections = g_renew(MemoryRegionSection, map->sections,
                                map->sections_nb_alloc);
    }
    map->sections[map->sections_nb] = *section;
    memory_region_ref(section->mr);
    return map->sections_nb++;
}
static void phys_section_destroy(MemoryRegion *mr)
{
    memory_region_unref(mr);

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        object_unref(OBJECT(&subpage->iomem));
        g_free(subpage);
    }
}

static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->sections);
}
static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
                                                   d->map.nodes, d->map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.address_space = d->as;
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&d->map, &subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end,
                     phys_section_add(&d->map, section));
}
static void register_multipage(AddressSpaceDispatch *d,
                               MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(&d->map, section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,
                                                    TARGET_PAGE_BITS));

    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
}
static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = as->next_dispatch;
    MemoryRegionSection now = *section, remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
                        - now.offset_within_address_space;
        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(d, &now);
    } else {
        now.size = int128_zero();
    }
    while (int128_ne(remain.size, now.size)) {
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);
        now = remain;
        if (int128_lt(remain.size, page_size)) {
            register_subpage(d, &now);
        } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
            now.size = page_size;
            register_subpage(d, &now);
        } else {
            now.size = int128_and(now.size, int128_neg(page_size));
            register_multipage(d, &now);
        }
    }
}
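/* Example of the page splitting done by mem_add above (section bounds and a
 * 4 KiB page size assumed for illustration): a section covering
 * [0x1800, 0x4800) is registered as a subpage for [0x1800, 0x2000), as full
 * pages for [0x2000, 0x4000) via register_multipage(), and as a trailing
 * subpage for [0x4000, 0x4800), so only the unaligned head and tail take
 * the slower subpage path. */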
void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled()) {
        kvm_flush_coalesced_mmio_buffer();
    }
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}
1064 #include <sys/vfs.h>
1066 #define HUGETLBFS_MAGIC 0x958458f6
1068 static long gethugepagesize(const char *path
, Error
**errp
)
1074 ret
= statfs(path
, &fs
);
1075 } while (ret
!= 0 && errno
== EINTR
);
1078 error_setg_errno(errp
, errno
, "failed to get page size of file %s",
1083 if (fs
.f_type
!= HUGETLBFS_MAGIC
)
1084 fprintf(stderr
, "Warning: path not on HugeTLBFS: %s\n", path
);
1089 static void *file_ram_alloc(RAMBlock
*block
,
1095 char *sanitized_name
;
1100 Error
*local_err
= NULL
;
1102 hpagesize
= gethugepagesize(path
, &local_err
);
1104 error_propagate(errp
, local_err
);
1107 block
->mr
->align
= hpagesize
;
1109 if (memory
< hpagesize
) {
1110 error_setg(errp
, "memory size 0x" RAM_ADDR_FMT
" must be equal to "
1111 "or larger than huge page size 0x%" PRIx64
,
1116 if (kvm_enabled() && !kvm_has_sync_mmu()) {
1118 "host lacks kvm mmu notifiers, -mem-path unsupported");
1122 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
1123 sanitized_name
= g_strdup(memory_region_name(block
->mr
));
1124 for (c
= sanitized_name
; *c
!= '\0'; c
++) {
1129 filename
= g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path
,
1131 g_free(sanitized_name
);
1133 fd
= mkstemp(filename
);
1135 error_setg_errno(errp
, errno
,
1136 "unable to create backing store for hugepages");
1143 memory
= (memory
+hpagesize
-1) & ~(hpagesize
-1);
1146 * ftruncate is not supported by hugetlbfs in older
1147 * hosts, so don't bother bailing out on errors.
1148 * If anything goes wrong with it under other filesystems,
1151 if (ftruncate(fd
, memory
)) {
1152 perror("ftruncate");
1155 area
= mmap(0, memory
, PROT_READ
| PROT_WRITE
,
1156 (block
->flags
& RAM_SHARED
? MAP_SHARED
: MAP_PRIVATE
),
1158 if (area
== MAP_FAILED
) {
1159 error_setg_errno(errp
, errno
,
1160 "unable to map backing store for hugepages");
1166 os_mem_prealloc(fd
, area
, memory
);
1174 error_report("%s\n", error_get_pretty(*errp
));
1181 static ram_addr_t
find_ram_offset(ram_addr_t size
)
1183 RAMBlock
*block
, *next_block
;
1184 ram_addr_t offset
= RAM_ADDR_MAX
, mingap
= RAM_ADDR_MAX
;
1186 assert(size
!= 0); /* it would hand out same offset multiple times */
1188 if (QTAILQ_EMPTY(&ram_list
.blocks
))
1191 QTAILQ_FOREACH(block
, &ram_list
.blocks
, next
) {
1192 ram_addr_t end
, next
= RAM_ADDR_MAX
;
1194 end
= block
->offset
+ block
->max_length
;
1196 QTAILQ_FOREACH(next_block
, &ram_list
.blocks
, next
) {
1197 if (next_block
->offset
>= end
) {
1198 next
= MIN(next
, next_block
->offset
);
1201 if (next
- end
>= size
&& next
- end
< mingap
) {
1203 mingap
= next
- end
;
1207 if (offset
== RAM_ADDR_MAX
) {
1208 fprintf(stderr
, "Failed to find gap of requested size: %" PRIu64
"\n",
1216 ram_addr_t
last_ram_offset(void)
1219 ram_addr_t last
= 0;
1221 QTAILQ_FOREACH(block
, &ram_list
.blocks
, next
)
1222 last
= MAX(last
, block
->offset
+ block
->max_length
);
1227 static void qemu_ram_setup_dump(void *addr
, ram_addr_t size
)
1231 /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
1232 if (!qemu_opt_get_bool(qemu_get_machine_opts(),
1233 "dump-guest-core", true)) {
1234 ret
= qemu_madvise(addr
, size
, QEMU_MADV_DONTDUMP
);
1236 perror("qemu_madvise");
1237 fprintf(stderr
, "madvise doesn't support MADV_DONTDUMP, "
1238 "but dump_guest_core=off specified\n");
1243 static RAMBlock
*find_ram_block(ram_addr_t addr
)
1247 QTAILQ_FOREACH(block
, &ram_list
.blocks
, next
) {
1248 if (block
->offset
== addr
) {
1256 void qemu_ram_set_idstr(ram_addr_t addr
, const char *name
, DeviceState
*dev
)
1258 RAMBlock
*new_block
= find_ram_block(addr
);
1262 assert(!new_block
->idstr
[0]);
1265 char *id
= qdev_get_dev_path(dev
);
1267 snprintf(new_block
->idstr
, sizeof(new_block
->idstr
), "%s/", id
);
1271 pstrcat(new_block
->idstr
, sizeof(new_block
->idstr
), name
);
1273 /* This assumes the iothread lock is taken here too. */
1274 qemu_mutex_lock_ramlist();
1275 QTAILQ_FOREACH(block
, &ram_list
.blocks
, next
) {
1276 if (block
!= new_block
&& !strcmp(block
->idstr
, new_block
->idstr
)) {
1277 fprintf(stderr
, "RAMBlock \"%s\" already registered, abort!\n",
1282 qemu_mutex_unlock_ramlist();
1285 void qemu_ram_unset_idstr(ram_addr_t addr
)
1287 RAMBlock
*block
= find_ram_block(addr
);
1290 memset(block
->idstr
, 0, sizeof(block
->idstr
));
1294 static int memory_try_enable_merging(void *addr
, size_t len
)
1296 if (!qemu_opt_get_bool(qemu_get_machine_opts(), "mem-merge", true)) {
1297 /* disabled by the user */
1301 return qemu_madvise(addr
, len
, QEMU_MADV_MERGEABLE
);
1304 /* Only legal before guest might have detected the memory size: e.g. on
1305 * incoming migration, or right after reset.
1307 * As memory core doesn't know how is memory accessed, it is up to
1308 * resize callback to update device state and/or add assertions to detect
1309 * misuse, if necessary.
1311 int qemu_ram_resize(ram_addr_t base
, ram_addr_t newsize
, Error
**errp
)
1313 RAMBlock
*block
= find_ram_block(base
);
1317 if (block
->used_length
== newsize
) {
1321 if (!(block
->flags
& RAM_RESIZEABLE
)) {
1322 error_setg_errno(errp
, EINVAL
,
1323 "Length mismatch: %s: 0x" RAM_ADDR_FMT
1324 " in != 0x" RAM_ADDR_FMT
, block
->idstr
,
1325 newsize
, block
->used_length
);
1329 if (block
->max_length
< newsize
) {
1330 error_setg_errno(errp
, EINVAL
,
1331 "Length too large: %s: 0x" RAM_ADDR_FMT
1332 " > 0x" RAM_ADDR_FMT
, block
->idstr
,
1333 newsize
, block
->max_length
);
1337 cpu_physical_memory_clear_dirty_range(block
->offset
, block
->used_length
);
1338 block
->used_length
= newsize
;
1339 cpu_physical_memory_set_dirty_range(block
->offset
, block
->used_length
);
1340 memory_region_set_size(block
->mr
, newsize
);
1341 if (block
->resized
) {
1342 block
->resized(block
->idstr
, newsize
, block
->host
);
1347 static ram_addr_t
ram_block_add(RAMBlock
*new_block
, Error
**errp
)
1350 ram_addr_t old_ram_size
, new_ram_size
;
1352 old_ram_size
= last_ram_offset() >> TARGET_PAGE_BITS
;
1354 /* This assumes the iothread lock is taken here too. */
1355 qemu_mutex_lock_ramlist();
1356 new_block
->offset
= find_ram_offset(new_block
->max_length
);
1358 if (!new_block
->host
) {
1359 if (xen_enabled()) {
1360 xen_ram_alloc(new_block
->offset
, new_block
->max_length
,
1363 new_block
->host
= phys_mem_alloc(new_block
->max_length
,
1364 &new_block
->mr
->align
);
1365 if (!new_block
->host
) {
1366 error_setg_errno(errp
, errno
,
1367 "cannot set up guest memory '%s'",
1368 memory_region_name(new_block
->mr
));
1369 qemu_mutex_unlock_ramlist();
1372 memory_try_enable_merging(new_block
->host
, new_block
->max_length
);
1376 /* Keep the list sorted from biggest to smallest block. */
1377 QTAILQ_FOREACH(block
, &ram_list
.blocks
, next
) {
1378 if (block
->max_length
< new_block
->max_length
) {
1383 QTAILQ_INSERT_BEFORE(block
, new_block
, next
);
1385 QTAILQ_INSERT_TAIL(&ram_list
.blocks
, new_block
, next
);
1387 ram_list
.mru_block
= NULL
;
1390 qemu_mutex_unlock_ramlist();
1392 new_ram_size
= last_ram_offset() >> TARGET_PAGE_BITS
;
1394 if (new_ram_size
> old_ram_size
) {
1396 for (i
= 0; i
< DIRTY_MEMORY_NUM
; i
++) {
1397 ram_list
.dirty_memory
[i
] =
1398 bitmap_zero_extend(ram_list
.dirty_memory
[i
],
1399 old_ram_size
, new_ram_size
);
1402 cpu_physical_memory_set_dirty_range(new_block
->offset
,
1403 new_block
->used_length
);
1405 qemu_ram_setup_dump(new_block
->host
, new_block
->max_length
);
1406 qemu_madvise(new_block
->host
, new_block
->max_length
, QEMU_MADV_HUGEPAGE
);
1407 qemu_madvise(new_block
->host
, new_block
->max_length
, QEMU_MADV_DONTFORK
);
1409 if (kvm_enabled()) {
1410 kvm_setup_guest_memory(new_block
->host
, new_block
->max_length
);
1413 return new_block
->offset
;
1417 ram_addr_t
qemu_ram_alloc_from_file(ram_addr_t size
, MemoryRegion
*mr
,
1418 bool share
, const char *mem_path
,
1421 RAMBlock
*new_block
;
1423 Error
*local_err
= NULL
;
1425 if (xen_enabled()) {
1426 error_setg(errp
, "-mem-path not supported with Xen");
1430 if (phys_mem_alloc
!= qemu_anon_ram_alloc
) {
1432 * file_ram_alloc() needs to allocate just like
1433 * phys_mem_alloc, but we haven't bothered to provide
1437 "-mem-path not supported with this accelerator");
1441 size
= TARGET_PAGE_ALIGN(size
);
1442 new_block
= g_malloc0(sizeof(*new_block
));
1444 new_block
->used_length
= size
;
1445 new_block
->max_length
= size
;
1446 new_block
->flags
= share
? RAM_SHARED
: 0;
1447 new_block
->host
= file_ram_alloc(new_block
, size
,
1449 if (!new_block
->host
) {
1454 addr
= ram_block_add(new_block
, &local_err
);
1457 error_propagate(errp
, local_err
);
1465 ram_addr_t
qemu_ram_alloc_internal(ram_addr_t size
, ram_addr_t max_size
,
1466 void (*resized
)(const char*,
1469 void *host
, bool resizeable
,
1470 MemoryRegion
*mr
, Error
**errp
)
1472 RAMBlock
*new_block
;
1474 Error
*local_err
= NULL
;
1476 size
= TARGET_PAGE_ALIGN(size
);
1477 max_size
= TARGET_PAGE_ALIGN(max_size
);
1478 new_block
= g_malloc0(sizeof(*new_block
));
1480 new_block
->resized
= resized
;
1481 new_block
->used_length
= size
;
1482 new_block
->max_length
= max_size
;
1483 assert(max_size
>= size
);
1485 new_block
->host
= host
;
1487 new_block
->flags
|= RAM_PREALLOC
;
1490 new_block
->flags
|= RAM_RESIZEABLE
;
1492 addr
= ram_block_add(new_block
, &local_err
);
1495 error_propagate(errp
, local_err
);
1501 ram_addr_t
qemu_ram_alloc_from_ptr(ram_addr_t size
, void *host
,
1502 MemoryRegion
*mr
, Error
**errp
)
1504 return qemu_ram_alloc_internal(size
, size
, NULL
, host
, false, mr
, errp
);
1507 ram_addr_t
qemu_ram_alloc(ram_addr_t size
, MemoryRegion
*mr
, Error
**errp
)
1509 return qemu_ram_alloc_internal(size
, size
, NULL
, NULL
, false, mr
, errp
);
1512 ram_addr_t
qemu_ram_alloc_resizeable(ram_addr_t size
, ram_addr_t maxsz
,
1513 void (*resized
)(const char*,
1516 MemoryRegion
*mr
, Error
**errp
)
1518 return qemu_ram_alloc_internal(size
, maxsz
, resized
, NULL
, true, mr
, errp
);
1521 void qemu_ram_free_from_ptr(ram_addr_t addr
)
1525 /* This assumes the iothread lock is taken here too. */
1526 qemu_mutex_lock_ramlist();
1527 QTAILQ_FOREACH(block
, &ram_list
.blocks
, next
) {
1528 if (addr
== block
->offset
) {
1529 QTAILQ_REMOVE(&ram_list
.blocks
, block
, next
);
1530 ram_list
.mru_block
= NULL
;
1536 qemu_mutex_unlock_ramlist();
1539 void qemu_ram_free(ram_addr_t addr
)
1543 /* This assumes the iothread lock is taken here too. */
1544 qemu_mutex_lock_ramlist();
1545 QTAILQ_FOREACH(block
, &ram_list
.blocks
, next
) {
1546 if (addr
== block
->offset
) {
1547 QTAILQ_REMOVE(&ram_list
.blocks
, block
, next
);
1548 ram_list
.mru_block
= NULL
;
1550 if (block
->flags
& RAM_PREALLOC
) {
1552 } else if (xen_enabled()) {
1553 xen_invalidate_map_cache_entry(block
->host
);
1555 } else if (block
->fd
>= 0) {
1556 munmap(block
->host
, block
->max_length
);
1560 qemu_anon_ram_free(block
->host
, block
->max_length
);
1566 qemu_mutex_unlock_ramlist();
1571 void qemu_ram_remap(ram_addr_t addr
, ram_addr_t length
)
1578 QTAILQ_FOREACH(block
, &ram_list
.blocks
, next
) {
1579 offset
= addr
- block
->offset
;
1580 if (offset
< block
->max_length
) {
1581 vaddr
= ramblock_ptr(block
, offset
);
1582 if (block
->flags
& RAM_PREALLOC
) {
1584 } else if (xen_enabled()) {
1588 munmap(vaddr
, length
);
1589 if (block
->fd
>= 0) {
1590 flags
|= (block
->flags
& RAM_SHARED
?
1591 MAP_SHARED
: MAP_PRIVATE
);
1592 area
= mmap(vaddr
, length
, PROT_READ
| PROT_WRITE
,
1593 flags
, block
->fd
, offset
);
1596 * Remap needs to match alloc. Accelerators that
1597 * set phys_mem_alloc never remap. If they did,
1598 * we'd need a remap hook here.
1600 assert(phys_mem_alloc
== qemu_anon_ram_alloc
);
1602 flags
|= MAP_PRIVATE
| MAP_ANONYMOUS
;
1603 area
= mmap(vaddr
, length
, PROT_READ
| PROT_WRITE
,
1606 if (area
!= vaddr
) {
1607 fprintf(stderr
, "Could not remap addr: "
1608 RAM_ADDR_FMT
"@" RAM_ADDR_FMT
"\n",
1612 memory_try_enable_merging(vaddr
, length
);
1613 qemu_ram_setup_dump(vaddr
, length
);
1619 #endif /* !_WIN32 */
1621 int qemu_get_ram_fd(ram_addr_t addr
)
1623 RAMBlock
*block
= qemu_get_ram_block(addr
);
1628 void *qemu_get_ram_block_host_ptr(ram_addr_t addr
)
1630 RAMBlock
*block
= qemu_get_ram_block(addr
);
1632 return ramblock_ptr(block
, 0);
1635 /* Return a host pointer to ram allocated with qemu_ram_alloc.
1636 With the exception of the softmmu code in this file, this should
1637 only be used for local memory (e.g. video ram) that the device owns,
1638 and knows it isn't going to access beyond the end of the block.
1640 It should not be used for general purpose DMA.
1641 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
1643 void *qemu_get_ram_ptr(ram_addr_t addr
)
1645 RAMBlock
*block
= qemu_get_ram_block(addr
);
1647 if (xen_enabled()) {
1648 /* We need to check if the requested address is in the RAM
1649 * because we don't want to map the entire memory in QEMU.
1650 * In that case just map until the end of the page.
1652 if (block
->offset
== 0) {
1653 return xen_map_cache(addr
, 0, 0);
1654 } else if (block
->host
== NULL
) {
1656 xen_map_cache(block
->offset
, block
->max_length
, 1);
1659 return ramblock_ptr(block
, addr
- block
->offset
);
1662 /* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
1663 * but takes a size argument */
1664 static void *qemu_ram_ptr_length(ram_addr_t addr
, hwaddr
*size
)
1669 if (xen_enabled()) {
1670 return xen_map_cache(addr
, *size
, 1);
1674 QTAILQ_FOREACH(block
, &ram_list
.blocks
, next
) {
1675 if (addr
- block
->offset
< block
->max_length
) {
1676 if (addr
- block
->offset
+ *size
> block
->max_length
)
1677 *size
= block
->max_length
- addr
+ block
->offset
;
1678 return ramblock_ptr(block
, addr
- block
->offset
);
1682 fprintf(stderr
, "Bad ram offset %" PRIx64
"\n", (uint64_t)addr
);
1687 /* Some of the softmmu routines need to translate from a host pointer
1688 (typically a TLB entry) back to a ram offset. */
1689 MemoryRegion
*qemu_ram_addr_from_host(void *ptr
, ram_addr_t
*ram_addr
)
1692 uint8_t *host
= ptr
;
1694 if (xen_enabled()) {
1695 *ram_addr
= xen_ram_addr_from_mapcache(ptr
);
1696 return qemu_get_ram_block(*ram_addr
)->mr
;
1699 block
= ram_list
.mru_block
;
1700 if (block
&& block
->host
&& host
- block
->host
< block
->max_length
) {
1704 QTAILQ_FOREACH(block
, &ram_list
.blocks
, next
) {
        /* This case appears when the block is not mapped. */
1706 if (block
->host
== NULL
) {
1709 if (host
- block
->host
< block
->max_length
) {
1717 *ram_addr
= block
->offset
+ (host
- block
->host
);
1721 static void notdirty_mem_write(void *opaque
, hwaddr ram_addr
,
1722 uint64_t val
, unsigned size
)
1724 if (!cpu_physical_memory_get_dirty_flag(ram_addr
, DIRTY_MEMORY_CODE
)) {
1725 tb_invalidate_phys_page_fast(ram_addr
, size
);
1729 stb_p(qemu_get_ram_ptr(ram_addr
), val
);
1732 stw_p(qemu_get_ram_ptr(ram_addr
), val
);
1735 stl_p(qemu_get_ram_ptr(ram_addr
), val
);
1740 cpu_physical_memory_set_dirty_range_nocode(ram_addr
, size
);
1741 /* we remove the notdirty callback only if the code has been
1743 if (!cpu_physical_memory_is_clean(ram_addr
)) {
1744 CPUArchState
*env
= current_cpu
->env_ptr
;
1745 tlb_set_dirty(env
, current_cpu
->mem_io_vaddr
);
1749 static bool notdirty_mem_accepts(void *opaque
, hwaddr addr
,
1750 unsigned size
, bool is_write
)
1755 static const MemoryRegionOps notdirty_mem_ops
= {
1756 .write
= notdirty_mem_write
,
1757 .valid
.accepts
= notdirty_mem_accepts
,
1758 .endianness
= DEVICE_NATIVE_ENDIAN
,
1761 /* Generate a debug exception if a watchpoint has been hit. */
1762 static void check_watchpoint(int offset
, int len
, int flags
)
1764 CPUState
*cpu
= current_cpu
;
1765 CPUArchState
*env
= cpu
->env_ptr
;
1766 target_ulong pc
, cs_base
;
1771 if (cpu
->watchpoint_hit
) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
1775 cpu_interrupt(cpu
, CPU_INTERRUPT_DEBUG
);
1778 vaddr
= (cpu
->mem_io_vaddr
& TARGET_PAGE_MASK
) + offset
;
1779 QTAILQ_FOREACH(wp
, &cpu
->watchpoints
, entry
) {
1780 if (cpu_watchpoint_address_matches(wp
, vaddr
, len
)
1781 && (wp
->flags
& flags
)) {
1782 if (flags
== BP_MEM_READ
) {
1783 wp
->flags
|= BP_WATCHPOINT_HIT_READ
;
1785 wp
->flags
|= BP_WATCHPOINT_HIT_WRITE
;
1787 wp
->hitaddr
= vaddr
;
1788 if (!cpu
->watchpoint_hit
) {
1789 cpu
->watchpoint_hit
= wp
;
1790 tb_check_watchpoint(cpu
);
1791 if (wp
->flags
& BP_STOP_BEFORE_ACCESS
) {
1792 cpu
->exception_index
= EXCP_DEBUG
;
1795 cpu_get_tb_cpu_state(env
, &pc
, &cs_base
, &cpu_flags
);
1796 tb_gen_code(cpu
, pc
, cs_base
, cpu_flags
, 1);
1797 cpu_resume_from_signal(cpu
, NULL
);
1801 wp
->flags
&= ~BP_WATCHPOINT_HIT
;
1806 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
1807 so these check for a hit then pass through to the normal out-of-line
1809 static uint64_t watch_mem_read(void *opaque
, hwaddr addr
,
1812 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, size
, BP_MEM_READ
);
1814 case 1: return ldub_phys(&address_space_memory
, addr
);
1815 case 2: return lduw_phys(&address_space_memory
, addr
);
1816 case 4: return ldl_phys(&address_space_memory
, addr
);
1821 static void watch_mem_write(void *opaque
, hwaddr addr
,
1822 uint64_t val
, unsigned size
)
1824 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, size
, BP_MEM_WRITE
);
1827 stb_phys(&address_space_memory
, addr
, val
);
1830 stw_phys(&address_space_memory
, addr
, val
);
1833 stl_phys(&address_space_memory
, addr
, val
);
1839 static const MemoryRegionOps watch_mem_ops
= {
1840 .read
= watch_mem_read
,
1841 .write
= watch_mem_write
,
1842 .endianness
= DEVICE_NATIVE_ENDIAN
,
1845 static uint64_t subpage_read(void *opaque
, hwaddr addr
,
1848 subpage_t
*subpage
= opaque
;
1851 #if defined(DEBUG_SUBPAGE)
1852 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
"\n", __func__
,
1853 subpage
, len
, addr
);
1855 address_space_read(subpage
->as
, addr
+ subpage
->base
, buf
, len
);
1868 static void subpage_write(void *opaque
, hwaddr addr
,
1869 uint64_t value
, unsigned len
)
1871 subpage_t
*subpage
= opaque
;
1874 #if defined(DEBUG_SUBPAGE)
1875 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
1876 " value %"PRIx64
"\n",
1877 __func__
, subpage
, len
, addr
, value
);
1892 address_space_write(subpage
->as
, addr
+ subpage
->base
, buf
, len
);
1895 static bool subpage_accepts(void *opaque
, hwaddr addr
,
1896 unsigned len
, bool is_write
)
1898 subpage_t
*subpage
= opaque
;
1899 #if defined(DEBUG_SUBPAGE)
1900 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx
"\n",
1901 __func__
, subpage
, is_write
? 'w' : 'r', len
, addr
);
1904 return address_space_access_valid(subpage
->as
, addr
+ subpage
->base
,
1908 static const MemoryRegionOps subpage_ops
= {
1909 .read
= subpage_read
,
1910 .write
= subpage_write
,
1911 .valid
.accepts
= subpage_accepts
,
1912 .endianness
= DEVICE_NATIVE_ENDIAN
,
1915 static int subpage_register (subpage_t
*mmio
, uint32_t start
, uint32_t end
,
1920 if (start
>= TARGET_PAGE_SIZE
|| end
>= TARGET_PAGE_SIZE
)
1922 idx
= SUBPAGE_IDX(start
);
1923 eidx
= SUBPAGE_IDX(end
);
1924 #if defined(DEBUG_SUBPAGE)
1925 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
1926 __func__
, mmio
, start
, end
, idx
, eidx
, section
);
1928 for (; idx
<= eidx
; idx
++) {
1929 mmio
->sub_section
[idx
] = section
;
1935 static subpage_t
*subpage_init(AddressSpace
*as
, hwaddr base
)
1939 mmio
= g_malloc0(sizeof(subpage_t
));
1943 memory_region_init_io(&mmio
->iomem
, NULL
, &subpage_ops
, mmio
,
1944 NULL
, TARGET_PAGE_SIZE
);
1945 mmio
->iomem
.subpage
= true;
1946 #if defined(DEBUG_SUBPAGE)
1947 printf("%s: %p base " TARGET_FMT_plx
" len %08x\n", __func__
,
1948 mmio
, base
, TARGET_PAGE_SIZE
);
1950 subpage_register(mmio
, 0, TARGET_PAGE_SIZE
-1, PHYS_SECTION_UNASSIGNED
);
1955 static uint16_t dummy_section(PhysPageMap
*map
, AddressSpace
*as
,
1959 MemoryRegionSection section
= {
1960 .address_space
= as
,
1962 .offset_within_address_space
= 0,
1963 .offset_within_region
= 0,
1964 .size
= int128_2_64(),
1967 return phys_section_add(map
, §ion
);
1970 MemoryRegion
*iotlb_to_region(AddressSpace
*as
, hwaddr index
)
1972 return as
->dispatch
->map
.sections
[index
& ~TARGET_PAGE_MASK
].mr
;
1975 static void io_mem_init(void)
1977 memory_region_init_io(&io_mem_rom
, NULL
, &unassigned_mem_ops
, NULL
, NULL
, UINT64_MAX
);
1978 memory_region_init_io(&io_mem_unassigned
, NULL
, &unassigned_mem_ops
, NULL
,
1980 memory_region_init_io(&io_mem_notdirty
, NULL
, ¬dirty_mem_ops
, NULL
,
1982 memory_region_init_io(&io_mem_watch
, NULL
, &watch_mem_ops
, NULL
,
1986 static void mem_begin(MemoryListener
*listener
)
1988 AddressSpace
*as
= container_of(listener
, AddressSpace
, dispatch_listener
);
1989 AddressSpaceDispatch
*d
= g_new0(AddressSpaceDispatch
, 1);
1992 n
= dummy_section(&d
->map
, as
, &io_mem_unassigned
);
1993 assert(n
== PHYS_SECTION_UNASSIGNED
);
1994 n
= dummy_section(&d
->map
, as
, &io_mem_notdirty
);
1995 assert(n
== PHYS_SECTION_NOTDIRTY
);
1996 n
= dummy_section(&d
->map
, as
, &io_mem_rom
);
1997 assert(n
== PHYS_SECTION_ROM
);
1998 n
= dummy_section(&d
->map
, as
, &io_mem_watch
);
1999 assert(n
== PHYS_SECTION_WATCH
);
2001 d
->phys_map
= (PhysPageEntry
) { .ptr
= PHYS_MAP_NODE_NIL
, .skip
= 1 };
2003 as
->next_dispatch
= d
;
2006 static void mem_commit(MemoryListener
*listener
)
2008 AddressSpace
*as
= container_of(listener
, AddressSpace
, dispatch_listener
);
2009 AddressSpaceDispatch
*cur
= as
->dispatch
;
2010 AddressSpaceDispatch
*next
= as
->next_dispatch
;
2012 phys_page_compact_all(next
, next
->map
.nodes_nb
);
2014 as
->dispatch
= next
;
2017 phys_sections_free(&cur
->map
);
2022 static void tcg_commit(MemoryListener
*listener
)
2026 /* since each CPU stores ram addresses in its TLB cache, we must
2027 reset the modified entries */
2030 /* FIXME: Disentangle the cpu.h circular files deps so we can
2031 directly get the right CPU from listener. */
2032 if (cpu
->tcg_as_listener
!= listener
) {
2039 static void core_log_global_start(MemoryListener
*listener
)
2041 cpu_physical_memory_set_dirty_tracking(true);
2044 static void core_log_global_stop(MemoryListener
*listener
)
2046 cpu_physical_memory_set_dirty_tracking(false);
2049 static MemoryListener core_memory_listener
= {
2050 .log_global_start
= core_log_global_start
,
2051 .log_global_stop
= core_log_global_stop
,
2055 void address_space_init_dispatch(AddressSpace
*as
)
2057 as
->dispatch
= NULL
;
2058 as
->dispatch_listener
= (MemoryListener
) {
2060 .commit
= mem_commit
,
2061 .region_add
= mem_add
,
2062 .region_nop
= mem_add
,
2065 memory_listener_register(&as
->dispatch_listener
, as
);
2068 void address_space_destroy_dispatch(AddressSpace
*as
)
2070 AddressSpaceDispatch
*d
= as
->dispatch
;
2072 memory_listener_unregister(&as
->dispatch_listener
);
2074 as
->dispatch
= NULL
;
2077 static void memory_map_init(void)
2079 system_memory
= g_malloc(sizeof(*system_memory
));
2081 memory_region_init(system_memory
, NULL
, "system", UINT64_MAX
);
2082 address_space_init(&address_space_memory
, system_memory
, "memory");
2084 system_io
= g_malloc(sizeof(*system_io
));
2085 memory_region_init_io(system_io
, NULL
, &unassigned_io_ops
, NULL
, "io",
2087 address_space_init(&address_space_io
, system_io
, "I/O");
2089 memory_listener_register(&core_memory_listener
, &address_space_memory
);
2092 MemoryRegion
*get_system_memory(void)
2094 return system_memory
;
2097 MemoryRegion
*get_system_io(void)
2102 #endif /* !defined(CONFIG_USER_ONLY) */
2104 /* physical memory access (slow version, mainly for debug) */
2105 #if defined(CONFIG_USER_ONLY)
2106 int cpu_memory_rw_debug(CPUState
*cpu
, target_ulong addr
,
2107 uint8_t *buf
, int len
, int is_write
)
2114 page
= addr
& TARGET_PAGE_MASK
;
2115 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
2118 flags
= page_get_flags(page
);
2119 if (!(flags
& PAGE_VALID
))
2122 if (!(flags
& PAGE_WRITE
))
2124 /* XXX: this code should not depend on lock_user */
2125 if (!(p
= lock_user(VERIFY_WRITE
, addr
, l
, 0)))
2128 unlock_user(p
, addr
, l
);
2130 if (!(flags
& PAGE_READ
))
2132 /* XXX: this code should not depend on lock_user */
2133 if (!(p
= lock_user(VERIFY_READ
, addr
, l
, 1)))
2136 unlock_user(p
, addr
, 0);
2147 static void invalidate_and_set_dirty(hwaddr addr
,
2150 if (cpu_physical_memory_range_includes_clean(addr
, length
)) {
2151 tb_invalidate_phys_range(addr
, addr
+ length
, 0);
2152 cpu_physical_memory_set_dirty_range_nocode(addr
, length
);
2154 xen_modified_memory(addr
, length
);
2157 static int memory_access_size(MemoryRegion
*mr
, unsigned l
, hwaddr addr
)
2159 unsigned access_size_max
= mr
->ops
->valid
.max_access_size
;
2161 /* Regions are assumed to support 1-4 byte accesses unless
2162 otherwise specified. */
2163 if (access_size_max
== 0) {
2164 access_size_max
= 4;
2167 /* Bound the maximum access by the alignment of the address. */
2168 if (!mr
->ops
->impl
.unaligned
) {
2169 unsigned align_size_max
= addr
& -addr
;
2170 if (align_size_max
!= 0 && align_size_max
< access_size_max
) {
2171 access_size_max
= align_size_max
;
2175 /* Don't attempt accesses larger than the maximum. */
2176 if (l
> access_size_max
) {
2177 l
= access_size_max
;
2180 l
= 1 << (qemu_fls(l
) - 1);
2186 bool address_space_rw(AddressSpace
*as
, hwaddr addr
, uint8_t *buf
,
2187 int len
, bool is_write
)
2198 mr
= address_space_translate(as
, addr
, &addr1
, &l
, is_write
);
2201 if (!memory_access_is_direct(mr
, is_write
)) {
2202 l
= memory_access_size(mr
, l
, addr1
);
2203 /* XXX: could force current_cpu to NULL to avoid
2207 /* 64 bit write access */
2209 error
|= io_mem_write(mr
, addr1
, val
, 8);
2212 /* 32 bit write access */
2214 error
|= io_mem_write(mr
, addr1
, val
, 4);
2217 /* 16 bit write access */
2219 error
|= io_mem_write(mr
, addr1
, val
, 2);
2222 /* 8 bit write access */
2224 error
|= io_mem_write(mr
, addr1
, val
, 1);
2230 addr1
+= memory_region_get_ram_addr(mr
);
2232 ptr
= qemu_get_ram_ptr(addr1
);
2233 memcpy(ptr
, buf
, l
);
2234 invalidate_and_set_dirty(addr1
, l
);
2237 if (!memory_access_is_direct(mr
, is_write
)) {
2239 l
= memory_access_size(mr
, l
, addr1
);
2242 /* 64 bit read access */
2243 error
|= io_mem_read(mr
, addr1
, &val
, 8);
2247 /* 32 bit read access */
2248 error
|= io_mem_read(mr
, addr1
, &val
, 4);
2252 /* 16 bit read access */
2253 error
|= io_mem_read(mr
, addr1
, &val
, 2);
2257 /* 8 bit read access */
2258 error
|= io_mem_read(mr
, addr1
, &val
, 1);
2266 ptr
= qemu_get_ram_ptr(mr
->ram_addr
+ addr1
);
2267 memcpy(buf
, ptr
, l
);
2278 bool address_space_write(AddressSpace
*as
, hwaddr addr
,
2279 const uint8_t *buf
, int len
)
2281 return address_space_rw(as
, addr
, (uint8_t *)buf
, len
, true);
2284 bool address_space_read(AddressSpace
*as
, hwaddr addr
, uint8_t *buf
, int len
)
2286 return address_space_rw(as
, addr
, buf
, len
, false);
2290 void cpu_physical_memory_rw(hwaddr addr
, uint8_t *buf
,
2291 int len
, int is_write
)
2293 address_space_rw(&address_space_memory
, addr
, buf
, len
, is_write
);
2296 enum write_rom_type
{
2301 static inline void cpu_physical_memory_write_rom_internal(AddressSpace
*as
,
2302 hwaddr addr
, const uint8_t *buf
, int len
, enum write_rom_type type
)
2311 mr
= address_space_translate(as
, addr
, &addr1
, &l
, true);
2313 if (!(memory_region_is_ram(mr
) ||
2314 memory_region_is_romd(mr
))) {
2317 addr1
+= memory_region_get_ram_addr(mr
);
2319 ptr
= qemu_get_ram_ptr(addr1
);
2322 memcpy(ptr
, buf
, l
);
2323 invalidate_and_set_dirty(addr1
, l
);
2326 flush_icache_range((uintptr_t)ptr
, (uintptr_t)ptr
+ l
);
2336 /* used for ROM loading : can write in RAM and ROM */
2337 void cpu_physical_memory_write_rom(AddressSpace
*as
, hwaddr addr
,
2338 const uint8_t *buf
, int len
)
2340 cpu_physical_memory_write_rom_internal(as
, addr
, buf
, len
, WRITE_DATA
);
2343 void cpu_flush_icache_range(hwaddr start
, int len
)
2346 * This function should do the same thing as an icache flush that was
2347 * triggered from within the guest. For TCG we are always cache coherent,
2348 * so there is no need to flush anything. For KVM / Xen we need to flush
2349 * the host's instruction cache at least.
2351 if (tcg_enabled()) {
2355 cpu_physical_memory_write_rom_internal(&address_space_memory
,
2356 start
, NULL
, len
, FLUSH_CACHE
);
2366 static BounceBuffer bounce
;
2368 typedef struct MapClient
{
2370 void (*callback
)(void *opaque
);
2371 QLIST_ENTRY(MapClient
) link
;
2374 static QLIST_HEAD(map_client_list
, MapClient
) map_client_list
2375 = QLIST_HEAD_INITIALIZER(map_client_list
);
2377 void *cpu_register_map_client(void *opaque
, void (*callback
)(void *opaque
))
2379 MapClient
*client
= g_malloc(sizeof(*client
));
2381 client
->opaque
= opaque
;
2382 client
->callback
= callback
;
2383 QLIST_INSERT_HEAD(&map_client_list
, client
, link
);
2387 static void cpu_unregister_map_client(void *_client
)
2389 MapClient
*client
= (MapClient
*)_client
;
2391 QLIST_REMOVE(client
, link
);
2395 static void cpu_notify_map_clients(void)
2399 while (!QLIST_EMPTY(&map_client_list
)) {
2400 client
= QLIST_FIRST(&map_client_list
);
2401 client
->callback(client
->opaque
);
2402 cpu_unregister_map_client(client
);
2406 bool address_space_access_valid(AddressSpace
*as
, hwaddr addr
, int len
, bool is_write
)
2413 mr
= address_space_translate(as
, addr
, &xlat
, &l
, is_write
);
2414 if (!memory_access_is_direct(mr
, is_write
)) {
2415 l
= memory_access_size(mr
, l
, addr
);
2416 if (!memory_region_access_valid(mr
, xlat
, l
, is_write
)) {
2427 /* Map a physical memory region into a host virtual address.
2428 * May map a subset of the requested range, given by and returned in *plen.
2429 * May return NULL if resources needed to perform the mapping are exhausted.
2430 * Use only for reads OR writes - not for read-modify-write operations.
2431 * Use cpu_register_map_client() to know when retrying the map operation is
2432 * likely to succeed.
2434 void *address_space_map(AddressSpace
*as
,
2441 hwaddr l
, xlat
, base
;
2442 MemoryRegion
*mr
, *this_mr
;
2450 mr
= address_space_translate(as
, addr
, &xlat
, &l
, is_write
);
2451 if (!memory_access_is_direct(mr
, is_write
)) {
2452 if (bounce
.buffer
) {
2455 /* Avoid unbounded allocations */
2456 l
= MIN(l
, TARGET_PAGE_SIZE
);
2457 bounce
.buffer
= qemu_memalign(TARGET_PAGE_SIZE
, l
);
2461 memory_region_ref(mr
);
2464 address_space_read(as
, addr
, bounce
.buffer
, l
);
2468 return bounce
.buffer
;
2472 raddr
= memory_region_get_ram_addr(mr
);
2483 this_mr
= address_space_translate(as
, addr
, &xlat
, &l
, is_write
);
2484 if (this_mr
!= mr
|| xlat
!= base
+ done
) {
2489 memory_region_ref(mr
);
2491 return qemu_ram_ptr_length(raddr
+ base
, plen
);
2494 /* Unmaps a memory region previously mapped by address_space_map().
2495 * Will also mark the memory as dirty if is_write == 1. access_len gives
2496 * the amount of memory that was actually read or written by the caller.
2498 void address_space_unmap(AddressSpace
*as
, void *buffer
, hwaddr len
,
2499 int is_write
, hwaddr access_len
)
2501 if (buffer
!= bounce
.buffer
) {
2505 mr
= qemu_ram_addr_from_host(buffer
, &addr1
);
2508 invalidate_and_set_dirty(addr1
, access_len
);
2510 if (xen_enabled()) {
2511 xen_invalidate_map_cache_entry(buffer
);
2513 memory_region_unref(mr
);
2517 address_space_write(as
, bounce
.addr
, bounce
.buffer
, access_len
);
2519 qemu_vfree(bounce
.buffer
);
2520 bounce
.buffer
= NULL
;
2521 memory_region_unref(bounce
.mr
);
2522 cpu_notify_map_clients();
2525 void *cpu_physical_memory_map(hwaddr addr
,
2529 return address_space_map(&address_space_memory
, addr
, plen
, is_write
);
2532 void cpu_physical_memory_unmap(void *buffer
, hwaddr len
,
2533 int is_write
, hwaddr access_len
)
2535 return address_space_unmap(&address_space_memory
, buffer
, len
, is_write
, access_len
);
2538 /* warning: addr must be aligned */
2539 static inline uint32_t ldl_phys_internal(AddressSpace
*as
, hwaddr addr
,
2540 enum device_endian endian
)
2548 mr
= address_space_translate(as
, addr
, &addr1
, &l
, false);
2549 if (l
< 4 || !memory_access_is_direct(mr
, false)) {
2551 io_mem_read(mr
, addr1
, &val
, 4);
2552 #if defined(TARGET_WORDS_BIGENDIAN)
2553 if (endian
== DEVICE_LITTLE_ENDIAN
) {
2557 if (endian
== DEVICE_BIG_ENDIAN
) {
2563 ptr
= qemu_get_ram_ptr((memory_region_get_ram_addr(mr
)
2567 case DEVICE_LITTLE_ENDIAN
:
2568 val
= ldl_le_p(ptr
);
2570 case DEVICE_BIG_ENDIAN
:
2571 val
= ldl_be_p(ptr
);
2581 uint32_t ldl_phys(AddressSpace
*as
, hwaddr addr
)
2583 return ldl_phys_internal(as
, addr
, DEVICE_NATIVE_ENDIAN
);
2586 uint32_t ldl_le_phys(AddressSpace
*as
, hwaddr addr
)
2588 return ldl_phys_internal(as
, addr
, DEVICE_LITTLE_ENDIAN
);
2591 uint32_t ldl_be_phys(AddressSpace
*as
, hwaddr addr
)
2593 return ldl_phys_internal(as
, addr
, DEVICE_BIG_ENDIAN
);
2596 /* warning: addr must be aligned */
2597 static inline uint64_t ldq_phys_internal(AddressSpace
*as
, hwaddr addr
,
2598 enum device_endian endian
)
2606 mr
= address_space_translate(as
, addr
, &addr1
, &l
,
2608 if (l
< 8 || !memory_access_is_direct(mr
, false)) {
2610 io_mem_read(mr
, addr1
, &val
, 8);
2611 #if defined(TARGET_WORDS_BIGENDIAN)
2612 if (endian
== DEVICE_LITTLE_ENDIAN
) {
2616 if (endian
== DEVICE_BIG_ENDIAN
) {
2622 ptr
= qemu_get_ram_ptr((memory_region_get_ram_addr(mr
)
2626 case DEVICE_LITTLE_ENDIAN
:
2627 val
= ldq_le_p(ptr
);
2629 case DEVICE_BIG_ENDIAN
:
2630 val
= ldq_be_p(ptr
);
2640 uint64_t ldq_phys(AddressSpace
*as
, hwaddr addr
)
2642 return ldq_phys_internal(as
, addr
, DEVICE_NATIVE_ENDIAN
);
2645 uint64_t ldq_le_phys(AddressSpace
*as
, hwaddr addr
)
2647 return ldq_phys_internal(as
, addr
, DEVICE_LITTLE_ENDIAN
);
2650 uint64_t ldq_be_phys(AddressSpace
*as
, hwaddr addr
)
2652 return ldq_phys_internal(as
, addr
, DEVICE_BIG_ENDIAN
);
2656 uint32_t ldub_phys(AddressSpace
*as
, hwaddr addr
)
2659 address_space_rw(as
, addr
, &val
, 1, 0);
2663 /* warning: addr must be aligned */
2664 static inline uint32_t lduw_phys_internal(AddressSpace
*as
, hwaddr addr
,
2665 enum device_endian endian
)
2673 mr
= address_space_translate(as
, addr
, &addr1
, &l
,
2675 if (l
< 2 || !memory_access_is_direct(mr
, false)) {
2677 io_mem_read(mr
, addr1
, &val
, 2);
2678 #if defined(TARGET_WORDS_BIGENDIAN)
2679 if (endian
== DEVICE_LITTLE_ENDIAN
) {
2683 if (endian
== DEVICE_BIG_ENDIAN
) {
2689 ptr
= qemu_get_ram_ptr((memory_region_get_ram_addr(mr
)
2693 case DEVICE_LITTLE_ENDIAN
:
2694 val
= lduw_le_p(ptr
);
2696 case DEVICE_BIG_ENDIAN
:
2697 val
= lduw_be_p(ptr
);
2707 uint32_t lduw_phys(AddressSpace
*as
, hwaddr addr
)
2709 return lduw_phys_internal(as
, addr
, DEVICE_NATIVE_ENDIAN
);
2712 uint32_t lduw_le_phys(AddressSpace
*as
, hwaddr addr
)
2714 return lduw_phys_internal(as
, addr
, DEVICE_LITTLE_ENDIAN
);
2717 uint32_t lduw_be_phys(AddressSpace
*as
, hwaddr addr
)
2719 return lduw_phys_internal(as
, addr
, DEVICE_BIG_ENDIAN
);
2722 /* warning: addr must be aligned. The ram page is not masked as dirty
2723 and the code inside is not invalidated. It is useful if the dirty
2724 bits are used to track modified PTEs */
2725 void stl_phys_notdirty(AddressSpace
*as
, hwaddr addr
, uint32_t val
)
2732 mr
= address_space_translate(as
, addr
, &addr1
, &l
,
2734 if (l
< 4 || !memory_access_is_direct(mr
, true)) {
2735 io_mem_write(mr
, addr1
, val
, 4);
2737 addr1
+= memory_region_get_ram_addr(mr
) & TARGET_PAGE_MASK
;
2738 ptr
= qemu_get_ram_ptr(addr1
);
2741 if (unlikely(in_migration
)) {
2742 if (cpu_physical_memory_is_clean(addr1
)) {
2743 /* invalidate code */
2744 tb_invalidate_phys_page_range(addr1
, addr1
+ 4, 0);
2746 cpu_physical_memory_set_dirty_range_nocode(addr1
, 4);
2752 /* warning: addr must be aligned */
2753 static inline void stl_phys_internal(AddressSpace
*as
,
2754 hwaddr addr
, uint32_t val
,
2755 enum device_endian endian
)
2762 mr
= address_space_translate(as
, addr
, &addr1
, &l
,
2764 if (l
< 4 || !memory_access_is_direct(mr
, true)) {
2765 #if defined(TARGET_WORDS_BIGENDIAN)
2766 if (endian
== DEVICE_LITTLE_ENDIAN
) {
2770 if (endian
== DEVICE_BIG_ENDIAN
) {
2774 io_mem_write(mr
, addr1
, val
, 4);
2777 addr1
+= memory_region_get_ram_addr(mr
) & TARGET_PAGE_MASK
;
2778 ptr
= qemu_get_ram_ptr(addr1
);
2780 case DEVICE_LITTLE_ENDIAN
:
2783 case DEVICE_BIG_ENDIAN
:
2790 invalidate_and_set_dirty(addr1
, 4);
2794 void stl_phys(AddressSpace
*as
, hwaddr addr
, uint32_t val
)
2796 stl_phys_internal(as
, addr
, val
, DEVICE_NATIVE_ENDIAN
);
2799 void stl_le_phys(AddressSpace
*as
, hwaddr addr
, uint32_t val
)
2801 stl_phys_internal(as
, addr
, val
, DEVICE_LITTLE_ENDIAN
);
2804 void stl_be_phys(AddressSpace
*as
, hwaddr addr
, uint32_t val
)
2806 stl_phys_internal(as
, addr
, val
, DEVICE_BIG_ENDIAN
);
2810 void stb_phys(AddressSpace
*as
, hwaddr addr
, uint32_t val
)
2813 address_space_rw(as
, addr
, &v
, 1, 1);
2816 /* warning: addr must be aligned */
2817 static inline void stw_phys_internal(AddressSpace
*as
,
2818 hwaddr addr
, uint32_t val
,
2819 enum device_endian endian
)
2826 mr
= address_space_translate(as
, addr
, &addr1
, &l
, true);
2827 if (l
< 2 || !memory_access_is_direct(mr
, true)) {
2828 #if defined(TARGET_WORDS_BIGENDIAN)
2829 if (endian
== DEVICE_LITTLE_ENDIAN
) {
2833 if (endian
== DEVICE_BIG_ENDIAN
) {
2837 io_mem_write(mr
, addr1
, val
, 2);
2840 addr1
+= memory_region_get_ram_addr(mr
) & TARGET_PAGE_MASK
;
2841 ptr
= qemu_get_ram_ptr(addr1
);
2843 case DEVICE_LITTLE_ENDIAN
:
2846 case DEVICE_BIG_ENDIAN
:
2853 invalidate_and_set_dirty(addr1
, 2);
2857 void stw_phys(AddressSpace
*as
, hwaddr addr
, uint32_t val
)
2859 stw_phys_internal(as
, addr
, val
, DEVICE_NATIVE_ENDIAN
);
2862 void stw_le_phys(AddressSpace
*as
, hwaddr addr
, uint32_t val
)
2864 stw_phys_internal(as
, addr
, val
, DEVICE_LITTLE_ENDIAN
);
2867 void stw_be_phys(AddressSpace
*as
, hwaddr addr
, uint32_t val
)
2869 stw_phys_internal(as
, addr
, val
, DEVICE_BIG_ENDIAN
);
2873 void stq_phys(AddressSpace
*as
, hwaddr addr
, uint64_t val
)
2876 address_space_rw(as
, addr
, (void *) &val
, 8, 1);
2879 void stq_le_phys(AddressSpace
*as
, hwaddr addr
, uint64_t val
)
2881 val
= cpu_to_le64(val
);
2882 address_space_rw(as
, addr
, (void *) &val
, 8, 1);
2885 void stq_be_phys(AddressSpace
*as
, hwaddr addr
, uint64_t val
)
2887 val
= cpu_to_be64(val
);
2888 address_space_rw(as
, addr
, (void *) &val
, 8, 1);
2891 /* virtual memory access for debug (includes writing to ROM) */
2892 int cpu_memory_rw_debug(CPUState
*cpu
, target_ulong addr
,
2893 uint8_t *buf
, int len
, int is_write
)
2900 page
= addr
& TARGET_PAGE_MASK
;
2901 phys_addr
= cpu_get_phys_page_debug(cpu
, page
);
2902 /* if no physical page mapped, return an error */
2903 if (phys_addr
== -1)
2905 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
2908 phys_addr
+= (addr
& ~TARGET_PAGE_MASK
);
2910 cpu_physical_memory_write_rom(cpu
->as
, phys_addr
, buf
, l
);
2912 address_space_rw(cpu
->as
, phys_addr
, buf
, l
, 0);
2923 * A helper function for the _utterly broken_ virtio device model to find out if
2924 * it's running on a big endian machine. Don't do this at home kids!
2926 bool target_words_bigendian(void);
2927 bool target_words_bigendian(void)
2929 #if defined(TARGET_WORDS_BIGENDIAN)
2936 #ifndef CONFIG_USER_ONLY
2937 bool cpu_physical_memory_is_io(hwaddr phys_addr
)
2942 mr
= address_space_translate(&address_space_memory
,
2943 phys_addr
, &phys_addr
, &l
, false);
2945 return !(memory_region_is_ram(mr
) ||
2946 memory_region_is_romd(mr
));
2949 void qemu_ram_foreach_block(RAMBlockIterFunc func
, void *opaque
)
2953 QTAILQ_FOREACH(block
, &ram_list
.blocks
, next
) {
2954 func(block
->host
, block
->offset
, block
->used_length
, opaque
);