4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 #include "qemu/osdep.h"
20 #include "qapi/error.h"
24 #include "qemu/cutils.h"
26 #include "exec/exec-all.h"
28 #include "hw/qdev-core.h"
29 #if !defined(CONFIG_USER_ONLY)
30 #include "hw/boards.h"
31 #include "hw/xen/xen.h"
33 #include "sysemu/kvm.h"
34 #include "sysemu/sysemu.h"
35 #include "qemu/timer.h"
36 #include "qemu/config-file.h"
37 #include "qemu/error-report.h"
38 #if defined(CONFIG_USER_ONLY)
40 #else /* !CONFIG_USER_ONLY */
42 #include "exec/memory.h"
43 #include "exec/ioport.h"
44 #include "sysemu/dma.h"
45 #include "exec/address-spaces.h"
46 #include "sysemu/xen-mapcache.h"
49 #include "exec/cpu-all.h"
50 #include "qemu/rcu_queue.h"
51 #include "qemu/main-loop.h"
52 #include "translate-all.h"
53 #include "sysemu/replay.h"
55 #include "exec/memory-internal.h"
56 #include "exec/ram_addr.h"
59 #include "migration/vmstate.h"
61 #include "qemu/range.h"
63 #include "qemu/mmap-alloc.h"
66 //#define DEBUG_SUBPAGE
68 #if !defined(CONFIG_USER_ONLY)
69 /* ram_list is read under rcu_read_lock()/rcu_read_unlock(). Writes
70 * are protected by the ramlist lock.
72 RAMList ram_list
= { .blocks
= QLIST_HEAD_INITIALIZER(ram_list
.blocks
) };
74 static MemoryRegion
*system_memory
;
75 static MemoryRegion
*system_io
;
77 AddressSpace address_space_io
;
78 AddressSpace address_space_memory
;
80 MemoryRegion io_mem_rom
, io_mem_notdirty
;
81 static MemoryRegion io_mem_unassigned
;
83 /* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
84 #define RAM_PREALLOC (1 << 0)
86 /* RAM is mmap-ed with MAP_SHARED */
87 #define RAM_SHARED (1 << 1)
89 /* Only a portion of RAM (used_length) is actually used, and migrated.
90 * This used_length size can change across reboots.
92 #define RAM_RESIZEABLE (1 << 2)
96 #ifdef TARGET_PAGE_BITS_VARY
98 bool target_page_bits_decided
;
101 struct CPUTailQ cpus
= QTAILQ_HEAD_INITIALIZER(cpus
);
102 /* current CPU in the current thread. It is only valid inside
104 __thread CPUState
*current_cpu
;
105 /* 0 = Do not count executed instructions.
106 1 = Precise instruction counting.
107 2 = Adaptive rate instruction counting. */
110 bool set_preferred_target_page_bits(int bits
)
112 /* The target page size is the lowest common denominator for all
113 * the CPUs in the system, so we can only make it smaller, never
114 * larger. And we can't make it smaller once we've committed to
117 #ifdef TARGET_PAGE_BITS_VARY
118 assert(bits
>= TARGET_PAGE_BITS_MIN
);
119 if (target_page_bits
== 0 || target_page_bits
> bits
) {
120 if (target_page_bits_decided
) {
123 target_page_bits
= bits
;
129 #if !defined(CONFIG_USER_ONLY)
/* Commit the target page size: fall back to the minimum if no CPU has
 * expressed a preference, and forbid further changes.
 */
static void finalize_target_page_bits(void)
{
#ifdef TARGET_PAGE_BITS_VARY
    if (target_page_bits == 0) {
        target_page_bits = TARGET_PAGE_BITS_MIN;
    }
    target_page_bits_decided = true;
#endif
}
141 typedef struct PhysPageEntry PhysPageEntry
;
143 struct PhysPageEntry
{
144 /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */
146 /* index into phys_sections (!skip) or phys_map_nodes (skip) */
150 #define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)
152 /* Size of the L2 (and L3, etc) page tables. */
153 #define ADDR_SPACE_BITS 64
156 #define P_L2_SIZE (1 << P_L2_BITS)
158 #define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
160 typedef PhysPageEntry Node
[P_L2_SIZE
];
162 typedef struct PhysPageMap
{
165 unsigned sections_nb
;
166 unsigned sections_nb_alloc
;
168 unsigned nodes_nb_alloc
;
170 MemoryRegionSection
*sections
;
173 struct AddressSpaceDispatch
{
176 MemoryRegionSection
*mru_section
;
177 /* This is a multi-level map on the physical address space.
178 * The bottom level has pointers to MemoryRegionSections.
180 PhysPageEntry phys_map
;
185 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
186 typedef struct subpage_t
{
190 uint16_t sub_section
[];
193 #define PHYS_SECTION_UNASSIGNED 0
194 #define PHYS_SECTION_NOTDIRTY 1
195 #define PHYS_SECTION_ROM 2
196 #define PHYS_SECTION_WATCH 3
198 static void io_mem_init(void);
199 static void memory_map_init(void);
200 static void tcg_commit(MemoryListener
*listener
);
202 static MemoryRegion io_mem_watch
;
205 * CPUAddressSpace: all the information a CPU needs about an AddressSpace
206 * @cpu: the CPU whose AddressSpace this is
207 * @as: the AddressSpace itself
208 * @memory_dispatch: its dispatch pointer (cached, RCU protected)
209 * @tcg_as_listener: listener for tracking changes to the AddressSpace
211 struct CPUAddressSpace
{
214 struct AddressSpaceDispatch
*memory_dispatch
;
215 MemoryListener tcg_as_listener
;
220 #if !defined(CONFIG_USER_ONLY)
222 static void phys_map_node_reserve(PhysPageMap
*map
, unsigned nodes
)
224 static unsigned alloc_hint
= 16;
225 if (map
->nodes_nb
+ nodes
> map
->nodes_nb_alloc
) {
226 map
->nodes_nb_alloc
= MAX(map
->nodes_nb_alloc
, alloc_hint
);
227 map
->nodes_nb_alloc
= MAX(map
->nodes_nb_alloc
, map
->nodes_nb
+ nodes
);
228 map
->nodes
= g_renew(Node
, map
->nodes
, map
->nodes_nb_alloc
);
229 alloc_hint
= map
->nodes_nb_alloc
;
233 static uint32_t phys_map_node_alloc(PhysPageMap
*map
, bool leaf
)
240 ret
= map
->nodes_nb
++;
242 assert(ret
!= PHYS_MAP_NODE_NIL
);
243 assert(ret
!= map
->nodes_nb_alloc
);
245 e
.skip
= leaf
? 0 : 1;
246 e
.ptr
= leaf
? PHYS_SECTION_UNASSIGNED
: PHYS_MAP_NODE_NIL
;
247 for (i
= 0; i
< P_L2_SIZE
; ++i
) {
248 memcpy(&p
[i
], &e
, sizeof(e
));
253 static void phys_page_set_level(PhysPageMap
*map
, PhysPageEntry
*lp
,
254 hwaddr
*index
, hwaddr
*nb
, uint16_t leaf
,
258 hwaddr step
= (hwaddr
)1 << (level
* P_L2_BITS
);
260 if (lp
->skip
&& lp
->ptr
== PHYS_MAP_NODE_NIL
) {
261 lp
->ptr
= phys_map_node_alloc(map
, level
== 0);
263 p
= map
->nodes
[lp
->ptr
];
264 lp
= &p
[(*index
>> (level
* P_L2_BITS
)) & (P_L2_SIZE
- 1)];
266 while (*nb
&& lp
< &p
[P_L2_SIZE
]) {
267 if ((*index
& (step
- 1)) == 0 && *nb
>= step
) {
273 phys_page_set_level(map
, lp
, index
, nb
, leaf
, level
- 1);
279 static void phys_page_set(AddressSpaceDispatch
*d
,
280 hwaddr index
, hwaddr nb
,
283 /* Wildly overreserve - it doesn't matter much. */
284 phys_map_node_reserve(&d
->map
, 3 * P_L2_LEVELS
);
286 phys_page_set_level(&d
->map
, &d
->phys_map
, &index
, &nb
, leaf
, P_L2_LEVELS
- 1);
289 /* Compact a non leaf page entry. Simply detect that the entry has a single child,
290 * and update our entry so we can skip it and go directly to the destination.
292 static void phys_page_compact(PhysPageEntry
*lp
, Node
*nodes
)
294 unsigned valid_ptr
= P_L2_SIZE
;
299 if (lp
->ptr
== PHYS_MAP_NODE_NIL
) {
304 for (i
= 0; i
< P_L2_SIZE
; i
++) {
305 if (p
[i
].ptr
== PHYS_MAP_NODE_NIL
) {
312 phys_page_compact(&p
[i
], nodes
);
316 /* We can only compress if there's only one child. */
321 assert(valid_ptr
< P_L2_SIZE
);
323 /* Don't compress if it won't fit in the # of bits we have. */
324 if (lp
->skip
+ p
[valid_ptr
].skip
>= (1 << 3)) {
328 lp
->ptr
= p
[valid_ptr
].ptr
;
329 if (!p
[valid_ptr
].skip
) {
330 /* If our only child is a leaf, make this a leaf. */
331 /* By design, we should have made this node a leaf to begin with so we
332 * should never reach here.
333 * But since it's so simple to handle this, let's do it just in case we
338 lp
->skip
+= p
[valid_ptr
].skip
;
342 static void phys_page_compact_all(AddressSpaceDispatch
*d
, int nodes_nb
)
344 if (d
->phys_map
.skip
) {
345 phys_page_compact(&d
->phys_map
, d
->map
.nodes
);
349 static inline bool section_covers_addr(const MemoryRegionSection
*section
,
352 /* Memory topology clips a memory region to [0, 2^64); size.hi > 0 means
353 * the section must cover the entire address space.
355 return section
->size
.hi
||
356 range_covers_byte(section
->offset_within_address_space
,
357 section
->size
.lo
, addr
);
360 static MemoryRegionSection
*phys_page_find(PhysPageEntry lp
, hwaddr addr
,
361 Node
*nodes
, MemoryRegionSection
*sections
)
364 hwaddr index
= addr
>> TARGET_PAGE_BITS
;
367 for (i
= P_L2_LEVELS
; lp
.skip
&& (i
-= lp
.skip
) >= 0;) {
368 if (lp
.ptr
== PHYS_MAP_NODE_NIL
) {
369 return §ions
[PHYS_SECTION_UNASSIGNED
];
372 lp
= p
[(index
>> (i
* P_L2_BITS
)) & (P_L2_SIZE
- 1)];
375 if (section_covers_addr(§ions
[lp
.ptr
], addr
)) {
376 return §ions
[lp
.ptr
];
378 return §ions
[PHYS_SECTION_UNASSIGNED
];
382 bool memory_region_is_unassigned(MemoryRegion
*mr
)
384 return mr
!= &io_mem_rom
&& mr
!= &io_mem_notdirty
&& !mr
->rom_device
385 && mr
!= &io_mem_watch
;
388 /* Called from RCU critical section */
389 static MemoryRegionSection
*address_space_lookup_region(AddressSpaceDispatch
*d
,
391 bool resolve_subpage
)
393 MemoryRegionSection
*section
= atomic_read(&d
->mru_section
);
397 if (section
&& section
!= &d
->map
.sections
[PHYS_SECTION_UNASSIGNED
] &&
398 section_covers_addr(section
, addr
)) {
401 section
= phys_page_find(d
->phys_map
, addr
, d
->map
.nodes
,
405 if (resolve_subpage
&& section
->mr
->subpage
) {
406 subpage
= container_of(section
->mr
, subpage_t
, iomem
);
407 section
= &d
->map
.sections
[subpage
->sub_section
[SUBPAGE_IDX(addr
)]];
410 atomic_set(&d
->mru_section
, section
);
415 /* Called from RCU critical section */
416 static MemoryRegionSection
*
417 address_space_translate_internal(AddressSpaceDispatch
*d
, hwaddr addr
, hwaddr
*xlat
,
418 hwaddr
*plen
, bool resolve_subpage
)
420 MemoryRegionSection
*section
;
424 section
= address_space_lookup_region(d
, addr
, resolve_subpage
);
425 /* Compute offset within MemoryRegionSection */
426 addr
-= section
->offset_within_address_space
;
428 /* Compute offset within MemoryRegion */
429 *xlat
= addr
+ section
->offset_within_region
;
433 /* MMIO registers can be expected to perform full-width accesses based only
434 * on their address, without considering adjacent registers that could
435 * decode to completely different MemoryRegions. When such registers
436 * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
437 * regions overlap wildly. For this reason we cannot clamp the accesses
440 * If the length is small (as is the case for address_space_ldl/stl),
441 * everything works fine. If the incoming length is large, however,
442 * the caller really has to do the clamping through memory_access_size.
444 if (memory_region_is_ram(mr
)) {
445 diff
= int128_sub(section
->size
, int128_make64(addr
));
446 *plen
= int128_get64(int128_min(diff
, int128_make64(*plen
)));
451 /* Called from RCU critical section */
452 MemoryRegion
*address_space_translate(AddressSpace
*as
, hwaddr addr
,
453 hwaddr
*xlat
, hwaddr
*plen
,
457 MemoryRegionSection
*section
;
461 AddressSpaceDispatch
*d
= atomic_rcu_read(&as
->dispatch
);
462 section
= address_space_translate_internal(d
, addr
, &addr
, plen
, true);
465 if (!mr
->iommu_ops
) {
469 iotlb
= mr
->iommu_ops
->translate(mr
, addr
, is_write
);
470 addr
= ((iotlb
.translated_addr
& ~iotlb
.addr_mask
)
471 | (addr
& iotlb
.addr_mask
));
472 *plen
= MIN(*plen
, (addr
| iotlb
.addr_mask
) - addr
+ 1);
473 if (!(iotlb
.perm
& (1 << is_write
))) {
474 mr
= &io_mem_unassigned
;
478 as
= iotlb
.target_as
;
481 if (xen_enabled() && memory_access_is_direct(mr
, is_write
)) {
482 hwaddr page
= ((addr
& TARGET_PAGE_MASK
) + TARGET_PAGE_SIZE
) - addr
;
483 *plen
= MIN(page
, *plen
);
490 /* Called from RCU critical section */
491 MemoryRegionSection
*
492 address_space_translate_for_iotlb(CPUState
*cpu
, int asidx
, hwaddr addr
,
493 hwaddr
*xlat
, hwaddr
*plen
)
495 MemoryRegionSection
*section
;
496 AddressSpaceDispatch
*d
= cpu
->cpu_ases
[asidx
].memory_dispatch
;
498 section
= address_space_translate_internal(d
, addr
, xlat
, plen
, false);
500 assert(!section
->mr
->iommu_ops
);
505 #if !defined(CONFIG_USER_ONLY)
507 static int cpu_common_post_load(void *opaque
, int version_id
)
509 CPUState
*cpu
= opaque
;
511 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
512 version_id is increased. */
513 cpu
->interrupt_request
&= ~0x01;
519 static int cpu_common_pre_load(void *opaque
)
521 CPUState
*cpu
= opaque
;
523 cpu
->exception_index
= -1;
528 static bool cpu_common_exception_index_needed(void *opaque
)
530 CPUState
*cpu
= opaque
;
532 return tcg_enabled() && cpu
->exception_index
!= -1;
535 static const VMStateDescription vmstate_cpu_common_exception_index
= {
536 .name
= "cpu_common/exception_index",
538 .minimum_version_id
= 1,
539 .needed
= cpu_common_exception_index_needed
,
540 .fields
= (VMStateField
[]) {
541 VMSTATE_INT32(exception_index
, CPUState
),
542 VMSTATE_END_OF_LIST()
546 static bool cpu_common_crash_occurred_needed(void *opaque
)
548 CPUState
*cpu
= opaque
;
550 return cpu
->crash_occurred
;
553 static const VMStateDescription vmstate_cpu_common_crash_occurred
= {
554 .name
= "cpu_common/crash_occurred",
556 .minimum_version_id
= 1,
557 .needed
= cpu_common_crash_occurred_needed
,
558 .fields
= (VMStateField
[]) {
559 VMSTATE_BOOL(crash_occurred
, CPUState
),
560 VMSTATE_END_OF_LIST()
564 const VMStateDescription vmstate_cpu_common
= {
565 .name
= "cpu_common",
567 .minimum_version_id
= 1,
568 .pre_load
= cpu_common_pre_load
,
569 .post_load
= cpu_common_post_load
,
570 .fields
= (VMStateField
[]) {
571 VMSTATE_UINT32(halted
, CPUState
),
572 VMSTATE_UINT32(interrupt_request
, CPUState
),
573 VMSTATE_END_OF_LIST()
575 .subsections
= (const VMStateDescription
*[]) {
576 &vmstate_cpu_common_exception_index
,
577 &vmstate_cpu_common_crash_occurred
,
584 CPUState
*qemu_get_cpu(int index
)
589 if (cpu
->cpu_index
== index
) {
597 #if !defined(CONFIG_USER_ONLY)
598 void cpu_address_space_init(CPUState
*cpu
, AddressSpace
*as
, int asidx
)
600 CPUAddressSpace
*newas
;
602 /* Target code should have set num_ases before calling us */
603 assert(asidx
< cpu
->num_ases
);
606 /* address space 0 gets the convenience alias */
610 /* KVM cannot currently support multiple address spaces. */
611 assert(asidx
== 0 || !kvm_enabled());
613 if (!cpu
->cpu_ases
) {
614 cpu
->cpu_ases
= g_new0(CPUAddressSpace
, cpu
->num_ases
);
617 newas
= &cpu
->cpu_ases
[asidx
];
621 newas
->tcg_as_listener
.commit
= tcg_commit
;
622 memory_listener_register(&newas
->tcg_as_listener
, as
);
626 AddressSpace
*cpu_get_address_space(CPUState
*cpu
, int asidx
)
628 /* Return the AddressSpace corresponding to the specified index */
629 return cpu
->cpu_ases
[asidx
].as
;
633 void cpu_exec_exit(CPUState
*cpu
)
635 CPUClass
*cc
= CPU_GET_CLASS(cpu
);
637 cpu_list_remove(cpu
);
639 if (cc
->vmsd
!= NULL
) {
640 vmstate_unregister(NULL
, cc
->vmsd
, cpu
);
642 if (qdev_get_vmsd(DEVICE(cpu
)) == NULL
) {
643 vmstate_unregister(NULL
, &vmstate_cpu_common
, cpu
);
647 void cpu_exec_init(CPUState
*cpu
, Error
**errp
)
649 CPUClass
*cc ATTRIBUTE_UNUSED
= CPU_GET_CLASS(cpu
);
650 Error
*local_err ATTRIBUTE_UNUSED
= NULL
;
655 #ifndef CONFIG_USER_ONLY
656 cpu
->thread_id
= qemu_get_thread_id();
658 /* This is a softmmu CPU object, so create a property for it
659 * so users can wire up its memory. (This can't go in qom/cpu.c
660 * because that file is compiled only once for both user-mode
661 * and system builds.) The default if no link is set up is to use
662 * the system address space.
664 object_property_add_link(OBJECT(cpu
), "memory", TYPE_MEMORY_REGION
,
665 (Object
**)&cpu
->memory
,
666 qdev_prop_allow_set_link_before_realize
,
667 OBJ_PROP_LINK_UNREF_ON_RELEASE
,
669 cpu
->memory
= system_memory
;
670 object_ref(OBJECT(cpu
->memory
));
675 #ifndef CONFIG_USER_ONLY
676 if (qdev_get_vmsd(DEVICE(cpu
)) == NULL
) {
677 vmstate_register(NULL
, cpu
->cpu_index
, &vmstate_cpu_common
, cpu
);
679 if (cc
->vmsd
!= NULL
) {
680 vmstate_register(NULL
, cpu
->cpu_index
, cc
->vmsd
, cpu
);
685 #if defined(CONFIG_USER_ONLY)
686 static void breakpoint_invalidate(CPUState
*cpu
, target_ulong pc
)
688 tb_invalidate_phys_page_range(pc
, pc
+ 1, 0);
691 static void breakpoint_invalidate(CPUState
*cpu
, target_ulong pc
)
694 hwaddr phys
= cpu_get_phys_page_attrs_debug(cpu
, pc
, &attrs
);
695 int asidx
= cpu_asidx_from_attrs(cpu
, attrs
);
697 tb_invalidate_phys_addr(cpu
->cpu_ases
[asidx
].as
,
698 phys
| (pc
& ~TARGET_PAGE_MASK
));
703 #if defined(CONFIG_USER_ONLY)
704 void cpu_watchpoint_remove_all(CPUState
*cpu
, int mask
)
709 int cpu_watchpoint_remove(CPUState
*cpu
, vaddr addr
, vaddr len
,
715 void cpu_watchpoint_remove_by_ref(CPUState
*cpu
, CPUWatchpoint
*watchpoint
)
719 int cpu_watchpoint_insert(CPUState
*cpu
, vaddr addr
, vaddr len
,
720 int flags
, CPUWatchpoint
**watchpoint
)
725 /* Add a watchpoint. */
726 int cpu_watchpoint_insert(CPUState
*cpu
, vaddr addr
, vaddr len
,
727 int flags
, CPUWatchpoint
**watchpoint
)
731 /* forbid ranges which are empty or run off the end of the address space */
732 if (len
== 0 || (addr
+ len
- 1) < addr
) {
733 error_report("tried to set invalid watchpoint at %"
734 VADDR_PRIx
", len=%" VADDR_PRIu
, addr
, len
);
737 wp
= g_malloc(sizeof(*wp
));
743 /* keep all GDB-injected watchpoints in front */
744 if (flags
& BP_GDB
) {
745 QTAILQ_INSERT_HEAD(&cpu
->watchpoints
, wp
, entry
);
747 QTAILQ_INSERT_TAIL(&cpu
->watchpoints
, wp
, entry
);
750 tlb_flush_page(cpu
, addr
);
757 /* Remove a specific watchpoint. */
758 int cpu_watchpoint_remove(CPUState
*cpu
, vaddr addr
, vaddr len
,
763 QTAILQ_FOREACH(wp
, &cpu
->watchpoints
, entry
) {
764 if (addr
== wp
->vaddr
&& len
== wp
->len
765 && flags
== (wp
->flags
& ~BP_WATCHPOINT_HIT
)) {
766 cpu_watchpoint_remove_by_ref(cpu
, wp
);
773 /* Remove a specific watchpoint by reference. */
774 void cpu_watchpoint_remove_by_ref(CPUState
*cpu
, CPUWatchpoint
*watchpoint
)
776 QTAILQ_REMOVE(&cpu
->watchpoints
, watchpoint
, entry
);
778 tlb_flush_page(cpu
, watchpoint
->vaddr
);
783 /* Remove all matching watchpoints. */
784 void cpu_watchpoint_remove_all(CPUState
*cpu
, int mask
)
786 CPUWatchpoint
*wp
, *next
;
788 QTAILQ_FOREACH_SAFE(wp
, &cpu
->watchpoints
, entry
, next
) {
789 if (wp
->flags
& mask
) {
790 cpu_watchpoint_remove_by_ref(cpu
, wp
);
795 /* Return true if this watchpoint address matches the specified
796 * access (ie the address range covered by the watchpoint overlaps
797 * partially or completely with the address range covered by the
800 static inline bool cpu_watchpoint_address_matches(CPUWatchpoint
*wp
,
804 /* We know the lengths are non-zero, but a little caution is
805 * required to avoid errors in the case where the range ends
806 * exactly at the top of the address space and so addr + len
807 * wraps round to zero.
809 vaddr wpend
= wp
->vaddr
+ wp
->len
- 1;
810 vaddr addrend
= addr
+ len
- 1;
812 return !(addr
> wpend
|| wp
->vaddr
> addrend
);
817 /* Add a breakpoint. */
818 int cpu_breakpoint_insert(CPUState
*cpu
, vaddr pc
, int flags
,
819 CPUBreakpoint
**breakpoint
)
823 bp
= g_malloc(sizeof(*bp
));
828 /* keep all GDB-injected breakpoints in front */
829 if (flags
& BP_GDB
) {
830 QTAILQ_INSERT_HEAD(&cpu
->breakpoints
, bp
, entry
);
832 QTAILQ_INSERT_TAIL(&cpu
->breakpoints
, bp
, entry
);
835 breakpoint_invalidate(cpu
, pc
);
843 /* Remove a specific breakpoint. */
844 int cpu_breakpoint_remove(CPUState
*cpu
, vaddr pc
, int flags
)
848 QTAILQ_FOREACH(bp
, &cpu
->breakpoints
, entry
) {
849 if (bp
->pc
== pc
&& bp
->flags
== flags
) {
850 cpu_breakpoint_remove_by_ref(cpu
, bp
);
857 /* Remove a specific breakpoint by reference. */
858 void cpu_breakpoint_remove_by_ref(CPUState
*cpu
, CPUBreakpoint
*breakpoint
)
860 QTAILQ_REMOVE(&cpu
->breakpoints
, breakpoint
, entry
);
862 breakpoint_invalidate(cpu
, breakpoint
->pc
);
867 /* Remove all matching breakpoints. */
868 void cpu_breakpoint_remove_all(CPUState
*cpu
, int mask
)
870 CPUBreakpoint
*bp
, *next
;
872 QTAILQ_FOREACH_SAFE(bp
, &cpu
->breakpoints
, entry
, next
) {
873 if (bp
->flags
& mask
) {
874 cpu_breakpoint_remove_by_ref(cpu
, bp
);
879 /* enable or disable single step mode. EXCP_DEBUG is returned by the
880 CPU loop after each instruction */
881 void cpu_single_step(CPUState
*cpu
, int enabled
)
883 if (cpu
->singlestep_enabled
!= enabled
) {
884 cpu
->singlestep_enabled
= enabled
;
886 kvm_update_guest_debug(cpu
, 0);
888 /* must flush all the translated code to avoid inconsistencies */
889 /* XXX: only flush what is necessary */
895 void cpu_abort(CPUState
*cpu
, const char *fmt
, ...)
902 fprintf(stderr
, "qemu: fatal: ");
903 vfprintf(stderr
, fmt
, ap
);
904 fprintf(stderr
, "\n");
905 cpu_dump_state(cpu
, stderr
, fprintf
, CPU_DUMP_FPU
| CPU_DUMP_CCOP
);
906 if (qemu_log_separate()) {
907 qemu_log("qemu: fatal: ");
908 qemu_log_vprintf(fmt
, ap2
);
910 log_cpu_state(cpu
, CPU_DUMP_FPU
| CPU_DUMP_CCOP
);
917 #if defined(CONFIG_USER_ONLY)
919 struct sigaction act
;
920 sigfillset(&act
.sa_mask
);
921 act
.sa_handler
= SIG_DFL
;
922 sigaction(SIGABRT
, &act
, NULL
);
928 #if !defined(CONFIG_USER_ONLY)
929 /* Called from RCU critical section */
930 static RAMBlock
*qemu_get_ram_block(ram_addr_t addr
)
934 block
= atomic_rcu_read(&ram_list
.mru_block
);
935 if (block
&& addr
- block
->offset
< block
->max_length
) {
938 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
939 if (addr
- block
->offset
< block
->max_length
) {
944 fprintf(stderr
, "Bad ram offset %" PRIx64
"\n", (uint64_t)addr
);
948 /* It is safe to write mru_block outside the iothread lock. This
953 * xxx removed from list
957 * call_rcu(reclaim_ramblock, xxx);
960 * atomic_rcu_set is not needed here. The block was already published
961 * when it was placed into the list. Here we're just making an extra
962 * copy of the pointer.
964 ram_list
.mru_block
= block
;
968 static void tlb_reset_dirty_range_all(ram_addr_t start
, ram_addr_t length
)
975 end
= TARGET_PAGE_ALIGN(start
+ length
);
976 start
&= TARGET_PAGE_MASK
;
979 block
= qemu_get_ram_block(start
);
980 assert(block
== qemu_get_ram_block(end
- 1));
981 start1
= (uintptr_t)ramblock_ptr(block
, start
- block
->offset
);
983 tlb_reset_dirty(cpu
, start1
, length
);
988 /* Note: start and end must be within the same ram block. */
989 bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start
,
993 DirtyMemoryBlocks
*blocks
;
994 unsigned long end
, page
;
1001 end
= TARGET_PAGE_ALIGN(start
+ length
) >> TARGET_PAGE_BITS
;
1002 page
= start
>> TARGET_PAGE_BITS
;
1006 blocks
= atomic_rcu_read(&ram_list
.dirty_memory
[client
]);
1008 while (page
< end
) {
1009 unsigned long idx
= page
/ DIRTY_MEMORY_BLOCK_SIZE
;
1010 unsigned long offset
= page
% DIRTY_MEMORY_BLOCK_SIZE
;
1011 unsigned long num
= MIN(end
- page
, DIRTY_MEMORY_BLOCK_SIZE
- offset
);
1013 dirty
|= bitmap_test_and_clear_atomic(blocks
->blocks
[idx
],
1020 if (dirty
&& tcg_enabled()) {
1021 tlb_reset_dirty_range_all(start
, length
);
1027 /* Called from RCU critical section */
1028 hwaddr
memory_region_section_get_iotlb(CPUState
*cpu
,
1029 MemoryRegionSection
*section
,
1031 hwaddr paddr
, hwaddr xlat
,
1033 target_ulong
*address
)
1038 if (memory_region_is_ram(section
->mr
)) {
1040 iotlb
= memory_region_get_ram_addr(section
->mr
) + xlat
;
1041 if (!section
->readonly
) {
1042 iotlb
|= PHYS_SECTION_NOTDIRTY
;
1044 iotlb
|= PHYS_SECTION_ROM
;
1047 AddressSpaceDispatch
*d
;
1049 d
= atomic_rcu_read(§ion
->address_space
->dispatch
);
1050 iotlb
= section
- d
->map
.sections
;
1054 /* Make accesses to pages with watchpoints go via the
1055 watchpoint trap routines. */
1056 QTAILQ_FOREACH(wp
, &cpu
->watchpoints
, entry
) {
1057 if (cpu_watchpoint_address_matches(wp
, vaddr
, TARGET_PAGE_SIZE
)) {
1058 /* Avoid trapping reads of pages with a write breakpoint. */
1059 if ((prot
& PAGE_WRITE
) || (wp
->flags
& BP_MEM_READ
)) {
1060 iotlb
= PHYS_SECTION_WATCH
+ paddr
;
1061 *address
|= TLB_MMIO
;
1069 #endif /* defined(CONFIG_USER_ONLY) */
1071 #if !defined(CONFIG_USER_ONLY)
1073 static int subpage_register (subpage_t
*mmio
, uint32_t start
, uint32_t end
,
1075 static subpage_t
*subpage_init(AddressSpace
*as
, hwaddr base
);
1077 static void *(*phys_mem_alloc
)(size_t size
, uint64_t *align
) =
1078 qemu_anon_ram_alloc
;
1081 * Set a custom physical guest memory alloator.
1082 * Accelerators with unusual needs may need this. Hopefully, we can
1083 * get rid of it eventually.
1085 void phys_mem_set_alloc(void *(*alloc
)(size_t, uint64_t *align
))
1087 phys_mem_alloc
= alloc
;
1090 static uint16_t phys_section_add(PhysPageMap
*map
,
1091 MemoryRegionSection
*section
)
1093 /* The physical section number is ORed with a page-aligned
1094 * pointer to produce the iotlb entries. Thus it should
1095 * never overflow into the page-aligned value.
1097 assert(map
->sections_nb
< TARGET_PAGE_SIZE
);
1099 if (map
->sections_nb
== map
->sections_nb_alloc
) {
1100 map
->sections_nb_alloc
= MAX(map
->sections_nb_alloc
* 2, 16);
1101 map
->sections
= g_renew(MemoryRegionSection
, map
->sections
,
1102 map
->sections_nb_alloc
);
1104 map
->sections
[map
->sections_nb
] = *section
;
1105 memory_region_ref(section
->mr
);
1106 return map
->sections_nb
++;
1109 static void phys_section_destroy(MemoryRegion
*mr
)
1111 bool have_sub_page
= mr
->subpage
;
1113 memory_region_unref(mr
);
1115 if (have_sub_page
) {
1116 subpage_t
*subpage
= container_of(mr
, subpage_t
, iomem
);
1117 object_unref(OBJECT(&subpage
->iomem
));
1122 static void phys_sections_free(PhysPageMap
*map
)
1124 while (map
->sections_nb
> 0) {
1125 MemoryRegionSection
*section
= &map
->sections
[--map
->sections_nb
];
1126 phys_section_destroy(section
->mr
);
1128 g_free(map
->sections
);
1132 static void register_subpage(AddressSpaceDispatch
*d
, MemoryRegionSection
*section
)
1135 hwaddr base
= section
->offset_within_address_space
1137 MemoryRegionSection
*existing
= phys_page_find(d
->phys_map
, base
,
1138 d
->map
.nodes
, d
->map
.sections
);
1139 MemoryRegionSection subsection
= {
1140 .offset_within_address_space
= base
,
1141 .size
= int128_make64(TARGET_PAGE_SIZE
),
1145 assert(existing
->mr
->subpage
|| existing
->mr
== &io_mem_unassigned
);
1147 if (!(existing
->mr
->subpage
)) {
1148 subpage
= subpage_init(d
->as
, base
);
1149 subsection
.address_space
= d
->as
;
1150 subsection
.mr
= &subpage
->iomem
;
1151 phys_page_set(d
, base
>> TARGET_PAGE_BITS
, 1,
1152 phys_section_add(&d
->map
, &subsection
));
1154 subpage
= container_of(existing
->mr
, subpage_t
, iomem
);
1156 start
= section
->offset_within_address_space
& ~TARGET_PAGE_MASK
;
1157 end
= start
+ int128_get64(section
->size
) - 1;
1158 subpage_register(subpage
, start
, end
,
1159 phys_section_add(&d
->map
, section
));
1163 static void register_multipage(AddressSpaceDispatch
*d
,
1164 MemoryRegionSection
*section
)
1166 hwaddr start_addr
= section
->offset_within_address_space
;
1167 uint16_t section_index
= phys_section_add(&d
->map
, section
);
1168 uint64_t num_pages
= int128_get64(int128_rshift(section
->size
,
1172 phys_page_set(d
, start_addr
>> TARGET_PAGE_BITS
, num_pages
, section_index
);
1175 static void mem_add(MemoryListener
*listener
, MemoryRegionSection
*section
)
1177 AddressSpace
*as
= container_of(listener
, AddressSpace
, dispatch_listener
);
1178 AddressSpaceDispatch
*d
= as
->next_dispatch
;
1179 MemoryRegionSection now
= *section
, remain
= *section
;
1180 Int128 page_size
= int128_make64(TARGET_PAGE_SIZE
);
1182 if (now
.offset_within_address_space
& ~TARGET_PAGE_MASK
) {
1183 uint64_t left
= TARGET_PAGE_ALIGN(now
.offset_within_address_space
)
1184 - now
.offset_within_address_space
;
1186 now
.size
= int128_min(int128_make64(left
), now
.size
);
1187 register_subpage(d
, &now
);
1189 now
.size
= int128_zero();
1191 while (int128_ne(remain
.size
, now
.size
)) {
1192 remain
.size
= int128_sub(remain
.size
, now
.size
);
1193 remain
.offset_within_address_space
+= int128_get64(now
.size
);
1194 remain
.offset_within_region
+= int128_get64(now
.size
);
1196 if (int128_lt(remain
.size
, page_size
)) {
1197 register_subpage(d
, &now
);
1198 } else if (remain
.offset_within_address_space
& ~TARGET_PAGE_MASK
) {
1199 now
.size
= page_size
;
1200 register_subpage(d
, &now
);
1202 now
.size
= int128_and(now
.size
, int128_neg(page_size
));
1203 register_multipage(d
, &now
);
/* Flush KVM's coalesced MMIO ring, if KVM is in use. */
void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled()) {
        kvm_flush_coalesced_mmio_buffer();
    }
}
1214 void qemu_mutex_lock_ramlist(void)
1216 qemu_mutex_lock(&ram_list
.mutex
);
1219 void qemu_mutex_unlock_ramlist(void)
1221 qemu_mutex_unlock(&ram_list
.mutex
);
1225 static void *file_ram_alloc(RAMBlock
*block
,
1230 bool unlink_on_error
= false;
1232 char *sanitized_name
;
1234 void *area
= MAP_FAILED
;
1237 if (kvm_enabled() && !kvm_has_sync_mmu()) {
1239 "host lacks kvm mmu notifiers, -mem-path unsupported");
1244 fd
= open(path
, O_RDWR
);
1246 /* @path names an existing file, use it */
1249 if (errno
== ENOENT
) {
1250 /* @path names a file that doesn't exist, create it */
1251 fd
= open(path
, O_RDWR
| O_CREAT
| O_EXCL
, 0644);
1253 unlink_on_error
= true;
1256 } else if (errno
== EISDIR
) {
1257 /* @path names a directory, create a file there */
1258 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
1259 sanitized_name
= g_strdup(memory_region_name(block
->mr
));
1260 for (c
= sanitized_name
; *c
!= '\0'; c
++) {
1266 filename
= g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path
,
1268 g_free(sanitized_name
);
1270 fd
= mkstemp(filename
);
1278 if (errno
!= EEXIST
&& errno
!= EINTR
) {
1279 error_setg_errno(errp
, errno
,
1280 "can't open backing store %s for guest RAM",
1285 * Try again on EINTR and EEXIST. The latter happens when
1286 * something else creates the file between our two open().
1290 block
->page_size
= qemu_fd_getpagesize(fd
);
1291 block
->mr
->align
= block
->page_size
;
1292 #if defined(__s390x__)
1293 if (kvm_enabled()) {
1294 block
->mr
->align
= MAX(block
->mr
->align
, QEMU_VMALLOC_ALIGN
);
1298 if (memory
< block
->page_size
) {
1299 error_setg(errp
, "memory size 0x" RAM_ADDR_FMT
" must be equal to "
1300 "or larger than page size 0x%zx",
1301 memory
, block
->page_size
);
1305 memory
= ROUND_UP(memory
, block
->page_size
);
1308 * ftruncate is not supported by hugetlbfs in older
1309 * hosts, so don't bother bailing out on errors.
1310 * If anything goes wrong with it under other filesystems,
1313 if (ftruncate(fd
, memory
)) {
1314 perror("ftruncate");
1317 area
= qemu_ram_mmap(fd
, memory
, block
->mr
->align
,
1318 block
->flags
& RAM_SHARED
);
1319 if (area
== MAP_FAILED
) {
1320 error_setg_errno(errp
, errno
,
1321 "unable to map backing store for guest RAM");
1326 os_mem_prealloc(fd
, area
, memory
, errp
);
1327 if (errp
&& *errp
) {
1336 if (area
!= MAP_FAILED
) {
1337 qemu_ram_munmap(area
, memory
);
1339 if (unlink_on_error
) {
1349 /* Called with the ramlist lock held. */
1350 static ram_addr_t
find_ram_offset(ram_addr_t size
)
1352 RAMBlock
*block
, *next_block
;
1353 ram_addr_t offset
= RAM_ADDR_MAX
, mingap
= RAM_ADDR_MAX
;
1355 assert(size
!= 0); /* it would hand out same offset multiple times */
1357 if (QLIST_EMPTY_RCU(&ram_list
.blocks
)) {
1361 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1362 ram_addr_t end
, next
= RAM_ADDR_MAX
;
1364 end
= block
->offset
+ block
->max_length
;
1366 QLIST_FOREACH_RCU(next_block
, &ram_list
.blocks
, next
) {
1367 if (next_block
->offset
>= end
) {
1368 next
= MIN(next
, next_block
->offset
);
1371 if (next
- end
>= size
&& next
- end
< mingap
) {
1373 mingap
= next
- end
;
1377 if (offset
== RAM_ADDR_MAX
) {
1378 fprintf(stderr
, "Failed to find gap of requested size: %" PRIu64
"\n",
1386 ram_addr_t
last_ram_offset(void)
1389 ram_addr_t last
= 0;
1392 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1393 last
= MAX(last
, block
->offset
+ block
->max_length
);
1399 static void qemu_ram_setup_dump(void *addr
, ram_addr_t size
)
1403 /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
1404 if (!machine_dump_guest_core(current_machine
)) {
1405 ret
= qemu_madvise(addr
, size
, QEMU_MADV_DONTDUMP
);
1407 perror("qemu_madvise");
1408 fprintf(stderr
, "madvise doesn't support MADV_DONTDUMP, "
1409 "but dump_guest_core=off specified\n");
1414 const char *qemu_ram_get_idstr(RAMBlock
*rb
)
1419 /* Called with iothread lock held. */
1420 void qemu_ram_set_idstr(RAMBlock
*new_block
, const char *name
, DeviceState
*dev
)
1425 assert(!new_block
->idstr
[0]);
1428 char *id
= qdev_get_dev_path(dev
);
1430 snprintf(new_block
->idstr
, sizeof(new_block
->idstr
), "%s/", id
);
1434 pstrcat(new_block
->idstr
, sizeof(new_block
->idstr
), name
);
1437 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1438 if (block
!= new_block
&&
1439 !strcmp(block
->idstr
, new_block
->idstr
)) {
1440 fprintf(stderr
, "RAMBlock \"%s\" already registered, abort!\n",
1448 /* Called with iothread lock held. */
1449 void qemu_ram_unset_idstr(RAMBlock
*block
)
1451 /* FIXME: arch_init.c assumes that this is not called throughout
1452 * migration. Ignore the problem since hot-unplug during migration
1453 * does not work anyway.
1456 memset(block
->idstr
, 0, sizeof(block
->idstr
));
1460 size_t qemu_ram_pagesize(RAMBlock
*rb
)
1462 return rb
->page_size
;
1465 static int memory_try_enable_merging(void *addr
, size_t len
)
1467 if (!machine_mem_merge(current_machine
)) {
1468 /* disabled by the user */
1472 return qemu_madvise(addr
, len
, QEMU_MADV_MERGEABLE
);
1475 /* Only legal before guest might have detected the memory size: e.g. on
1476 * incoming migration, or right after reset.
1478 * As memory core doesn't know how is memory accessed, it is up to
1479 * resize callback to update device state and/or add assertions to detect
1480 * misuse, if necessary.
1482 int qemu_ram_resize(RAMBlock
*block
, ram_addr_t newsize
, Error
**errp
)
1486 newsize
= HOST_PAGE_ALIGN(newsize
);
1488 if (block
->used_length
== newsize
) {
1492 if (!(block
->flags
& RAM_RESIZEABLE
)) {
1493 error_setg_errno(errp
, EINVAL
,
1494 "Length mismatch: %s: 0x" RAM_ADDR_FMT
1495 " in != 0x" RAM_ADDR_FMT
, block
->idstr
,
1496 newsize
, block
->used_length
);
1500 if (block
->max_length
< newsize
) {
1501 error_setg_errno(errp
, EINVAL
,
1502 "Length too large: %s: 0x" RAM_ADDR_FMT
1503 " > 0x" RAM_ADDR_FMT
, block
->idstr
,
1504 newsize
, block
->max_length
);
1508 cpu_physical_memory_clear_dirty_range(block
->offset
, block
->used_length
);
1509 block
->used_length
= newsize
;
1510 cpu_physical_memory_set_dirty_range(block
->offset
, block
->used_length
,
1512 memory_region_set_size(block
->mr
, newsize
);
1513 if (block
->resized
) {
1514 block
->resized(block
->idstr
, newsize
, block
->host
);
1519 /* Called with ram_list.mutex held */
1520 static void dirty_memory_extend(ram_addr_t old_ram_size
,
1521 ram_addr_t new_ram_size
)
1523 ram_addr_t old_num_blocks
= DIV_ROUND_UP(old_ram_size
,
1524 DIRTY_MEMORY_BLOCK_SIZE
);
1525 ram_addr_t new_num_blocks
= DIV_ROUND_UP(new_ram_size
,
1526 DIRTY_MEMORY_BLOCK_SIZE
);
1529 /* Only need to extend if block count increased */
1530 if (new_num_blocks
<= old_num_blocks
) {
1534 for (i
= 0; i
< DIRTY_MEMORY_NUM
; i
++) {
1535 DirtyMemoryBlocks
*old_blocks
;
1536 DirtyMemoryBlocks
*new_blocks
;
1539 old_blocks
= atomic_rcu_read(&ram_list
.dirty_memory
[i
]);
1540 new_blocks
= g_malloc(sizeof(*new_blocks
) +
1541 sizeof(new_blocks
->blocks
[0]) * new_num_blocks
);
1543 if (old_num_blocks
) {
1544 memcpy(new_blocks
->blocks
, old_blocks
->blocks
,
1545 old_num_blocks
* sizeof(old_blocks
->blocks
[0]));
1548 for (j
= old_num_blocks
; j
< new_num_blocks
; j
++) {
1549 new_blocks
->blocks
[j
] = bitmap_new(DIRTY_MEMORY_BLOCK_SIZE
);
1552 atomic_rcu_set(&ram_list
.dirty_memory
[i
], new_blocks
);
1555 g_free_rcu(old_blocks
, rcu
);
1560 static void ram_block_add(RAMBlock
*new_block
, Error
**errp
)
1563 RAMBlock
*last_block
= NULL
;
1564 ram_addr_t old_ram_size
, new_ram_size
;
1567 old_ram_size
= last_ram_offset() >> TARGET_PAGE_BITS
;
1569 qemu_mutex_lock_ramlist();
1570 new_block
->offset
= find_ram_offset(new_block
->max_length
);
1572 if (!new_block
->host
) {
1573 if (xen_enabled()) {
1574 xen_ram_alloc(new_block
->offset
, new_block
->max_length
,
1575 new_block
->mr
, &err
);
1577 error_propagate(errp
, err
);
1578 qemu_mutex_unlock_ramlist();
1582 new_block
->host
= phys_mem_alloc(new_block
->max_length
,
1583 &new_block
->mr
->align
);
1584 if (!new_block
->host
) {
1585 error_setg_errno(errp
, errno
,
1586 "cannot set up guest memory '%s'",
1587 memory_region_name(new_block
->mr
));
1588 qemu_mutex_unlock_ramlist();
1591 memory_try_enable_merging(new_block
->host
, new_block
->max_length
);
1595 new_ram_size
= MAX(old_ram_size
,
1596 (new_block
->offset
+ new_block
->max_length
) >> TARGET_PAGE_BITS
);
1597 if (new_ram_size
> old_ram_size
) {
1598 migration_bitmap_extend(old_ram_size
, new_ram_size
);
1599 dirty_memory_extend(old_ram_size
, new_ram_size
);
1601 /* Keep the list sorted from biggest to smallest block. Unlike QTAILQ,
1602 * QLIST (which has an RCU-friendly variant) does not have insertion at
1603 * tail, so save the last element in last_block.
1605 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1607 if (block
->max_length
< new_block
->max_length
) {
1612 QLIST_INSERT_BEFORE_RCU(block
, new_block
, next
);
1613 } else if (last_block
) {
1614 QLIST_INSERT_AFTER_RCU(last_block
, new_block
, next
);
1615 } else { /* list is empty */
1616 QLIST_INSERT_HEAD_RCU(&ram_list
.blocks
, new_block
, next
);
1618 ram_list
.mru_block
= NULL
;
1620 /* Write list before version */
1623 qemu_mutex_unlock_ramlist();
1625 cpu_physical_memory_set_dirty_range(new_block
->offset
,
1626 new_block
->used_length
,
1629 if (new_block
->host
) {
1630 qemu_ram_setup_dump(new_block
->host
, new_block
->max_length
);
1631 qemu_madvise(new_block
->host
, new_block
->max_length
, QEMU_MADV_HUGEPAGE
);
1632 /* MADV_DONTFORK is also needed by KVM in absence of synchronous MMU */
1633 qemu_madvise(new_block
->host
, new_block
->max_length
, QEMU_MADV_DONTFORK
);
1638 RAMBlock
*qemu_ram_alloc_from_file(ram_addr_t size
, MemoryRegion
*mr
,
1639 bool share
, const char *mem_path
,
1642 RAMBlock
*new_block
;
1643 Error
*local_err
= NULL
;
1645 if (xen_enabled()) {
1646 error_setg(errp
, "-mem-path not supported with Xen");
1650 if (phys_mem_alloc
!= qemu_anon_ram_alloc
) {
1652 * file_ram_alloc() needs to allocate just like
1653 * phys_mem_alloc, but we haven't bothered to provide
1657 "-mem-path not supported with this accelerator");
1661 size
= HOST_PAGE_ALIGN(size
);
1662 new_block
= g_malloc0(sizeof(*new_block
));
1664 new_block
->used_length
= size
;
1665 new_block
->max_length
= size
;
1666 new_block
->flags
= share
? RAM_SHARED
: 0;
1667 new_block
->host
= file_ram_alloc(new_block
, size
,
1669 if (!new_block
->host
) {
1674 ram_block_add(new_block
, &local_err
);
1677 error_propagate(errp
, local_err
);
1685 RAMBlock
*qemu_ram_alloc_internal(ram_addr_t size
, ram_addr_t max_size
,
1686 void (*resized
)(const char*,
1689 void *host
, bool resizeable
,
1690 MemoryRegion
*mr
, Error
**errp
)
1692 RAMBlock
*new_block
;
1693 Error
*local_err
= NULL
;
1695 size
= HOST_PAGE_ALIGN(size
);
1696 max_size
= HOST_PAGE_ALIGN(max_size
);
1697 new_block
= g_malloc0(sizeof(*new_block
));
1699 new_block
->resized
= resized
;
1700 new_block
->used_length
= size
;
1701 new_block
->max_length
= max_size
;
1702 assert(max_size
>= size
);
1704 new_block
->page_size
= getpagesize();
1705 new_block
->host
= host
;
1707 new_block
->flags
|= RAM_PREALLOC
;
1710 new_block
->flags
|= RAM_RESIZEABLE
;
1712 ram_block_add(new_block
, &local_err
);
1715 error_propagate(errp
, local_err
);
1721 RAMBlock
*qemu_ram_alloc_from_ptr(ram_addr_t size
, void *host
,
1722 MemoryRegion
*mr
, Error
**errp
)
1724 return qemu_ram_alloc_internal(size
, size
, NULL
, host
, false, mr
, errp
);
1727 RAMBlock
*qemu_ram_alloc(ram_addr_t size
, MemoryRegion
*mr
, Error
**errp
)
1729 return qemu_ram_alloc_internal(size
, size
, NULL
, NULL
, false, mr
, errp
);
1732 RAMBlock
*qemu_ram_alloc_resizeable(ram_addr_t size
, ram_addr_t maxsz
,
1733 void (*resized
)(const char*,
1736 MemoryRegion
*mr
, Error
**errp
)
1738 return qemu_ram_alloc_internal(size
, maxsz
, resized
, NULL
, true, mr
, errp
);
1741 static void reclaim_ramblock(RAMBlock
*block
)
1743 if (block
->flags
& RAM_PREALLOC
) {
1745 } else if (xen_enabled()) {
1746 xen_invalidate_map_cache_entry(block
->host
);
1748 } else if (block
->fd
>= 0) {
1749 qemu_ram_munmap(block
->host
, block
->max_length
);
1753 qemu_anon_ram_free(block
->host
, block
->max_length
);
1758 void qemu_ram_free(RAMBlock
*block
)
1764 qemu_mutex_lock_ramlist();
1765 QLIST_REMOVE_RCU(block
, next
);
1766 ram_list
.mru_block
= NULL
;
1767 /* Write list before version */
1770 call_rcu(block
, reclaim_ramblock
, rcu
);
1771 qemu_mutex_unlock_ramlist();
1775 void qemu_ram_remap(ram_addr_t addr
, ram_addr_t length
)
1782 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1783 offset
= addr
- block
->offset
;
1784 if (offset
< block
->max_length
) {
1785 vaddr
= ramblock_ptr(block
, offset
);
1786 if (block
->flags
& RAM_PREALLOC
) {
1788 } else if (xen_enabled()) {
1792 if (block
->fd
>= 0) {
1793 flags
|= (block
->flags
& RAM_SHARED
?
1794 MAP_SHARED
: MAP_PRIVATE
);
1795 area
= mmap(vaddr
, length
, PROT_READ
| PROT_WRITE
,
1796 flags
, block
->fd
, offset
);
1799 * Remap needs to match alloc. Accelerators that
1800 * set phys_mem_alloc never remap. If they did,
1801 * we'd need a remap hook here.
1803 assert(phys_mem_alloc
== qemu_anon_ram_alloc
);
1805 flags
|= MAP_PRIVATE
| MAP_ANONYMOUS
;
1806 area
= mmap(vaddr
, length
, PROT_READ
| PROT_WRITE
,
1809 if (area
!= vaddr
) {
1810 fprintf(stderr
, "Could not remap addr: "
1811 RAM_ADDR_FMT
"@" RAM_ADDR_FMT
"\n",
1815 memory_try_enable_merging(vaddr
, length
);
1816 qemu_ram_setup_dump(vaddr
, length
);
1821 #endif /* !_WIN32 */
1823 /* Return a host pointer to ram allocated with qemu_ram_alloc.
1824 * This should not be used for general purpose DMA. Use address_space_map
1825 * or address_space_rw instead. For local memory (e.g. video ram) that the
1826 * device owns, use memory_region_get_ram_ptr.
1828 * Called within RCU critical section.
1830 void *qemu_map_ram_ptr(RAMBlock
*ram_block
, ram_addr_t addr
)
1832 RAMBlock
*block
= ram_block
;
1834 if (block
== NULL
) {
1835 block
= qemu_get_ram_block(addr
);
1836 addr
-= block
->offset
;
1839 if (xen_enabled() && block
->host
== NULL
) {
1840 /* We need to check if the requested address is in the RAM
1841 * because we don't want to map the entire memory in QEMU.
1842 * In that case just map until the end of the page.
1844 if (block
->offset
== 0) {
1845 return xen_map_cache(addr
, 0, 0);
1848 block
->host
= xen_map_cache(block
->offset
, block
->max_length
, 1);
1850 return ramblock_ptr(block
, addr
);
1853 /* Return a host pointer to guest's ram. Similar to qemu_map_ram_ptr
1854 * but takes a size argument.
1856 * Called within RCU critical section.
1858 static void *qemu_ram_ptr_length(RAMBlock
*ram_block
, ram_addr_t addr
,
1861 RAMBlock
*block
= ram_block
;
1866 if (block
== NULL
) {
1867 block
= qemu_get_ram_block(addr
);
1868 addr
-= block
->offset
;
1870 *size
= MIN(*size
, block
->max_length
- addr
);
1872 if (xen_enabled() && block
->host
== NULL
) {
1873 /* We need to check if the requested address is in the RAM
1874 * because we don't want to map the entire memory in QEMU.
1875 * In that case just map the requested area.
1877 if (block
->offset
== 0) {
1878 return xen_map_cache(addr
, *size
, 1);
1881 block
->host
= xen_map_cache(block
->offset
, block
->max_length
, 1);
1884 return ramblock_ptr(block
, addr
);
1888 * Translates a host ptr back to a RAMBlock, a ram_addr and an offset
1891 * ptr: Host pointer to look up
1892 * round_offset: If true round the result offset down to a page boundary
1893 * *ram_addr: set to result ram_addr
1894 * *offset: set to result offset within the RAMBlock
1896 * Returns: RAMBlock (or NULL if not found)
1898 * By the time this function returns, the returned pointer is not protected
1899 * by RCU anymore. If the caller is not within an RCU critical section and
1900 * does not hold the iothread lock, it must have other means of protecting the
1901 * pointer, such as a reference to the region that includes the incoming
1904 RAMBlock
*qemu_ram_block_from_host(void *ptr
, bool round_offset
,
1908 uint8_t *host
= ptr
;
1910 if (xen_enabled()) {
1911 ram_addr_t ram_addr
;
1913 ram_addr
= xen_ram_addr_from_mapcache(ptr
);
1914 block
= qemu_get_ram_block(ram_addr
);
1916 *offset
= ram_addr
- block
->offset
;
1923 block
= atomic_rcu_read(&ram_list
.mru_block
);
1924 if (block
&& block
->host
&& host
- block
->host
< block
->max_length
) {
1928 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1929 /* This case append when the block is not mapped. */
1930 if (block
->host
== NULL
) {
1933 if (host
- block
->host
< block
->max_length
) {
1942 *offset
= (host
- block
->host
);
1944 *offset
&= TARGET_PAGE_MASK
;
1951 * Finds the named RAMBlock
1953 * name: The name of RAMBlock to find
1955 * Returns: RAMBlock (or NULL if not found)
1957 RAMBlock
*qemu_ram_block_by_name(const char *name
)
1961 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1962 if (!strcmp(name
, block
->idstr
)) {
1970 /* Some of the softmmu routines need to translate from a host pointer
1971 (typically a TLB entry) back to a ram offset. */
1972 ram_addr_t
qemu_ram_addr_from_host(void *ptr
)
1977 block
= qemu_ram_block_from_host(ptr
, false, &offset
);
1979 return RAM_ADDR_INVALID
;
1982 return block
->offset
+ offset
;
1985 /* Called within RCU critical section. */
1986 static void notdirty_mem_write(void *opaque
, hwaddr ram_addr
,
1987 uint64_t val
, unsigned size
)
1989 if (!cpu_physical_memory_get_dirty_flag(ram_addr
, DIRTY_MEMORY_CODE
)) {
1990 tb_invalidate_phys_page_fast(ram_addr
, size
);
1994 stb_p(qemu_map_ram_ptr(NULL
, ram_addr
), val
);
1997 stw_p(qemu_map_ram_ptr(NULL
, ram_addr
), val
);
2000 stl_p(qemu_map_ram_ptr(NULL
, ram_addr
), val
);
2005 /* Set both VGA and migration bits for simplicity and to remove
2006 * the notdirty callback faster.
2008 cpu_physical_memory_set_dirty_range(ram_addr
, size
,
2009 DIRTY_CLIENTS_NOCODE
);
2010 /* we remove the notdirty callback only if the code has been
2012 if (!cpu_physical_memory_is_clean(ram_addr
)) {
2013 tlb_set_dirty(current_cpu
, current_cpu
->mem_io_vaddr
);
2017 static bool notdirty_mem_accepts(void *opaque
, hwaddr addr
,
2018 unsigned size
, bool is_write
)
2023 static const MemoryRegionOps notdirty_mem_ops
= {
2024 .write
= notdirty_mem_write
,
2025 .valid
.accepts
= notdirty_mem_accepts
,
2026 .endianness
= DEVICE_NATIVE_ENDIAN
,
2029 /* Generate a debug exception if a watchpoint has been hit. */
2030 static void check_watchpoint(int offset
, int len
, MemTxAttrs attrs
, int flags
)
2032 CPUState
*cpu
= current_cpu
;
2033 CPUClass
*cc
= CPU_GET_CLASS(cpu
);
2034 CPUArchState
*env
= cpu
->env_ptr
;
2035 target_ulong pc
, cs_base
;
2040 if (cpu
->watchpoint_hit
) {
2041 /* We re-entered the check after replacing the TB. Now raise
2042 * the debug interrupt so that is will trigger after the
2043 * current instruction. */
2044 cpu_interrupt(cpu
, CPU_INTERRUPT_DEBUG
);
2047 vaddr
= (cpu
->mem_io_vaddr
& TARGET_PAGE_MASK
) + offset
;
2048 QTAILQ_FOREACH(wp
, &cpu
->watchpoints
, entry
) {
2049 if (cpu_watchpoint_address_matches(wp
, vaddr
, len
)
2050 && (wp
->flags
& flags
)) {
2051 if (flags
== BP_MEM_READ
) {
2052 wp
->flags
|= BP_WATCHPOINT_HIT_READ
;
2054 wp
->flags
|= BP_WATCHPOINT_HIT_WRITE
;
2056 wp
->hitaddr
= vaddr
;
2057 wp
->hitattrs
= attrs
;
2058 if (!cpu
->watchpoint_hit
) {
2059 if (wp
->flags
& BP_CPU
&&
2060 !cc
->debug_check_watchpoint(cpu
, wp
)) {
2061 wp
->flags
&= ~BP_WATCHPOINT_HIT
;
2064 cpu
->watchpoint_hit
= wp
;
2065 tb_check_watchpoint(cpu
);
2066 if (wp
->flags
& BP_STOP_BEFORE_ACCESS
) {
2067 cpu
->exception_index
= EXCP_DEBUG
;
2070 cpu_get_tb_cpu_state(env
, &pc
, &cs_base
, &cpu_flags
);
2071 tb_gen_code(cpu
, pc
, cs_base
, cpu_flags
, 1);
2072 cpu_loop_exit_noexc(cpu
);
2076 wp
->flags
&= ~BP_WATCHPOINT_HIT
;
2081 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2082 so these check for a hit then pass through to the normal out-of-line
2084 static MemTxResult
watch_mem_read(void *opaque
, hwaddr addr
, uint64_t *pdata
,
2085 unsigned size
, MemTxAttrs attrs
)
2089 int asidx
= cpu_asidx_from_attrs(current_cpu
, attrs
);
2090 AddressSpace
*as
= current_cpu
->cpu_ases
[asidx
].as
;
2092 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, size
, attrs
, BP_MEM_READ
);
2095 data
= address_space_ldub(as
, addr
, attrs
, &res
);
2098 data
= address_space_lduw(as
, addr
, attrs
, &res
);
2101 data
= address_space_ldl(as
, addr
, attrs
, &res
);
2109 static MemTxResult
watch_mem_write(void *opaque
, hwaddr addr
,
2110 uint64_t val
, unsigned size
,
2114 int asidx
= cpu_asidx_from_attrs(current_cpu
, attrs
);
2115 AddressSpace
*as
= current_cpu
->cpu_ases
[asidx
].as
;
2117 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, size
, attrs
, BP_MEM_WRITE
);
2120 address_space_stb(as
, addr
, val
, attrs
, &res
);
2123 address_space_stw(as
, addr
, val
, attrs
, &res
);
2126 address_space_stl(as
, addr
, val
, attrs
, &res
);
2133 static const MemoryRegionOps watch_mem_ops
= {
2134 .read_with_attrs
= watch_mem_read
,
2135 .write_with_attrs
= watch_mem_write
,
2136 .endianness
= DEVICE_NATIVE_ENDIAN
,
2139 static MemTxResult
subpage_read(void *opaque
, hwaddr addr
, uint64_t *data
,
2140 unsigned len
, MemTxAttrs attrs
)
2142 subpage_t
*subpage
= opaque
;
2146 #if defined(DEBUG_SUBPAGE)
2147 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
"\n", __func__
,
2148 subpage
, len
, addr
);
2150 res
= address_space_read(subpage
->as
, addr
+ subpage
->base
,
2157 *data
= ldub_p(buf
);
2160 *data
= lduw_p(buf
);
2173 static MemTxResult
subpage_write(void *opaque
, hwaddr addr
,
2174 uint64_t value
, unsigned len
, MemTxAttrs attrs
)
2176 subpage_t
*subpage
= opaque
;
2179 #if defined(DEBUG_SUBPAGE)
2180 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
2181 " value %"PRIx64
"\n",
2182 __func__
, subpage
, len
, addr
, value
);
2200 return address_space_write(subpage
->as
, addr
+ subpage
->base
,
2204 static bool subpage_accepts(void *opaque
, hwaddr addr
,
2205 unsigned len
, bool is_write
)
2207 subpage_t
*subpage
= opaque
;
2208 #if defined(DEBUG_SUBPAGE)
2209 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx
"\n",
2210 __func__
, subpage
, is_write
? 'w' : 'r', len
, addr
);
2213 return address_space_access_valid(subpage
->as
, addr
+ subpage
->base
,
2217 static const MemoryRegionOps subpage_ops
= {
2218 .read_with_attrs
= subpage_read
,
2219 .write_with_attrs
= subpage_write
,
2220 .impl
.min_access_size
= 1,
2221 .impl
.max_access_size
= 8,
2222 .valid
.min_access_size
= 1,
2223 .valid
.max_access_size
= 8,
2224 .valid
.accepts
= subpage_accepts
,
2225 .endianness
= DEVICE_NATIVE_ENDIAN
,
2228 static int subpage_register (subpage_t
*mmio
, uint32_t start
, uint32_t end
,
2233 if (start
>= TARGET_PAGE_SIZE
|| end
>= TARGET_PAGE_SIZE
)
2235 idx
= SUBPAGE_IDX(start
);
2236 eidx
= SUBPAGE_IDX(end
);
2237 #if defined(DEBUG_SUBPAGE)
2238 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
2239 __func__
, mmio
, start
, end
, idx
, eidx
, section
);
2241 for (; idx
<= eidx
; idx
++) {
2242 mmio
->sub_section
[idx
] = section
;
2248 static subpage_t
*subpage_init(AddressSpace
*as
, hwaddr base
)
2252 mmio
= g_malloc0(sizeof(subpage_t
) + TARGET_PAGE_SIZE
* sizeof(uint16_t));
2255 memory_region_init_io(&mmio
->iomem
, NULL
, &subpage_ops
, mmio
,
2256 NULL
, TARGET_PAGE_SIZE
);
2257 mmio
->iomem
.subpage
= true;
2258 #if defined(DEBUG_SUBPAGE)
2259 printf("%s: %p base " TARGET_FMT_plx
" len %08x\n", __func__
,
2260 mmio
, base
, TARGET_PAGE_SIZE
);
2262 subpage_register(mmio
, 0, TARGET_PAGE_SIZE
-1, PHYS_SECTION_UNASSIGNED
);
2267 static uint16_t dummy_section(PhysPageMap
*map
, AddressSpace
*as
,
2271 MemoryRegionSection section
= {
2272 .address_space
= as
,
2274 .offset_within_address_space
= 0,
2275 .offset_within_region
= 0,
2276 .size
= int128_2_64(),
2279 return phys_section_add(map
, §ion
);
2282 MemoryRegion
*iotlb_to_region(CPUState
*cpu
, hwaddr index
, MemTxAttrs attrs
)
2284 int asidx
= cpu_asidx_from_attrs(cpu
, attrs
);
2285 CPUAddressSpace
*cpuas
= &cpu
->cpu_ases
[asidx
];
2286 AddressSpaceDispatch
*d
= atomic_rcu_read(&cpuas
->memory_dispatch
);
2287 MemoryRegionSection
*sections
= d
->map
.sections
;
2289 return sections
[index
& ~TARGET_PAGE_MASK
].mr
;
2292 static void io_mem_init(void)
2294 memory_region_init_io(&io_mem_rom
, NULL
, &unassigned_mem_ops
, NULL
, NULL
, UINT64_MAX
);
2295 memory_region_init_io(&io_mem_unassigned
, NULL
, &unassigned_mem_ops
, NULL
,
2297 memory_region_init_io(&io_mem_notdirty
, NULL
, ¬dirty_mem_ops
, NULL
,
2299 memory_region_init_io(&io_mem_watch
, NULL
, &watch_mem_ops
, NULL
,
2303 static void mem_begin(MemoryListener
*listener
)
2305 AddressSpace
*as
= container_of(listener
, AddressSpace
, dispatch_listener
);
2306 AddressSpaceDispatch
*d
= g_new0(AddressSpaceDispatch
, 1);
2309 n
= dummy_section(&d
->map
, as
, &io_mem_unassigned
);
2310 assert(n
== PHYS_SECTION_UNASSIGNED
);
2311 n
= dummy_section(&d
->map
, as
, &io_mem_notdirty
);
2312 assert(n
== PHYS_SECTION_NOTDIRTY
);
2313 n
= dummy_section(&d
->map
, as
, &io_mem_rom
);
2314 assert(n
== PHYS_SECTION_ROM
);
2315 n
= dummy_section(&d
->map
, as
, &io_mem_watch
);
2316 assert(n
== PHYS_SECTION_WATCH
);
2318 d
->phys_map
= (PhysPageEntry
) { .ptr
= PHYS_MAP_NODE_NIL
, .skip
= 1 };
2320 as
->next_dispatch
= d
;
2323 static void address_space_dispatch_free(AddressSpaceDispatch
*d
)
2325 phys_sections_free(&d
->map
);
2329 static void mem_commit(MemoryListener
*listener
)
2331 AddressSpace
*as
= container_of(listener
, AddressSpace
, dispatch_listener
);
2332 AddressSpaceDispatch
*cur
= as
->dispatch
;
2333 AddressSpaceDispatch
*next
= as
->next_dispatch
;
2335 phys_page_compact_all(next
, next
->map
.nodes_nb
);
2337 atomic_rcu_set(&as
->dispatch
, next
);
2339 call_rcu(cur
, address_space_dispatch_free
, rcu
);
2343 static void tcg_commit(MemoryListener
*listener
)
2345 CPUAddressSpace
*cpuas
;
2346 AddressSpaceDispatch
*d
;
2348 /* since each CPU stores ram addresses in its TLB cache, we must
2349 reset the modified entries */
2350 cpuas
= container_of(listener
, CPUAddressSpace
, tcg_as_listener
);
2351 cpu_reloading_memory_map();
2352 /* The CPU and TLB are protected by the iothread lock.
2353 * We reload the dispatch pointer now because cpu_reloading_memory_map()
2354 * may have split the RCU critical section.
2356 d
= atomic_rcu_read(&cpuas
->as
->dispatch
);
2357 cpuas
->memory_dispatch
= d
;
2358 tlb_flush(cpuas
->cpu
, 1);
2361 void address_space_init_dispatch(AddressSpace
*as
)
2363 as
->dispatch
= NULL
;
2364 as
->dispatch_listener
= (MemoryListener
) {
2366 .commit
= mem_commit
,
2367 .region_add
= mem_add
,
2368 .region_nop
= mem_add
,
2371 memory_listener_register(&as
->dispatch_listener
, as
);
2374 void address_space_unregister(AddressSpace
*as
)
2376 memory_listener_unregister(&as
->dispatch_listener
);
2379 void address_space_destroy_dispatch(AddressSpace
*as
)
2381 AddressSpaceDispatch
*d
= as
->dispatch
;
2383 atomic_rcu_set(&as
->dispatch
, NULL
);
2385 call_rcu(d
, address_space_dispatch_free
, rcu
);
2389 static void memory_map_init(void)
2391 system_memory
= g_malloc(sizeof(*system_memory
));
2393 memory_region_init(system_memory
, NULL
, "system", UINT64_MAX
);
2394 address_space_init(&address_space_memory
, system_memory
, "memory");
2396 system_io
= g_malloc(sizeof(*system_io
));
2397 memory_region_init_io(system_io
, NULL
, &unassigned_io_ops
, NULL
, "io",
2399 address_space_init(&address_space_io
, system_io
, "I/O");
2402 MemoryRegion
*get_system_memory(void)
2404 return system_memory
;
2407 MemoryRegion
*get_system_io(void)
2412 #endif /* !defined(CONFIG_USER_ONLY) */
2414 /* physical memory access (slow version, mainly for debug) */
2415 #if defined(CONFIG_USER_ONLY)
2416 int cpu_memory_rw_debug(CPUState
*cpu
, target_ulong addr
,
2417 uint8_t *buf
, int len
, int is_write
)
2424 page
= addr
& TARGET_PAGE_MASK
;
2425 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
2428 flags
= page_get_flags(page
);
2429 if (!(flags
& PAGE_VALID
))
2432 if (!(flags
& PAGE_WRITE
))
2434 /* XXX: this code should not depend on lock_user */
2435 if (!(p
= lock_user(VERIFY_WRITE
, addr
, l
, 0)))
2438 unlock_user(p
, addr
, l
);
2440 if (!(flags
& PAGE_READ
))
2442 /* XXX: this code should not depend on lock_user */
2443 if (!(p
= lock_user(VERIFY_READ
, addr
, l
, 1)))
2446 unlock_user(p
, addr
, 0);
2457 static void invalidate_and_set_dirty(MemoryRegion
*mr
, hwaddr addr
,
2460 uint8_t dirty_log_mask
= memory_region_get_dirty_log_mask(mr
);
2461 addr
+= memory_region_get_ram_addr(mr
);
2463 /* No early return if dirty_log_mask is or becomes 0, because
2464 * cpu_physical_memory_set_dirty_range will still call
2465 * xen_modified_memory.
2467 if (dirty_log_mask
) {
2469 cpu_physical_memory_range_includes_clean(addr
, length
, dirty_log_mask
);
2471 if (dirty_log_mask
& (1 << DIRTY_MEMORY_CODE
)) {
2472 tb_invalidate_phys_range(addr
, addr
+ length
);
2473 dirty_log_mask
&= ~(1 << DIRTY_MEMORY_CODE
);
2475 cpu_physical_memory_set_dirty_range(addr
, length
, dirty_log_mask
);
2478 static int memory_access_size(MemoryRegion
*mr
, unsigned l
, hwaddr addr
)
2480 unsigned access_size_max
= mr
->ops
->valid
.max_access_size
;
2482 /* Regions are assumed to support 1-4 byte accesses unless
2483 otherwise specified. */
2484 if (access_size_max
== 0) {
2485 access_size_max
= 4;
2488 /* Bound the maximum access by the alignment of the address. */
2489 if (!mr
->ops
->impl
.unaligned
) {
2490 unsigned align_size_max
= addr
& -addr
;
2491 if (align_size_max
!= 0 && align_size_max
< access_size_max
) {
2492 access_size_max
= align_size_max
;
2496 /* Don't attempt accesses larger than the maximum. */
2497 if (l
> access_size_max
) {
2498 l
= access_size_max
;
2505 static bool prepare_mmio_access(MemoryRegion
*mr
)
2507 bool unlocked
= !qemu_mutex_iothread_locked();
2508 bool release_lock
= false;
2510 if (unlocked
&& mr
->global_locking
) {
2511 qemu_mutex_lock_iothread();
2513 release_lock
= true;
2515 if (mr
->flush_coalesced_mmio
) {
2517 qemu_mutex_lock_iothread();
2519 qemu_flush_coalesced_mmio_buffer();
2521 qemu_mutex_unlock_iothread();
2525 return release_lock
;
2528 /* Called within RCU critical section. */
2529 static MemTxResult
address_space_write_continue(AddressSpace
*as
, hwaddr addr
,
2532 int len
, hwaddr addr1
,
2533 hwaddr l
, MemoryRegion
*mr
)
2537 MemTxResult result
= MEMTX_OK
;
2538 bool release_lock
= false;
2541 if (!memory_access_is_direct(mr
, true)) {
2542 release_lock
|= prepare_mmio_access(mr
);
2543 l
= memory_access_size(mr
, l
, addr1
);
2544 /* XXX: could force current_cpu to NULL to avoid
2548 /* 64 bit write access */
2550 result
|= memory_region_dispatch_write(mr
, addr1
, val
, 8,
2554 /* 32 bit write access */
2556 result
|= memory_region_dispatch_write(mr
, addr1
, val
, 4,
2560 /* 16 bit write access */
2562 result
|= memory_region_dispatch_write(mr
, addr1
, val
, 2,
2566 /* 8 bit write access */
2568 result
|= memory_region_dispatch_write(mr
, addr1
, val
, 1,
2576 ptr
= qemu_map_ram_ptr(mr
->ram_block
, addr1
);
2577 memcpy(ptr
, buf
, l
);
2578 invalidate_and_set_dirty(mr
, addr1
, l
);
2582 qemu_mutex_unlock_iothread();
2583 release_lock
= false;
2595 mr
= address_space_translate(as
, addr
, &addr1
, &l
, true);
2601 MemTxResult
address_space_write(AddressSpace
*as
, hwaddr addr
, MemTxAttrs attrs
,
2602 const uint8_t *buf
, int len
)
2607 MemTxResult result
= MEMTX_OK
;
2612 mr
= address_space_translate(as
, addr
, &addr1
, &l
, true);
2613 result
= address_space_write_continue(as
, addr
, attrs
, buf
, len
,
2621 /* Called within RCU critical section. */
2622 MemTxResult
address_space_read_continue(AddressSpace
*as
, hwaddr addr
,
2623 MemTxAttrs attrs
, uint8_t *buf
,
2624 int len
, hwaddr addr1
, hwaddr l
,
2629 MemTxResult result
= MEMTX_OK
;
2630 bool release_lock
= false;
2633 if (!memory_access_is_direct(mr
, false)) {
2635 release_lock
|= prepare_mmio_access(mr
);
2636 l
= memory_access_size(mr
, l
, addr1
);
2639 /* 64 bit read access */
2640 result
|= memory_region_dispatch_read(mr
, addr1
, &val
, 8,
2645 /* 32 bit read access */
2646 result
|= memory_region_dispatch_read(mr
, addr1
, &val
, 4,
2651 /* 16 bit read access */
2652 result
|= memory_region_dispatch_read(mr
, addr1
, &val
, 2,
2657 /* 8 bit read access */
2658 result
|= memory_region_dispatch_read(mr
, addr1
, &val
, 1,
2667 ptr
= qemu_map_ram_ptr(mr
->ram_block
, addr1
);
2668 memcpy(buf
, ptr
, l
);
2672 qemu_mutex_unlock_iothread();
2673 release_lock
= false;
2685 mr
= address_space_translate(as
, addr
, &addr1
, &l
, false);
2691 MemTxResult
address_space_read_full(AddressSpace
*as
, hwaddr addr
,
2692 MemTxAttrs attrs
, uint8_t *buf
, int len
)
2697 MemTxResult result
= MEMTX_OK
;
2702 mr
= address_space_translate(as
, addr
, &addr1
, &l
, false);
2703 result
= address_space_read_continue(as
, addr
, attrs
, buf
, len
,
2711 MemTxResult
address_space_rw(AddressSpace
*as
, hwaddr addr
, MemTxAttrs attrs
,
2712 uint8_t *buf
, int len
, bool is_write
)
2715 return address_space_write(as
, addr
, attrs
, (uint8_t *)buf
, len
);
2717 return address_space_read(as
, addr
, attrs
, (uint8_t *)buf
, len
);
2721 void cpu_physical_memory_rw(hwaddr addr
, uint8_t *buf
,
2722 int len
, int is_write
)
2724 address_space_rw(&address_space_memory
, addr
, MEMTXATTRS_UNSPECIFIED
,
2725 buf
, len
, is_write
);
2728 enum write_rom_type
{
2733 static inline void cpu_physical_memory_write_rom_internal(AddressSpace
*as
,
2734 hwaddr addr
, const uint8_t *buf
, int len
, enum write_rom_type type
)
2744 mr
= address_space_translate(as
, addr
, &addr1
, &l
, true);
2746 if (!(memory_region_is_ram(mr
) ||
2747 memory_region_is_romd(mr
))) {
2748 l
= memory_access_size(mr
, l
, addr1
);
2751 ptr
= qemu_map_ram_ptr(mr
->ram_block
, addr1
);
2754 memcpy(ptr
, buf
, l
);
2755 invalidate_and_set_dirty(mr
, addr1
, l
);
2758 flush_icache_range((uintptr_t)ptr
, (uintptr_t)ptr
+ l
);
2769 /* used for ROM loading : can write in RAM and ROM */
2770 void cpu_physical_memory_write_rom(AddressSpace
*as
, hwaddr addr
,
2771 const uint8_t *buf
, int len
)
2773 cpu_physical_memory_write_rom_internal(as
, addr
, buf
, len
, WRITE_DATA
);
2776 void cpu_flush_icache_range(hwaddr start
, int len
)
2779 * This function should do the same thing as an icache flush that was
2780 * triggered from within the guest. For TCG we are always cache coherent,
2781 * so there is no need to flush anything. For KVM / Xen we need to flush
2782 * the host's instruction cache at least.
2784 if (tcg_enabled()) {
2788 cpu_physical_memory_write_rom_internal(&address_space_memory
,
2789 start
, NULL
, len
, FLUSH_CACHE
);
2800 static BounceBuffer bounce
;
2802 typedef struct MapClient
{
2804 QLIST_ENTRY(MapClient
) link
;
2807 QemuMutex map_client_list_lock
;
2808 static QLIST_HEAD(map_client_list
, MapClient
) map_client_list
2809 = QLIST_HEAD_INITIALIZER(map_client_list
);
2811 static void cpu_unregister_map_client_do(MapClient
*client
)
2813 QLIST_REMOVE(client
, link
);
2817 static void cpu_notify_map_clients_locked(void)
2821 while (!QLIST_EMPTY(&map_client_list
)) {
2822 client
= QLIST_FIRST(&map_client_list
);
2823 qemu_bh_schedule(client
->bh
);
2824 cpu_unregister_map_client_do(client
);
2828 void cpu_register_map_client(QEMUBH
*bh
)
2830 MapClient
*client
= g_malloc(sizeof(*client
));
2832 qemu_mutex_lock(&map_client_list_lock
);
2834 QLIST_INSERT_HEAD(&map_client_list
, client
, link
);
2835 if (!atomic_read(&bounce
.in_use
)) {
2836 cpu_notify_map_clients_locked();
2838 qemu_mutex_unlock(&map_client_list_lock
);
2841 void cpu_exec_init_all(void)
2843 qemu_mutex_init(&ram_list
.mutex
);
2844 /* The data structures we set up here depend on knowing the page size,
2845 * so no more changes can be made after this point.
2846 * In an ideal world, nothing we did before we had finished the
2847 * machine setup would care about the target page size, and we could
2848 * do this much later, rather than requiring board models to state
2849 * up front what their requirements are.
2851 finalize_target_page_bits();
2854 qemu_mutex_init(&map_client_list_lock
);
2857 void cpu_unregister_map_client(QEMUBH
*bh
)
2861 qemu_mutex_lock(&map_client_list_lock
);
2862 QLIST_FOREACH(client
, &map_client_list
, link
) {
2863 if (client
->bh
== bh
) {
2864 cpu_unregister_map_client_do(client
);
2868 qemu_mutex_unlock(&map_client_list_lock
);
2871 static void cpu_notify_map_clients(void)
2873 qemu_mutex_lock(&map_client_list_lock
);
2874 cpu_notify_map_clients_locked();
2875 qemu_mutex_unlock(&map_client_list_lock
);
2878 bool address_space_access_valid(AddressSpace
*as
, hwaddr addr
, int len
, bool is_write
)
2886 mr
= address_space_translate(as
, addr
, &xlat
, &l
, is_write
);
2887 if (!memory_access_is_direct(mr
, is_write
)) {
2888 l
= memory_access_size(mr
, l
, addr
);
2889 if (!memory_region_access_valid(mr
, xlat
, l
, is_write
)) {
2901 /* Map a physical memory region into a host virtual address.
2902 * May map a subset of the requested range, given by and returned in *plen.
2903 * May return NULL if resources needed to perform the mapping are exhausted.
2904 * Use only for reads OR writes - not for read-modify-write operations.
2905 * Use cpu_register_map_client() to know when retrying the map operation is
2906 * likely to succeed.
2908 void *address_space_map(AddressSpace
*as
,
2915 hwaddr l
, xlat
, base
;
2916 MemoryRegion
*mr
, *this_mr
;
2925 mr
= address_space_translate(as
, addr
, &xlat
, &l
, is_write
);
2927 if (!memory_access_is_direct(mr
, is_write
)) {
2928 if (atomic_xchg(&bounce
.in_use
, true)) {
2932 /* Avoid unbounded allocations */
2933 l
= MIN(l
, TARGET_PAGE_SIZE
);
2934 bounce
.buffer
= qemu_memalign(TARGET_PAGE_SIZE
, l
);
2938 memory_region_ref(mr
);
2941 address_space_read(as
, addr
, MEMTXATTRS_UNSPECIFIED
,
2947 return bounce
.buffer
;
2961 this_mr
= address_space_translate(as
, addr
, &xlat
, &l
, is_write
);
2962 if (this_mr
!= mr
|| xlat
!= base
+ done
) {
2967 memory_region_ref(mr
);
2969 ptr
= qemu_ram_ptr_length(mr
->ram_block
, base
, plen
);
2975 /* Unmaps a memory region previously mapped by address_space_map().
2976 * Will also mark the memory as dirty if is_write == 1. access_len gives
2977 * the amount of memory that was actually read or written by the caller.
2979 void address_space_unmap(AddressSpace
*as
, void *buffer
, hwaddr len
,
2980 int is_write
, hwaddr access_len
)
2982 if (buffer
!= bounce
.buffer
) {
2986 mr
= memory_region_from_host(buffer
, &addr1
);
2989 invalidate_and_set_dirty(mr
, addr1
, access_len
);
2991 if (xen_enabled()) {
2992 xen_invalidate_map_cache_entry(buffer
);
2994 memory_region_unref(mr
);
2998 address_space_write(as
, bounce
.addr
, MEMTXATTRS_UNSPECIFIED
,
2999 bounce
.buffer
, access_len
);
3001 qemu_vfree(bounce
.buffer
);
3002 bounce
.buffer
= NULL
;
3003 memory_region_unref(bounce
.mr
);
3004 atomic_mb_set(&bounce
.in_use
, false);
3005 cpu_notify_map_clients();
3008 void *cpu_physical_memory_map(hwaddr addr
,
3012 return address_space_map(&address_space_memory
, addr
, plen
, is_write
);
3015 void cpu_physical_memory_unmap(void *buffer
, hwaddr len
,
3016 int is_write
, hwaddr access_len
)
3018 return address_space_unmap(&address_space_memory
, buffer
, len
, is_write
, access_len
);
3021 /* warning: addr must be aligned */
3022 static inline uint32_t address_space_ldl_internal(AddressSpace
*as
, hwaddr addr
,
3024 MemTxResult
*result
,
3025 enum device_endian endian
)
3033 bool release_lock
= false;
3036 mr
= address_space_translate(as
, addr
, &addr1
, &l
, false);
3037 if (l
< 4 || !memory_access_is_direct(mr
, false)) {
3038 release_lock
|= prepare_mmio_access(mr
);
3041 r
= memory_region_dispatch_read(mr
, addr1
, &val
, 4, attrs
);
3042 #if defined(TARGET_WORDS_BIGENDIAN)
3043 if (endian
== DEVICE_LITTLE_ENDIAN
) {
3047 if (endian
== DEVICE_BIG_ENDIAN
) {
3053 ptr
= qemu_map_ram_ptr(mr
->ram_block
, addr1
);
3055 case DEVICE_LITTLE_ENDIAN
:
3056 val
= ldl_le_p(ptr
);
3058 case DEVICE_BIG_ENDIAN
:
3059 val
= ldl_be_p(ptr
);
3071 qemu_mutex_unlock_iothread();
3077 uint32_t address_space_ldl(AddressSpace
*as
, hwaddr addr
,
3078 MemTxAttrs attrs
, MemTxResult
*result
)
3080 return address_space_ldl_internal(as
, addr
, attrs
, result
,
3081 DEVICE_NATIVE_ENDIAN
);
3084 uint32_t address_space_ldl_le(AddressSpace
*as
, hwaddr addr
,
3085 MemTxAttrs attrs
, MemTxResult
*result
)
3087 return address_space_ldl_internal(as
, addr
, attrs
, result
,
3088 DEVICE_LITTLE_ENDIAN
);
3091 uint32_t address_space_ldl_be(AddressSpace
*as
, hwaddr addr
,
3092 MemTxAttrs attrs
, MemTxResult
*result
)
3094 return address_space_ldl_internal(as
, addr
, attrs
, result
,
3098 uint32_t ldl_phys(AddressSpace
*as
, hwaddr addr
)
3100 return address_space_ldl(as
, addr
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3103 uint32_t ldl_le_phys(AddressSpace
*as
, hwaddr addr
)
3105 return address_space_ldl_le(as
, addr
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3108 uint32_t ldl_be_phys(AddressSpace
*as
, hwaddr addr
)
3110 return address_space_ldl_be(as
, addr
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3113 /* warning: addr must be aligned */
3114 static inline uint64_t address_space_ldq_internal(AddressSpace
*as
, hwaddr addr
,
3116 MemTxResult
*result
,
3117 enum device_endian endian
)
3125 bool release_lock
= false;
3128 mr
= address_space_translate(as
, addr
, &addr1
, &l
,
3130 if (l
< 8 || !memory_access_is_direct(mr
, false)) {
3131 release_lock
|= prepare_mmio_access(mr
);
3134 r
= memory_region_dispatch_read(mr
, addr1
, &val
, 8, attrs
);
3135 #if defined(TARGET_WORDS_BIGENDIAN)
3136 if (endian
== DEVICE_LITTLE_ENDIAN
) {
3140 if (endian
== DEVICE_BIG_ENDIAN
) {
3146 ptr
= qemu_map_ram_ptr(mr
->ram_block
, addr1
);
3148 case DEVICE_LITTLE_ENDIAN
:
3149 val
= ldq_le_p(ptr
);
3151 case DEVICE_BIG_ENDIAN
:
3152 val
= ldq_be_p(ptr
);
3164 qemu_mutex_unlock_iothread();
3170 uint64_t address_space_ldq(AddressSpace
*as
, hwaddr addr
,
3171 MemTxAttrs attrs
, MemTxResult
*result
)
3173 return address_space_ldq_internal(as
, addr
, attrs
, result
,
3174 DEVICE_NATIVE_ENDIAN
);
3177 uint64_t address_space_ldq_le(AddressSpace
*as
, hwaddr addr
,
3178 MemTxAttrs attrs
, MemTxResult
*result
)
3180 return address_space_ldq_internal(as
, addr
, attrs
, result
,
3181 DEVICE_LITTLE_ENDIAN
);
3184 uint64_t address_space_ldq_be(AddressSpace
*as
, hwaddr addr
,
3185 MemTxAttrs attrs
, MemTxResult
*result
)
3187 return address_space_ldq_internal(as
, addr
, attrs
, result
,
3191 uint64_t ldq_phys(AddressSpace
*as
, hwaddr addr
)
3193 return address_space_ldq(as
, addr
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3196 uint64_t ldq_le_phys(AddressSpace
*as
, hwaddr addr
)
3198 return address_space_ldq_le(as
, addr
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3201 uint64_t ldq_be_phys(AddressSpace
*as
, hwaddr addr
)
3203 return address_space_ldq_be(as
, addr
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3207 uint32_t address_space_ldub(AddressSpace
*as
, hwaddr addr
,
3208 MemTxAttrs attrs
, MemTxResult
*result
)
3213 r
= address_space_rw(as
, addr
, attrs
, &val
, 1, 0);
3220 uint32_t ldub_phys(AddressSpace
*as
, hwaddr addr
)
3222 return address_space_ldub(as
, addr
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3225 /* warning: addr must be aligned */
3226 static inline uint32_t address_space_lduw_internal(AddressSpace
*as
,
3229 MemTxResult
*result
,
3230 enum device_endian endian
)
3238 bool release_lock
= false;
3241 mr
= address_space_translate(as
, addr
, &addr1
, &l
,
3243 if (l
< 2 || !memory_access_is_direct(mr
, false)) {
3244 release_lock
|= prepare_mmio_access(mr
);
3247 r
= memory_region_dispatch_read(mr
, addr1
, &val
, 2, attrs
);
3248 #if defined(TARGET_WORDS_BIGENDIAN)
3249 if (endian
== DEVICE_LITTLE_ENDIAN
) {
3253 if (endian
== DEVICE_BIG_ENDIAN
) {
3259 ptr
= qemu_map_ram_ptr(mr
->ram_block
, addr1
);
3261 case DEVICE_LITTLE_ENDIAN
:
3262 val
= lduw_le_p(ptr
);
3264 case DEVICE_BIG_ENDIAN
:
3265 val
= lduw_be_p(ptr
);
3277 qemu_mutex_unlock_iothread();
3283 uint32_t address_space_lduw(AddressSpace
*as
, hwaddr addr
,
3284 MemTxAttrs attrs
, MemTxResult
*result
)
3286 return address_space_lduw_internal(as
, addr
, attrs
, result
,
3287 DEVICE_NATIVE_ENDIAN
);
3290 uint32_t address_space_lduw_le(AddressSpace
*as
, hwaddr addr
,
3291 MemTxAttrs attrs
, MemTxResult
*result
)
3293 return address_space_lduw_internal(as
, addr
, attrs
, result
,
3294 DEVICE_LITTLE_ENDIAN
);
3297 uint32_t address_space_lduw_be(AddressSpace
*as
, hwaddr addr
,
3298 MemTxAttrs attrs
, MemTxResult
*result
)
3300 return address_space_lduw_internal(as
, addr
, attrs
, result
,
3304 uint32_t lduw_phys(AddressSpace
*as
, hwaddr addr
)
3306 return address_space_lduw(as
, addr
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3309 uint32_t lduw_le_phys(AddressSpace
*as
, hwaddr addr
)
3311 return address_space_lduw_le(as
, addr
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3314 uint32_t lduw_be_phys(AddressSpace
*as
, hwaddr addr
)
3316 return address_space_lduw_be(as
, addr
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3319 /* warning: addr must be aligned. The ram page is not masked as dirty
3320 and the code inside is not invalidated. It is useful if the dirty
3321 bits are used to track modified PTEs */
3322 void address_space_stl_notdirty(AddressSpace
*as
, hwaddr addr
, uint32_t val
,
3323 MemTxAttrs attrs
, MemTxResult
*result
)
3330 uint8_t dirty_log_mask
;
3331 bool release_lock
= false;
3334 mr
= address_space_translate(as
, addr
, &addr1
, &l
,
3336 if (l
< 4 || !memory_access_is_direct(mr
, true)) {
3337 release_lock
|= prepare_mmio_access(mr
);
3339 r
= memory_region_dispatch_write(mr
, addr1
, val
, 4, attrs
);
3341 ptr
= qemu_map_ram_ptr(mr
->ram_block
, addr1
);
3344 dirty_log_mask
= memory_region_get_dirty_log_mask(mr
);
3345 dirty_log_mask
&= ~(1 << DIRTY_MEMORY_CODE
);
3346 cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr
) + addr
,
3354 qemu_mutex_unlock_iothread();
3359 void stl_phys_notdirty(AddressSpace
*as
, hwaddr addr
, uint32_t val
)
3361 address_space_stl_notdirty(as
, addr
, val
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3364 /* warning: addr must be aligned */
3365 static inline void address_space_stl_internal(AddressSpace
*as
,
3366 hwaddr addr
, uint32_t val
,
3368 MemTxResult
*result
,
3369 enum device_endian endian
)
3376 bool release_lock
= false;
3379 mr
= address_space_translate(as
, addr
, &addr1
, &l
,
3381 if (l
< 4 || !memory_access_is_direct(mr
, true)) {
3382 release_lock
|= prepare_mmio_access(mr
);
3384 #if defined(TARGET_WORDS_BIGENDIAN)
3385 if (endian
== DEVICE_LITTLE_ENDIAN
) {
3389 if (endian
== DEVICE_BIG_ENDIAN
) {
3393 r
= memory_region_dispatch_write(mr
, addr1
, val
, 4, attrs
);
3396 ptr
= qemu_map_ram_ptr(mr
->ram_block
, addr1
);
3398 case DEVICE_LITTLE_ENDIAN
:
3401 case DEVICE_BIG_ENDIAN
:
3408 invalidate_and_set_dirty(mr
, addr1
, 4);
3415 qemu_mutex_unlock_iothread();
3420 void address_space_stl(AddressSpace
*as
, hwaddr addr
, uint32_t val
,
3421 MemTxAttrs attrs
, MemTxResult
*result
)
3423 address_space_stl_internal(as
, addr
, val
, attrs
, result
,
3424 DEVICE_NATIVE_ENDIAN
);
3427 void address_space_stl_le(AddressSpace
*as
, hwaddr addr
, uint32_t val
,
3428 MemTxAttrs attrs
, MemTxResult
*result
)
3430 address_space_stl_internal(as
, addr
, val
, attrs
, result
,
3431 DEVICE_LITTLE_ENDIAN
);
3434 void address_space_stl_be(AddressSpace
*as
, hwaddr addr
, uint32_t val
,
3435 MemTxAttrs attrs
, MemTxResult
*result
)
3437 address_space_stl_internal(as
, addr
, val
, attrs
, result
,
3441 void stl_phys(AddressSpace
*as
, hwaddr addr
, uint32_t val
)
3443 address_space_stl(as
, addr
, val
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3446 void stl_le_phys(AddressSpace
*as
, hwaddr addr
, uint32_t val
)
3448 address_space_stl_le(as
, addr
, val
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3451 void stl_be_phys(AddressSpace
*as
, hwaddr addr
, uint32_t val
)
3453 address_space_stl_be(as
, addr
, val
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3457 void address_space_stb(AddressSpace
*as
, hwaddr addr
, uint32_t val
,
3458 MemTxAttrs attrs
, MemTxResult
*result
)
3463 r
= address_space_rw(as
, addr
, attrs
, &v
, 1, 1);
3469 void stb_phys(AddressSpace
*as
, hwaddr addr
, uint32_t val
)
3471 address_space_stb(as
, addr
, val
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3474 /* warning: addr must be aligned */
3475 static inline void address_space_stw_internal(AddressSpace
*as
,
3476 hwaddr addr
, uint32_t val
,
3478 MemTxResult
*result
,
3479 enum device_endian endian
)
3486 bool release_lock
= false;
3489 mr
= address_space_translate(as
, addr
, &addr1
, &l
, true);
3490 if (l
< 2 || !memory_access_is_direct(mr
, true)) {
3491 release_lock
|= prepare_mmio_access(mr
);
3493 #if defined(TARGET_WORDS_BIGENDIAN)
3494 if (endian
== DEVICE_LITTLE_ENDIAN
) {
3498 if (endian
== DEVICE_BIG_ENDIAN
) {
3502 r
= memory_region_dispatch_write(mr
, addr1
, val
, 2, attrs
);
3505 ptr
= qemu_map_ram_ptr(mr
->ram_block
, addr1
);
3507 case DEVICE_LITTLE_ENDIAN
:
3510 case DEVICE_BIG_ENDIAN
:
3517 invalidate_and_set_dirty(mr
, addr1
, 2);
3524 qemu_mutex_unlock_iothread();
3529 void address_space_stw(AddressSpace
*as
, hwaddr addr
, uint32_t val
,
3530 MemTxAttrs attrs
, MemTxResult
*result
)
3532 address_space_stw_internal(as
, addr
, val
, attrs
, result
,
3533 DEVICE_NATIVE_ENDIAN
);
3536 void address_space_stw_le(AddressSpace
*as
, hwaddr addr
, uint32_t val
,
3537 MemTxAttrs attrs
, MemTxResult
*result
)
3539 address_space_stw_internal(as
, addr
, val
, attrs
, result
,
3540 DEVICE_LITTLE_ENDIAN
);
3543 void address_space_stw_be(AddressSpace
*as
, hwaddr addr
, uint32_t val
,
3544 MemTxAttrs attrs
, MemTxResult
*result
)
3546 address_space_stw_internal(as
, addr
, val
, attrs
, result
,
3550 void stw_phys(AddressSpace
*as
, hwaddr addr
, uint32_t val
)
3552 address_space_stw(as
, addr
, val
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3555 void stw_le_phys(AddressSpace
*as
, hwaddr addr
, uint32_t val
)
3557 address_space_stw_le(as
, addr
, val
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3560 void stw_be_phys(AddressSpace
*as
, hwaddr addr
, uint32_t val
)
3562 address_space_stw_be(as
, addr
, val
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3566 void address_space_stq(AddressSpace
*as
, hwaddr addr
, uint64_t val
,
3567 MemTxAttrs attrs
, MemTxResult
*result
)
3571 r
= address_space_rw(as
, addr
, attrs
, (void *) &val
, 8, 1);
3577 void address_space_stq_le(AddressSpace
*as
, hwaddr addr
, uint64_t val
,
3578 MemTxAttrs attrs
, MemTxResult
*result
)
3581 val
= cpu_to_le64(val
);
3582 r
= address_space_rw(as
, addr
, attrs
, (void *) &val
, 8, 1);
3587 void address_space_stq_be(AddressSpace
*as
, hwaddr addr
, uint64_t val
,
3588 MemTxAttrs attrs
, MemTxResult
*result
)
3591 val
= cpu_to_be64(val
);
3592 r
= address_space_rw(as
, addr
, attrs
, (void *) &val
, 8, 1);
3598 void stq_phys(AddressSpace
*as
, hwaddr addr
, uint64_t val
)
3600 address_space_stq(as
, addr
, val
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3603 void stq_le_phys(AddressSpace
*as
, hwaddr addr
, uint64_t val
)
3605 address_space_stq_le(as
, addr
, val
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3608 void stq_be_phys(AddressSpace
*as
, hwaddr addr
, uint64_t val
)
3610 address_space_stq_be(as
, addr
, val
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3613 /* virtual memory access for debug (includes writing to ROM) */
3614 int cpu_memory_rw_debug(CPUState
*cpu
, target_ulong addr
,
3615 uint8_t *buf
, int len
, int is_write
)
3625 page
= addr
& TARGET_PAGE_MASK
;
3626 phys_addr
= cpu_get_phys_page_attrs_debug(cpu
, page
, &attrs
);
3627 asidx
= cpu_asidx_from_attrs(cpu
, attrs
);
3628 /* if no physical page mapped, return an error */
3629 if (phys_addr
== -1)
3631 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
3634 phys_addr
+= (addr
& ~TARGET_PAGE_MASK
);
3636 cpu_physical_memory_write_rom(cpu
->cpu_ases
[asidx
].as
,
3639 address_space_rw(cpu
->cpu_ases
[asidx
].as
, phys_addr
,
3640 MEMTXATTRS_UNSPECIFIED
,
3651 * Allows code that needs to deal with migration bitmaps etc to still be built
3652 * target independent.
3654 size_t qemu_target_page_bits(void)
3656 return TARGET_PAGE_BITS
;
3662 * A helper function for the _utterly broken_ virtio device model to find out if
3663 * it's running on a big endian machine. Don't do this at home kids!
3665 bool target_words_bigendian(void);
3666 bool target_words_bigendian(void)
3668 #if defined(TARGET_WORDS_BIGENDIAN)
3675 #ifndef CONFIG_USER_ONLY
3676 bool cpu_physical_memory_is_io(hwaddr phys_addr
)
3683 mr
= address_space_translate(&address_space_memory
,
3684 phys_addr
, &phys_addr
, &l
, false);
3686 res
= !(memory_region_is_ram(mr
) || memory_region_is_romd(mr
));
3691 int qemu_ram_foreach_block(RAMBlockIterFunc func
, void *opaque
)
3697 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
3698 ret
= func(block
->idstr
, block
->host
, block
->offset
,
3699 block
->used_length
, opaque
);