/*
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/cutils.h"
#include "exec/exec-all.h"
#include "hw/qdev-core.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/boards.h"
#include "hw/xen/xen.h"
#endif
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#if defined(CONFIG_USER_ONLY)
#include "qemu.h"
#else /* !CONFIG_USER_ONLY */
#include "exec/memory.h"
#include "exec/ioport.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#include "sysemu/xen-mapcache.h"
#endif
#include "exec/cpu-all.h"
#include "qemu/rcu_queue.h"
#include "qemu/main-loop.h"
#include "translate-all.h"
#include "sysemu/replay.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"

#include "migration/vmstate.h"

#include "qemu/range.h"
#include "qemu/mmap-alloc.h"

//#define DEBUG_SUBPAGE
#if !defined(CONFIG_USER_ONLY)
/* ram_list is read under rcu_read_lock()/rcu_read_unlock().  Writes
 * are protected by the ramlist lock.
 */
RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;
/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC   (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED     (1 << 1)

/* Only a portion of RAM (used_length) is actually used, and migrated.
 * This used_length size can change across reboots.
 */
#define RAM_RESIZEABLE (1 << 2)

#endif
#ifdef TARGET_PAGE_BITS_VARY
int target_page_bits;
bool target_page_bits_decided;
#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);

/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
__thread CPUState *current_cpu;

/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;
bool set_preferred_target_page_bits(int bits)
{
    /* The target page size is the lowest common denominator for all
     * the CPUs in the system, so we can only make it smaller, never
     * larger.  And we can't make it smaller once we've committed to
     * a particular size.
     */
#ifdef TARGET_PAGE_BITS_VARY
    assert(bits >= TARGET_PAGE_BITS_MIN);
    if (target_page_bits == 0 || target_page_bits > bits) {
        if (target_page_bits_decided) {
            return false;
        }
        target_page_bits = bits;
    }
#endif
    return true;
}
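/* Illustrative note (added; not part of the original source): because the
 * page size can only shrink, the value that sticks is the smallest one any
 * CPU asks for.  E.g. a first call with bits = 12 (4 KiB pages) followed by
 * a call with bits = 10 (1 KiB pages) leaves target_page_bits at 10, unless
 * finalize_target_page_bits() has already run, in which case the second
 * call fails.
 */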
#if !defined(CONFIG_USER_ONLY)

static void finalize_target_page_bits(void)
{
#ifdef TARGET_PAGE_BITS_VARY
    if (target_page_bits == 0) {
        target_page_bits = TARGET_PAGE_BITS_MIN;
    }
    target_page_bits_decided = true;
#endif
}
typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */
    uint32_t skip : 6;
     /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables.  */
#define ADDR_SPACE_BITS 64

#define P_L2_BITS 9
#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
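/* Worked example (added for illustration, assuming a hypothetical
 * TARGET_PAGE_BITS of 12): the page number then has 64 - 12 = 52 significant
 * bits and each level consumes P_L2_BITS = 9 of them, so
 * P_L2_LEVELS = ((64 - 12 - 1) / 9) + 1 = 6 levels cover the whole 64-bit
 * physical address space.
 */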
typedef PhysPageEntry Node[P_L2_SIZE];

typedef struct PhysPageMap {
    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;
struct AddressSpaceDispatch {
    struct rcu_head rcu;

    MemoryRegionSection *mru_section;
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3
static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;

/**
 * CPUAddressSpace: all the information a CPU needs about an AddressSpace
 * @cpu: the CPU whose AddressSpace this is
 * @as: the AddressSpace itself
 * @memory_dispatch: its dispatch pointer (cached, RCU protected)
 * @tcg_as_listener: listener for tracking changes to the AddressSpace
 */
struct CPUAddressSpace {
    CPUState *cpu;
    AddressSpace *as;
    struct AddressSpaceDispatch *memory_dispatch;
    MemoryListener tcg_as_listener;
};
#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    static unsigned alloc_hint = 16;
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, alloc_hint);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
        alloc_hint = map->nodes_nb_alloc;
    }
}
static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
{
    unsigned i;
    uint32_t ret;
    PhysPageEntry e;
    PhysPageEntry *p;

    ret = map->nodes_nb++;
    p = map->nodes[ret];
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);

    e.skip = leaf ? 0 : 1;
    e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
    for (i = 0; i < P_L2_SIZE; ++i) {
        memcpy(&p[i], &e, sizeof(e));
    }
    return ret;
}
static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map, level == 0);
    }
    p = map->nodes[lp->ptr];
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}
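/* Usage sketch (illustrative, not from the original source): to map a 2 MiB
 * region starting at guest-physical 0x100000 with 4 KiB pages to section
 * number 5, a caller would do
 *     phys_page_set(d, 0x100000 >> TARGET_PAGE_BITS, 512, 5);
 * and the recursion above fills 512 consecutive leaf entries (or a whole
 * aligned intermediate node at once, when index and nb allow it).
 */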
/* Compact a non leaf page entry. Simply detect that the entry has a single child,
 * and update our entry so we can skip it and go directly to the destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}

static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes);
    }
}
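/* Illustrative note (not in the original): after compaction, a chain of
 * intermediate nodes that each have a single child collapses into one entry
 * whose skip field records how many levels a lookup may jump, which is why
 * phys_page_find() below decrements its level counter by lp.skip rather
 * than by one.
 */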
static inline bool section_covers_addr(const MemoryRegionSection *section,
                                       hwaddr addr)
{
    /* Memory topology clips a memory region to [0, 2^64); size.hi > 0 means
     * the section must cover the entire address space.
     */
    return int128_gethi(section->size) ||
           range_covers_byte(section->offset_within_address_space,
                             int128_getlo(section->size), addr);
}

static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (section_covers_addr(&sections[lp.ptr], addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}
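/* Lookup sketch (added for illustration): for a page number N the walk
 * starts at the top level and, after subtracting the node's skip count,
 * picks child (N >> (i * P_L2_BITS)) & (P_L2_SIZE - 1) at each remaining
 * level; hitting a NIL pointer at any point resolves to the "unassigned"
 * section.
 */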
bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}
/* Called from RCU critical section */
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section = atomic_read(&d->mru_section);
    subpage_t *subpage;
    bool update;

    if (section && section != &d->map.sections[PHYS_SECTION_UNASSIGNED] &&
        section_covers_addr(section, addr)) {
        update = false;
    } else {
        section = phys_page_find(d->phys_map, addr, d->map.nodes,
                                 d->map.sections);
        update = true;
    }
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    if (update) {
        atomic_set(&d->mru_section, section);
    }
    return section;
}
/* Called from RCU critical section */
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    MemoryRegion *mr;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    mr = section->mr;

    /* MMIO registers can be expected to perform full-width accesses based only
     * on their address, without considering adjacent registers that could
     * decode to completely different MemoryRegions.  When such registers
     * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
     * regions overlap wildly.  For this reason we cannot clamp the accesses
     * here.
     *
     * If the length is small (as is the case for address_space_ldl/stl),
     * everything works fine.  If the incoming length is large, however,
     * the caller really has to do the clamping through memory_access_size.
     */
    if (memory_region_is_ram(mr)) {
        diff = int128_sub(section->size, int128_make64(addr));
        *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    }
    return section;
}
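/* Worked example (illustrative, not from the original source): for a 64 KiB
 * RAM section, a request that starts 60 KiB into the section with
 * *plen = 16 KiB gets diff = 4 KiB and is clamped to *plen = 4 KiB; the
 * caller is expected to loop to cover the remainder.
 */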
451 /* Called from RCU critical section */
452 MemoryRegion
*address_space_translate(AddressSpace
*as
, hwaddr addr
,
453 hwaddr
*xlat
, hwaddr
*plen
,
457 MemoryRegionSection
*section
;
461 AddressSpaceDispatch
*d
= atomic_rcu_read(&as
->dispatch
);
462 section
= address_space_translate_internal(d
, addr
, &addr
, plen
, true);
465 if (!mr
->iommu_ops
) {
469 iotlb
= mr
->iommu_ops
->translate(mr
, addr
, is_write
);
470 addr
= ((iotlb
.translated_addr
& ~iotlb
.addr_mask
)
471 | (addr
& iotlb
.addr_mask
));
472 *plen
= MIN(*plen
, (addr
| iotlb
.addr_mask
) - addr
+ 1);
473 if (!(iotlb
.perm
& (1 << is_write
))) {
474 mr
= &io_mem_unassigned
;
478 as
= iotlb
.target_as
;
481 if (xen_enabled() && memory_access_is_direct(mr
, is_write
)) {
482 hwaddr page
= ((addr
& TARGET_PAGE_MASK
) + TARGET_PAGE_SIZE
) - addr
;
483 *plen
= MIN(page
, *plen
);
490 /* Called from RCU critical section */
491 MemoryRegionSection
*
492 address_space_translate_for_iotlb(CPUState
*cpu
, int asidx
, hwaddr addr
,
493 hwaddr
*xlat
, hwaddr
*plen
)
495 MemoryRegionSection
*section
;
496 AddressSpaceDispatch
*d
= cpu
->cpu_ases
[asidx
].memory_dispatch
;
498 section
= address_space_translate_internal(d
, addr
, xlat
, plen
, false);
500 assert(!section
->mr
->iommu_ops
);
505 #if !defined(CONFIG_USER_ONLY)
507 static int cpu_common_post_load(void *opaque
, int version_id
)
509 CPUState
*cpu
= opaque
;
511 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
512 version_id is increased. */
513 cpu
->interrupt_request
&= ~0x01;
519 static int cpu_common_pre_load(void *opaque
)
521 CPUState
*cpu
= opaque
;
523 cpu
->exception_index
= -1;
528 static bool cpu_common_exception_index_needed(void *opaque
)
530 CPUState
*cpu
= opaque
;
532 return tcg_enabled() && cpu
->exception_index
!= -1;
535 static const VMStateDescription vmstate_cpu_common_exception_index
= {
536 .name
= "cpu_common/exception_index",
538 .minimum_version_id
= 1,
539 .needed
= cpu_common_exception_index_needed
,
540 .fields
= (VMStateField
[]) {
541 VMSTATE_INT32(exception_index
, CPUState
),
542 VMSTATE_END_OF_LIST()
546 static bool cpu_common_crash_occurred_needed(void *opaque
)
548 CPUState
*cpu
= opaque
;
550 return cpu
->crash_occurred
;
553 static const VMStateDescription vmstate_cpu_common_crash_occurred
= {
554 .name
= "cpu_common/crash_occurred",
556 .minimum_version_id
= 1,
557 .needed
= cpu_common_crash_occurred_needed
,
558 .fields
= (VMStateField
[]) {
559 VMSTATE_BOOL(crash_occurred
, CPUState
),
560 VMSTATE_END_OF_LIST()
564 const VMStateDescription vmstate_cpu_common
= {
565 .name
= "cpu_common",
567 .minimum_version_id
= 1,
568 .pre_load
= cpu_common_pre_load
,
569 .post_load
= cpu_common_post_load
,
570 .fields
= (VMStateField
[]) {
571 VMSTATE_UINT32(halted
, CPUState
),
572 VMSTATE_UINT32(interrupt_request
, CPUState
),
573 VMSTATE_END_OF_LIST()
575 .subsections
= (const VMStateDescription
*[]) {
576 &vmstate_cpu_common_exception_index
,
577 &vmstate_cpu_common_crash_occurred
,
584 CPUState
*qemu_get_cpu(int index
)
589 if (cpu
->cpu_index
== index
) {
597 #if !defined(CONFIG_USER_ONLY)
598 void cpu_address_space_init(CPUState
*cpu
, AddressSpace
*as
, int asidx
)
600 CPUAddressSpace
*newas
;
602 /* Target code should have set num_ases before calling us */
603 assert(asidx
< cpu
->num_ases
);
606 /* address space 0 gets the convenience alias */
610 /* KVM cannot currently support multiple address spaces. */
611 assert(asidx
== 0 || !kvm_enabled());
613 if (!cpu
->cpu_ases
) {
614 cpu
->cpu_ases
= g_new0(CPUAddressSpace
, cpu
->num_ases
);
617 newas
= &cpu
->cpu_ases
[asidx
];
621 newas
->tcg_as_listener
.commit
= tcg_commit
;
622 memory_listener_register(&newas
->tcg_as_listener
, as
);
626 AddressSpace
*cpu_get_address_space(CPUState
*cpu
, int asidx
)
628 /* Return the AddressSpace corresponding to the specified index */
629 return cpu
->cpu_ases
[asidx
].as
;
633 void cpu_exec_unrealizefn(CPUState
*cpu
)
635 CPUClass
*cc
= CPU_GET_CLASS(cpu
);
637 cpu_list_remove(cpu
);
639 if (cc
->vmsd
!= NULL
) {
640 vmstate_unregister(NULL
, cc
->vmsd
, cpu
);
642 if (qdev_get_vmsd(DEVICE(cpu
)) == NULL
) {
643 vmstate_unregister(NULL
, &vmstate_cpu_common
, cpu
);
647 void cpu_exec_initfn(CPUState
*cpu
)
652 #ifndef CONFIG_USER_ONLY
653 cpu
->thread_id
= qemu_get_thread_id();
655 /* This is a softmmu CPU object, so create a property for it
656 * so users can wire up its memory. (This can't go in qom/cpu.c
657 * because that file is compiled only once for both user-mode
658 * and system builds.) The default if no link is set up is to use
659 * the system address space.
661 object_property_add_link(OBJECT(cpu
), "memory", TYPE_MEMORY_REGION
,
662 (Object
**)&cpu
->memory
,
663 qdev_prop_allow_set_link_before_realize
,
664 OBJ_PROP_LINK_UNREF_ON_RELEASE
,
666 cpu
->memory
= system_memory
;
667 object_ref(OBJECT(cpu
->memory
));
671 void cpu_exec_realizefn(CPUState
*cpu
, Error
**errp
)
673 CPUClass
*cc ATTRIBUTE_UNUSED
= CPU_GET_CLASS(cpu
);
677 #ifndef CONFIG_USER_ONLY
678 if (qdev_get_vmsd(DEVICE(cpu
)) == NULL
) {
679 vmstate_register(NULL
, cpu
->cpu_index
, &vmstate_cpu_common
, cpu
);
681 if (cc
->vmsd
!= NULL
) {
682 vmstate_register(NULL
, cpu
->cpu_index
, cc
->vmsd
, cpu
);
687 #if defined(CONFIG_USER_ONLY)
688 static void breakpoint_invalidate(CPUState
*cpu
, target_ulong pc
)
690 tb_invalidate_phys_page_range(pc
, pc
+ 1, 0);
693 static void breakpoint_invalidate(CPUState
*cpu
, target_ulong pc
)
696 hwaddr phys
= cpu_get_phys_page_attrs_debug(cpu
, pc
, &attrs
);
697 int asidx
= cpu_asidx_from_attrs(cpu
, attrs
);
699 tb_invalidate_phys_addr(cpu
->cpu_ases
[asidx
].as
,
700 phys
| (pc
& ~TARGET_PAGE_MASK
));
705 #if defined(CONFIG_USER_ONLY)
706 void cpu_watchpoint_remove_all(CPUState
*cpu
, int mask
)
711 int cpu_watchpoint_remove(CPUState
*cpu
, vaddr addr
, vaddr len
,
717 void cpu_watchpoint_remove_by_ref(CPUState
*cpu
, CPUWatchpoint
*watchpoint
)
721 int cpu_watchpoint_insert(CPUState
*cpu
, vaddr addr
, vaddr len
,
722 int flags
, CPUWatchpoint
**watchpoint
)
727 /* Add a watchpoint. */
728 int cpu_watchpoint_insert(CPUState
*cpu
, vaddr addr
, vaddr len
,
729 int flags
, CPUWatchpoint
**watchpoint
)
733 /* forbid ranges which are empty or run off the end of the address space */
734 if (len
== 0 || (addr
+ len
- 1) < addr
) {
735 error_report("tried to set invalid watchpoint at %"
736 VADDR_PRIx
", len=%" VADDR_PRIu
, addr
, len
);
739 wp
= g_malloc(sizeof(*wp
));
745 /* keep all GDB-injected watchpoints in front */
746 if (flags
& BP_GDB
) {
747 QTAILQ_INSERT_HEAD(&cpu
->watchpoints
, wp
, entry
);
749 QTAILQ_INSERT_TAIL(&cpu
->watchpoints
, wp
, entry
);
752 tlb_flush_page(cpu
, addr
);
759 /* Remove a specific watchpoint. */
760 int cpu_watchpoint_remove(CPUState
*cpu
, vaddr addr
, vaddr len
,
765 QTAILQ_FOREACH(wp
, &cpu
->watchpoints
, entry
) {
766 if (addr
== wp
->vaddr
&& len
== wp
->len
767 && flags
== (wp
->flags
& ~BP_WATCHPOINT_HIT
)) {
768 cpu_watchpoint_remove_by_ref(cpu
, wp
);
775 /* Remove a specific watchpoint by reference. */
776 void cpu_watchpoint_remove_by_ref(CPUState
*cpu
, CPUWatchpoint
*watchpoint
)
778 QTAILQ_REMOVE(&cpu
->watchpoints
, watchpoint
, entry
);
780 tlb_flush_page(cpu
, watchpoint
->vaddr
);
785 /* Remove all matching watchpoints. */
786 void cpu_watchpoint_remove_all(CPUState
*cpu
, int mask
)
788 CPUWatchpoint
*wp
, *next
;
790 QTAILQ_FOREACH_SAFE(wp
, &cpu
->watchpoints
, entry
, next
) {
791 if (wp
->flags
& mask
) {
792 cpu_watchpoint_remove_by_ref(cpu
, wp
);
/* Return true if this watchpoint address matches the specified
 * access (ie the address range covered by the watchpoint overlaps
 * partially or completely with the address range covered by the
 * access).
 */
static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
                                                  vaddr addr,
                                                  vaddr len)
{
    /* We know the lengths are non-zero, but a little caution is
     * required to avoid errors in the case where the range ends
     * exactly at the top of the address space and so addr + len
     * wraps round to zero.
     */
    vaddr wpend = wp->vaddr + wp->len - 1;
    vaddr addrend = addr + len - 1;

    return !(addr > wpend || wp->vaddr > addrend);
}
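/* Worked example (illustrative, not part of the original source): a 4-byte
 * watchpoint at 0xfffffffffffffffc ends exactly at the top of the address
 * space.  wp->vaddr + wp->len would wrap to 0 and defeat a naive
 * "addr < wp->vaddr + wp->len" test, so the check compares the inclusive
 * end addresses (wpend, addrend) instead.
 */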
819 /* Add a breakpoint. */
820 int cpu_breakpoint_insert(CPUState
*cpu
, vaddr pc
, int flags
,
821 CPUBreakpoint
**breakpoint
)
825 bp
= g_malloc(sizeof(*bp
));
830 /* keep all GDB-injected breakpoints in front */
831 if (flags
& BP_GDB
) {
832 QTAILQ_INSERT_HEAD(&cpu
->breakpoints
, bp
, entry
);
834 QTAILQ_INSERT_TAIL(&cpu
->breakpoints
, bp
, entry
);
837 breakpoint_invalidate(cpu
, pc
);
845 /* Remove a specific breakpoint. */
846 int cpu_breakpoint_remove(CPUState
*cpu
, vaddr pc
, int flags
)
850 QTAILQ_FOREACH(bp
, &cpu
->breakpoints
, entry
) {
851 if (bp
->pc
== pc
&& bp
->flags
== flags
) {
852 cpu_breakpoint_remove_by_ref(cpu
, bp
);
859 /* Remove a specific breakpoint by reference. */
860 void cpu_breakpoint_remove_by_ref(CPUState
*cpu
, CPUBreakpoint
*breakpoint
)
862 QTAILQ_REMOVE(&cpu
->breakpoints
, breakpoint
, entry
);
864 breakpoint_invalidate(cpu
, breakpoint
->pc
);
869 /* Remove all matching breakpoints. */
870 void cpu_breakpoint_remove_all(CPUState
*cpu
, int mask
)
872 CPUBreakpoint
*bp
, *next
;
874 QTAILQ_FOREACH_SAFE(bp
, &cpu
->breakpoints
, entry
, next
) {
875 if (bp
->flags
& mask
) {
876 cpu_breakpoint_remove_by_ref(cpu
, bp
);
881 /* enable or disable single step mode. EXCP_DEBUG is returned by the
882 CPU loop after each instruction */
883 void cpu_single_step(CPUState
*cpu
, int enabled
)
885 if (cpu
->singlestep_enabled
!= enabled
) {
886 cpu
->singlestep_enabled
= enabled
;
888 kvm_update_guest_debug(cpu
, 0);
890 /* must flush all the translated code to avoid inconsistencies */
891 /* XXX: only flush what is necessary */
897 void cpu_abort(CPUState
*cpu
, const char *fmt
, ...)
904 fprintf(stderr
, "qemu: fatal: ");
905 vfprintf(stderr
, fmt
, ap
);
906 fprintf(stderr
, "\n");
907 cpu_dump_state(cpu
, stderr
, fprintf
, CPU_DUMP_FPU
| CPU_DUMP_CCOP
);
908 if (qemu_log_separate()) {
909 qemu_log("qemu: fatal: ");
910 qemu_log_vprintf(fmt
, ap2
);
912 log_cpu_state(cpu
, CPU_DUMP_FPU
| CPU_DUMP_CCOP
);
919 #if defined(CONFIG_USER_ONLY)
921 struct sigaction act
;
922 sigfillset(&act
.sa_mask
);
923 act
.sa_handler
= SIG_DFL
;
924 sigaction(SIGABRT
, &act
, NULL
);
930 #if !defined(CONFIG_USER_ONLY)
931 /* Called from RCU critical section */
932 static RAMBlock
*qemu_get_ram_block(ram_addr_t addr
)
936 block
= atomic_rcu_read(&ram_list
.mru_block
);
937 if (block
&& addr
- block
->offset
< block
->max_length
) {
940 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
941 if (addr
- block
->offset
< block
->max_length
) {
946 fprintf(stderr
, "Bad ram offset %" PRIx64
"\n", (uint64_t)addr
);
950 /* It is safe to write mru_block outside the iothread lock. This
955 * xxx removed from list
959 * call_rcu(reclaim_ramblock, xxx);
962 * atomic_rcu_set is not needed here. The block was already published
963 * when it was placed into the list. Here we're just making an extra
964 * copy of the pointer.
966 ram_list
.mru_block
= block
;
970 static void tlb_reset_dirty_range_all(ram_addr_t start
, ram_addr_t length
)
977 end
= TARGET_PAGE_ALIGN(start
+ length
);
978 start
&= TARGET_PAGE_MASK
;
981 block
= qemu_get_ram_block(start
);
982 assert(block
== qemu_get_ram_block(end
- 1));
983 start1
= (uintptr_t)ramblock_ptr(block
, start
- block
->offset
);
985 tlb_reset_dirty(cpu
, start1
, length
);
990 /* Note: start and end must be within the same ram block. */
991 bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start
,
995 DirtyMemoryBlocks
*blocks
;
996 unsigned long end
, page
;
1003 end
= TARGET_PAGE_ALIGN(start
+ length
) >> TARGET_PAGE_BITS
;
1004 page
= start
>> TARGET_PAGE_BITS
;
1008 blocks
= atomic_rcu_read(&ram_list
.dirty_memory
[client
]);
1010 while (page
< end
) {
1011 unsigned long idx
= page
/ DIRTY_MEMORY_BLOCK_SIZE
;
1012 unsigned long offset
= page
% DIRTY_MEMORY_BLOCK_SIZE
;
1013 unsigned long num
= MIN(end
- page
, DIRTY_MEMORY_BLOCK_SIZE
- offset
);
1015 dirty
|= bitmap_test_and_clear_atomic(blocks
->blocks
[idx
],
1022 if (dirty
&& tcg_enabled()) {
1023 tlb_reset_dirty_range_all(start
, length
);
1029 /* Called from RCU critical section */
1030 hwaddr
memory_region_section_get_iotlb(CPUState
*cpu
,
1031 MemoryRegionSection
*section
,
1033 hwaddr paddr
, hwaddr xlat
,
1035 target_ulong
*address
)
1040 if (memory_region_is_ram(section
->mr
)) {
1042 iotlb
= memory_region_get_ram_addr(section
->mr
) + xlat
;
1043 if (!section
->readonly
) {
1044 iotlb
|= PHYS_SECTION_NOTDIRTY
;
1046 iotlb
|= PHYS_SECTION_ROM
;
1049 AddressSpaceDispatch
*d
;
1051 d
= atomic_rcu_read(§ion
->address_space
->dispatch
);
1052 iotlb
= section
- d
->map
.sections
;
1056 /* Make accesses to pages with watchpoints go via the
1057 watchpoint trap routines. */
1058 QTAILQ_FOREACH(wp
, &cpu
->watchpoints
, entry
) {
1059 if (cpu_watchpoint_address_matches(wp
, vaddr
, TARGET_PAGE_SIZE
)) {
1060 /* Avoid trapping reads of pages with a write breakpoint. */
1061 if ((prot
& PAGE_WRITE
) || (wp
->flags
& BP_MEM_READ
)) {
1062 iotlb
= PHYS_SECTION_WATCH
+ paddr
;
1063 *address
|= TLB_MMIO
;
1071 #endif /* defined(CONFIG_USER_ONLY) */
1073 #if !defined(CONFIG_USER_ONLY)
1075 static int subpage_register (subpage_t
*mmio
, uint32_t start
, uint32_t end
,
1077 static subpage_t
*subpage_init(AddressSpace
*as
, hwaddr base
);
1079 static void *(*phys_mem_alloc
)(size_t size
, uint64_t *align
) =
1080 qemu_anon_ram_alloc
;
1083 * Set a custom physical guest memory alloator.
1084 * Accelerators with unusual needs may need this. Hopefully, we can
1085 * get rid of it eventually.
1087 void phys_mem_set_alloc(void *(*alloc
)(size_t, uint64_t *align
))
1089 phys_mem_alloc
= alloc
;
1092 static uint16_t phys_section_add(PhysPageMap
*map
,
1093 MemoryRegionSection
*section
)
1095 /* The physical section number is ORed with a page-aligned
1096 * pointer to produce the iotlb entries. Thus it should
1097 * never overflow into the page-aligned value.
1099 assert(map
->sections_nb
< TARGET_PAGE_SIZE
);
1101 if (map
->sections_nb
== map
->sections_nb_alloc
) {
1102 map
->sections_nb_alloc
= MAX(map
->sections_nb_alloc
* 2, 16);
1103 map
->sections
= g_renew(MemoryRegionSection
, map
->sections
,
1104 map
->sections_nb_alloc
);
1106 map
->sections
[map
->sections_nb
] = *section
;
1107 memory_region_ref(section
->mr
);
1108 return map
->sections_nb
++;
1111 static void phys_section_destroy(MemoryRegion
*mr
)
1113 bool have_sub_page
= mr
->subpage
;
1115 memory_region_unref(mr
);
1117 if (have_sub_page
) {
1118 subpage_t
*subpage
= container_of(mr
, subpage_t
, iomem
);
1119 object_unref(OBJECT(&subpage
->iomem
));
1124 static void phys_sections_free(PhysPageMap
*map
)
1126 while (map
->sections_nb
> 0) {
1127 MemoryRegionSection
*section
= &map
->sections
[--map
->sections_nb
];
1128 phys_section_destroy(section
->mr
);
1130 g_free(map
->sections
);
1134 static void register_subpage(AddressSpaceDispatch
*d
, MemoryRegionSection
*section
)
1137 hwaddr base
= section
->offset_within_address_space
1139 MemoryRegionSection
*existing
= phys_page_find(d
->phys_map
, base
,
1140 d
->map
.nodes
, d
->map
.sections
);
1141 MemoryRegionSection subsection
= {
1142 .offset_within_address_space
= base
,
1143 .size
= int128_make64(TARGET_PAGE_SIZE
),
1147 assert(existing
->mr
->subpage
|| existing
->mr
== &io_mem_unassigned
);
1149 if (!(existing
->mr
->subpage
)) {
1150 subpage
= subpage_init(d
->as
, base
);
1151 subsection
.address_space
= d
->as
;
1152 subsection
.mr
= &subpage
->iomem
;
1153 phys_page_set(d
, base
>> TARGET_PAGE_BITS
, 1,
1154 phys_section_add(&d
->map
, &subsection
));
1156 subpage
= container_of(existing
->mr
, subpage_t
, iomem
);
1158 start
= section
->offset_within_address_space
& ~TARGET_PAGE_MASK
;
1159 end
= start
+ int128_get64(section
->size
) - 1;
1160 subpage_register(subpage
, start
, end
,
1161 phys_section_add(&d
->map
, section
));
1165 static void register_multipage(AddressSpaceDispatch
*d
,
1166 MemoryRegionSection
*section
)
1168 hwaddr start_addr
= section
->offset_within_address_space
;
1169 uint16_t section_index
= phys_section_add(&d
->map
, section
);
1170 uint64_t num_pages
= int128_get64(int128_rshift(section
->size
,
1174 phys_page_set(d
, start_addr
>> TARGET_PAGE_BITS
, num_pages
, section_index
);
1177 static void mem_add(MemoryListener
*listener
, MemoryRegionSection
*section
)
1179 AddressSpace
*as
= container_of(listener
, AddressSpace
, dispatch_listener
);
1180 AddressSpaceDispatch
*d
= as
->next_dispatch
;
1181 MemoryRegionSection now
= *section
, remain
= *section
;
1182 Int128 page_size
= int128_make64(TARGET_PAGE_SIZE
);
1184 if (now
.offset_within_address_space
& ~TARGET_PAGE_MASK
) {
1185 uint64_t left
= TARGET_PAGE_ALIGN(now
.offset_within_address_space
)
1186 - now
.offset_within_address_space
;
1188 now
.size
= int128_min(int128_make64(left
), now
.size
);
1189 register_subpage(d
, &now
);
1191 now
.size
= int128_zero();
1193 while (int128_ne(remain
.size
, now
.size
)) {
1194 remain
.size
= int128_sub(remain
.size
, now
.size
);
1195 remain
.offset_within_address_space
+= int128_get64(now
.size
);
1196 remain
.offset_within_region
+= int128_get64(now
.size
);
1198 if (int128_lt(remain
.size
, page_size
)) {
1199 register_subpage(d
, &now
);
1200 } else if (remain
.offset_within_address_space
& ~TARGET_PAGE_MASK
) {
1201 now
.size
= page_size
;
1202 register_subpage(d
, &now
);
1204 now
.size
= int128_and(now
.size
, int128_neg(page_size
));
1205 register_multipage(d
, &now
);
1210 void qemu_flush_coalesced_mmio_buffer(void)
1213 kvm_flush_coalesced_mmio_buffer();
1216 void qemu_mutex_lock_ramlist(void)
1218 qemu_mutex_lock(&ram_list
.mutex
);
1221 void qemu_mutex_unlock_ramlist(void)
1223 qemu_mutex_unlock(&ram_list
.mutex
);
1227 static void *file_ram_alloc(RAMBlock
*block
,
1232 bool unlink_on_error
= false;
1234 char *sanitized_name
;
1236 void *area
= MAP_FAILED
;
1239 if (kvm_enabled() && !kvm_has_sync_mmu()) {
1241 "host lacks kvm mmu notifiers, -mem-path unsupported");
1246 fd
= open(path
, O_RDWR
);
1248 /* @path names an existing file, use it */
1251 if (errno
== ENOENT
) {
1252 /* @path names a file that doesn't exist, create it */
1253 fd
= open(path
, O_RDWR
| O_CREAT
| O_EXCL
, 0644);
1255 unlink_on_error
= true;
1258 } else if (errno
== EISDIR
) {
1259 /* @path names a directory, create a file there */
1260 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
1261 sanitized_name
= g_strdup(memory_region_name(block
->mr
));
1262 for (c
= sanitized_name
; *c
!= '\0'; c
++) {
1268 filename
= g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path
,
1270 g_free(sanitized_name
);
1272 fd
= mkstemp(filename
);
1280 if (errno
!= EEXIST
&& errno
!= EINTR
) {
1281 error_setg_errno(errp
, errno
,
1282 "can't open backing store %s for guest RAM",
1287 * Try again on EINTR and EEXIST. The latter happens when
1288 * something else creates the file between our two open().
1292 block
->page_size
= qemu_fd_getpagesize(fd
);
1293 block
->mr
->align
= block
->page_size
;
1294 #if defined(__s390x__)
1295 if (kvm_enabled()) {
1296 block
->mr
->align
= MAX(block
->mr
->align
, QEMU_VMALLOC_ALIGN
);
1300 if (memory
< block
->page_size
) {
1301 error_setg(errp
, "memory size 0x" RAM_ADDR_FMT
" must be equal to "
1302 "or larger than page size 0x%zx",
1303 memory
, block
->page_size
);
1307 memory
= ROUND_UP(memory
, block
->page_size
);
1310 * ftruncate is not supported by hugetlbfs in older
1311 * hosts, so don't bother bailing out on errors.
1312 * If anything goes wrong with it under other filesystems,
1315 if (ftruncate(fd
, memory
)) {
1316 perror("ftruncate");
1319 area
= qemu_ram_mmap(fd
, memory
, block
->mr
->align
,
1320 block
->flags
& RAM_SHARED
);
1321 if (area
== MAP_FAILED
) {
1322 error_setg_errno(errp
, errno
,
1323 "unable to map backing store for guest RAM");
1328 os_mem_prealloc(fd
, area
, memory
, errp
);
1329 if (errp
&& *errp
) {
1338 if (area
!= MAP_FAILED
) {
1339 qemu_ram_munmap(area
, memory
);
1341 if (unlink_on_error
) {
1351 /* Called with the ramlist lock held. */
1352 static ram_addr_t
find_ram_offset(ram_addr_t size
)
1354 RAMBlock
*block
, *next_block
;
1355 ram_addr_t offset
= RAM_ADDR_MAX
, mingap
= RAM_ADDR_MAX
;
1357 assert(size
!= 0); /* it would hand out same offset multiple times */
1359 if (QLIST_EMPTY_RCU(&ram_list
.blocks
)) {
1363 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1364 ram_addr_t end
, next
= RAM_ADDR_MAX
;
1366 end
= block
->offset
+ block
->max_length
;
1368 QLIST_FOREACH_RCU(next_block
, &ram_list
.blocks
, next
) {
1369 if (next_block
->offset
>= end
) {
1370 next
= MIN(next
, next_block
->offset
);
1373 if (next
- end
>= size
&& next
- end
< mingap
) {
1375 mingap
= next
- end
;
1379 if (offset
== RAM_ADDR_MAX
) {
1380 fprintf(stderr
, "Failed to find gap of requested size: %" PRIu64
"\n",
1388 ram_addr_t
last_ram_offset(void)
1391 ram_addr_t last
= 0;
1394 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1395 last
= MAX(last
, block
->offset
+ block
->max_length
);
1401 static void qemu_ram_setup_dump(void *addr
, ram_addr_t size
)
1405 /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
1406 if (!machine_dump_guest_core(current_machine
)) {
1407 ret
= qemu_madvise(addr
, size
, QEMU_MADV_DONTDUMP
);
1409 perror("qemu_madvise");
1410 fprintf(stderr
, "madvise doesn't support MADV_DONTDUMP, "
1411 "but dump_guest_core=off specified\n");
1416 const char *qemu_ram_get_idstr(RAMBlock
*rb
)
1421 /* Called with iothread lock held. */
1422 void qemu_ram_set_idstr(RAMBlock
*new_block
, const char *name
, DeviceState
*dev
)
1427 assert(!new_block
->idstr
[0]);
1430 char *id
= qdev_get_dev_path(dev
);
1432 snprintf(new_block
->idstr
, sizeof(new_block
->idstr
), "%s/", id
);
1436 pstrcat(new_block
->idstr
, sizeof(new_block
->idstr
), name
);
1439 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1440 if (block
!= new_block
&&
1441 !strcmp(block
->idstr
, new_block
->idstr
)) {
1442 fprintf(stderr
, "RAMBlock \"%s\" already registered, abort!\n",
1450 /* Called with iothread lock held. */
1451 void qemu_ram_unset_idstr(RAMBlock
*block
)
1453 /* FIXME: arch_init.c assumes that this is not called throughout
1454 * migration. Ignore the problem since hot-unplug during migration
1455 * does not work anyway.
1458 memset(block
->idstr
, 0, sizeof(block
->idstr
));
1462 size_t qemu_ram_pagesize(RAMBlock
*rb
)
1464 return rb
->page_size
;
1467 static int memory_try_enable_merging(void *addr
, size_t len
)
1469 if (!machine_mem_merge(current_machine
)) {
1470 /* disabled by the user */
1474 return qemu_madvise(addr
, len
, QEMU_MADV_MERGEABLE
);
1477 /* Only legal before guest might have detected the memory size: e.g. on
1478 * incoming migration, or right after reset.
1480 * As memory core doesn't know how is memory accessed, it is up to
1481 * resize callback to update device state and/or add assertions to detect
1482 * misuse, if necessary.
1484 int qemu_ram_resize(RAMBlock
*block
, ram_addr_t newsize
, Error
**errp
)
1488 newsize
= HOST_PAGE_ALIGN(newsize
);
1490 if (block
->used_length
== newsize
) {
1494 if (!(block
->flags
& RAM_RESIZEABLE
)) {
1495 error_setg_errno(errp
, EINVAL
,
1496 "Length mismatch: %s: 0x" RAM_ADDR_FMT
1497 " in != 0x" RAM_ADDR_FMT
, block
->idstr
,
1498 newsize
, block
->used_length
);
1502 if (block
->max_length
< newsize
) {
1503 error_setg_errno(errp
, EINVAL
,
1504 "Length too large: %s: 0x" RAM_ADDR_FMT
1505 " > 0x" RAM_ADDR_FMT
, block
->idstr
,
1506 newsize
, block
->max_length
);
1510 cpu_physical_memory_clear_dirty_range(block
->offset
, block
->used_length
);
1511 block
->used_length
= newsize
;
1512 cpu_physical_memory_set_dirty_range(block
->offset
, block
->used_length
,
1514 memory_region_set_size(block
->mr
, newsize
);
1515 if (block
->resized
) {
1516 block
->resized(block
->idstr
, newsize
, block
->host
);
1521 /* Called with ram_list.mutex held */
1522 static void dirty_memory_extend(ram_addr_t old_ram_size
,
1523 ram_addr_t new_ram_size
)
1525 ram_addr_t old_num_blocks
= DIV_ROUND_UP(old_ram_size
,
1526 DIRTY_MEMORY_BLOCK_SIZE
);
1527 ram_addr_t new_num_blocks
= DIV_ROUND_UP(new_ram_size
,
1528 DIRTY_MEMORY_BLOCK_SIZE
);
1531 /* Only need to extend if block count increased */
1532 if (new_num_blocks
<= old_num_blocks
) {
1536 for (i
= 0; i
< DIRTY_MEMORY_NUM
; i
++) {
1537 DirtyMemoryBlocks
*old_blocks
;
1538 DirtyMemoryBlocks
*new_blocks
;
1541 old_blocks
= atomic_rcu_read(&ram_list
.dirty_memory
[i
]);
1542 new_blocks
= g_malloc(sizeof(*new_blocks
) +
1543 sizeof(new_blocks
->blocks
[0]) * new_num_blocks
);
1545 if (old_num_blocks
) {
1546 memcpy(new_blocks
->blocks
, old_blocks
->blocks
,
1547 old_num_blocks
* sizeof(old_blocks
->blocks
[0]));
1550 for (j
= old_num_blocks
; j
< new_num_blocks
; j
++) {
1551 new_blocks
->blocks
[j
] = bitmap_new(DIRTY_MEMORY_BLOCK_SIZE
);
1554 atomic_rcu_set(&ram_list
.dirty_memory
[i
], new_blocks
);
1557 g_free_rcu(old_blocks
, rcu
);
1562 static void ram_block_add(RAMBlock
*new_block
, Error
**errp
)
1565 RAMBlock
*last_block
= NULL
;
1566 ram_addr_t old_ram_size
, new_ram_size
;
1569 old_ram_size
= last_ram_offset() >> TARGET_PAGE_BITS
;
1571 qemu_mutex_lock_ramlist();
1572 new_block
->offset
= find_ram_offset(new_block
->max_length
);
1574 if (!new_block
->host
) {
1575 if (xen_enabled()) {
1576 xen_ram_alloc(new_block
->offset
, new_block
->max_length
,
1577 new_block
->mr
, &err
);
1579 error_propagate(errp
, err
);
1580 qemu_mutex_unlock_ramlist();
1584 new_block
->host
= phys_mem_alloc(new_block
->max_length
,
1585 &new_block
->mr
->align
);
1586 if (!new_block
->host
) {
1587 error_setg_errno(errp
, errno
,
1588 "cannot set up guest memory '%s'",
1589 memory_region_name(new_block
->mr
));
1590 qemu_mutex_unlock_ramlist();
1593 memory_try_enable_merging(new_block
->host
, new_block
->max_length
);
1597 new_ram_size
= MAX(old_ram_size
,
1598 (new_block
->offset
+ new_block
->max_length
) >> TARGET_PAGE_BITS
);
1599 if (new_ram_size
> old_ram_size
) {
1600 migration_bitmap_extend(old_ram_size
, new_ram_size
);
1601 dirty_memory_extend(old_ram_size
, new_ram_size
);
1603 /* Keep the list sorted from biggest to smallest block. Unlike QTAILQ,
1604 * QLIST (which has an RCU-friendly variant) does not have insertion at
1605 * tail, so save the last element in last_block.
1607 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1609 if (block
->max_length
< new_block
->max_length
) {
1614 QLIST_INSERT_BEFORE_RCU(block
, new_block
, next
);
1615 } else if (last_block
) {
1616 QLIST_INSERT_AFTER_RCU(last_block
, new_block
, next
);
1617 } else { /* list is empty */
1618 QLIST_INSERT_HEAD_RCU(&ram_list
.blocks
, new_block
, next
);
1620 ram_list
.mru_block
= NULL
;
1622 /* Write list before version */
1625 qemu_mutex_unlock_ramlist();
1627 cpu_physical_memory_set_dirty_range(new_block
->offset
,
1628 new_block
->used_length
,
1631 if (new_block
->host
) {
1632 qemu_ram_setup_dump(new_block
->host
, new_block
->max_length
);
1633 qemu_madvise(new_block
->host
, new_block
->max_length
, QEMU_MADV_HUGEPAGE
);
1634 /* MADV_DONTFORK is also needed by KVM in absence of synchronous MMU */
1635 qemu_madvise(new_block
->host
, new_block
->max_length
, QEMU_MADV_DONTFORK
);
1640 RAMBlock
*qemu_ram_alloc_from_file(ram_addr_t size
, MemoryRegion
*mr
,
1641 bool share
, const char *mem_path
,
1644 RAMBlock
*new_block
;
1645 Error
*local_err
= NULL
;
1647 if (xen_enabled()) {
1648 error_setg(errp
, "-mem-path not supported with Xen");
1652 if (phys_mem_alloc
!= qemu_anon_ram_alloc
) {
1654 * file_ram_alloc() needs to allocate just like
1655 * phys_mem_alloc, but we haven't bothered to provide
1659 "-mem-path not supported with this accelerator");
1663 size
= HOST_PAGE_ALIGN(size
);
1664 new_block
= g_malloc0(sizeof(*new_block
));
1666 new_block
->used_length
= size
;
1667 new_block
->max_length
= size
;
1668 new_block
->flags
= share
? RAM_SHARED
: 0;
1669 new_block
->host
= file_ram_alloc(new_block
, size
,
1671 if (!new_block
->host
) {
1676 ram_block_add(new_block
, &local_err
);
1679 error_propagate(errp
, local_err
);
1687 RAMBlock
*qemu_ram_alloc_internal(ram_addr_t size
, ram_addr_t max_size
,
1688 void (*resized
)(const char*,
1691 void *host
, bool resizeable
,
1692 MemoryRegion
*mr
, Error
**errp
)
1694 RAMBlock
*new_block
;
1695 Error
*local_err
= NULL
;
1697 size
= HOST_PAGE_ALIGN(size
);
1698 max_size
= HOST_PAGE_ALIGN(max_size
);
1699 new_block
= g_malloc0(sizeof(*new_block
));
1701 new_block
->resized
= resized
;
1702 new_block
->used_length
= size
;
1703 new_block
->max_length
= max_size
;
1704 assert(max_size
>= size
);
1706 new_block
->page_size
= getpagesize();
1707 new_block
->host
= host
;
1709 new_block
->flags
|= RAM_PREALLOC
;
1712 new_block
->flags
|= RAM_RESIZEABLE
;
1714 ram_block_add(new_block
, &local_err
);
1717 error_propagate(errp
, local_err
);
1723 RAMBlock
*qemu_ram_alloc_from_ptr(ram_addr_t size
, void *host
,
1724 MemoryRegion
*mr
, Error
**errp
)
1726 return qemu_ram_alloc_internal(size
, size
, NULL
, host
, false, mr
, errp
);
1729 RAMBlock
*qemu_ram_alloc(ram_addr_t size
, MemoryRegion
*mr
, Error
**errp
)
1731 return qemu_ram_alloc_internal(size
, size
, NULL
, NULL
, false, mr
, errp
);
1734 RAMBlock
*qemu_ram_alloc_resizeable(ram_addr_t size
, ram_addr_t maxsz
,
1735 void (*resized
)(const char*,
1738 MemoryRegion
*mr
, Error
**errp
)
1740 return qemu_ram_alloc_internal(size
, maxsz
, resized
, NULL
, true, mr
, errp
);
1743 static void reclaim_ramblock(RAMBlock
*block
)
1745 if (block
->flags
& RAM_PREALLOC
) {
1747 } else if (xen_enabled()) {
1748 xen_invalidate_map_cache_entry(block
->host
);
1750 } else if (block
->fd
>= 0) {
1751 qemu_ram_munmap(block
->host
, block
->max_length
);
1755 qemu_anon_ram_free(block
->host
, block
->max_length
);
1760 void qemu_ram_free(RAMBlock
*block
)
1766 qemu_mutex_lock_ramlist();
1767 QLIST_REMOVE_RCU(block
, next
);
1768 ram_list
.mru_block
= NULL
;
1769 /* Write list before version */
1772 call_rcu(block
, reclaim_ramblock
, rcu
);
1773 qemu_mutex_unlock_ramlist();
1777 void qemu_ram_remap(ram_addr_t addr
, ram_addr_t length
)
1784 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1785 offset
= addr
- block
->offset
;
1786 if (offset
< block
->max_length
) {
1787 vaddr
= ramblock_ptr(block
, offset
);
1788 if (block
->flags
& RAM_PREALLOC
) {
1790 } else if (xen_enabled()) {
1794 if (block
->fd
>= 0) {
1795 flags
|= (block
->flags
& RAM_SHARED
?
1796 MAP_SHARED
: MAP_PRIVATE
);
1797 area
= mmap(vaddr
, length
, PROT_READ
| PROT_WRITE
,
1798 flags
, block
->fd
, offset
);
1801 * Remap needs to match alloc. Accelerators that
1802 * set phys_mem_alloc never remap. If they did,
1803 * we'd need a remap hook here.
1805 assert(phys_mem_alloc
== qemu_anon_ram_alloc
);
1807 flags
|= MAP_PRIVATE
| MAP_ANONYMOUS
;
1808 area
= mmap(vaddr
, length
, PROT_READ
| PROT_WRITE
,
1811 if (area
!= vaddr
) {
1812 fprintf(stderr
, "Could not remap addr: "
1813 RAM_ADDR_FMT
"@" RAM_ADDR_FMT
"\n",
1817 memory_try_enable_merging(vaddr
, length
);
1818 qemu_ram_setup_dump(vaddr
, length
);
1823 #endif /* !_WIN32 */
1825 /* Return a host pointer to ram allocated with qemu_ram_alloc.
1826 * This should not be used for general purpose DMA. Use address_space_map
1827 * or address_space_rw instead. For local memory (e.g. video ram) that the
1828 * device owns, use memory_region_get_ram_ptr.
1830 * Called within RCU critical section.
1832 void *qemu_map_ram_ptr(RAMBlock
*ram_block
, ram_addr_t addr
)
1834 RAMBlock
*block
= ram_block
;
1836 if (block
== NULL
) {
1837 block
= qemu_get_ram_block(addr
);
1838 addr
-= block
->offset
;
1841 if (xen_enabled() && block
->host
== NULL
) {
1842 /* We need to check if the requested address is in the RAM
1843 * because we don't want to map the entire memory in QEMU.
1844 * In that case just map until the end of the page.
1846 if (block
->offset
== 0) {
1847 return xen_map_cache(addr
, 0, 0);
1850 block
->host
= xen_map_cache(block
->offset
, block
->max_length
, 1);
1852 return ramblock_ptr(block
, addr
);
1855 /* Return a host pointer to guest's ram. Similar to qemu_map_ram_ptr
1856 * but takes a size argument.
1858 * Called within RCU critical section.
1860 static void *qemu_ram_ptr_length(RAMBlock
*ram_block
, ram_addr_t addr
,
1863 RAMBlock
*block
= ram_block
;
1868 if (block
== NULL
) {
1869 block
= qemu_get_ram_block(addr
);
1870 addr
-= block
->offset
;
1872 *size
= MIN(*size
, block
->max_length
- addr
);
1874 if (xen_enabled() && block
->host
== NULL
) {
1875 /* We need to check if the requested address is in the RAM
1876 * because we don't want to map the entire memory in QEMU.
1877 * In that case just map the requested area.
1879 if (block
->offset
== 0) {
1880 return xen_map_cache(addr
, *size
, 1);
1883 block
->host
= xen_map_cache(block
->offset
, block
->max_length
, 1);
1886 return ramblock_ptr(block
, addr
);
1890 * Translates a host ptr back to a RAMBlock, a ram_addr and an offset
1893 * ptr: Host pointer to look up
1894 * round_offset: If true round the result offset down to a page boundary
1895 * *ram_addr: set to result ram_addr
1896 * *offset: set to result offset within the RAMBlock
1898 * Returns: RAMBlock (or NULL if not found)
1900 * By the time this function returns, the returned pointer is not protected
1901 * by RCU anymore. If the caller is not within an RCU critical section and
1902 * does not hold the iothread lock, it must have other means of protecting the
1903 * pointer, such as a reference to the region that includes the incoming
1906 RAMBlock
*qemu_ram_block_from_host(void *ptr
, bool round_offset
,
1910 uint8_t *host
= ptr
;
1912 if (xen_enabled()) {
1913 ram_addr_t ram_addr
;
1915 ram_addr
= xen_ram_addr_from_mapcache(ptr
);
1916 block
= qemu_get_ram_block(ram_addr
);
1918 *offset
= ram_addr
- block
->offset
;
1925 block
= atomic_rcu_read(&ram_list
.mru_block
);
1926 if (block
&& block
->host
&& host
- block
->host
< block
->max_length
) {
1930 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1931 /* This case append when the block is not mapped. */
1932 if (block
->host
== NULL
) {
1935 if (host
- block
->host
< block
->max_length
) {
1944 *offset
= (host
- block
->host
);
1946 *offset
&= TARGET_PAGE_MASK
;
1953 * Finds the named RAMBlock
1955 * name: The name of RAMBlock to find
1957 * Returns: RAMBlock (or NULL if not found)
1959 RAMBlock
*qemu_ram_block_by_name(const char *name
)
1963 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1964 if (!strcmp(name
, block
->idstr
)) {
1972 /* Some of the softmmu routines need to translate from a host pointer
1973 (typically a TLB entry) back to a ram offset. */
1974 ram_addr_t
qemu_ram_addr_from_host(void *ptr
)
1979 block
= qemu_ram_block_from_host(ptr
, false, &offset
);
1981 return RAM_ADDR_INVALID
;
1984 return block
->offset
+ offset
;
1987 /* Called within RCU critical section. */
1988 static void notdirty_mem_write(void *opaque
, hwaddr ram_addr
,
1989 uint64_t val
, unsigned size
)
1991 if (!cpu_physical_memory_get_dirty_flag(ram_addr
, DIRTY_MEMORY_CODE
)) {
1992 tb_invalidate_phys_page_fast(ram_addr
, size
);
1996 stb_p(qemu_map_ram_ptr(NULL
, ram_addr
), val
);
1999 stw_p(qemu_map_ram_ptr(NULL
, ram_addr
), val
);
2002 stl_p(qemu_map_ram_ptr(NULL
, ram_addr
), val
);
2007 /* Set both VGA and migration bits for simplicity and to remove
2008 * the notdirty callback faster.
2010 cpu_physical_memory_set_dirty_range(ram_addr
, size
,
2011 DIRTY_CLIENTS_NOCODE
);
2012 /* we remove the notdirty callback only if the code has been
2014 if (!cpu_physical_memory_is_clean(ram_addr
)) {
2015 tlb_set_dirty(current_cpu
, current_cpu
->mem_io_vaddr
);
2019 static bool notdirty_mem_accepts(void *opaque
, hwaddr addr
,
2020 unsigned size
, bool is_write
)
2025 static const MemoryRegionOps notdirty_mem_ops
= {
2026 .write
= notdirty_mem_write
,
2027 .valid
.accepts
= notdirty_mem_accepts
,
2028 .endianness
= DEVICE_NATIVE_ENDIAN
,
2031 /* Generate a debug exception if a watchpoint has been hit. */
2032 static void check_watchpoint(int offset
, int len
, MemTxAttrs attrs
, int flags
)
2034 CPUState
*cpu
= current_cpu
;
2035 CPUClass
*cc
= CPU_GET_CLASS(cpu
);
2036 CPUArchState
*env
= cpu
->env_ptr
;
2037 target_ulong pc
, cs_base
;
2042 if (cpu
->watchpoint_hit
) {
2043 /* We re-entered the check after replacing the TB. Now raise
2044 * the debug interrupt so that is will trigger after the
2045 * current instruction. */
2046 cpu_interrupt(cpu
, CPU_INTERRUPT_DEBUG
);
2049 vaddr
= (cpu
->mem_io_vaddr
& TARGET_PAGE_MASK
) + offset
;
2050 QTAILQ_FOREACH(wp
, &cpu
->watchpoints
, entry
) {
2051 if (cpu_watchpoint_address_matches(wp
, vaddr
, len
)
2052 && (wp
->flags
& flags
)) {
2053 if (flags
== BP_MEM_READ
) {
2054 wp
->flags
|= BP_WATCHPOINT_HIT_READ
;
2056 wp
->flags
|= BP_WATCHPOINT_HIT_WRITE
;
2058 wp
->hitaddr
= vaddr
;
2059 wp
->hitattrs
= attrs
;
2060 if (!cpu
->watchpoint_hit
) {
2061 if (wp
->flags
& BP_CPU
&&
2062 !cc
->debug_check_watchpoint(cpu
, wp
)) {
2063 wp
->flags
&= ~BP_WATCHPOINT_HIT
;
2066 cpu
->watchpoint_hit
= wp
;
2067 tb_check_watchpoint(cpu
);
2068 if (wp
->flags
& BP_STOP_BEFORE_ACCESS
) {
2069 cpu
->exception_index
= EXCP_DEBUG
;
2072 cpu_get_tb_cpu_state(env
, &pc
, &cs_base
, &cpu_flags
);
2073 tb_gen_code(cpu
, pc
, cs_base
, cpu_flags
, 1);
2074 cpu_loop_exit_noexc(cpu
);
2078 wp
->flags
&= ~BP_WATCHPOINT_HIT
;
2083 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2084 so these check for a hit then pass through to the normal out-of-line
2086 static MemTxResult
watch_mem_read(void *opaque
, hwaddr addr
, uint64_t *pdata
,
2087 unsigned size
, MemTxAttrs attrs
)
2091 int asidx
= cpu_asidx_from_attrs(current_cpu
, attrs
);
2092 AddressSpace
*as
= current_cpu
->cpu_ases
[asidx
].as
;
2094 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, size
, attrs
, BP_MEM_READ
);
2097 data
= address_space_ldub(as
, addr
, attrs
, &res
);
2100 data
= address_space_lduw(as
, addr
, attrs
, &res
);
2103 data
= address_space_ldl(as
, addr
, attrs
, &res
);
2111 static MemTxResult
watch_mem_write(void *opaque
, hwaddr addr
,
2112 uint64_t val
, unsigned size
,
2116 int asidx
= cpu_asidx_from_attrs(current_cpu
, attrs
);
2117 AddressSpace
*as
= current_cpu
->cpu_ases
[asidx
].as
;
2119 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, size
, attrs
, BP_MEM_WRITE
);
2122 address_space_stb(as
, addr
, val
, attrs
, &res
);
2125 address_space_stw(as
, addr
, val
, attrs
, &res
);
2128 address_space_stl(as
, addr
, val
, attrs
, &res
);
2135 static const MemoryRegionOps watch_mem_ops
= {
2136 .read_with_attrs
= watch_mem_read
,
2137 .write_with_attrs
= watch_mem_write
,
2138 .endianness
= DEVICE_NATIVE_ENDIAN
,
2141 static MemTxResult
subpage_read(void *opaque
, hwaddr addr
, uint64_t *data
,
2142 unsigned len
, MemTxAttrs attrs
)
2144 subpage_t
*subpage
= opaque
;
2148 #if defined(DEBUG_SUBPAGE)
2149 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
"\n", __func__
,
2150 subpage
, len
, addr
);
2152 res
= address_space_read(subpage
->as
, addr
+ subpage
->base
,
2159 *data
= ldub_p(buf
);
2162 *data
= lduw_p(buf
);
2175 static MemTxResult
subpage_write(void *opaque
, hwaddr addr
,
2176 uint64_t value
, unsigned len
, MemTxAttrs attrs
)
2178 subpage_t
*subpage
= opaque
;
2181 #if defined(DEBUG_SUBPAGE)
2182 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
2183 " value %"PRIx64
"\n",
2184 __func__
, subpage
, len
, addr
, value
);
2202 return address_space_write(subpage
->as
, addr
+ subpage
->base
,
2206 static bool subpage_accepts(void *opaque
, hwaddr addr
,
2207 unsigned len
, bool is_write
)
2209 subpage_t
*subpage
= opaque
;
2210 #if defined(DEBUG_SUBPAGE)
2211 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx
"\n",
2212 __func__
, subpage
, is_write
? 'w' : 'r', len
, addr
);
2215 return address_space_access_valid(subpage
->as
, addr
+ subpage
->base
,
2219 static const MemoryRegionOps subpage_ops
= {
2220 .read_with_attrs
= subpage_read
,
2221 .write_with_attrs
= subpage_write
,
2222 .impl
.min_access_size
= 1,
2223 .impl
.max_access_size
= 8,
2224 .valid
.min_access_size
= 1,
2225 .valid
.max_access_size
= 8,
2226 .valid
.accepts
= subpage_accepts
,
2227 .endianness
= DEVICE_NATIVE_ENDIAN
,
2230 static int subpage_register (subpage_t
*mmio
, uint32_t start
, uint32_t end
,
2235 if (start
>= TARGET_PAGE_SIZE
|| end
>= TARGET_PAGE_SIZE
)
2237 idx
= SUBPAGE_IDX(start
);
2238 eidx
= SUBPAGE_IDX(end
);
2239 #if defined(DEBUG_SUBPAGE)
2240 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
2241 __func__
, mmio
, start
, end
, idx
, eidx
, section
);
2243 for (; idx
<= eidx
; idx
++) {
2244 mmio
->sub_section
[idx
] = section
;
2250 static subpage_t
*subpage_init(AddressSpace
*as
, hwaddr base
)
2254 mmio
= g_malloc0(sizeof(subpage_t
) + TARGET_PAGE_SIZE
* sizeof(uint16_t));
2257 memory_region_init_io(&mmio
->iomem
, NULL
, &subpage_ops
, mmio
,
2258 NULL
, TARGET_PAGE_SIZE
);
2259 mmio
->iomem
.subpage
= true;
2260 #if defined(DEBUG_SUBPAGE)
2261 printf("%s: %p base " TARGET_FMT_plx
" len %08x\n", __func__
,
2262 mmio
, base
, TARGET_PAGE_SIZE
);
2264 subpage_register(mmio
, 0, TARGET_PAGE_SIZE
-1, PHYS_SECTION_UNASSIGNED
);
2269 static uint16_t dummy_section(PhysPageMap
*map
, AddressSpace
*as
,
2273 MemoryRegionSection section
= {
2274 .address_space
= as
,
2276 .offset_within_address_space
= 0,
2277 .offset_within_region
= 0,
2278 .size
= int128_2_64(),
2281 return phys_section_add(map
, §ion
);
2284 MemoryRegion
*iotlb_to_region(CPUState
*cpu
, hwaddr index
, MemTxAttrs attrs
)
2286 int asidx
= cpu_asidx_from_attrs(cpu
, attrs
);
2287 CPUAddressSpace
*cpuas
= &cpu
->cpu_ases
[asidx
];
2288 AddressSpaceDispatch
*d
= atomic_rcu_read(&cpuas
->memory_dispatch
);
2289 MemoryRegionSection
*sections
= d
->map
.sections
;
2291 return sections
[index
& ~TARGET_PAGE_MASK
].mr
;
2294 static void io_mem_init(void)
2296 memory_region_init_io(&io_mem_rom
, NULL
, &unassigned_mem_ops
, NULL
, NULL
, UINT64_MAX
);
2297 memory_region_init_io(&io_mem_unassigned
, NULL
, &unassigned_mem_ops
, NULL
,
2299 memory_region_init_io(&io_mem_notdirty
, NULL
, ¬dirty_mem_ops
, NULL
,
2301 memory_region_init_io(&io_mem_watch
, NULL
, &watch_mem_ops
, NULL
,
2305 static void mem_begin(MemoryListener
*listener
)
2307 AddressSpace
*as
= container_of(listener
, AddressSpace
, dispatch_listener
);
2308 AddressSpaceDispatch
*d
= g_new0(AddressSpaceDispatch
, 1);
2311 n
= dummy_section(&d
->map
, as
, &io_mem_unassigned
);
2312 assert(n
== PHYS_SECTION_UNASSIGNED
);
2313 n
= dummy_section(&d
->map
, as
, &io_mem_notdirty
);
2314 assert(n
== PHYS_SECTION_NOTDIRTY
);
2315 n
= dummy_section(&d
->map
, as
, &io_mem_rom
);
2316 assert(n
== PHYS_SECTION_ROM
);
2317 n
= dummy_section(&d
->map
, as
, &io_mem_watch
);
2318 assert(n
== PHYS_SECTION_WATCH
);
2320 d
->phys_map
= (PhysPageEntry
) { .ptr
= PHYS_MAP_NODE_NIL
, .skip
= 1 };
2322 as
->next_dispatch
= d
;
2325 static void address_space_dispatch_free(AddressSpaceDispatch
*d
)
2327 phys_sections_free(&d
->map
);
2331 static void mem_commit(MemoryListener
*listener
)
2333 AddressSpace
*as
= container_of(listener
, AddressSpace
, dispatch_listener
);
2334 AddressSpaceDispatch
*cur
= as
->dispatch
;
2335 AddressSpaceDispatch
*next
= as
->next_dispatch
;
2337 phys_page_compact_all(next
, next
->map
.nodes_nb
);
2339 atomic_rcu_set(&as
->dispatch
, next
);
2341 call_rcu(cur
, address_space_dispatch_free
, rcu
);
2345 static void tcg_commit(MemoryListener
*listener
)
2347 CPUAddressSpace
*cpuas
;
2348 AddressSpaceDispatch
*d
;
2350 /* since each CPU stores ram addresses in its TLB cache, we must
2351 reset the modified entries */
2352 cpuas
= container_of(listener
, CPUAddressSpace
, tcg_as_listener
);
2353 cpu_reloading_memory_map();
2354 /* The CPU and TLB are protected by the iothread lock.
2355 * We reload the dispatch pointer now because cpu_reloading_memory_map()
2356 * may have split the RCU critical section.
2358 d
= atomic_rcu_read(&cpuas
->as
->dispatch
);
2359 cpuas
->memory_dispatch
= d
;
2360 tlb_flush(cpuas
->cpu
, 1);
2363 void address_space_init_dispatch(AddressSpace
*as
)
2365 as
->dispatch
= NULL
;
2366 as
->dispatch_listener
= (MemoryListener
) {
2368 .commit
= mem_commit
,
2369 .region_add
= mem_add
,
2370 .region_nop
= mem_add
,
2373 memory_listener_register(&as
->dispatch_listener
, as
);
2376 void address_space_unregister(AddressSpace
*as
)
2378 memory_listener_unregister(&as
->dispatch_listener
);
2381 void address_space_destroy_dispatch(AddressSpace
*as
)
2383 AddressSpaceDispatch
*d
= as
->dispatch
;
2385 atomic_rcu_set(&as
->dispatch
, NULL
);
2387 call_rcu(d
, address_space_dispatch_free
, rcu
);
2391 static void memory_map_init(void)
2393 system_memory
= g_malloc(sizeof(*system_memory
));
2395 memory_region_init(system_memory
, NULL
, "system", UINT64_MAX
);
2396 address_space_init(&address_space_memory
, system_memory
, "memory");
2398 system_io
= g_malloc(sizeof(*system_io
));
2399 memory_region_init_io(system_io
, NULL
, &unassigned_io_ops
, NULL
, "io",
2401 address_space_init(&address_space_io
, system_io
, "I/O");
2404 MemoryRegion
*get_system_memory(void)
2406 return system_memory
;
2409 MemoryRegion
*get_system_io(void)
2414 #endif /* !defined(CONFIG_USER_ONLY) */
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

#else
static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
                                     hwaddr length)
{
    uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
    addr += memory_region_get_ram_addr(mr);

    /* No early return if dirty_log_mask is or becomes 0, because
     * cpu_physical_memory_set_dirty_range will still call
     * xen_modified_memory.
     */
    if (dirty_log_mask) {
        dirty_log_mask =
            cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
    }
    if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
        tb_invalidate_phys_range(addr, addr + length);
        dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
    }
    cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
}
static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
{
    unsigned access_size_max = mr->ops->valid.max_access_size;

    /* Regions are assumed to support 1-4 byte accesses unless
       otherwise specified.  */
    if (access_size_max == 0) {
        access_size_max = 4;
    }

    /* Bound the maximum access by the alignment of the address.  */
    if (!mr->ops->impl.unaligned) {
        unsigned align_size_max = addr & -addr;
        if (align_size_max != 0 && align_size_max < access_size_max) {
            access_size_max = align_size_max;
        }
    }

    /* Don't attempt accesses larger than the maximum.  */
    if (l > access_size_max) {
        l = access_size_max;
    }
    l = pow2floor(l);

    return l;
}
static bool prepare_mmio_access(MemoryRegion *mr)
{
    bool unlocked = !qemu_mutex_iothread_locked();
    bool release_lock = false;

    if (unlocked && mr->global_locking) {
        qemu_mutex_lock_iothread();
        unlocked = false;
        release_lock = true;
    }
    if (mr->flush_coalesced_mmio) {
        if (unlocked) {
            qemu_mutex_lock_iothread();
        }
        qemu_flush_coalesced_mmio_buffer();
        if (unlocked) {
            qemu_mutex_unlock_iothread();
        }
    }

    return release_lock;
}
/* Called within RCU critical section.  */
static MemTxResult address_space_write_continue(AddressSpace *as, hwaddr addr,
                                                MemTxAttrs attrs,
                                                const uint8_t *buf,
                                                int len, hwaddr addr1,
                                                hwaddr l, MemoryRegion *mr)
{
    uint8_t *ptr;
    uint64_t val;
    MemTxResult result = MEMTX_OK;
    bool release_lock = false;

    for (;;) {
        if (!memory_access_is_direct(mr, true)) {
            release_lock |= prepare_mmio_access(mr);
            l = memory_access_size(mr, l, addr1);
            /* XXX: could force current_cpu to NULL to avoid
               potential bugs */
            switch (l) {
            case 8:
                /* 64 bit write access */
                val = ldq_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 8,
                                                       attrs);
                break;
            case 4:
                /* 32 bit write access */
                val = ldl_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 4,
                                                       attrs);
                break;
            case 2:
                /* 16 bit write access */
                val = lduw_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 2,
                                                       attrs);
                break;
            case 1:
                /* 8 bit write access */
                val = ldub_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 1,
                                                       attrs);
                break;
            default:
                abort();
            }
        } else {
            /* RAM case */
            ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
            memcpy(ptr, buf, l);
            invalidate_and_set_dirty(mr, addr1, l);
        }

        if (release_lock) {
            qemu_mutex_unlock_iothread();
            release_lock = false;
        }

        len -= l;
        buf += l;
        addr += l;

        if (!len) {
            break;
        }

        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);
    }

    return result;
}
MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
                                const uint8_t *buf, int len)
{
    hwaddr l;
    hwaddr addr1;
    MemoryRegion *mr;
    MemTxResult result = MEMTX_OK;

    if (len > 0) {
        rcu_read_lock();
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);
        result = address_space_write_continue(as, addr, attrs, buf, len,
                                              addr1, l, mr);
        rcu_read_unlock();
    }

    return result;
}
/* Called within RCU critical section.  */
MemTxResult address_space_read_continue(AddressSpace *as, hwaddr addr,
                                        MemTxAttrs attrs, uint8_t *buf,
                                        int len, hwaddr addr1, hwaddr l,
                                        MemoryRegion *mr)
{
    uint8_t *ptr;
    uint64_t val;
    MemTxResult result = MEMTX_OK;
    bool release_lock = false;

    for (;;) {
        if (!memory_access_is_direct(mr, false)) {
            /* I/O case */
            release_lock |= prepare_mmio_access(mr);
            l = memory_access_size(mr, l, addr1);
            switch (l) {
            case 8:
                /* 64 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 8,
                                                      attrs);
                stq_p(buf, val);
                break;
            case 4:
                /* 32 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 4,
                                                      attrs);
                stl_p(buf, val);
                break;
            case 2:
                /* 16 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 2,
                                                      attrs);
                stw_p(buf, val);
                break;
            case 1:
                /* 8 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 1,
                                                      attrs);
                stb_p(buf, val);
                break;
            default:
                abort();
            }
        } else {
            /* RAM case */
            ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
            memcpy(buf, ptr, l);
        }

        if (release_lock) {
            qemu_mutex_unlock_iothread();
            release_lock = false;
        }

        len -= l;
        buf += l;
        addr += l;

        if (!len) {
            break;
        }

        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, false);
    }

    return result;
}
MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
                                    MemTxAttrs attrs, uint8_t *buf, int len)
{
    hwaddr l;
    hwaddr addr1;
    MemoryRegion *mr;
    MemTxResult result = MEMTX_OK;

    if (len > 0) {
        rcu_read_lock();
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, false);
        result = address_space_read_continue(as, addr, attrs, buf, len,
                                             addr1, l, mr);
        rcu_read_unlock();
    }

    return result;
}
MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
                             uint8_t *buf, int len, bool is_write)
{
    if (is_write) {
        return address_space_write(as, addr, attrs, (uint8_t *)buf, len);
    } else {
        return address_space_read(as, addr, attrs, (uint8_t *)buf, len);
    }
}
void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
                            int len, int is_write)
{
    address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
                     buf, len, is_write);
}
enum write_rom_type {
    WRITE_DATA,
    FLUSH_CACHE,
};

static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
    hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
{
    hwaddr l;
    uint8_t *ptr;
    hwaddr addr1;
    MemoryRegion *mr;

    rcu_read_lock();
    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);

        if (!(memory_region_is_ram(mr) ||
              memory_region_is_romd(mr))) {
            l = memory_access_size(mr, l, addr1);
        } else {
            /* ROM/RAM case */
            ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
            switch (type) {
            case WRITE_DATA:
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(mr, addr1, l);
                break;
            case FLUSH_CACHE:
                flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
                break;
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
    rcu_read_unlock();
}
/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
                                   const uint8_t *buf, int len)
{
    cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
}
void cpu_flush_icache_range(hwaddr start, int len)
{
    /*
     * This function should do the same thing as an icache flush that was
     * triggered from within the guest. For TCG we are always cache coherent,
     * so there is no need to flush anything. For KVM / Xen we need to flush
     * the host's instruction cache at least.
     */
    if (tcg_enabled()) {
        return;
    }

    cpu_physical_memory_write_rom_internal(&address_space_memory,
                                           start, NULL, len, FLUSH_CACHE);
}
typedef struct {
    MemoryRegion *mr;
    void *buffer;
    hwaddr addr;
    hwaddr len;
    bool in_use;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    QEMUBH *bh;
    QLIST_ENTRY(MapClient) link;
} MapClient;

QemuMutex map_client_list_lock;
static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);
static void cpu_unregister_map_client_do(MapClient *client)
{
    QLIST_REMOVE(client, link);
    g_free(client);
}

static void cpu_notify_map_clients_locked(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        qemu_bh_schedule(client->bh);
        cpu_unregister_map_client_do(client);
    }
}
void cpu_register_map_client(QEMUBH *bh)
{
    MapClient *client = g_malloc(sizeof(*client));

    qemu_mutex_lock(&map_client_list_lock);
    client->bh = bh;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    if (!atomic_read(&bounce.in_use)) {
        cpu_notify_map_clients_locked();
    }
    qemu_mutex_unlock(&map_client_list_lock);
}
void cpu_exec_init_all(void)
{
    qemu_mutex_init(&ram_list.mutex);
    /* The data structures we set up here depend on knowing the page size,
     * so no more changes can be made after this point.
     * In an ideal world, nothing we did before we had finished the
     * machine setup would care about the target page size, and we could
     * do this much later, rather than requiring board models to state
     * up front what their requirements are.
     */
    finalize_target_page_bits();
    io_mem_init();
    memory_map_init();
    qemu_mutex_init(&map_client_list_lock);
}
void cpu_unregister_map_client(QEMUBH *bh)
{
    MapClient *client;

    qemu_mutex_lock(&map_client_list_lock);
    QLIST_FOREACH(client, &map_client_list, link) {
        if (client->bh == bh) {
            cpu_unregister_map_client_do(client);
            break;
        }
    }
    qemu_mutex_unlock(&map_client_list_lock);
}
static void cpu_notify_map_clients(void)
{
    qemu_mutex_lock(&map_client_list_lock);
    cpu_notify_map_clients_locked();
    qemu_mutex_unlock(&map_client_list_lock);
}
bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
{
    MemoryRegion *mr;
    hwaddr l, xlat;

    rcu_read_lock();
    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (!memory_access_is_direct(mr, is_write)) {
            l = memory_access_size(mr, l, addr);
            if (!memory_region_access_valid(mr, xlat, l, is_write)) {
                rcu_read_unlock();
                return false;
            }
        }

        len -= l;
        addr += l;
    }
    rcu_read_unlock();
    return true;
}
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *address_space_map(AddressSpace *as,
                        hwaddr addr,
                        hwaddr *plen,
                        bool is_write)
{
    hwaddr len = *plen;
    hwaddr done = 0;
    hwaddr l, xlat, base;
    MemoryRegion *mr, *this_mr;
    void *ptr;

    if (len == 0) {
        return NULL;
    }

    l = len;
    rcu_read_lock();
    mr = address_space_translate(as, addr, &xlat, &l, is_write);

    if (!memory_access_is_direct(mr, is_write)) {
        if (atomic_xchg(&bounce.in_use, true)) {
            rcu_read_unlock();
            return NULL;
        }
        /* Avoid unbounded allocations */
        l = MIN(l, TARGET_PAGE_SIZE);
        bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
        bounce.addr = addr;
        bounce.len = l;

        memory_region_ref(mr);
        bounce.mr = mr;
        if (!is_write) {
            address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
                               bounce.buffer, l);
        }

        rcu_read_unlock();
        *plen = l;
        return bounce.buffer;
    }

    base = xlat;

    for (;;) {
        len -= l;
        addr += l;
        done += l;
        if (len == 0) {
            break;
        }

        l = len;
        this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (this_mr != mr || xlat != base + done) {
            break;
        }
    }

    memory_region_ref(mr);
    *plen = done;
    ptr = qemu_ram_ptr_length(mr->ram_block, base, plen);
    rcu_read_unlock();

    return ptr;
}
/* Unmaps a memory region previously mapped by address_space_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         int is_write, hwaddr access_len)
{
    if (buffer != bounce.buffer) {
        MemoryRegion *mr;
        ram_addr_t addr1;

        mr = memory_region_from_host(buffer, &addr1);
        assert(mr != NULL);
        if (is_write) {
            invalidate_and_set_dirty(mr, addr1, access_len);
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        memory_region_unref(mr);
        return;
    }
    if (is_write) {
        address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
                            bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    memory_region_unref(bounce.mr);
    atomic_mb_set(&bounce.in_use, false);
    cpu_notify_map_clients();
}
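
/*
 * Illustrative sketch, not part of the original file: the usual
 * map/use/unmap pattern.  If the translated region is not directly
 * accessible the bounce buffer may already be in use and NULL is returned;
 * callers then fall back to address_space_rw() or register a map client and
 * retry later.  The "example_" name is made up.
 */
static inline bool example_zero_region(AddressSpace *as, hwaddr addr,
                                       hwaddr size)
{
    hwaddr plen = size;
    void *host = address_space_map(as, addr, &plen, true);

    if (!host) {
        return false;           /* out of resources, caller must retry */
    }
    memset(host, 0, plen);      /* plen may come back smaller than size */
    address_space_unmap(as, host, plen, true, plen);
    return plen == size;
}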
void *cpu_physical_memory_map(hwaddr addr,
                              hwaddr *plen,
                              int is_write)
{
    return address_space_map(&address_space_memory, addr, plen, is_write);
}

void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                               int is_write, hwaddr access_len)
{
    return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
}
/* warning: addr must be aligned */
static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
                                                  MemTxAttrs attrs,
                                                  MemTxResult *result,
                                                  enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, false);
    if (l < 4 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}
uint32_t address_space_ldl(AddressSpace *as, hwaddr addr,
                           MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_NATIVE_ENDIAN);
}

uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_LITTLE_ENDIAN);
}

uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_BIG_ENDIAN);
}

uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}
/* warning: addr must be aligned */
static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
                                                  MemTxAttrs attrs,
                                                  MemTxResult *result,
                                                  enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 8;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 false);
    if (l < 8 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap64(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap64(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}
uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
                           MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_NATIVE_ENDIAN);
}

uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_LITTLE_ENDIAN);
}

uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_BIG_ENDIAN);
}

uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}
/* XXX: optimize */
uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
                            MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t val;
    MemTxResult r;

    r = address_space_rw(as, addr, attrs, &val, 1, 0);
    if (result) {
        *result = r;
    }
    return val;
}

uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}
/* warning: addr must be aligned */
static inline uint32_t address_space_lduw_internal(AddressSpace *as,
                                                   hwaddr addr,
                                                   MemTxAttrs attrs,
                                                   MemTxResult *result,
                                                   enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 false);
    if (l < 2 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}
uint32_t address_space_lduw(AddressSpace *as, hwaddr addr,
                            MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_NATIVE_ENDIAN);
}

uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
                               MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_LITTLE_ENDIAN);
}

uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
                               MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_BIG_ENDIAN);
}

uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}
/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
                                MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    uint8_t dirty_log_mask;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

        r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
    } else {
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        stl_p(ptr, val);

        dirty_log_mask = memory_region_get_dirty_log_mask(mr);
        dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
        cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
                                            4, dirty_log_mask);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}

void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_notdirty(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
/* warning: addr must be aligned */
static inline void address_space_stl_internal(AddressSpace *as,
                                              hwaddr addr, uint32_t val,
                                              MemTxAttrs attrs,
                                              MemTxResult *result,
                                              enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(mr, addr1, 4);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}
void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stl_internal(as, addr, val, attrs, result,
                               DEVICE_NATIVE_ENDIAN);
}

void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stl_internal(as, addr, val, attrs, result,
                               DEVICE_LITTLE_ENDIAN);
}

void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stl_internal(as, addr, val, attrs, result,
                               DEVICE_BIG_ENDIAN);
}

void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
/* XXX: optimize */
void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t v = val;
    MemTxResult r;

    r = address_space_rw(as, addr, attrs, &v, 1, 1);
    if (result) {
        *result = r;
    }
}

void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
/* warning: addr must be aligned */
static inline void address_space_stw_internal(AddressSpace *as,
                                              hwaddr addr, uint32_t val,
                                              MemTxAttrs attrs,
                                              MemTxResult *result,
                                              enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, true);
    if (l < 2 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(mr, addr1, 2);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}
void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_NATIVE_ENDIAN);
}

void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_LITTLE_ENDIAN);
}

void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_BIG_ENDIAN);
}

void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
/* XXX: optimize */
void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;

    val = tswap64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;

    val = cpu_to_le64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;

    val = cpu_to_be64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

    while (len > 0) {
        int asidx;
        MemTxAttrs attrs;

        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_attrs_debug(cpu, page, &attrs);
        asidx = cpu_asidx_from_attrs(cpu, attrs);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write) {
            cpu_physical_memory_write_rom(cpu->cpu_ases[asidx].as,
                                          phys_addr, buf, l);
        } else {
            address_space_rw(cpu->cpu_ases[asidx].as, phys_addr,
                             MEMTXATTRS_UNSPECIFIED,
                             buf, l, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
/* Allows code that needs to deal with migration bitmaps etc to still be built
 * target independent.
 */
size_t qemu_target_page_bits(void)
{
    return TARGET_PAGE_BITS;
}

/* A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool target_words_bigendian(void);
bool target_words_bigendian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}
#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegion *mr;
    hwaddr l = 1;
    bool res;

    rcu_read_lock();
    mr = address_space_translate(&address_space_memory,
                                 phys_addr, &phys_addr, &l, false);

    res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
    rcu_read_unlock();
    return res;
}
int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
{
    RAMBlock *block;
    int ret = 0;

    rcu_read_lock();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        ret = func(block->idstr, block->host, block->offset,
                   block->used_length, opaque);
        if (ret) {
            break;
        }
    }
    rcu_read_unlock();
    return ret;
}
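
/*
 * Illustrative sketch, not part of the original file: a callback for
 * qemu_ram_foreach_block() that sums the used length of every RAM block.
 * The "example_" names are made up.
 */
static int example_sum_ram_cb(const char *block_name, void *host_addr,
                              ram_addr_t offset, ram_addr_t length,
                              void *opaque)
{
    *(uint64_t *)opaque += length;
    return 0;   /* returning non-zero stops the iteration early */
}

static inline uint64_t example_total_ram(void)
{
    uint64_t total = 0;

    qemu_ram_foreach_block(example_sum_ram_cb, &total);
    return total;
}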