 * Copyright (c) 2003 Fabrice Bellard
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/cutils.h"
#include "exec/exec-all.h"
#include "hw/qdev-core.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/boards.h"
#include "hw/xen/xen.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#if defined(CONFIG_USER_ONLY)
#else /* !CONFIG_USER_ONLY */
#include "exec/memory.h"
#include "exec/ioport.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#include "sysemu/xen-mapcache.h"
#include "exec/cpu-all.h"
#include "qemu/rcu_queue.h"
#include "qemu/main-loop.h"
#include "translate-all.h"
#include "sysemu/replay.h"
#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "migration/vmstate.h"
#include "qemu/range.h"
#include "qemu/mmap-alloc.h"

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* ram_list is read under rcu_read_lock()/rcu_read_unlock().  Writes
 * are protected by the ramlist lock.
 */
RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC   (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED     (1 << 1)

/* Only a portion of RAM (used_length) is actually used, and migrated.
 * This used_length size can change across reboots.
 */
#define RAM_RESIZEABLE (1 << 2)

#ifdef TARGET_PAGE_BITS_VARY
bool target_page_bits_decided;

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);

/* current CPU in the current thread. It is only valid inside
 */
__thread CPUState *current_cpu;

/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */

bool set_preferred_target_page_bits(int bits)
    /* The target page size is the lowest common denominator for all
     * the CPUs in the system, so we can only make it smaller, never
     * larger.  And we can't make it smaller once we've committed to
     */
#ifdef TARGET_PAGE_BITS_VARY
    assert(bits >= TARGET_PAGE_BITS_MIN);
    if (target_page_bits == 0 || target_page_bits > bits) {
        if (target_page_bits_decided) {
        target_page_bits = bits;

#if !defined(CONFIG_USER_ONLY)

static void finalize_target_page_bits(void)
#ifdef TARGET_PAGE_BITS_VARY
    if (target_page_bits == 0) {
        target_page_bits = TARGET_PAGE_BITS_MIN;
    target_page_bits_decided = true;

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables. */
#define ADDR_SPACE_BITS 64
#define P_L2_SIZE (1 << P_L2_BITS)
#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
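/* Illustrative sketch (not from the original source): with the usual values
 * of TARGET_PAGE_BITS = 12 and P_L2_BITS = 9 (both are target-dependent),
 * the radix tree has P_L2_LEVELS = ((64 - 12 - 1) / 9) + 1 = 6 levels, and
 * phys_page_find() below picks the slot at each level like this:
 *
 *     hwaddr index = addr >> TARGET_PAGE_BITS;
 *     unsigned slot = (index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1);
 *
 * i.e. each level consumes P_L2_BITS bits of the page index, top level first.
 */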
typedef PhysPageEntry Node[P_L2_SIZE];

typedef struct PhysPageMap {
    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb_alloc;
    MemoryRegionSection *sections;

struct AddressSpaceDispatch {
    MemoryRegionSection *mru_section;
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    uint16_t sub_section[];

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;

 * CPUAddressSpace: all the information a CPU needs about an AddressSpace
 * @cpu: the CPU whose AddressSpace this is
 * @as: the AddressSpace itself
 * @memory_dispatch: its dispatch pointer (cached, RCU protected)
 * @tcg_as_listener: listener for tracking changes to the AddressSpace
struct CPUAddressSpace {
    struct AddressSpaceDispatch *memory_dispatch;
    MemoryListener tcg_as_listener;
#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
    static unsigned alloc_hint = 16;
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, alloc_hint);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
        alloc_hint = map->nodes_nb_alloc;

static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
    ret = map->nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);
    e.skip = leaf ? 0 : 1;
    e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
    for (i = 0; i < P_L2_SIZE; ++i) {
        memcpy(&p[i], &e, sizeof(e));

static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map, level == 0);
    p = map->nodes[lp->ptr];
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
/* Compact a non leaf page entry. Simply detect that the entry has a single child,
 * and update our entry so we can skip it and go directly to the destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes)
    unsigned valid_ptr = P_L2_SIZE;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
        phys_page_compact(&p[i], nodes);

    /* We can only compress if there's only one child. */
    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         */
        lp->skip += p[valid_ptr].skip;

static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes);
static inline bool section_covers_addr(const MemoryRegionSection *section,
    /* Memory topology clips a memory region to [0, 2^64); size.hi > 0 means
     * the section must cover the entire address space.
     */
    return int128_gethi(section->size) ||
           range_covers_byte(section->offset_within_address_space,
                             int128_getlo(section->size), addr);
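/* Worked example (illustrative, not from the original source): a section with
 * offset_within_address_space = 0x1000 and size = 0x2000 has size.hi == 0, so
 * the check reduces to range_covers_byte(0x1000, 0x2000, addr), which is true
 * for addr = 0x2fff and false for addr = 0x3000.  A section whose size does
 * not fit in 64 bits (size.hi > 0) covers every address.
 */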
static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
    hwaddr index = addr >> TARGET_PAGE_BITS;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];

    if (section_covers_addr(&sections[lp.ptr], addr)) {
        return &sections[lp.ptr];
    return &sections[PHYS_SECTION_UNASSIGNED];

bool memory_region_is_unassigned(MemoryRegion *mr)
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
/* Called from RCU critical section */
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        bool resolve_subpage)
    MemoryRegionSection *section = atomic_read(&d->mru_section);

    if (section && section != &d->map.sections[PHYS_SECTION_UNASSIGNED] &&
        section_covers_addr(section, addr)) {
        section = phys_page_find(d->phys_map, addr, d->map.nodes,
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    atomic_set(&d->mru_section, section);

/* Called from RCU critical section */
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
    MemoryRegionSection *section;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    /* MMIO registers can be expected to perform full-width accesses based only
     * on their address, without considering adjacent registers that could
     * decode to completely different MemoryRegions.  When such registers
     * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
     * regions overlap wildly.  For this reason we cannot clamp the accesses
     * If the length is small (as is the case for address_space_ldl/stl),
     * everything works fine.  If the incoming length is large, however,
     * the caller really has to do the clamping through memory_access_size.
     */
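    /* Worked example (illustrative, not from the original source): for a RAM
     * section of size 0x10000 with the access landing at offset addr = 0xff00
     * inside it, diff = 0x10000 - 0xff00 = 0x100, so an incoming *plen of
     * 0x1000 is clamped to 0x100 and the access never crosses the section.
     */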
    if (memory_region_is_ram(mr)) {
        diff = int128_sub(section->size, int128_make64(addr));
        *plen = int128_get64(int128_min(diff, int128_make64(*plen)));

/* Called from RCU critical section */
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
    MemoryRegionSection *section;

        AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
        section = address_space_translate_internal(d, addr, &addr, plen, true);

        if (!mr->iommu_ops) {
        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;

        as = iotlb.target_as;

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        *plen = MIN(page, *plen);

/* Called from RCU critical section */
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen)
    MemoryRegionSection *section;
    AddressSpaceDispatch *d = cpu->cpu_ases[asidx].memory_dispatch;

    section = address_space_translate_internal(d, addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;

static int cpu_common_pre_load(void *opaque)
    CPUState *cpu = opaque;

    cpu->exception_index = -1;

static bool cpu_common_exception_index_needed(void *opaque)
    CPUState *cpu = opaque;

    return tcg_enabled() && cpu->exception_index != -1;

static const VMStateDescription vmstate_cpu_common_exception_index = {
    .name = "cpu_common/exception_index",
    .minimum_version_id = 1,
    .needed = cpu_common_exception_index_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(exception_index, CPUState),
        VMSTATE_END_OF_LIST()

static bool cpu_common_crash_occurred_needed(void *opaque)
    CPUState *cpu = opaque;

    return cpu->crash_occurred;

static const VMStateDescription vmstate_cpu_common_crash_occurred = {
    .name = "cpu_common/crash_occurred",
    .minimum_version_id = 1,
    .needed = cpu_common_crash_occurred_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(crash_occurred, CPUState),
        VMSTATE_END_OF_LIST()

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .minimum_version_id = 1,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    .subsections = (const VMStateDescription*[]) {
        &vmstate_cpu_common_exception_index,
        &vmstate_cpu_common_crash_occurred,

CPUState *qemu_get_cpu(int index)
        if (cpu->cpu_index == index) {

#if !defined(CONFIG_USER_ONLY)
void cpu_address_space_init(CPUState *cpu, AddressSpace *as, int asidx)
    CPUAddressSpace *newas;

    /* Target code should have set num_ases before calling us */
    assert(asidx < cpu->num_ases);

    /* address space 0 gets the convenience alias */
    /* KVM cannot currently support multiple address spaces. */
    assert(asidx == 0 || !kvm_enabled());

    if (!cpu->cpu_ases) {
        cpu->cpu_ases = g_new0(CPUAddressSpace, cpu->num_ases);

    newas = &cpu->cpu_ases[asidx];
        newas->tcg_as_listener.commit = tcg_commit;
        memory_listener_register(&newas->tcg_as_listener, as);

AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx)
    /* Return the AddressSpace corresponding to the specified index */
    return cpu->cpu_ases[asidx].as;

void cpu_exec_unrealizefn(CPUState *cpu)
    CPUClass *cc = CPU_GET_CLASS(cpu);

    cpu_list_remove(cpu);

    if (cc->vmsd != NULL) {
        vmstate_unregister(NULL, cc->vmsd, cpu);
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_unregister(NULL, &vmstate_cpu_common, cpu);

void cpu_exec_initfn(CPUState *cpu)
#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();

    /* This is a softmmu CPU object, so create a property for it
     * so users can wire up its memory. (This can't go in qom/cpu.c
     * because that file is compiled only once for both user-mode
     * and system builds.) The default if no link is set up is to use
     * the system address space.
     */
    object_property_add_link(OBJECT(cpu), "memory", TYPE_MEMORY_REGION,
                             (Object **)&cpu->memory,
                             qdev_prop_allow_set_link_before_realize,
                             OBJ_PROP_LINK_UNREF_ON_RELEASE,
    cpu->memory = system_memory;
    object_ref(OBJECT(cpu->memory));

void cpu_exec_realizefn(CPUState *cpu, Error **errp)
    CPUClass *cc ATTRIBUTE_UNUSED = CPU_GET_CLASS(cpu);

#ifndef CONFIG_USER_ONLY
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu->cpu_index, &vmstate_cpu_common, cpu);
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu->cpu_index, cc->vmsd, cpu);
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
    tb_invalidate_phys_page_range(pc, pc + 1, 0);

static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
    hwaddr phys = cpu_get_phys_page_attrs_debug(cpu, pc, &attrs);
    int asidx = cpu_asidx_from_attrs(cpu, attrs);

        /* Locks grabbed by tb_invalidate_phys_addr */
        tb_invalidate_phys_addr(cpu->cpu_ases[asidx].as,
                                phys | (pc & ~TARGET_PAGE_MASK));

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)

int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,

void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)

/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
    /* forbid ranges which are empty or run off the end of the address space */
    if (len == 0 || (addr + len - 1) < addr) {
        error_report("tried to set invalid watchpoint at %"
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);

    wp = g_malloc(sizeof(*wp));

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);

    tlb_flush_page(cpu, addr);

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (addr == wp->vaddr && len == wp->len
            && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(cpu, wp);

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
    QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);

    tlb_flush_page(cpu, watchpoint->vaddr);

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
/* Return true if this watchpoint address matches the specified
 * access (ie the address range covered by the watchpoint overlaps
 * partially or completely with the address range covered by the
 */
static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
    /* We know the lengths are non-zero, but a little caution is
     * required to avoid errors in the case where the range ends
     * exactly at the top of the address space and so addr + len
     * wraps round to zero.
     */
    vaddr wpend = wp->vaddr + wp->len - 1;
    vaddr addrend = addr + len - 1;

    return !(addr > wpend || wp->vaddr > addrend);
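/* Worked example (illustrative, not from the original source): a watchpoint
 * at vaddr = 0xfffffffffffffff0 with len = 0x10 has wpend = UINT64_MAX, and
 * an access at addr = 0xfffffffffffffff8 with len = 8 has addrend =
 * UINT64_MAX, so !(addr > wpend || wp->vaddr > addrend) correctly reports an
 * overlap, whereas comparing against addr + len would have wrapped to zero.
 */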
/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
    bp = g_malloc(sizeof(*bp));

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);

    breakpoint_invalidate(cpu, pc);

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(cpu, bp);

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
    QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);

    breakpoint_invalidate(cpu, breakpoint->pc);

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(cpu, bp);

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
            kvm_update_guest_debug(cpu, 0);
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */

void cpu_abort(CPUState *cpu, const char *fmt, ...)
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_separate()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);

#if defined(CONFIG_USER_ONLY)
    struct sigaction act;
    sigfillset(&act.sa_mask);
    act.sa_handler = SIG_DFL;
    sigaction(SIGABRT, &act, NULL);

#if !defined(CONFIG_USER_ONLY)
/* Called from RCU critical section */
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && addr - block->offset < block->max_length) {
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->max_length) {

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);

    /* It is safe to write mru_block outside the iothread lock.  This
     *     xxx removed from list
     *         call_rcu(reclaim_ramblock, xxx);
     * atomic_rcu_set is not needed here.  The block was already published
     * when it was placed into the list.  Here we're just making an extra
     * copy of the pointer.
     */
    ram_list.mru_block = block;

static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
        tlb_reset_dirty(cpu, start1, length);

/* Note: start and end must be within the same ram block.  */
bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
    DirtyMemoryBlocks *blocks;
    unsigned long end, page;

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);

    while (page < end) {
        unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long num = MIN(end - page, DIRTY_MEMORY_BLOCK_SIZE - offset);

        dirty |= bitmap_test_and_clear_atomic(blocks->blocks[idx],

    if (dirty && tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
/* Called from RCU critical section */
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       hwaddr paddr, hwaddr xlat,
                                       target_ulong *address)
    if (memory_region_is_ram(section->mr)) {
        iotlb = memory_region_get_ram_addr(section->mr) + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
            iotlb |= PHYS_SECTION_ROM;
        AddressSpaceDispatch *d;

        d = atomic_rcu_read(&section->address_space->dispatch);
        iotlb = section - d->map.sections;

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;

#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
    qemu_anon_ram_alloc;
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this.  Hopefully, we can
 * get rid of it eventually.
void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
    phys_mem_alloc = alloc;

static uint16_t phys_section_add(PhysPageMap *map,
                                 MemoryRegionSection *section)
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(map->sections_nb < TARGET_PAGE_SIZE);

    if (map->sections_nb == map->sections_nb_alloc) {
        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
        map->sections = g_renew(MemoryRegionSection, map->sections,
                                map->sections_nb_alloc);
    map->sections[map->sections_nb] = *section;
    memory_region_ref(section->mr);
    return map->sections_nb++;

static void phys_section_destroy(MemoryRegion *mr)
    bool have_sub_page = mr->subpage;

    memory_region_unref(mr);

    if (have_sub_page) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        object_unref(OBJECT(&subpage->iomem));

static void phys_sections_free(PhysPageMap *map)
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    g_free(map->sections);
static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
    hwaddr base = section->offset_within_address_space
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
                                                   d->map.nodes, d->map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.address_space = d->as;
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&d->map, &subsection));
        subpage = container_of(existing->mr, subpage_t, iomem);
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end,
                     phys_section_add(&d->map, section));

static void register_multipage(AddressSpaceDispatch *d,
                               MemoryRegionSection *section)
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(&d->map, section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,

    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);

static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = as->next_dispatch;
    MemoryRegionSection now = *section, remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
                        - now.offset_within_address_space;
        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(d, &now);
        now.size = int128_zero();
    while (int128_ne(remain.size, now.size)) {
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);
        if (int128_lt(remain.size, page_size)) {
            register_subpage(d, &now);
        } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
            now.size = page_size;
            register_subpage(d, &now);
            now.size = int128_and(now.size, int128_neg(page_size));
            register_multipage(d, &now);
void qemu_flush_coalesced_mmio_buffer(void)
        kvm_flush_coalesced_mmio_buffer();

void qemu_mutex_lock_ramlist(void)
    qemu_mutex_lock(&ram_list.mutex);

void qemu_mutex_unlock_ramlist(void)
    qemu_mutex_unlock(&ram_list.mutex);

static void *file_ram_alloc(RAMBlock *block,
    bool unlink_on_error = false;
    char *sanitized_name;
    void *area = MAP_FAILED;

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
                   "host lacks kvm mmu notifiers, -mem-path unsupported");

        fd = open(path, O_RDWR);
            /* @path names an existing file, use it */
        if (errno == ENOENT) {
            /* @path names a file that doesn't exist, create it */
            fd = open(path, O_RDWR | O_CREAT | O_EXCL, 0644);
                unlink_on_error = true;
        } else if (errno == EISDIR) {
            /* @path names a directory, create a file there */
            /* Make name safe to use with mkstemp by replacing '/' with '_'. */
            sanitized_name = g_strdup(memory_region_name(block->mr));
            for (c = sanitized_name; *c != '\0'; c++) {
            filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
            g_free(sanitized_name);

            fd = mkstemp(filename);
        if (errno != EEXIST && errno != EINTR) {
            error_setg_errno(errp, errno,
                             "can't open backing store %s for guest RAM",
         * Try again on EINTR and EEXIST.  The latter happens when
         * something else creates the file between our two open().

    block->page_size = qemu_fd_getpagesize(fd);
    block->mr->align = block->page_size;
#if defined(__s390x__)
    if (kvm_enabled()) {
        block->mr->align = MAX(block->mr->align, QEMU_VMALLOC_ALIGN);

    if (memory < block->page_size) {
        error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
                   "or larger than page size 0x%zx",
                   memory, block->page_size);

    memory = ROUND_UP(memory, block->page_size);

     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
    if (ftruncate(fd, memory)) {
        perror("ftruncate");

    area = qemu_ram_mmap(fd, memory, block->mr->align,
                         block->flags & RAM_SHARED);
    if (area == MAP_FAILED) {
        error_setg_errno(errp, errno,
                         "unable to map backing store for guest RAM");

        os_mem_prealloc(fd, area, memory, errp);
        if (errp && *errp) {

    if (area != MAP_FAILED) {
        qemu_ram_munmap(area, memory);
    if (unlink_on_error) {
/* Called with the ramlist lock held.  */
static ram_addr_t find_ram_offset(ram_addr_t size)
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    assert(size != 0); /* it would hand out same offset multiple times */

    if (QLIST_EMPTY_RCU(&ram_list.blocks)) {

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->max_length;

        QLIST_FOREACH_RCU(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
        if (next - end >= size && next - end < mingap) {
            mingap = next - end;

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",

ram_addr_t last_ram_offset(void)
    ram_addr_t last = 0;

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        last = MAX(last, block->offset + block->max_length);

static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
    /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
    if (!machine_dump_guest_core(current_machine)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
            perror("qemu_madvise");
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                            "but dump_guest_core=off specified\n");

const char *qemu_ram_get_idstr(RAMBlock *rb)

/* Called with iothread lock held.  */
void qemu_ram_set_idstr(RAMBlock *new_block, const char *name, DeviceState *dev)
    assert(!new_block->idstr[0]);

        char *id = qdev_get_dev_path(dev);
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (block != new_block &&
            !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",

/* Called with iothread lock held.  */
void qemu_ram_unset_idstr(RAMBlock *block)
    /* FIXME: arch_init.c assumes that this is not called throughout
     * migration.  Ignore the problem since hot-unplug during migration
     * does not work anyway.
     */
    memset(block->idstr, 0, sizeof(block->idstr));

size_t qemu_ram_pagesize(RAMBlock *rb)
    return rb->page_size;

static int memory_try_enable_merging(void *addr, size_t len)
    if (!machine_mem_merge(current_machine)) {
        /* disabled by the user */

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);

/* Only legal before guest might have detected the memory size: e.g. on
 * incoming migration, or right after reset.
 * As the memory core doesn't know how the memory is accessed, it is up to the
 * resize callback to update device state and/or add assertions to detect
 * misuse, if necessary.
 */
int qemu_ram_resize(RAMBlock *block, ram_addr_t newsize, Error **errp)
    newsize = HOST_PAGE_ALIGN(newsize);

    if (block->used_length == newsize) {

    if (!(block->flags & RAM_RESIZEABLE)) {
        error_setg_errno(errp, EINVAL,
                         "Length mismatch: %s: 0x" RAM_ADDR_FMT
                         " in != 0x" RAM_ADDR_FMT, block->idstr,
                         newsize, block->used_length);

    if (block->max_length < newsize) {
        error_setg_errno(errp, EINVAL,
                         "Length too large: %s: 0x" RAM_ADDR_FMT
                         " > 0x" RAM_ADDR_FMT, block->idstr,
                         newsize, block->max_length);

    cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
    block->used_length = newsize;
    cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
    memory_region_set_size(block->mr, newsize);
    if (block->resized) {
        block->resized(block->idstr, newsize, block->host);

/* Called with ram_list.mutex held */
static void dirty_memory_extend(ram_addr_t old_ram_size,
                                ram_addr_t new_ram_size)
    ram_addr_t old_num_blocks = DIV_ROUND_UP(old_ram_size,
                                             DIRTY_MEMORY_BLOCK_SIZE);
    ram_addr_t new_num_blocks = DIV_ROUND_UP(new_ram_size,
                                             DIRTY_MEMORY_BLOCK_SIZE);

    /* Only need to extend if block count increased */
    if (new_num_blocks <= old_num_blocks) {

    for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
        DirtyMemoryBlocks *old_blocks;
        DirtyMemoryBlocks *new_blocks;

        old_blocks = atomic_rcu_read(&ram_list.dirty_memory[i]);
        new_blocks = g_malloc(sizeof(*new_blocks) +
                              sizeof(new_blocks->blocks[0]) * new_num_blocks);

        if (old_num_blocks) {
            memcpy(new_blocks->blocks, old_blocks->blocks,
                   old_num_blocks * sizeof(old_blocks->blocks[0]));

        for (j = old_num_blocks; j < new_num_blocks; j++) {
            new_blocks->blocks[j] = bitmap_new(DIRTY_MEMORY_BLOCK_SIZE);

        atomic_rcu_set(&ram_list.dirty_memory[i], new_blocks);

            g_free_rcu(old_blocks, rcu);

static void ram_block_add(RAMBlock *new_block, Error **errp)
    RAMBlock *last_block = NULL;
    ram_addr_t old_ram_size, new_ram_size;

    old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;

    qemu_mutex_lock_ramlist();
    new_block->offset = find_ram_offset(new_block->max_length);

    if (!new_block->host) {
        if (xen_enabled()) {
            xen_ram_alloc(new_block->offset, new_block->max_length,
                          new_block->mr, &err);
                error_propagate(errp, err);
                qemu_mutex_unlock_ramlist();
            new_block->host = phys_mem_alloc(new_block->max_length,
                                             &new_block->mr->align);
            if (!new_block->host) {
                error_setg_errno(errp, errno,
                                 "cannot set up guest memory '%s'",
                                 memory_region_name(new_block->mr));
                qemu_mutex_unlock_ramlist();
            memory_try_enable_merging(new_block->host, new_block->max_length);

    new_ram_size = MAX(old_ram_size,
                       (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS);
    if (new_ram_size > old_ram_size) {
        migration_bitmap_extend(old_ram_size, new_ram_size);
        dirty_memory_extend(old_ram_size, new_ram_size);
    /* Keep the list sorted from biggest to smallest block.  Unlike QTAILQ,
     * QLIST (which has an RCU-friendly variant) does not have insertion at
     * tail, so save the last element in last_block.
     */
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (block->max_length < new_block->max_length) {
        QLIST_INSERT_BEFORE_RCU(block, new_block, next);
    } else if (last_block) {
        QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
    } else { /* list is empty */
        QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
    ram_list.mru_block = NULL;

    /* Write list before version */
    qemu_mutex_unlock_ramlist();

    cpu_physical_memory_set_dirty_range(new_block->offset,
                                        new_block->used_length,

    if (new_block->host) {
        qemu_ram_setup_dump(new_block->host, new_block->max_length);
        qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
        /* MADV_DONTFORK is also needed by KVM in absence of synchronous MMU */
        qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
                                   bool share, const char *mem_path,
    RAMBlock *new_block;
    Error *local_err = NULL;

    if (xen_enabled()) {
        error_setg(errp, "-mem-path not supported with Xen");

    if (phys_mem_alloc != qemu_anon_ram_alloc) {
         * file_ram_alloc() needs to allocate just like
         * phys_mem_alloc, but we haven't bothered to provide
                   "-mem-path not supported with this accelerator");

    size = HOST_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));
    new_block->used_length = size;
    new_block->max_length = size;
    new_block->flags = share ? RAM_SHARED : 0;
    new_block->host = file_ram_alloc(new_block, size,
    if (!new_block->host) {

    ram_block_add(new_block, &local_err);
        error_propagate(errp, local_err);

RAMBlock *qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
                                  void (*resized)(const char*,
                                  void *host, bool resizeable,
                                  MemoryRegion *mr, Error **errp)
    RAMBlock *new_block;
    Error *local_err = NULL;

    size = HOST_PAGE_ALIGN(size);
    max_size = HOST_PAGE_ALIGN(max_size);
    new_block = g_malloc0(sizeof(*new_block));
    new_block->resized = resized;
    new_block->used_length = size;
    new_block->max_length = max_size;
    assert(max_size >= size);
    new_block->page_size = getpagesize();
    new_block->host = host;
        new_block->flags |= RAM_PREALLOC;
        new_block->flags |= RAM_RESIZEABLE;
    ram_block_add(new_block, &local_err);
        error_propagate(errp, local_err);

RAMBlock *qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                  MemoryRegion *mr, Error **errp)
    return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);

RAMBlock *qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
    return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);

RAMBlock *qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
                                    void (*resized)(const char*,
                                    MemoryRegion *mr, Error **errp)
    return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
static void reclaim_ramblock(RAMBlock *block)
    if (block->flags & RAM_PREALLOC) {
    } else if (xen_enabled()) {
        xen_invalidate_map_cache_entry(block->host);
    } else if (block->fd >= 0) {
        qemu_ram_munmap(block->host, block->max_length);
        qemu_anon_ram_free(block->host, block->max_length);

void qemu_ram_free(RAMBlock *block)
    qemu_mutex_lock_ramlist();
    QLIST_REMOVE_RCU(block, next);
    ram_list.mru_block = NULL;
    /* Write list before version */
    call_rcu(block, reclaim_ramblock, rcu);
    qemu_mutex_unlock_ramlist();

void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->max_length) {
            vaddr = ramblock_ptr(block, offset);
            if (block->flags & RAM_PREALLOC) {
            } else if (xen_enabled()) {
                if (block->fd >= 0) {
                    flags |= (block->flags & RAM_SHARED ?
                              MAP_SHARED : MAP_PRIVATE);
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, block->fd, offset);
                     * Remap needs to match alloc.  Accelerators that
                     * set phys_mem_alloc never remap.  If they did,
                     * we'd need a remap hook here.
                    assert(phys_mem_alloc == qemu_anon_ram_alloc);

                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                memory_try_enable_merging(vaddr, length);
                qemu_ram_setup_dump(vaddr, length);
#endif /* !_WIN32 */

/* Return a host pointer to ram allocated with qemu_ram_alloc.
 * This should not be used for general purpose DMA.  Use address_space_map
 * or address_space_rw instead. For local memory (e.g. video ram) that the
 * device owns, use memory_region_get_ram_ptr.
 * Called within RCU critical section.
 */
void *qemu_map_ram_ptr(RAMBlock *ram_block, ram_addr_t addr)
    RAMBlock *block = ram_block;

    if (block == NULL) {
        block = qemu_get_ram_block(addr);
        addr -= block->offset;

    if (xen_enabled() && block->host == NULL) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map until the end of the page.
         */
        if (block->offset == 0) {
            return xen_map_cache(addr, 0, 0);

        block->host = xen_map_cache(block->offset, block->max_length, 1);
    return ramblock_ptr(block, addr);

/* Return a host pointer to guest's ram. Similar to qemu_map_ram_ptr
 * but takes a size argument.
 * Called within RCU critical section.
 */
static void *qemu_ram_ptr_length(RAMBlock *ram_block, ram_addr_t addr,
    RAMBlock *block = ram_block;

    if (block == NULL) {
        block = qemu_get_ram_block(addr);
        addr -= block->offset;
    *size = MIN(*size, block->max_length - addr);

    if (xen_enabled() && block->host == NULL) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map the requested area.
         */
        if (block->offset == 0) {
            return xen_map_cache(addr, *size, 1);

        block->host = xen_map_cache(block->offset, block->max_length, 1);

    return ramblock_ptr(block, addr);

 * Translates a host ptr back to a RAMBlock, a ram_addr and an offset
 * ptr: Host pointer to look up
 * round_offset: If true round the result offset down to a page boundary
 * *ram_addr: set to result ram_addr
 * *offset: set to result offset within the RAMBlock
 * Returns: RAMBlock (or NULL if not found)
 * By the time this function returns, the returned pointer is not protected
 * by RCU anymore.  If the caller is not within an RCU critical section and
 * does not hold the iothread lock, it must have other means of protecting the
 * pointer, such as a reference to the region that includes the incoming
RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
    uint8_t *host = ptr;

    if (xen_enabled()) {
        ram_addr_t ram_addr;
        ram_addr = xen_ram_addr_from_mapcache(ptr);
        block = qemu_get_ram_block(ram_addr);
            *offset = ram_addr - block->offset;

    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && block->host && host - block->host < block->max_length) {

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        /* This case happens when the block is not mapped. */
        if (block->host == NULL) {
        if (host - block->host < block->max_length) {

    *offset = (host - block->host);
        *offset &= TARGET_PAGE_MASK;

 * Finds the named RAMBlock
 * name: The name of RAMBlock to find
 * Returns: RAMBlock (or NULL if not found)
RAMBlock *qemu_ram_block_by_name(const char *name)
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (!strcmp(name, block->idstr)) {

/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset. */
ram_addr_t qemu_ram_addr_from_host(void *ptr)
    block = qemu_ram_block_from_host(ptr, false, &offset);
        return RAM_ADDR_INVALID;

    return block->offset + offset;

/* Called within RCU critical section. */
static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
                               uint64_t val, unsigned size)
    bool locked = false;

    if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
        tb_invalidate_phys_page_fast(ram_addr, size);
        stb_p(qemu_map_ram_ptr(NULL, ram_addr), val);
        stw_p(qemu_map_ram_ptr(NULL, ram_addr), val);
        stl_p(qemu_map_ram_ptr(NULL, ram_addr), val);

    /* Set both VGA and migration bits for simplicity and to remove
     * the notdirty callback faster.
     */
    cpu_physical_memory_set_dirty_range(ram_addr, size,
                                        DIRTY_CLIENTS_NOCODE);
    /* we remove the notdirty callback only if the code has been
     */
    if (!cpu_physical_memory_is_clean(ram_addr)) {
        tlb_set_dirty(current_cpu, current_cpu->mem_io_vaddr);

static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
                                 unsigned size, bool is_write)

static const MemoryRegionOps notdirty_mem_ops = {
    .write = notdirty_mem_write,
    .valid.accepts = notdirty_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,

/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
    CPUState *cpu = current_cpu;
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUArchState *env = cpu->env_ptr;
    target_ulong pc, cs_base;

    if (cpu->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);

    vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, len)
            && (wp->flags & flags)) {
            if (flags == BP_MEM_READ) {
                wp->flags |= BP_WATCHPOINT_HIT_READ;
                wp->flags |= BP_WATCHPOINT_HIT_WRITE;
            wp->hitaddr = vaddr;
            wp->hitattrs = attrs;
            if (!cpu->watchpoint_hit) {
                if (wp->flags & BP_CPU &&
                    !cc->debug_check_watchpoint(cpu, wp)) {
                    wp->flags &= ~BP_WATCHPOINT_HIT;
                cpu->watchpoint_hit = wp;

                /* The tb_lock will be reset when cpu_loop_exit or
                 * cpu_loop_exit_noexc longjmp back into the cpu_exec
                 */
                tb_check_watchpoint(cpu);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    cpu->exception_index = EXCP_DEBUG;
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
                    cpu_loop_exit_noexc(cpu);
                wp->flags &= ~BP_WATCHPOINT_HIT;

/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
 */
static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata,
                                  unsigned size, MemTxAttrs attrs)
    int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
    AddressSpace *as = current_cpu->cpu_ases[asidx].as;

    check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ);
        data = address_space_ldub(as, addr, attrs, &res);
        data = address_space_lduw(as, addr, attrs, &res);
        data = address_space_ldl(as, addr, attrs, &res);

static MemTxResult watch_mem_write(void *opaque, hwaddr addr,
                                   uint64_t val, unsigned size,
    int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
    AddressSpace *as = current_cpu->cpu_ases[asidx].as;

    check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE);
        address_space_stb(as, addr, val, attrs, &res);
        address_space_stw(as, addr, val, attrs, &res);
        address_space_stl(as, addr, val, attrs, &res);

static const MemoryRegionOps watch_mem_ops = {
    .read_with_attrs = watch_mem_read,
    .write_with_attrs = watch_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
                                unsigned len, MemTxAttrs attrs)
    subpage_t *subpage = opaque;

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
           subpage, len, addr);
    res = address_space_read(subpage->as, addr + subpage->base,
        *data = ldub_p(buf);
        *data = lduw_p(buf);

static MemTxResult subpage_write(void *opaque, hwaddr addr,
                                 uint64_t value, unsigned len, MemTxAttrs attrs)
    subpage_t *subpage = opaque;

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx
           " value %"PRIx64 "\n",
           __func__, subpage, len, addr, value);
    return address_space_write(subpage->as, addr + subpage->base,

static bool subpage_accepts(void *opaque, hwaddr addr,
                            unsigned len, bool is_write)
    subpage_t *subpage = opaque;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
           __func__, subpage, is_write ? 'w' : 'r', len, addr);

    return address_space_access_valid(subpage->as, addr + subpage->base,

static const MemoryRegionOps subpage_ops = {
    .read_with_attrs = subpage_read,
    .write_with_attrs = subpage_write,
    .impl.min_access_size = 1,
    .impl.max_access_size = 8,
    .valid.min_access_size = 1,
    .valid.max_access_size = 8,
    .valid.accepts = subpage_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)

    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
           __func__, mmio, start, end, idx, eidx, section);
    for (; idx <= eidx; idx++) {
        mmio->sub_section[idx] = section;

static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
    mmio = g_malloc0(sizeof(subpage_t) + TARGET_PAGE_SIZE * sizeof(uint16_t));

    memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
                          NULL, TARGET_PAGE_SIZE);
    mmio->iomem.subpage = true;
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
           mmio, base, TARGET_PAGE_SIZE);
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);

static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
    MemoryRegionSection section = {
        .address_space = as,
        .offset_within_address_space = 0,
        .offset_within_region = 0,
        .size = int128_2_64(),

    return phys_section_add(map, &section);

MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index, MemTxAttrs attrs)
    int asidx = cpu_asidx_from_attrs(cpu, attrs);
    CPUAddressSpace *cpuas = &cpu->cpu_ases[asidx];
    AddressSpaceDispatch *d = atomic_rcu_read(&cpuas->memory_dispatch);
    MemoryRegionSection *sections = d->map.sections;

    return sections[index & ~TARGET_PAGE_MASK].mr;
static void io_mem_init(void)
    memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
    memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
    memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
    memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,

static void mem_begin(MemoryListener *listener)
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);

    n = dummy_section(&d->map, as, &io_mem_unassigned);
    assert(n == PHYS_SECTION_UNASSIGNED);
    n = dummy_section(&d->map, as, &io_mem_notdirty);
    assert(n == PHYS_SECTION_NOTDIRTY);
    n = dummy_section(&d->map, as, &io_mem_rom);
    assert(n == PHYS_SECTION_ROM);
    n = dummy_section(&d->map, as, &io_mem_watch);
    assert(n == PHYS_SECTION_WATCH);

    d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
    as->next_dispatch = d;

static void address_space_dispatch_free(AddressSpaceDispatch *d)
    phys_sections_free(&d->map);

static void mem_commit(MemoryListener *listener)
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *cur = as->dispatch;
    AddressSpaceDispatch *next = as->next_dispatch;

    phys_page_compact_all(next, next->map.nodes_nb);

    atomic_rcu_set(&as->dispatch, next);
        call_rcu(cur, address_space_dispatch_free, rcu);

static void tcg_commit(MemoryListener *listener)
    CPUAddressSpace *cpuas;
    AddressSpaceDispatch *d;

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener);
    cpu_reloading_memory_map();
    /* The CPU and TLB are protected by the iothread lock.
     * We reload the dispatch pointer now because cpu_reloading_memory_map()
     * may have split the RCU critical section.
     */
    d = atomic_rcu_read(&cpuas->as->dispatch);
    cpuas->memory_dispatch = d;
    tlb_flush(cpuas->cpu, 1);

void address_space_init_dispatch(AddressSpace *as)
    as->dispatch = NULL;
    as->dispatch_listener = (MemoryListener) {
        .commit = mem_commit,
        .region_add = mem_add,
        .region_nop = mem_add,
    memory_listener_register(&as->dispatch_listener, as);

void address_space_unregister(AddressSpace *as)
    memory_listener_unregister(&as->dispatch_listener);

void address_space_destroy_dispatch(AddressSpace *as)
    AddressSpaceDispatch *d = as->dispatch;

    atomic_rcu_set(&as->dispatch, NULL);
        call_rcu(d, address_space_dispatch_free, rcu);

static void memory_map_init(void)
    system_memory = g_malloc(sizeof(*system_memory));

    memory_region_init(system_memory, NULL, "system", UINT64_MAX);
    address_space_init(&address_space_memory, system_memory, "memory");

    system_io = g_malloc(sizeof(*system_io));
    memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
    address_space_init(&address_space_io, system_io, "I/O");

MemoryRegion *get_system_memory(void)
    return system_memory;

MemoryRegion *get_system_io(void)

#endif /* !defined(CONFIG_USER_ONLY) */
2439 #if defined(CONFIG_USER_ONLY)
2440 int cpu_memory_rw_debug(CPUState
*cpu
, target_ulong addr
,
2441 uint8_t *buf
, int len
, int is_write
)
2448 page
= addr
& TARGET_PAGE_MASK
;
2449 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
2452 flags
= page_get_flags(page
);
2453 if (!(flags
& PAGE_VALID
))
2456 if (!(flags
& PAGE_WRITE
))
2458 /* XXX: this code should not depend on lock_user */
2459 if (!(p
= lock_user(VERIFY_WRITE
, addr
, l
, 0)))
2462 unlock_user(p
, addr
, l
);
2464 if (!(flags
& PAGE_READ
))
2466 /* XXX: this code should not depend on lock_user */
2467 if (!(p
= lock_user(VERIFY_READ
, addr
, l
, 1)))
2470 unlock_user(p
, addr
, 0);
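/* Mark a range of guest RAM dirty after a store; translated code covering
 * the range is invalidated first so that TCG notices self-modifying code.
 */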
static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
                                     hwaddr length)
{
    uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
    addr += memory_region_get_ram_addr(mr);

    /* No early return if dirty_log_mask is or becomes 0, because
     * cpu_physical_memory_set_dirty_range will still call
     * xen_modified_memory.
     */
    if (dirty_log_mask) {
        dirty_log_mask =
            cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
    }
    if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
        tb_invalidate_phys_range(addr, addr + length);
        dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
    }
    cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
}

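/* Clamp an access so it fits what the target MemoryRegion can handle:
 * respect valid.max_access_size (default 4) and, for regions that do not
 * allow unaligned accesses, the natural alignment of the address.
 */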
static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
{
    unsigned access_size_max = mr->ops->valid.max_access_size;

    /* Regions are assumed to support 1-4 byte accesses unless
       otherwise specified.  */
    if (access_size_max == 0) {
        access_size_max = 4;
    }

    /* Bound the maximum access by the alignment of the address.  */
    if (!mr->ops->impl.unaligned) {
        unsigned align_size_max = addr & -addr;
        if (align_size_max != 0 && align_size_max < access_size_max) {
            access_size_max = align_size_max;
        }
    }

    /* Don't attempt accesses larger than the maximum.  */
    if (l > access_size_max) {
        l = access_size_max;
    }
    l = pow2floor(l);

    return l;
}

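/* Take the iothread (big QEMU) lock before dispatching to an MMIO region
 * that still relies on global locking, and flush any coalesced MMIO that may
 * be queued.  Returns true if the caller must drop the lock afterwards.
 */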
static bool prepare_mmio_access(MemoryRegion *mr)
{
    bool unlocked = !qemu_mutex_iothread_locked();
    bool release_lock = false;

    if (unlocked && mr->global_locking) {
        qemu_mutex_lock_iothread();
        unlocked = false;
        release_lock = true;
    }
    if (mr->flush_coalesced_mmio) {
        if (unlocked) {
            qemu_mutex_lock_iothread();
        }
        qemu_flush_coalesced_mmio_buffer();
        if (unlocked) {
            qemu_mutex_unlock_iothread();
        }
    }

    return release_lock;
}

/* Called within RCU critical section.  */
static MemTxResult address_space_write_continue(AddressSpace *as, hwaddr addr,
                                                MemTxAttrs attrs,
                                                const uint8_t *buf,
                                                int len, hwaddr addr1,
                                                hwaddr l, MemoryRegion *mr)
{
    uint8_t *ptr;
    uint64_t val;
    MemTxResult result = MEMTX_OK;
    bool release_lock = false;

    for (;;) {
        if (!memory_access_is_direct(mr, true)) {
            release_lock |= prepare_mmio_access(mr);
            l = memory_access_size(mr, l, addr1);
            /* XXX: could force current_cpu to NULL to avoid
               potential bugs */
            switch (l) {
            case 8:
                /* 64 bit write access */
                val = ldq_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 8,
                                                       attrs);
                break;
            case 4:
                /* 32 bit write access */
                val = ldl_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 4,
                                                       attrs);
                break;
            case 2:
                /* 16 bit write access */
                val = lduw_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 2,
                                                       attrs);
                break;
            case 1:
                /* 8 bit write access */
                val = ldub_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 1,
                                                       attrs);
                break;
            default:
                abort();
            }
        } else {
            /* RAM case */
            ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
            memcpy(ptr, buf, l);
            invalidate_and_set_dirty(mr, addr1, l);
        }

        if (release_lock) {
            qemu_mutex_unlock_iothread();
            release_lock = false;
        }

        len -= l;
        buf += l;
        addr += l;

        if (!len) {
            break;
        }

        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);
    }

    return result;
}

MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
                                const uint8_t *buf, int len)
{
    hwaddr l;
    hwaddr addr1;
    MemoryRegion *mr;
    MemTxResult result = MEMTX_OK;

    if (len > 0) {
        rcu_read_lock();
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);
        result = address_space_write_continue(as, addr, attrs, buf, len,
                                              addr1, l, mr);
        rcu_read_unlock();
    }

    return result;
}

/* Called within RCU critical section.  */
MemTxResult address_space_read_continue(AddressSpace *as, hwaddr addr,
                                        MemTxAttrs attrs, uint8_t *buf,
                                        int len, hwaddr addr1, hwaddr l,
                                        MemoryRegion *mr)
{
    uint8_t *ptr;
    uint64_t val;
    MemTxResult result = MEMTX_OK;
    bool release_lock = false;

    for (;;) {
        if (!memory_access_is_direct(mr, false)) {
            /* I/O case */
            release_lock |= prepare_mmio_access(mr);
            l = memory_access_size(mr, l, addr1);
            switch (l) {
            case 8:
                /* 64 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 8,
                                                      attrs);
                stq_p(buf, val);
                break;
            case 4:
                /* 32 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 4,
                                                      attrs);
                stl_p(buf, val);
                break;
            case 2:
                /* 16 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 2,
                                                      attrs);
                stw_p(buf, val);
                break;
            case 1:
                /* 8 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 1,
                                                      attrs);
                stb_p(buf, val);
                break;
            default:
                abort();
            }
        } else {
            /* RAM case */
            ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
            memcpy(buf, ptr, l);
        }

        if (release_lock) {
            qemu_mutex_unlock_iothread();
            release_lock = false;
        }

        len -= l;
        buf += l;
        addr += l;

        if (!len) {
            break;
        }

        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, false);
    }

    return result;
}

MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
                                    MemTxAttrs attrs, uint8_t *buf, int len)
{
    hwaddr l;
    hwaddr addr1;
    MemoryRegion *mr;
    MemTxResult result = MEMTX_OK;

    if (len > 0) {
        rcu_read_lock();
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, false);
        result = address_space_read_continue(as, addr, attrs, buf, len,
                                             addr1, l, mr);
        rcu_read_unlock();
    }

    return result;
}

MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
                             uint8_t *buf, int len, bool is_write)
{
    if (is_write) {
        return address_space_write(as, addr, attrs, (uint8_t *)buf, len);
    } else {
        return address_space_read(as, addr, attrs, (uint8_t *)buf, len);
    }
}

void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
                            int len, int is_write)
{
    address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
                     buf, len, is_write);
}

enum write_rom_type {
    WRITE_DATA,
    FLUSH_CACHE,
};

static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
    hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
{
    hwaddr l;
    uint8_t *ptr;
    hwaddr addr1;
    MemoryRegion *mr;

    rcu_read_lock();
    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);

        if (!(memory_region_is_ram(mr) ||
              memory_region_is_romd(mr))) {
            l = memory_access_size(mr, l, addr1);
        } else {
            /* ROM/RAM case */
            ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
            switch (type) {
            case WRITE_DATA:
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(mr, addr1, l);
                break;
            case FLUSH_CACHE:
                flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
                break;
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
    rcu_read_unlock();
}

/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
                                   const uint8_t *buf, int len)
{
    cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
}

void cpu_flush_icache_range(hwaddr start, int len)
{
    /*
     * This function should do the same thing as an icache flush that was
     * triggered from within the guest. For TCG we are always cache coherent,
     * so there is no need to flush anything. For KVM / Xen we need to flush
     * the host's instruction cache at least.
     */
    if (tcg_enabled()) {
        return;
    }

    cpu_physical_memory_write_rom_internal(&address_space_memory,
                                           start, NULL, len, FLUSH_CACHE);
}

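/* Bounce buffer used by address_space_map() when the target is not directly
 * mappable RAM: callers copy through this single buffer instead, and the
 * MapClients queued below are notified when it becomes free again.
 */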
typedef struct {
    MemoryRegion *mr;
    void *buffer;
    hwaddr addr;
    hwaddr len;
    bool in_use;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    QEMUBH *bh;
    QLIST_ENTRY(MapClient) link;
} MapClient;

QemuMutex map_client_list_lock;
static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

static void cpu_unregister_map_client_do(MapClient *client)
{
    QLIST_REMOVE(client, link);
    g_free(client);
}

static void cpu_notify_map_clients_locked(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        qemu_bh_schedule(client->bh);
        cpu_unregister_map_client_do(client);
    }
}

void cpu_register_map_client(QEMUBH *bh)
{
    MapClient *client = g_malloc(sizeof(*client));

    qemu_mutex_lock(&map_client_list_lock);
    client->bh = bh;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    if (!atomic_read(&bounce.in_use)) {
        cpu_notify_map_clients_locked();
    }
    qemu_mutex_unlock(&map_client_list_lock);
}

void cpu_exec_init_all(void)
{
    qemu_mutex_init(&ram_list.mutex);
    /* The data structures we set up here depend on knowing the page size,
     * so no more changes can be made after this point.
     * In an ideal world, nothing we did before we had finished the
     * machine setup would care about the target page size, and we could
     * do this much later, rather than requiring board models to state
     * up front what their requirements are.
     */
    finalize_target_page_bits();
    io_mem_init();
    memory_map_init();
    qemu_mutex_init(&map_client_list_lock);
}

void cpu_unregister_map_client(QEMUBH *bh)
{
    MapClient *client;

    qemu_mutex_lock(&map_client_list_lock);
    QLIST_FOREACH(client, &map_client_list, link) {
        if (client->bh == bh) {
            cpu_unregister_map_client_do(client);
            break;
        }
    }
    qemu_mutex_unlock(&map_client_list_lock);
}

static void cpu_notify_map_clients(void)
{
    qemu_mutex_lock(&map_client_list_lock);
    cpu_notify_map_clients_locked();
    qemu_mutex_unlock(&map_client_list_lock);
}

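/* Walk the translation for [addr, addr + len) and report whether every piece
 * of the range would accept an access of the given size and direction.
 */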
bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
{
    MemoryRegion *mr;
    hwaddr l, xlat;

    rcu_read_lock();
    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (!memory_access_is_direct(mr, is_write)) {
            l = memory_access_size(mr, l, addr);
            if (!memory_region_access_valid(mr, xlat, l, is_write)) {
                rcu_read_unlock();
                return false;
            }
        }

        len -= l;
        addr += l;
    }
    rcu_read_unlock();
    return true;
}

/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *address_space_map(AddressSpace *as,
                        hwaddr addr,
                        hwaddr *plen,
                        bool is_write)
{
    hwaddr len = *plen;
    hwaddr done = 0;
    hwaddr l, xlat, base;
    MemoryRegion *mr, *this_mr;
    void *ptr;

    if (len == 0) {
        return NULL;
    }

    l = len;
    rcu_read_lock();
    mr = address_space_translate(as, addr, &xlat, &l, is_write);

    if (!memory_access_is_direct(mr, is_write)) {
        if (atomic_xchg(&bounce.in_use, true)) {
            rcu_read_unlock();
            return NULL;
        }
        /* Avoid unbounded allocations */
        l = MIN(l, TARGET_PAGE_SIZE);
        bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
        bounce.addr = addr;
        bounce.len = l;

        memory_region_ref(mr);
        bounce.mr = mr;
        if (!is_write) {
            address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
                               bounce.buffer, l);
        }

        rcu_read_unlock();
        *plen = l;
        return bounce.buffer;
    }

    base = xlat;

    for (;;) {
        len -= l;
        addr += l;
        done += l;
        if (len == 0) {
            break;
        }

        l = len;
        this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (this_mr != mr || xlat != base + done) {
            break;
        }
    }

    memory_region_ref(mr);
    *plen = done;
    ptr = qemu_ram_ptr_length(mr->ram_block, base, plen);
    rcu_read_unlock();

    return ptr;
}

/* Unmaps a memory region previously mapped by address_space_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         int is_write, hwaddr access_len)
{
    if (buffer != bounce.buffer) {
        MemoryRegion *mr;
        ram_addr_t addr1;

        mr = memory_region_from_host(buffer, &addr1);
        assert(mr != NULL);
        if (is_write) {
            invalidate_and_set_dirty(mr, addr1, access_len);
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        memory_region_unref(mr);
        return;
    }
    if (is_write) {
        address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
                            bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    memory_region_unref(bounce.mr);
    atomic_mb_set(&bounce.in_use, false);
    cpu_notify_map_clients();
}

void *cpu_physical_memory_map(hwaddr addr,
                              hwaddr *plen,
                              int is_write)
{
    return address_space_map(&address_space_memory, addr, plen, is_write);
}

void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                               int is_write, hwaddr access_len)
{
    return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
}

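/* The fixed-size load and store helpers below share one pattern: translate
 * the address, go through memory_region_dispatch_read or _write (byte-swapped
 * when the requested endianness differs from the target) if the destination
 * is MMIO, otherwise access the host RAM pointer directly.
 */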
/* warning: addr must be aligned */
static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
                                                  MemTxAttrs attrs,
                                                  MemTxResult *result,
                                                  enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, false);
    if (l < 4 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}

uint32_t address_space_ldl(AddressSpace *as, hwaddr addr,
                           MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_NATIVE_ENDIAN);
}

uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_LITTLE_ENDIAN);
}

uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_BIG_ENDIAN);
}

uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* warning: addr must be aligned */
static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
                                                  MemTxAttrs attrs,
                                                  MemTxResult *result,
                                                  enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 8;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 false);
    if (l < 8 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap64(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap64(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}

uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
                           MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_NATIVE_ENDIAN);
}

uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_LITTLE_ENDIAN);
}

uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_BIG_ENDIAN);
}

uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
                            MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t val;
    MemTxResult r;

    r = address_space_rw(as, addr, attrs, &val, 1, 0);
    if (result) {
        *result = r;
    }
    return val;
}

uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* warning: addr must be aligned */
static inline uint32_t address_space_lduw_internal(AddressSpace *as,
                                                   hwaddr addr,
                                                   MemTxAttrs attrs,
                                                   MemTxResult *result,
                                                   enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 false);
    if (l < 2 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}

uint32_t address_space_lduw(AddressSpace *as, hwaddr addr,
                            MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_NATIVE_ENDIAN);
}

uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
                               MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_LITTLE_ENDIAN);
}

uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
                               MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_BIG_ENDIAN);
}

uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
                                MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    uint8_t dirty_log_mask;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

        r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
    } else {
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        stl_p(ptr, val);

        dirty_log_mask = memory_region_get_dirty_log_mask(mr);
        dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
        cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
                                            4, dirty_log_mask);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}

void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_notdirty(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

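/* Unlike the _notdirty variant above, the store helpers below go through
 * invalidate_and_set_dirty(), keeping the dirty bitmaps and translated code
 * coherent with the data that was written.
 */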
/* warning: addr must be aligned */
static inline void address_space_stl_internal(AddressSpace *as,
                                              hwaddr addr, uint32_t val,
                                              MemTxAttrs attrs,
                                              MemTxResult *result,
                                              enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(mr, addr1, 4);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}

void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stl_internal(as, addr, val, attrs, result,
                               DEVICE_NATIVE_ENDIAN);
}

void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stl_internal(as, addr, val, attrs, result,
                               DEVICE_LITTLE_ENDIAN);
}

void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stl_internal(as, addr, val, attrs, result,
                               DEVICE_BIG_ENDIAN);
}

void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t v = val;
    MemTxResult r;

    r = address_space_rw(as, addr, attrs, &v, 1, 1);
    if (result) {
        *result = r;
    }
}

void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* warning: addr must be aligned */
static inline void address_space_stw_internal(AddressSpace *as,
                                              hwaddr addr, uint32_t val,
                                              MemTxAttrs attrs,
                                              MemTxResult *result,
                                              enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, true);
    if (l < 2 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(mr, addr1, 2);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}

void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_NATIVE_ENDIAN);
}

void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_LITTLE_ENDIAN);
}

void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_BIG_ENDIAN);
}

void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;
    val = tswap64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;
    val = cpu_to_le64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;
    val = cpu_to_be64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

    while (len > 0) {
        int asidx;
        MemTxAttrs attrs;

        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_attrs_debug(cpu, page, &attrs);
        asidx = cpu_asidx_from_attrs(cpu, attrs);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write) {
            cpu_physical_memory_write_rom(cpu->cpu_ases[asidx].as,
                                          phys_addr, buf, l);
        } else {
            address_space_rw(cpu->cpu_ases[asidx].as, phys_addr,
                             MEMTXATTRS_UNSPECIFIED,
                             buf, l, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

/*
 * Allows code that needs to deal with migration bitmaps etc to still be built
 * target independent.
 */
size_t qemu_target_page_bits(void)
{
    return TARGET_PAGE_BITS;
}

/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool target_words_bigendian(void);
bool target_words_bigendian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}

#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegion *mr;
    hwaddr l = 1;
    bool res;

    rcu_read_lock();
    mr = address_space_translate(&address_space_memory,
                                 phys_addr, &phys_addr, &l, false);

    res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
    rcu_read_unlock();
    return res;
}

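/* Iterate over all RAMBlocks under RCU and invoke the callback for each,
 * passing the block's idstr, host pointer, offset and used length.
 */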
int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
{
    RAMBlock *block;
    int ret = 0;

    rcu_read_lock();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        ret = func(block->idstr, block->host, block->offset,
                   block->used_length, opaque);