/*
 *  Virtual page mapping
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/cutils.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/boards.h"
#endif
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include "qemu.h"
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#endif
#include "exec/cpu-all.h"
#include "qemu/rcu_queue.h"
#include "qemu/main-loop.h"
#include "translate-all.h"
#include "sysemu/replay.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"

#include "qemu/range.h"
#include "qemu/mmap-alloc.h"

//#define DEBUG_SUBPAGE
#if !defined(CONFIG_USER_ONLY)
/* ram_list is read under rcu_read_lock()/rcu_read_unlock().  Writes
 * are protected by the ramlist lock.
 */
RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC   (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED     (1 << 1)

/* Only a portion of RAM (used_length) is actually used, and migrated.
 * This used_length size can change across reboots.
 */
#define RAM_RESIZEABLE (1 << 2)

#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
__thread CPUState *current_cpu;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables.  */
#define ADDR_SPACE_BITS 64

#define P_L2_BITS 9
#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)

typedef PhysPageEntry Node[P_L2_SIZE];

typedef struct PhysPageMap {
    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    struct rcu_head rcu;

    MemoryRegionSection *mru_section;
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;
/**
 * CPUAddressSpace: all the information a CPU needs about an AddressSpace
 * @cpu: the CPU whose AddressSpace this is
 * @as: the AddressSpace itself
 * @memory_dispatch: its dispatch pointer (cached, RCU protected)
 * @tcg_as_listener: listener for tracking changes to the AddressSpace
 */
struct CPUAddressSpace {
    CPUState *cpu;
    AddressSpace *as;
    struct AddressSpaceDispatch *memory_dispatch;
    MemoryListener tcg_as_listener;
};

#endif
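
/* The physical address space is tracked with a sparse multi-level table
 * (see PhysPageEntry above): interior nodes live in PhysPageMap.nodes and
 * leaf entries index into PhysPageMap.sections.  The helpers below grow
 * the node array on demand and allocate fresh nodes one level at a time.
 */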
#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
    }
}

static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
{
    unsigned i;
    uint32_t ret;
    PhysPageEntry e;
    PhysPageEntry *p;

    ret = map->nodes_nb++;
    p = map->nodes[ret];
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);

    e.skip = leaf ? 0 : 1;
    e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
    for (i = 0; i < P_L2_SIZE; ++i) {
        memcpy(&p[i], &e, sizeof(e));
    }
    return ret;
}
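
/* Recursively populate the map: at each level, whole step-aligned ranges
 * are filled with the leaf section index directly, while partially covered
 * entries descend one more level.
 */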
static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map, level == 0);
    }
    p = map->nodes[lp->ptr];
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}
/* Compact a non leaf page entry. Simply detect that the entry has a single child,
 * and update our entry so we can skip it and go directly to the destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes, compacted);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}

static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    DECLARE_BITMAP(compacted, nodes_nb);

    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes, compacted);
    }
}
static inline bool section_covers_addr(const MemoryRegionSection *section,
                                       hwaddr addr)
{
    /* Memory topology clips a memory region to [0, 2^64); size.hi > 0 means
     * the section must cover the entire address space.
     */
    return section->size.hi ||
           range_covers_byte(section->offset_within_address_space,
                             section->size.lo, addr);
}
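
/* Walk the multi-level map for a single address.  Levels that were merged
 * by phys_page_compact() are skipped according to lp.skip; a NIL pointer
 * resolves to the unassigned section.
 */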
static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (section_covers_addr(&sections[lp.ptr], addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}
bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

/* Called from RCU critical section */
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section = atomic_read(&d->mru_section);
    subpage_t *subpage;
    bool update;

    if (section && section != &d->map.sections[PHYS_SECTION_UNASSIGNED] &&
        section_covers_addr(section, addr)) {
        update = false;
    } else {
        section = phys_page_find(d->phys_map, addr, d->map.nodes,
                                 d->map.sections);
        update = true;
    }
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    if (update) {
        atomic_set(&d->mru_section, section);
    }
    return section;
}
/* Called from RCU critical section */
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    MemoryRegion *mr;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    mr = section->mr;

    /* MMIO registers can be expected to perform full-width accesses based only
     * on their address, without considering adjacent registers that could
     * decode to completely different MemoryRegions.  When such registers
     * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
     * regions overlap wildly.  For this reason we cannot clamp the accesses
     * here.
     *
     * If the length is small (as is the case for address_space_ldl/stl),
     * everything works fine.  If the incoming length is large, however,
     * the caller really has to do the clamping through memory_access_size.
     */
    if (memory_region_is_ram(mr)) {
        diff = int128_sub(section->size, int128_make64(addr));
        *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    }
    return section;
}
/* Called from RCU critical section */
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;

    for (;;) {
        AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
        section = address_space_translate_internal(d, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        *plen = MIN(page, *plen);
    }

    *xlat = addr;
    return mr;
}
/* Called from RCU critical section */
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen)
{
    MemoryRegionSection *section;
    AddressSpaceDispatch *d = cpu->cpu_ases[asidx].memory_dispatch;

    section = address_space_translate_internal(d, addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif

#if !defined(CONFIG_USER_ONLY)
static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu, 1);

    return 0;
}

static int cpu_common_pre_load(void *opaque)
{
    CPUState *cpu = opaque;

    cpu->exception_index = -1;

    return 0;
}

static bool cpu_common_exception_index_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return tcg_enabled() && cpu->exception_index != -1;
}

static const VMStateDescription vmstate_cpu_common_exception_index = {
    .name = "cpu_common/exception_index",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_exception_index_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(exception_index, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

static bool cpu_common_crash_occurred_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return cpu->crash_occurred;
}

static const VMStateDescription vmstate_cpu_common_crash_occurred = {
    .name = "cpu_common/crash_occurred",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_crash_occurred_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(crash_occurred, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_cpu_common_exception_index,
        &vmstate_cpu_common_crash_occurred,
        NULL
    }
};

#endif
CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}
#if !defined(CONFIG_USER_ONLY)
void cpu_address_space_init(CPUState *cpu, AddressSpace *as, int asidx)
{
    CPUAddressSpace *newas;

    /* Target code should have set num_ases before calling us */
    assert(asidx < cpu->num_ases);

    if (asidx == 0) {
        /* address space 0 gets the convenience alias */
        cpu->as = as;
    }

    /* KVM cannot currently support multiple address spaces. */
    assert(asidx == 0 || !kvm_enabled());

    if (!cpu->cpu_ases) {
        cpu->cpu_ases = g_new0(CPUAddressSpace, cpu->num_ases);
    }

    newas = &cpu->cpu_ases[asidx];
    newas->cpu = cpu;
    newas->as = as;
    if (tcg_enabled()) {
        newas->tcg_as_listener.commit = tcg_commit;
        memory_listener_register(&newas->tcg_as_listener, as);
    }
}

AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx)
{
    /* Return the AddressSpace corresponding to the specified index */
    return cpu->cpu_ases[asidx].as;
}
#endif
#ifndef CONFIG_USER_ONLY
static DECLARE_BITMAP(cpu_index_map, MAX_CPUMASK_BITS);

static int cpu_get_free_index(Error **errp)
{
    int cpu = find_first_zero_bit(cpu_index_map, MAX_CPUMASK_BITS);

    if (cpu >= MAX_CPUMASK_BITS) {
        error_setg(errp, "Trying to use more CPUs than max of %d",
                   MAX_CPUMASK_BITS);
        return -1;
    }

    bitmap_set(cpu_index_map, cpu, 1);
    return cpu;
}

void cpu_exec_exit(CPUState *cpu)
{
    if (cpu->cpu_index == -1) {
        /* cpu_index was never allocated by this @cpu or was already freed. */
        return;
    }

    bitmap_clear(cpu_index_map, cpu->cpu_index, 1);
    cpu->cpu_index = -1;
}
#else

static int cpu_get_free_index(Error **errp)
{
    CPUState *some_cpu;
    int cpu_index = 0;

    CPU_FOREACH(some_cpu) {
        cpu_index++;
    }
    return cpu_index;
}

void cpu_exec_exit(CPUState *cpu)
{
}
#endif
void cpu_exec_init(CPUState *cpu, Error **errp)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    int cpu_index;
    Error *local_err = NULL;

#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();

    /* This is a softmmu CPU object, so create a property for it
     * so users can wire up its memory. (This can't go in qom/cpu.c
     * because that file is compiled only once for both user-mode
     * and system builds.) The default if no link is set up is to use
     * the system address space.
     */
    object_property_add_link(OBJECT(cpu), "memory", TYPE_MEMORY_REGION,
                             (Object **)&cpu->memory,
                             qdev_prop_allow_set_link_before_realize,
                             OBJ_PROP_LINK_UNREF_ON_RELEASE,
                             &error_abort);
    cpu->memory = system_memory;
    object_ref(OBJECT(cpu->memory));
#endif

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    cpu_index = cpu->cpu_index = cpu_get_free_index(&local_err);
    if (local_err) {
        error_propagate(errp, local_err);
#if defined(CONFIG_USER_ONLY)
        cpu_list_unlock();
#endif
        return;
    }
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
    }
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    MemTxAttrs attrs;
    hwaddr phys = cpu_get_phys_page_attrs_debug(cpu, pc, &attrs);
    int asidx = cpu_asidx_from_attrs(cpu, attrs);
    if (phys != -1) {
        tb_invalidate_phys_addr(cpu->cpu_ases[asidx].as,
                                phys | (pc & ~TARGET_PAGE_MASK));
    }
}
#endif
#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
}

int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    return -ENOSYS;
}

void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
}

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    CPUWatchpoint *wp;

    /* forbid ranges which are empty or run off the end of the address space */
    if (len == 0 || (addr + len - 1) < addr) {
        error_report("tried to set invalid watchpoint at %"
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len = len;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
    }

    tlb_flush_page(cpu, addr);

    if (watchpoint) {
        *watchpoint = wp;
    }
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (addr == wp->vaddr && len == wp->len
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);

    tlb_flush_page(cpu, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
        }
    }
}

/* Return true if this watchpoint address matches the specified
 * access (ie the address range covered by the watchpoint overlaps
 * partially or completely with the address range covered by the
 * access).
 */
static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
                                                  vaddr addr,
                                                  vaddr len)
{
    /* We know the lengths are non-zero, but a little caution is
     * required to avoid errors in the case where the range ends
     * exactly at the top of the address space and so addr + len
     * wraps round to zero.
     */
    vaddr wpend = wp->vaddr + wp->len - 1;
    vaddr addrend = addr + len - 1;

    return !(addr > wpend || wp->vaddr > addrend);
}
#endif
/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
{
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
    }

    breakpoint_invalidate(cpu, pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
{
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
{
    QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);

    breakpoint_invalidate(cpu, breakpoint->pc);

    g_free(breakpoint);
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
{
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
        }
    }
}
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(cpu);
        }
    }
}
void cpu_abort(CPUState *cpu, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_separate()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}
#if !defined(CONFIG_USER_ONLY)
/* Called from RCU critical section */
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && addr - block->offset < block->max_length) {
        return block;
    }
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->max_length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    /* It is safe to write mru_block outside the iothread lock.  This
     * is what happens:
     *
     *     mru_block = xxx
     *     rcu_read_unlock()
     *                                        xxx removed from list
     *                  rcu_read_lock()
     *                  read mru_block
     *                                        mru_block = NULL;
     *                                        call_rcu(reclaim_ramblock, xxx);
     *                  rcu_read_unlock()
     *
     * atomic_rcu_set is not needed here.  The block was already published
     * when it was placed into the list.  Here we're just making an extra
     * copy of the pointer.
     */
    ram_list.mru_block = block;
    return block;
}
static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
    CPUState *cpu;
    ram_addr_t start1;
    RAMBlock *block;
    ram_addr_t end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    rcu_read_lock();
    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
    CPU_FOREACH(cpu) {
        tlb_reset_dirty(cpu, start1, length);
    }
    rcu_read_unlock();
}
/* Note: start and end must be within the same ram block.  */
bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
                                              ram_addr_t length,
                                              unsigned client)
{
    DirtyMemoryBlocks *blocks;
    unsigned long end, page;
    bool dirty = false;

    if (length == 0) {
        return false;
    }

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    rcu_read_lock();

    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);

    while (page < end) {
        unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long num = MIN(end - page, DIRTY_MEMORY_BLOCK_SIZE - offset);

        dirty |= bitmap_test_and_clear_atomic(blocks->blocks[idx],
                                              offset, num);
        page += num;
    }

    rcu_read_unlock();

    if (dirty && tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
    }

    return dirty;
}
/* Called from RCU critical section */
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        AddressSpaceDispatch *d;

        d = atomic_rcu_read(&section->address_space->dispatch);
        iotlb = section - d->map.sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* defined(CONFIG_USER_ONLY) */
#if !defined(CONFIG_USER_ONLY)

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
                               qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this.  Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
{
    phys_mem_alloc = alloc;
}
static uint16_t phys_section_add(PhysPageMap *map,
                                 MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(map->sections_nb < TARGET_PAGE_SIZE);

    if (map->sections_nb == map->sections_nb_alloc) {
        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
        map->sections = g_renew(MemoryRegionSection, map->sections,
                                map->sections_nb_alloc);
    }
    map->sections[map->sections_nb] = *section;
    memory_region_ref(section->mr);
    return map->sections_nb++;
}
static void phys_section_destroy(MemoryRegion *mr)
{
    bool have_sub_page = mr->subpage;

    memory_region_unref(mr);

    if (have_sub_page) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        object_unref(OBJECT(&subpage->iomem));
        g_free(subpage);
    }
}

static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->sections);
    g_free(map->nodes);
}
static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
                                                   d->map.nodes, d->map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.address_space = d->as;
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&d->map, &subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end,
                     phys_section_add(&d->map, section));
}
static void register_multipage(AddressSpaceDispatch *d,
                               MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(&d->map, section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,
                                                    TARGET_PAGE_BITS));

    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
}
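
/* mem_add() splits an incoming MemoryRegionSection into a leading partial
 * page, a run of whole pages, and a trailing partial page.  Partial pages
 * are dispatched through subpage_t containers; whole pages go straight
 * into the phys map via register_multipage().
 */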
static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = as->next_dispatch;
    MemoryRegionSection now = *section, remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
                        - now.offset_within_address_space;

        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(d, &now);
    } else {
        now.size = int128_zero();
    }
    while (int128_ne(remain.size, now.size)) {
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);
        now = remain;
        if (int128_lt(remain.size, page_size)) {
            register_subpage(d, &now);
        } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
            now.size = page_size;
            register_subpage(d, &now);
        } else {
            now.size = int128_and(now.size, int128_neg(page_size));
            register_multipage(d, &now);
        }
    }
}
void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled()) {
        kvm_flush_coalesced_mmio_buffer();
    }
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}
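
/* Allocate guest RAM backed by a file (or a hugetlbfs directory) given
 * with -mem-path.  The mapping is created with qemu_ram_mmap() and may be
 * MAP_SHARED when the RAM_SHARED flag is set on the block.
 */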
static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path,
                            Error **errp)
{
    bool unlink_on_error = false;
    char *filename;
    char *sanitized_name;
    char *c;
    void *area;
    int fd = -1;
    int64_t page_size;

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        error_setg(errp,
                   "host lacks kvm mmu notifiers, -mem-path unsupported");
        return NULL;
    }

    for (;;) {
        fd = open(path, O_RDWR);
        if (fd >= 0) {
            /* @path names an existing file, use it */
            break;
        }
        if (errno == ENOENT) {
            /* @path names a file that doesn't exist, create it */
            fd = open(path, O_RDWR | O_CREAT | O_EXCL, 0644);
            if (fd >= 0) {
                unlink_on_error = true;
                break;
            }
        } else if (errno == EISDIR) {
            /* @path names a directory, create a file there */
            /* Make name safe to use with mkstemp by replacing '/' with '_'. */
            sanitized_name = g_strdup(memory_region_name(block->mr));
            for (c = sanitized_name; *c != '\0'; c++) {
                if (*c == '/') {
                    *c = '_';
                }
            }

            filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
                                       sanitized_name);
            g_free(sanitized_name);

            fd = mkstemp(filename);
            if (fd >= 0) {
                unlink_on_error = true;
                g_free(filename);
                break;
            }
            g_free(filename);
        }
        if (errno != EEXIST && errno != EINTR) {
            error_setg_errno(errp, errno,
                             "can't open backing store %s for guest RAM",
                             path);
            goto error;
        }
        /*
         * Try again on EINTR and EEXIST.  The latter happens when
         * something else creates the file between our two open().
         */
    }

    page_size = qemu_fd_getpagesize(fd);
    block->mr->align = page_size;

    if (memory < page_size) {
        error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
                   "or larger than page size 0x%" PRIx64,
                   memory, page_size);
        goto error;
    }

    memory = ROUND_UP(memory, page_size);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory)) {
        perror("ftruncate");
    }

    area = qemu_ram_mmap(fd, memory, page_size, block->flags & RAM_SHARED);
    if (area == MAP_FAILED) {
        error_setg_errno(errp, errno,
                         "unable to map backing store for guest RAM");
        goto error;
    }

    os_mem_prealloc(fd, area, memory);

    block->fd = fd;
    return area;

error:
    if (unlink_on_error) {
        unlink(path);
    }
    if (fd != -1) {
        close(fd);
    }
    return NULL;
}
1346 static ram_addr_t
find_ram_offset(ram_addr_t size
)
1348 RAMBlock
*block
, *next_block
;
1349 ram_addr_t offset
= RAM_ADDR_MAX
, mingap
= RAM_ADDR_MAX
;
1351 assert(size
!= 0); /* it would hand out same offset multiple times */
1353 if (QLIST_EMPTY_RCU(&ram_list
.blocks
)) {
1357 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1358 ram_addr_t end
, next
= RAM_ADDR_MAX
;
1360 end
= block
->offset
+ block
->max_length
;
1362 QLIST_FOREACH_RCU(next_block
, &ram_list
.blocks
, next
) {
1363 if (next_block
->offset
>= end
) {
1364 next
= MIN(next
, next_block
->offset
);
1367 if (next
- end
>= size
&& next
- end
< mingap
) {
1369 mingap
= next
- end
;
1373 if (offset
== RAM_ADDR_MAX
) {
1374 fprintf(stderr
, "Failed to find gap of requested size: %" PRIu64
"\n",
1382 ram_addr_t
last_ram_offset(void)
1385 ram_addr_t last
= 0;
1388 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1389 last
= MAX(last
, block
->offset
+ block
->max_length
);
1395 static void qemu_ram_setup_dump(void *addr
, ram_addr_t size
)
1399 /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
1400 if (!machine_dump_guest_core(current_machine
)) {
1401 ret
= qemu_madvise(addr
, size
, QEMU_MADV_DONTDUMP
);
1403 perror("qemu_madvise");
1404 fprintf(stderr
, "madvise doesn't support MADV_DONTDUMP, "
1405 "but dump_guest_core=off specified\n");
/* Called within an RCU critical section, or while the ramlist lock
 * is held.
 */
static RAMBlock *find_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
            return block;
        }
    }

    return NULL;
}

const char *qemu_ram_get_idstr(RAMBlock *rb)
{
    return rb->idstr;
}

/* Called with iothread lock held.  */
void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block, *block;

    rcu_read_lock();
    new_block = find_ram_block(addr);
    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev) {
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
    rcu_read_unlock();
}

/* Called with iothread lock held.  */
void qemu_ram_unset_idstr(ram_addr_t addr)
{
    RAMBlock *block;

    /* FIXME: arch_init.c assumes that this is not called throughout
     * migration.  Ignore the problem since hot-unplug during migration
     * does not work anyway.
     */

    rcu_read_lock();
    block = find_ram_block(addr);
    if (block) {
        memset(block->idstr, 0, sizeof(block->idstr));
    }
    rcu_read_unlock();
}
static int memory_try_enable_merging(void *addr, size_t len)
{
    if (!machine_mem_merge(current_machine)) {
        /* disabled by the user */
        return 0;
    }

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
}
/* Only legal before guest might have detected the memory size: e.g. on
 * incoming migration, or right after reset.
 *
 * As memory core doesn't know how is memory accessed, it is up to
 * resize callback to update device state and/or add assertions to detect
 * misuse, if necessary.
 */
int qemu_ram_resize(ram_addr_t base, ram_addr_t newsize, Error **errp)
{
    RAMBlock *block = find_ram_block(base);

    assert(block);

    newsize = HOST_PAGE_ALIGN(newsize);

    if (block->used_length == newsize) {
        return 0;
    }

    if (!(block->flags & RAM_RESIZEABLE)) {
        error_setg_errno(errp, EINVAL,
                         "Length mismatch: %s: 0x" RAM_ADDR_FMT
                         " in != 0x" RAM_ADDR_FMT, block->idstr,
                         newsize, block->used_length);
        return -EINVAL;
    }

    if (block->max_length < newsize) {
        error_setg_errno(errp, EINVAL,
                         "Length too large: %s: 0x" RAM_ADDR_FMT
                         " > 0x" RAM_ADDR_FMT, block->idstr,
                         newsize, block->max_length);
        return -EINVAL;
    }

    cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
    block->used_length = newsize;
    cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
                                        DIRTY_CLIENTS_ALL);
    memory_region_set_size(block->mr, newsize);
    if (block->resized) {
        block->resized(block->idstr, newsize, block->host);
    }
    return 0;
}
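
/* Dirty memory bitmaps are kept in fixed-size blocks that readers access
 * under RCU.  Growing the bitmap therefore builds a new block array, copies
 * the old block pointers, publishes it with atomic_rcu_set() and frees the
 * old array only after a grace period.
 */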
/* Called with ram_list.mutex held */
static void dirty_memory_extend(ram_addr_t old_ram_size,
                                ram_addr_t new_ram_size)
{
    ram_addr_t old_num_blocks = DIV_ROUND_UP(old_ram_size,
                                             DIRTY_MEMORY_BLOCK_SIZE);
    ram_addr_t new_num_blocks = DIV_ROUND_UP(new_ram_size,
                                             DIRTY_MEMORY_BLOCK_SIZE);
    int i;

    /* Only need to extend if block count increased */
    if (new_num_blocks <= old_num_blocks) {
        return;
    }

    for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
        DirtyMemoryBlocks *old_blocks;
        DirtyMemoryBlocks *new_blocks;
        int j;

        old_blocks = atomic_rcu_read(&ram_list.dirty_memory[i]);
        new_blocks = g_malloc(sizeof(*new_blocks) +
                              sizeof(new_blocks->blocks[0]) * new_num_blocks);

        if (old_num_blocks) {
            memcpy(new_blocks->blocks, old_blocks->blocks,
                   old_num_blocks * sizeof(old_blocks->blocks[0]));
        }

        for (j = old_num_blocks; j < new_num_blocks; j++) {
            new_blocks->blocks[j] = bitmap_new(DIRTY_MEMORY_BLOCK_SIZE);
        }

        atomic_rcu_set(&ram_list.dirty_memory[i], new_blocks);

        if (old_blocks) {
            g_free_rcu(old_blocks, rcu);
        }
    }
}
static void ram_block_add(RAMBlock *new_block, Error **errp)
{
    RAMBlock *block;
    RAMBlock *last_block = NULL;
    ram_addr_t old_ram_size, new_ram_size;
    Error *err = NULL;

    old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;

    qemu_mutex_lock_ramlist();
    new_block->offset = find_ram_offset(new_block->max_length);

    if (!new_block->host) {
        if (xen_enabled()) {
            xen_ram_alloc(new_block->offset, new_block->max_length,
                          new_block->mr, &err);
            if (err) {
                error_propagate(errp, err);
                qemu_mutex_unlock_ramlist();
                return;
            }
        } else {
            new_block->host = phys_mem_alloc(new_block->max_length,
                                             &new_block->mr->align);
            if (!new_block->host) {
                error_setg_errno(errp, errno,
                                 "cannot set up guest memory '%s'",
                                 memory_region_name(new_block->mr));
                qemu_mutex_unlock_ramlist();
                return;
            }
            memory_try_enable_merging(new_block->host, new_block->max_length);
        }
    }

    new_ram_size = MAX(old_ram_size,
                       (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS);
    if (new_ram_size > old_ram_size) {
        migration_bitmap_extend(old_ram_size, new_ram_size);
        dirty_memory_extend(old_ram_size, new_ram_size);
    }
    /* Keep the list sorted from biggest to smallest block.  Unlike QTAILQ,
     * QLIST (which has an RCU-friendly variant) does not have insertion at
     * tail, so save the last element in last_block.
     */
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        last_block = block;
        if (block->max_length < new_block->max_length) {
            break;
        }
    }
    if (block) {
        QLIST_INSERT_BEFORE_RCU(block, new_block, next);
    } else if (last_block) {
        QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
    } else { /* list is empty */
        QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
    }
    ram_list.mru_block = NULL;

    /* Write list before version */
    smp_wmb();
    ram_list.version++;
    qemu_mutex_unlock_ramlist();

    cpu_physical_memory_set_dirty_range(new_block->offset,
                                        new_block->used_length,
                                        DIRTY_CLIENTS_ALL);

    if (new_block->host) {
        qemu_ram_setup_dump(new_block->host, new_block->max_length);
        qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
        qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
        if (kvm_enabled()) {
            kvm_setup_guest_memory(new_block->host, new_block->max_length);
        }
    }
}
RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
                                   bool share, const char *mem_path,
                                   Error **errp)
{
    RAMBlock *new_block;
    Error *local_err = NULL;

    if (xen_enabled()) {
        error_setg(errp, "-mem-path not supported with Xen");
        return NULL;
    }

    if (phys_mem_alloc != qemu_anon_ram_alloc) {
        /*
         * file_ram_alloc() needs to allocate just like
         * phys_mem_alloc, but we haven't bothered to provide
         * a hook there.
         */
        error_setg(errp,
                   "-mem-path not supported with this accelerator");
        return NULL;
    }

    size = HOST_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));
    new_block->mr = mr;
    new_block->used_length = size;
    new_block->max_length = size;
    new_block->flags = share ? RAM_SHARED : 0;
    new_block->host = file_ram_alloc(new_block, size,
                                     mem_path, errp);
    if (!new_block->host) {
        g_free(new_block);
        return NULL;
    }

    ram_block_add(new_block, &local_err);
    if (local_err) {
        g_free(new_block);
        error_propagate(errp, local_err);
        return NULL;
    }
    return new_block;
}
RAMBlock *qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
                                  void (*resized)(const char*,
                                                  uint64_t length,
                                                  void *host),
                                  void *host, bool resizeable,
                                  MemoryRegion *mr, Error **errp)
{
    RAMBlock *new_block;
    Error *local_err = NULL;

    size = HOST_PAGE_ALIGN(size);
    max_size = HOST_PAGE_ALIGN(max_size);
    new_block = g_malloc0(sizeof(*new_block));
    new_block->mr = mr;
    new_block->resized = resized;
    new_block->used_length = size;
    new_block->max_length = max_size;
    assert(max_size >= size);
    new_block->fd = -1;
    new_block->host = host;
    if (host) {
        new_block->flags |= RAM_PREALLOC;
    }
    if (resizeable) {
        new_block->flags |= RAM_RESIZEABLE;
    }
    ram_block_add(new_block, &local_err);
    if (local_err) {
        g_free(new_block);
        error_propagate(errp, local_err);
        return NULL;
    }
    return new_block;
}

RAMBlock *qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                  MemoryRegion *mr, Error **errp)
{
    return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
}

RAMBlock *qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
{
    return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
}

RAMBlock *qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
                                    void (*resized)(const char*,
                                                    uint64_t length,
                                                    void *host),
                                    MemoryRegion *mr, Error **errp)
{
    return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
}
static void reclaim_ramblock(RAMBlock *block)
{
    if (block->flags & RAM_PREALLOC) {
        ;
    } else if (xen_enabled()) {
        xen_invalidate_map_cache_entry(block->host);
    } else if (block->fd >= 0) {
        qemu_ram_munmap(block->host, block->max_length);
        close(block->fd);
    } else {
        qemu_anon_ram_free(block->host, block->max_length);
    }
    g_free(block);
}

void qemu_ram_free(RAMBlock *block)
{
    qemu_mutex_lock_ramlist();
    QLIST_REMOVE_RCU(block, next);
    ram_list.mru_block = NULL;
    /* Write list before version */
    smp_wmb();
    ram_list.version++;
    call_rcu(block, reclaim_ramblock, rcu);
    qemu_mutex_unlock_ramlist();
}
#ifndef _WIN32
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->max_length) {
            vaddr = ramblock_ptr(block, offset);
            if (block->flags & RAM_PREALLOC) {
                ;
            } else if (xen_enabled()) {
                abort();
            } else {
                flags = MAP_FIXED;
                if (block->fd >= 0) {
                    flags |= (block->flags & RAM_SHARED ?
                              MAP_SHARED : MAP_PRIVATE);
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, block->fd, offset);
                } else {
                    /*
                     * Remap needs to match alloc.  Accelerators that
                     * set phys_mem_alloc never remap.  If they did,
                     * we'd need a remap hook here.
                     */
                    assert(phys_mem_alloc == qemu_anon_ram_alloc);

                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                memory_try_enable_merging(vaddr, length);
                qemu_ram_setup_dump(vaddr, length);
            }
        }
    }
}
#endif /* !_WIN32 */
int qemu_get_ram_fd(ram_addr_t addr)
{
    RAMBlock *block;
    int fd;

    rcu_read_lock();
    block = qemu_get_ram_block(addr);
    fd = block->fd;
    rcu_read_unlock();
    return fd;
}

void qemu_set_ram_fd(ram_addr_t addr, int fd)
{
    RAMBlock *block;

    rcu_read_lock();
    block = qemu_get_ram_block(addr);
    block->fd = fd;
    rcu_read_unlock();
}

void *qemu_get_ram_block_host_ptr(ram_addr_t addr)
{
    RAMBlock *block;
    void *ptr;

    rcu_read_lock();
    block = qemu_get_ram_block(addr);
    ptr = ramblock_ptr(block, 0);
    rcu_read_unlock();
    return ptr;
}
/* Return a host pointer to ram allocated with qemu_ram_alloc.
 * This should not be used for general purpose DMA.  Use address_space_map
 * or address_space_rw instead. For local memory (e.g. video ram) that the
 * device owns, use memory_region_get_ram_ptr.
 *
 * Called within RCU critical section.
 */
void *qemu_get_ram_ptr(RAMBlock *ram_block, ram_addr_t addr)
{
    RAMBlock *block = ram_block;

    if (block == NULL) {
        block = qemu_get_ram_block(addr);
    }

    if (xen_enabled() && block->host == NULL) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map until the end of the page.
         */
        if (block->offset == 0) {
            return xen_map_cache(addr, 0, 0);
        }

        block->host = xen_map_cache(block->offset, block->max_length, 1);
    }
    return ramblock_ptr(block, addr - block->offset);
}

/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
 * but takes a size argument.
 *
 * Called within RCU critical section.
 */
static void *qemu_ram_ptr_length(RAMBlock *ram_block, ram_addr_t addr,
                                 hwaddr *size)
{
    RAMBlock *block = ram_block;
    ram_addr_t offset_inside_block;

    if (block == NULL) {
        block = qemu_get_ram_block(addr);
    }
    offset_inside_block = addr - block->offset;
    *size = MIN(*size, block->max_length - offset_inside_block);

    if (xen_enabled() && block->host == NULL) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map the requested area.
         */
        if (block->offset == 0) {
            return xen_map_cache(addr, *size, 1);
        }

        block->host = xen_map_cache(block->offset, block->max_length, 1);
    }

    return ramblock_ptr(block, offset_inside_block);
}
/*
 * Translates a host ptr back to a RAMBlock, a ram_addr and an offset
 * in that RAMBlock.
 *
 * ptr: Host pointer to look up
 * round_offset: If true round the result offset down to a page boundary
 * *ram_addr: set to result ram_addr
 * *offset: set to result offset within the RAMBlock
 *
 * Returns: RAMBlock (or NULL if not found)
 *
 * By the time this function returns, the returned pointer is not protected
 * by RCU anymore.  If the caller is not within an RCU critical section and
 * does not hold the iothread lock, it must have other means of protecting the
 * pointer, such as a reference to the region that includes the incoming
 * ram_addr_t.
 */
RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
                                   ram_addr_t *ram_addr,
                                   ram_addr_t *offset)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_enabled()) {
        rcu_read_lock();
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
        block = qemu_get_ram_block(*ram_addr);
        if (block) {
            *offset = (host - block->host);
        }
        rcu_read_unlock();
        return block;
    }

    rcu_read_lock();
    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && block->host && host - block->host < block->max_length) {
        goto found;
    }

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        /* This case appears when the block is not mapped. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->max_length) {
            goto found;
        }
    }

    rcu_read_unlock();
    return NULL;

found:
    *offset = (host - block->host);
    if (round_offset) {
        *offset &= TARGET_PAGE_MASK;
    }
    *ram_addr = block->offset + *offset;
    rcu_read_unlock();
    return block;
}
/*
 * Finds the named RAMBlock
 *
 * name: The name of RAMBlock to find
 *
 * Returns: RAMBlock (or NULL if not found)
 */
RAMBlock *qemu_ram_block_by_name(const char *name)
{
    RAMBlock *block;

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (!strcmp(name, block->idstr)) {
            return block;
        }
    }

    return NULL;
}

/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    ram_addr_t offset; /* Not used */

    block = qemu_ram_block_from_host(ptr, false, ram_addr, &offset);

    if (!block) {
        return NULL;
    }

    return block->mr;
}
/* Called within RCU critical section.  */
static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
                               uint64_t val, unsigned size)
{
    if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
        tb_invalidate_phys_page_fast(ram_addr, size);
    }
    switch (size) {
    case 1:
        stb_p(qemu_get_ram_ptr(NULL, ram_addr), val);
        break;
    case 2:
        stw_p(qemu_get_ram_ptr(NULL, ram_addr), val);
        break;
    case 4:
        stl_p(qemu_get_ram_ptr(NULL, ram_addr), val);
        break;
    default:
        abort();
    }
    /* Set both VGA and migration bits for simplicity and to remove
     * the notdirty callback faster.
     */
    cpu_physical_memory_set_dirty_range(ram_addr, size,
                                        DIRTY_CLIENTS_NOCODE);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (!cpu_physical_memory_is_clean(ram_addr)) {
        tlb_set_dirty(current_cpu, current_cpu->mem_io_vaddr);
    }
}

static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
                                 unsigned size, bool is_write)
{
    return is_write;
}

static const MemoryRegionOps notdirty_mem_ops = {
    .write = notdirty_mem_write,
    .valid.accepts = notdirty_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
{
    CPUState *cpu = current_cpu;
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUArchState *env = cpu->env_ptr;
    target_ulong pc, cs_base;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (cpu->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, len)
            && (wp->flags & flags)) {
            if (flags == BP_MEM_READ) {
                wp->flags |= BP_WATCHPOINT_HIT_READ;
            } else {
                wp->flags |= BP_WATCHPOINT_HIT_WRITE;
            }
            wp->hitaddr = vaddr;
            wp->hitattrs = attrs;
            if (!cpu->watchpoint_hit) {
                if (wp->flags & BP_CPU &&
                    !cc->debug_check_watchpoint(cpu, wp)) {
                    wp->flags &= ~BP_WATCHPOINT_HIT;
                    continue;
                }
                cpu->watchpoint_hit = wp;
                tb_check_watchpoint(cpu);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    cpu->exception_index = EXCP_DEBUG;
                    cpu_loop_exit(cpu);
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
                    cpu_resume_from_signal(cpu, NULL);
                }
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   memory routines.  */
static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata,
                                  unsigned size, MemTxAttrs attrs)
{
    MemTxResult res;
    uint64_t data;
    int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
    AddressSpace *as = current_cpu->cpu_ases[asidx].as;

    check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ);
    switch (size) {
    case 1:
        data = address_space_ldub(as, addr, attrs, &res);
        break;
    case 2:
        data = address_space_lduw(as, addr, attrs, &res);
        break;
    case 4:
        data = address_space_ldl(as, addr, attrs, &res);
        break;
    default:
        abort();
    }
    *pdata = data;
    return res;
}

static MemTxResult watch_mem_write(void *opaque, hwaddr addr,
                                   uint64_t val, unsigned size,
                                   MemTxAttrs attrs)
{
    MemTxResult res;
    int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
    AddressSpace *as = current_cpu->cpu_ases[asidx].as;

    check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE);
    switch (size) {
    case 1:
        address_space_stb(as, addr, val, attrs, &res);
        break;
    case 2:
        address_space_stw(as, addr, val, attrs, &res);
        break;
    case 4:
        address_space_stl(as, addr, val, attrs, &res);
        break;
    default:
        abort();
    }
    return res;
}

static const MemoryRegionOps watch_mem_ops = {
    .read_with_attrs = watch_mem_read,
    .write_with_attrs = watch_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
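
/* Subpage containers cover pages that are shared by more than one
 * MemoryRegionSection.  Reads and writes are bounced through a small byte
 * buffer and re-dispatched into the owning address space at
 * subpage->base + addr.
 */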
static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
                                unsigned len, MemTxAttrs attrs)
{
    subpage_t *subpage = opaque;
    uint8_t buf[8];
    MemTxResult res;

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
           subpage, len, addr);
#endif
    res = address_space_read(subpage->as, addr + subpage->base,
                             attrs, buf, len);
    if (res) {
        return res;
    }
    switch (len) {
    case 1:
        *data = ldub_p(buf);
        return MEMTX_OK;
    case 2:
        *data = lduw_p(buf);
        return MEMTX_OK;
    case 4:
        *data = ldl_p(buf);
        return MEMTX_OK;
    case 8:
        *data = ldq_p(buf);
        return MEMTX_OK;
    default:
        abort();
    }
}

static MemTxResult subpage_write(void *opaque, hwaddr addr,
                                 uint64_t value, unsigned len, MemTxAttrs attrs)
{
    subpage_t *subpage = opaque;
    uint8_t buf[8];

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx
           " value %"PRIx64"\n",
           __func__, subpage, len, addr, value);
#endif
    switch (len) {
    case 1:
        stb_p(buf, value);
        break;
    case 2:
        stw_p(buf, value);
        break;
    case 4:
        stl_p(buf, value);
        break;
    case 8:
        stq_p(buf, value);
        break;
    default:
        abort();
    }
    return address_space_write(subpage->as, addr + subpage->base,
                               attrs, buf, len);
}
static bool subpage_accepts(void *opaque, hwaddr addr,
                            unsigned len, bool is_write)
{
    subpage_t *subpage = opaque;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
           __func__, subpage, is_write ? 'w' : 'r', len, addr);
#endif

    return address_space_access_valid(subpage->as, addr + subpage->base,
                                      len, is_write);
}

static const MemoryRegionOps subpage_ops = {
    .read_with_attrs = subpage_read,
    .write_with_attrs = subpage_write,
    .impl.min_access_size = 1,
    .impl.max_access_size = 8,
    .valid.min_access_size = 1,
    .valid.max_access_size = 8,
    .valid.accepts = subpage_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
           __func__, mmio, start, end, idx, eidx, section);
#endif
    for (; idx <= eidx; idx++) {
        mmio->sub_section[idx] = section;
    }

    return 0;
}
static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
{
    subpage_t *mmio;

    mmio = g_malloc0(sizeof(subpage_t));

    mmio->as = as;
    mmio->base = base;
    memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
                          NULL, TARGET_PAGE_SIZE);
    mmio->iomem.subpage = true;
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
           mmio, base, TARGET_PAGE_SIZE);
#endif
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);

    return mmio;
}

static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
                              MemoryRegion *mr)
{
    assert(as);
    MemoryRegionSection section = {
        .address_space = as,
        .mr = mr,
        .offset_within_address_space = 0,
        .offset_within_region = 0,
        .size = int128_2_64(),
    };

    return phys_section_add(map, &section);
}
MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index, MemTxAttrs attrs)
{
    int asidx = cpu_asidx_from_attrs(cpu, attrs);
    CPUAddressSpace *cpuas = &cpu->cpu_ases[asidx];
    AddressSpaceDispatch *d = atomic_rcu_read(&cpuas->memory_dispatch);
    MemoryRegionSection *sections = d->map.sections;

    return sections[index & ~TARGET_PAGE_MASK].mr;
}

static void io_mem_init(void)
{
    memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
    memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
                          NULL, UINT64_MAX);
    memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
                          NULL, UINT64_MAX);
    memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
                          NULL, UINT64_MAX);
}
static void mem_begin(MemoryListener *listener)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
    uint16_t n;

    n = dummy_section(&d->map, as, &io_mem_unassigned);
    assert(n == PHYS_SECTION_UNASSIGNED);
    n = dummy_section(&d->map, as, &io_mem_notdirty);
    assert(n == PHYS_SECTION_NOTDIRTY);
    n = dummy_section(&d->map, as, &io_mem_rom);
    assert(n == PHYS_SECTION_ROM);
    n = dummy_section(&d->map, as, &io_mem_watch);
    assert(n == PHYS_SECTION_WATCH);

    d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
    d->as = as;
    as->next_dispatch = d;
}

static void address_space_dispatch_free(AddressSpaceDispatch *d)
{
    phys_sections_free(&d->map);
    g_free(d);
}

static void mem_commit(MemoryListener *listener)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *cur = as->dispatch;
    AddressSpaceDispatch *next = as->next_dispatch;

    phys_page_compact_all(next, next->map.nodes_nb);

    atomic_rcu_set(&as->dispatch, next);
    if (cur) {
        call_rcu(cur, address_space_dispatch_free, rcu);
    }
}
static void tcg_commit(MemoryListener *listener)
{
    CPUAddressSpace *cpuas;
    AddressSpaceDispatch *d;

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener);
    cpu_reloading_memory_map();
    /* The CPU and TLB are protected by the iothread lock.
     * We reload the dispatch pointer now because cpu_reloading_memory_map()
     * may have split the RCU critical section.
     */
    d = atomic_rcu_read(&cpuas->as->dispatch);
    cpuas->memory_dispatch = d;
    tlb_flush(cpuas->cpu, 1);
}

void address_space_init_dispatch(AddressSpace *as)
{
    as->dispatch = NULL;
    as->dispatch_listener = (MemoryListener) {
        .begin = mem_begin,
        .commit = mem_commit,
        .region_add = mem_add,
        .region_nop = mem_add,
    };
    memory_listener_register(&as->dispatch_listener, as);
}

void address_space_unregister(AddressSpace *as)
{
    memory_listener_unregister(&as->dispatch_listener);
}
void address_space_destroy_dispatch(AddressSpace *as)
{
    AddressSpaceDispatch *d = as->dispatch;

    atomic_rcu_set(&as->dispatch, NULL);
    if (d) {
        call_rcu(d, address_space_dispatch_free, rcu);
    }
}

static void memory_map_init(void)
{
    system_memory = g_malloc(sizeof(*system_memory));

    memory_region_init(system_memory, NULL, "system", UINT64_MAX);
    address_space_init(&address_space_memory, system_memory, "memory");

    system_io = g_malloc(sizeof(*system_io));
    memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
                          65536);
    address_space_init(&address_space_io, system_io, "I/O");
}

MemoryRegion *get_system_memory(void)
{
    return system_memory;
}

MemoryRegion *get_system_io(void)
{
    return system_io;
}

#endif /* !defined(CONFIG_USER_ONLY) */
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
#else
static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
                                     hwaddr length)
{
    uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
    /* No early return if dirty_log_mask is or becomes 0, because
     * cpu_physical_memory_set_dirty_range will still call
     * xen_modified_memory.
     */
    if (dirty_log_mask) {
        dirty_log_mask =
            cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
    }
    if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
        tb_invalidate_phys_range(addr, addr + length);
        dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
    }
    cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
}
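
/*
 * The dirty log mask combines independent clients: DIRTY_MEMORY_VGA for
 * display updates, DIRTY_MEMORY_MIGRATION for live migration, and
 * DIRTY_MEMORY_CODE for TCG's self-modifying-code detection.  The CODE bit
 * is handled above by invalidating translated blocks; the remaining bits
 * are simply recorded in the dirty bitmaps.
 */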
static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
{
    unsigned access_size_max = mr->ops->valid.max_access_size;

    /* Regions are assumed to support 1-4 byte accesses unless
       otherwise specified.  */
    if (access_size_max == 0) {
        access_size_max = 4;
    }

    /* Bound the maximum access by the alignment of the address.  */
    if (!mr->ops->impl.unaligned) {
        unsigned align_size_max = addr & -addr;
        if (align_size_max != 0 && align_size_max < access_size_max) {
            access_size_max = align_size_max;
        }
    }

    /* Don't attempt accesses larger than the maximum.  */
    if (l > access_size_max) {
        l = access_size_max;
    }
    l = pow2floor(l);

    return l;
}
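
/*
 * Worked example: for an unaligned-intolerant region, an l = 4 byte access
 * at addr = 0x1006 is clamped to 2 bytes, because addr & -addr == 2 is the
 * largest power of two dividing the address; the caller then retries the
 * remaining bytes at 0x1008 with a full 4-byte access.
 */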
static bool prepare_mmio_access(MemoryRegion *mr)
{
    bool unlocked = !qemu_mutex_iothread_locked();
    bool release_lock = false;

    if (unlocked && mr->global_locking) {
        qemu_mutex_lock_iothread();
        unlocked = false;
        release_lock = true;
    }
    if (mr->flush_coalesced_mmio) {
        if (unlocked) {
            qemu_mutex_lock_iothread();
        }
        qemu_flush_coalesced_mmio_buffer();
        if (unlocked) {
            qemu_mutex_unlock_iothread();
        }
    }

    return release_lock;
}
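
/*
 * MMIO dispatch may run outside the big QEMU lock when a device marks its
 * region as not requiring it; prepare_mmio_access() takes the lock only for
 * regions that still depend on it (mr->global_locking) and tells the caller
 * whether it must drop the lock again once the access has been dispatched.
 */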
/* Called within RCU critical section.  */
static MemTxResult address_space_write_continue(AddressSpace *as, hwaddr addr,
                                                MemTxAttrs attrs,
                                                const uint8_t *buf,
                                                int len, hwaddr addr1,
                                                hwaddr l, MemoryRegion *mr)
{
    uint8_t *ptr;
    uint64_t val;
    MemTxResult result = MEMTX_OK;
    bool release_lock = false;

    for (;;) {
        if (!memory_access_is_direct(mr, true)) {
            release_lock |= prepare_mmio_access(mr);
            l = memory_access_size(mr, l, addr1);
            /* XXX: could force current_cpu to NULL to avoid
               potential bugs */
            switch (l) {
            case 8:
                /* 64 bit write access */
                val = ldq_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 8,
                                                       attrs);
                break;
            case 4:
                /* 32 bit write access */
                val = ldl_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 4,
                                                       attrs);
                break;
            case 2:
                /* 16 bit write access */
                val = lduw_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 2,
                                                       attrs);
                break;
            case 1:
                /* 8 bit write access */
                val = ldub_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 1,
                                                       attrs);
                break;
            default:
                abort();
            }
        } else {
            addr1 += memory_region_get_ram_addr(mr);
            /* RAM case */
            ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
            memcpy(ptr, buf, l);
            invalidate_and_set_dirty(mr, addr1, l);
        }

        if (release_lock) {
            qemu_mutex_unlock_iothread();
            release_lock = false;
        }

        len -= l;
        buf += l;
        addr += l;

        if (!len) {
            break;
        }

        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);
    }

    return result;
}
MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
                                const uint8_t *buf, int len)
{
    hwaddr l;
    hwaddr addr1;
    MemoryRegion *mr;
    MemTxResult result = MEMTX_OK;

    if (len > 0) {
        rcu_read_lock();
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);
        result = address_space_write_continue(as, addr, attrs, buf, len,
                                              addr1, l, mr);
        rcu_read_unlock();
    }

    return result;
}
/* Called within RCU critical section.  */
MemTxResult address_space_read_continue(AddressSpace *as, hwaddr addr,
                                        MemTxAttrs attrs, uint8_t *buf,
                                        int len, hwaddr addr1, hwaddr l,
                                        MemoryRegion *mr)
{
    uint8_t *ptr;
    uint64_t val;
    MemTxResult result = MEMTX_OK;
    bool release_lock = false;

    for (;;) {
        if (!memory_access_is_direct(mr, false)) {
            /* I/O case */
            release_lock |= prepare_mmio_access(mr);
            l = memory_access_size(mr, l, addr1);
            switch (l) {
            case 8:
                /* 64 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 8,
                                                      attrs);
                stq_p(buf, val);
                break;
            case 4:
                /* 32 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 4,
                                                      attrs);
                stl_p(buf, val);
                break;
            case 2:
                /* 16 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 2,
                                                      attrs);
                stw_p(buf, val);
                break;
            case 1:
                /* 8 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 1,
                                                      attrs);
                stb_p(buf, val);
                break;
            default:
                abort();
            }
        } else {
            /* RAM case */
            ptr = qemu_get_ram_ptr(mr->ram_block,
                                   memory_region_get_ram_addr(mr) + addr1);
            memcpy(buf, ptr, l);
        }

        if (release_lock) {
            qemu_mutex_unlock_iothread();
            release_lock = false;
        }

        len -= l;
        buf += l;
        addr += l;

        if (!len) {
            break;
        }

        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, false);
    }

    return result;
}
MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
                                    MemTxAttrs attrs, uint8_t *buf, int len)
{
    hwaddr l;
    hwaddr addr1;
    MemoryRegion *mr;
    MemTxResult result = MEMTX_OK;

    if (len > 0) {
        rcu_read_lock();
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, false);
        result = address_space_read_continue(as, addr, attrs, buf, len,
                                             addr1, l, mr);
        rcu_read_unlock();
    }

    return result;
}
MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
                             uint8_t *buf, int len, bool is_write)
{
    if (is_write) {
        return address_space_write(as, addr, attrs, (uint8_t *)buf, len);
    } else {
        return address_space_read(as, addr, attrs, (uint8_t *)buf, len);
    }
}

void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
                            int len, int is_write)
{
    address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
                     buf, len, is_write);
}
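
/*
 * Illustrative call (a sketch, not code from this file): device code that
 * holds a bus address and wants a checked, attribute-carrying access would
 * do
 *
 *     MemTxResult res = address_space_rw(&address_space_memory, busaddr,
 *                                        MEMTXATTRS_UNSPECIFIED, data,
 *                                        sizeof(data), true);
 *
 * where "busaddr" and "data" are placeholders; cpu_physical_memory_rw()
 * above is the legacy wrapper that discards the MemTxResult.
 */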
enum write_rom_type {
    WRITE_DATA,
    FLUSH_CACHE,
};

static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
    hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
{
    hwaddr l;
    uint8_t *ptr;
    hwaddr addr1;
    MemoryRegion *mr;

    rcu_read_lock();
    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);

        if (!(memory_region_is_ram(mr) ||
              memory_region_is_romd(mr))) {
            l = memory_access_size(mr, l, addr1);
        } else {
            addr1 += memory_region_get_ram_addr(mr);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
            switch (type) {
            case WRITE_DATA:
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(mr, addr1, l);
                break;
            case FLUSH_CACHE:
                flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
                break;
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
    rcu_read_unlock();
}

/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
                                   const uint8_t *buf, int len)
{
    cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
}
void cpu_flush_icache_range(hwaddr start, int len)
{
    /*
     * This function should do the same thing as an icache flush that was
     * triggered from within the guest. For TCG we are always cache coherent,
     * so there is no need to flush anything. For KVM / Xen we need to flush
     * the host's instruction cache at least.
     */
    if (tcg_enabled()) {
        return;
    }

    cpu_physical_memory_write_rom_internal(&address_space_memory,
                                           start, NULL, len, FLUSH_CACHE);
}
typedef struct {
    MemoryRegion *mr;
    void *buffer;
    hwaddr addr;
    hwaddr len;
    bool in_use;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    QEMUBH *bh;
    QLIST_ENTRY(MapClient) link;
} MapClient;

QemuMutex map_client_list_lock;
static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);
static void cpu_unregister_map_client_do(MapClient *client)
{
    QLIST_REMOVE(client, link);
    g_free(client);
}

static void cpu_notify_map_clients_locked(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        qemu_bh_schedule(client->bh);
        cpu_unregister_map_client_do(client);
    }
}

void cpu_register_map_client(QEMUBH *bh)
{
    MapClient *client = g_malloc(sizeof(*client));

    qemu_mutex_lock(&map_client_list_lock);
    client->bh = bh;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    if (!atomic_read(&bounce.in_use)) {
        cpu_notify_map_clients_locked();
    }
    qemu_mutex_unlock(&map_client_list_lock);
}
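
/*
 * Map clients implement the retry protocol for the single bounce buffer:
 * when address_space_map() fails because bounce.in_use is already set, the
 * caller registers a bottom half here and is rescheduled by
 * cpu_notify_map_clients() as soon as the buffer is released in
 * address_space_unmap().
 */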
void cpu_exec_init_all(void)
{
    qemu_mutex_init(&ram_list.mutex);
    io_mem_init();
    memory_map_init();
    qemu_mutex_init(&map_client_list_lock);
}

void cpu_unregister_map_client(QEMUBH *bh)
{
    MapClient *client;

    qemu_mutex_lock(&map_client_list_lock);
    QLIST_FOREACH(client, &map_client_list, link) {
        if (client->bh == bh) {
            cpu_unregister_map_client_do(client);
            break;
        }
    }
    qemu_mutex_unlock(&map_client_list_lock);
}

static void cpu_notify_map_clients(void)
{
    qemu_mutex_lock(&map_client_list_lock);
    cpu_notify_map_clients_locked();
    qemu_mutex_unlock(&map_client_list_lock);
}
bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
{
    MemoryRegion *mr;
    hwaddr l, xlat;

    rcu_read_lock();
    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (!memory_access_is_direct(mr, is_write)) {
            l = memory_access_size(mr, l, addr);
            if (!memory_region_access_valid(mr, xlat, l, is_write)) {
                /* release the RCU read lock on the early-exit path too */
                rcu_read_unlock();
                return false;
            }
        }

        len -= l;
        addr += l;
    }
    rcu_read_unlock();
    return true;
}
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *address_space_map(AddressSpace *as,
                        hwaddr addr,
                        hwaddr *plen,
                        bool is_write)
{
    hwaddr len = *plen;
    hwaddr done = 0;
    hwaddr l, xlat, base;
    MemoryRegion *mr, *this_mr;
    ram_addr_t raddr;
    void *ptr;

    if (len == 0) {
        return NULL;
    }

    l = len;
    rcu_read_lock();
    mr = address_space_translate(as, addr, &xlat, &l, is_write);

    if (!memory_access_is_direct(mr, is_write)) {
        if (atomic_xchg(&bounce.in_use, true)) {
            rcu_read_unlock();
            return NULL;
        }
        /* Avoid unbounded allocations */
        l = MIN(l, TARGET_PAGE_SIZE);
        bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
        bounce.addr = addr;
        bounce.len = l;

        memory_region_ref(mr);
        bounce.mr = mr;
        if (!is_write) {
            address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
                               bounce.buffer, l);
        }

        rcu_read_unlock();
        *plen = l;
        return bounce.buffer;
    }

    base = xlat;
    raddr = memory_region_get_ram_addr(mr);

    for (;;) {
        len -= l;
        addr += l;
        done += l;
        if (len == 0) {
            break;
        }

        l = len;
        this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (this_mr != mr || xlat != base + done) {
            break;
        }
    }

    memory_region_ref(mr);
    rcu_read_unlock();
    *plen = done;
    ptr = qemu_ram_ptr_length(mr->ram_block, raddr + base, plen);

    return ptr;
}
/* Unmaps a memory region previously mapped by address_space_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         int is_write, hwaddr access_len)
{
    if (buffer != bounce.buffer) {
        MemoryRegion *mr;
        ram_addr_t addr1;

        mr = qemu_ram_addr_from_host(buffer, &addr1);
        assert(mr != NULL);
        if (is_write) {
            invalidate_and_set_dirty(mr, addr1, access_len);
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        memory_region_unref(mr);
        return;
    }
    if (is_write) {
        address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
                            bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    memory_region_unref(bounce.mr);
    atomic_mb_set(&bounce.in_use, false);
    cpu_notify_map_clients();
}
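
/*
 * Typical map/unmap pattern (an illustrative sketch; "sg_addr", "sg_len"
 * and "bytes_done" are placeholders, not names from this file):
 *
 *     hwaddr plen = sg_len;
 *     void *p = address_space_map(as, sg_addr, &plen, is_write);
 *     if (p) {
 *         ... DMA at most plen bytes through p ...
 *         address_space_unmap(as, p, plen, is_write, bytes_done);
 *     } else {
 *         ... fall back or retry later via cpu_register_map_client() ...
 *     }
 */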
void *cpu_physical_memory_map(hwaddr addr,
                              hwaddr *plen,
                              int is_write)
{
    return address_space_map(&address_space_memory, addr, plen, is_write);
}

void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                               int is_write, hwaddr access_len)
{
    return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
}
/* warning: addr must be aligned */
static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
                                                  MemTxAttrs attrs,
                                                  MemTxResult *result,
                                                  enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, false);
    if (l < 4 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(mr->ram_block,
                               (memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}

uint32_t address_space_ldl(AddressSpace *as, hwaddr addr,
                           MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_NATIVE_ENDIAN);
}

uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_LITTLE_ENDIAN);
}

uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_BIG_ENDIAN);
}

uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}
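
/*
 * The *_le/*_be variants force the endianness of the bus access, while the
 * plain address_space_ldl()/ldl_phys() use DEVICE_NATIVE_ENDIAN, i.e. the
 * target's natural byte order.  The same naming convention applies to the
 * ldq/lduw/stl/stw families below.
 */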
/* warning: addr must be aligned */
static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
                                                  MemTxAttrs attrs,
                                                  MemTxResult *result,
                                                  enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 8;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 false);
    if (l < 8 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap64(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap64(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(mr->ram_block,
                               (memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}

uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
                           MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_NATIVE_ENDIAN);
}

uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_LITTLE_ENDIAN);
}

uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_BIG_ENDIAN);
}

uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}
uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
                            MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t val;
    MemTxResult r;

    r = address_space_rw(as, addr, attrs, &val, 1, 0);
    if (result) {
        *result = r;
    }
    return val;
}

uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}
/* warning: addr must be aligned */
static inline uint32_t address_space_lduw_internal(AddressSpace *as,
                                                   hwaddr addr,
                                                   MemTxAttrs attrs,
                                                   MemTxResult *result,
                                                   enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 false);
    if (l < 2 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(mr->ram_block,
                               (memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}

uint32_t address_space_lduw(AddressSpace *as, hwaddr addr,
                            MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_NATIVE_ENDIAN);
}

uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
                               MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_LITTLE_ENDIAN);
}

uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
                               MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_BIG_ENDIAN);
}

uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}
/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
                                MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    uint8_t dirty_log_mask;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

        r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
    } else {
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
        stl_p(ptr, val);

        dirty_log_mask = memory_region_get_dirty_log_mask(mr);
        dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
        cpu_physical_memory_set_dirty_range(addr1, 4, dirty_log_mask);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}

void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_notdirty(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
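
/*
 * stl_phys_notdirty() exists for target MMU helpers that update accessed
 * and dirty bits directly in guest page tables: such a write must not flag
 * the page as containing modified code (no TB invalidation) and must not
 * feed the DIRTY_MEMORY_CODE client, which is why it bypasses
 * invalidate_and_set_dirty() above.
 */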
/* warning: addr must be aligned */
static inline void address_space_stl_internal(AddressSpace *as,
                                              hwaddr addr, uint32_t val,
                                              MemTxAttrs attrs,
                                              MemTxResult *result,
                                              enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(mr, addr1, 4);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}

void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stl_internal(as, addr, val, attrs, result,
                               DEVICE_NATIVE_ENDIAN);
}

void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stl_internal(as, addr, val, attrs, result,
                               DEVICE_LITTLE_ENDIAN);
}

void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stl_internal(as, addr, val, attrs, result,
                               DEVICE_BIG_ENDIAN);
}

void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t v = val;
    MemTxResult r;

    r = address_space_rw(as, addr, attrs, &v, 1, 1);
    if (result) {
        *result = r;
    }
}

void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
/* warning: addr must be aligned */
static inline void address_space_stw_internal(AddressSpace *as,
                                              hwaddr addr, uint32_t val,
                                              MemTxAttrs attrs,
                                              MemTxResult *result,
                                              enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, true);
    if (l < 2 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(mr, addr1, 2);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}

void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_NATIVE_ENDIAN);
}

void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_LITTLE_ENDIAN);
}

void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_BIG_ENDIAN);
}

void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;
    val = tswap64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;
    val = cpu_to_le64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;
    val = cpu_to_be64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

    while (len > 0) {
        int asidx;
        MemTxAttrs attrs;

        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_attrs_debug(cpu, page, &attrs);
        asidx = cpu_asidx_from_attrs(cpu, attrs);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write) {
            cpu_physical_memory_write_rom(cpu->cpu_ases[asidx].as,
                                          phys_addr, buf, l);
        } else {
            address_space_rw(cpu->cpu_ases[asidx].as, phys_addr,
                             MEMTXATTRS_UNSPECIFIED,
                             buf, l, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
/*
 * Allows code that needs to deal with migration bitmaps etc to still be built
 * target independent.
 */
size_t qemu_target_page_bits(void)
{
    return TARGET_PAGE_BITS;
}
/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool target_words_bigendian(void);
bool target_words_bigendian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}

#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegion *mr;
    hwaddr l = 1;
    bool res;

    rcu_read_lock();
    mr = address_space_translate(&address_space_memory,
                                 phys_addr, &phys_addr, &l, false);

    res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
    rcu_read_unlock();
    return res;
}
int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
{
    RAMBlock *block;
    int ret = 0;

    rcu_read_lock();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        ret = func(block->idstr, block->host, block->offset,
                   block->used_length, opaque);
        if (ret) {
            break;
        }
    }
    rcu_read_unlock();
    return ret;
}
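
/*
 * Illustrative iterator callback (a sketch; "print_block" is not part of
 * this file):
 *
 *     static int print_block(const char *idstr, void *host, ram_addr_t off,
 *                            ram_addr_t used, void *opaque)
 *     {
 *         printf("%s: host %p offset " RAM_ADDR_FMT " length " RAM_ADDR_FMT "\n",
 *                idstr, host, off, used);
 *         return 0;   // returning non-zero stops the walk
 *     }
 *
 *     qemu_ram_foreach_block(print_block, NULL);
 */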