 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/cutils.h"
#include "exec/exec-all.h"
#include "hw/qdev-core.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/boards.h"
#include "hw/xen/xen.h"
#endif
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#if defined(CONFIG_USER_ONLY)
#include "qemu.h"
#else /* !CONFIG_USER_ONLY */
#include "exec/memory.h"
#include "exec/ioport.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#include "sysemu/xen-mapcache.h"
#endif
#include "exec/cpu-all.h"
#include "qemu/rcu_queue.h"
#include "qemu/main-loop.h"
#include "translate-all.h"
#include "sysemu/replay.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "migration/vmstate.h"

#include "qemu/range.h"
#include "qemu/mmap-alloc.h"
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* ram_list is read under rcu_read_lock()/rcu_read_unlock().  Writes
 * are protected by the ramlist lock.
 */
RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;
/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC   (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED     (1 << 1)

/* Only a portion of RAM (used_length) is actually used, and migrated.
 * This used_length size can change across reboots.
 */
#define RAM_RESIZEABLE (1 << 2)

#endif
struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
__thread CPUState *current_cpu;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;
#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables.  */
#define ADDR_SPACE_BITS 64

#define P_L2_BITS 9
#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)

typedef PhysPageEntry Node[P_L2_SIZE];
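/* Worked example (assuming the common TARGET_PAGE_BITS of 12 and the
 * P_L2_BITS of 9 used above): P_L2_LEVELS = ((64 - 12 - 1) / 9) + 1 = 6,
 * i.e. a six-level radix tree whose nodes each hold 2^9 = 512 entries,
 * enough to map the full 64-bit physical address space one target page
 * at a time.
 */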
typedef struct PhysPageMap {
    struct rcu_head rcu;

    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    struct rcu_head rcu;

    MemoryRegionSection *mru_section;
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;
};
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3
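/* These constants are the fixed indexes of the first entries of every
 * PhysPageMap's sections[] array: mem_begin() below registers one dummy
 * section per constant, in exactly this order, and asserts that the
 * numbering holds.
 */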
static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;
/**
 * CPUAddressSpace: all the information a CPU needs about an AddressSpace
 * @cpu: the CPU whose AddressSpace this is
 * @as: the AddressSpace itself
 * @memory_dispatch: its dispatch pointer (cached, RCU protected)
 * @tcg_as_listener: listener for tracking changes to the AddressSpace
 */
struct CPUAddressSpace {
    CPUState *cpu;
    AddressSpace *as;
    struct AddressSpaceDispatch *memory_dispatch;
    MemoryListener tcg_as_listener;
};
#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    static unsigned alloc_hint = 16;
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, alloc_hint);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
        alloc_hint = map->nodes_nb_alloc;
    }
}
static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
{
    unsigned i;
    uint32_t ret;
    PhysPageEntry e;
    PhysPageEntry *p;

    ret = map->nodes_nb++;
    p = map->nodes[ret];
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);

    e.skip = leaf ? 0 : 1;
    e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
    for (i = 0; i < P_L2_SIZE; ++i) {
        memcpy(&p[i], &e, sizeof(e));
    }
    return ret;
}
static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map, level == 0);
    }
    p = map->nodes[lp->ptr];
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}
static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}
/* Compact a non-leaf page entry. Simply detect that the entry has a single child,
 * and update our entry so we can skip it and go directly to the destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}
static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes);
    }
}
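/* Illustrative example (values assumed, not taken from a real run): if the
 * top three levels of the map each have only one populated child, compaction
 * folds them into the topmost entry by taking the child's ptr and summing the
 * skip fields (e.g. 1 + 1 + 1 = 3), so a lookup jumps straight to the fourth
 * level instead of walking three intermediate nodes.
 */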
static inline bool section_covers_addr(const MemoryRegionSection *section,
                                       hwaddr addr)
{
    /* Memory topology clips a memory region to [0, 2^64); size.hi > 0 means
     * the section must cover the entire address space.
     */
    return section->size.hi ||
           range_covers_byte(section->offset_within_address_space,
                             section->size.lo, addr);
}
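/* Example (hypothetical numbers): a section whose offset_within_address_space
 * is 0x1000 and whose size.lo is 0x2000 covers 0x1000..0x2fff, so this returns
 * true for addr 0x2fff and false for 0x3000.  A non-zero size.hi means the
 * Int128 size exceeds 2^64 and therefore necessarily spans the whole space,
 * hence the early test.
 */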
static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (section_covers_addr(&sections[lp.ptr], addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}
bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}
/* Called from RCU critical section */
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section = atomic_read(&d->mru_section);
    subpage_t *subpage;
    bool update;

    if (section && section != &d->map.sections[PHYS_SECTION_UNASSIGNED] &&
        section_covers_addr(section, addr)) {
        update = false;
    } else {
        section = phys_page_find(d->phys_map, addr, d->map.nodes,
                                 d->map.sections);
        update = true;
    }
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    if (update) {
        atomic_set(&d->mru_section, section);
    }
    return section;
}
/* Called from RCU critical section */
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    MemoryRegion *mr;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    mr = section->mr;

    /* MMIO registers can be expected to perform full-width accesses based only
     * on their address, without considering adjacent registers that could
     * decode to completely different MemoryRegions.  When such registers
     * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
     * regions overlap wildly.  For this reason we cannot clamp the accesses
     * here.
     *
     * If the length is small (as is the case for address_space_ldl/stl),
     * everything works fine.  If the incoming length is large, however,
     * the caller really has to do the clamping through memory_access_size.
     */
    if (memory_region_is_ram(mr)) {
        diff = int128_sub(section->size, int128_make64(addr));
        *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    }
    return section;
}
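/* Worked example (numbers assumed for illustration): for a RAM section of
 * size 0x1000 and an access starting 0x10 bytes into it with *plen = 0x2000,
 * diff = 0x1000 - 0x10 = 0xff0, so *plen is clamped to 0xff0 and the caller
 * continues the access in the following section.  MMIO lengths are left
 * unclamped here for the reason spelled out in the comment above.
 */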
/* Called from RCU critical section */
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;

    for (;;) {
        AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
        section = address_space_translate_internal(d, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        *plen = MIN(page, *plen);
    }

    *xlat = addr;
    return mr;
}
/* Called from RCU critical section */
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen)
{
    MemoryRegionSection *section;
    AddressSpaceDispatch *d = cpu->cpu_ases[asidx].memory_dispatch;

    section = address_space_translate_internal(d, addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif
#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;

    return 0;
}
static int cpu_common_pre_load(void *opaque)
{
    CPUState *cpu = opaque;

    cpu->exception_index = -1;

    return 0;
}
static bool cpu_common_exception_index_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return tcg_enabled() && cpu->exception_index != -1;
}
static const VMStateDescription vmstate_cpu_common_exception_index = {
    .name = "cpu_common/exception_index",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_exception_index_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(exception_index, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
static bool cpu_common_crash_occurred_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return cpu->crash_occurred;
}

static const VMStateDescription vmstate_cpu_common_crash_occurred = {
    .name = "cpu_common/crash_occurred",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_crash_occurred_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(crash_occurred, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_cpu_common_exception_index,
        &vmstate_cpu_common_crash_occurred,
        NULL
    }
};
#endif

CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}
#if !defined(CONFIG_USER_ONLY)
void cpu_address_space_init(CPUState *cpu, AddressSpace *as, int asidx)
{
    CPUAddressSpace *newas;

    /* Target code should have set num_ases before calling us */
    assert(asidx < cpu->num_ases);

    if (asidx == 0) {
        /* address space 0 gets the convenience alias */
        cpu->as = as;
    }

    /* KVM cannot currently support multiple address spaces. */
    assert(asidx == 0 || !kvm_enabled());

    if (!cpu->cpu_ases) {
        cpu->cpu_ases = g_new0(CPUAddressSpace, cpu->num_ases);
    }

    newas = &cpu->cpu_ases[asidx];
    newas->cpu = cpu;
    newas->as = as;
    if (tcg_enabled()) {
        newas->tcg_as_listener.commit = tcg_commit;
        memory_listener_register(&newas->tcg_as_listener, as);
    }
}
*cpu_get_address_space(CPUState
*cpu
, int asidx
)
594 /* Return the AddressSpace corresponding to the specified index */
595 return cpu
->cpu_ases
[asidx
].as
;
void cpu_exec_exit(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    cpu_list_remove(cpu);

    if (cc->vmsd != NULL) {
        vmstate_unregister(NULL, cc->vmsd, cpu);
    }
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_unregister(NULL, &vmstate_cpu_common, cpu);
    }
}
void cpu_exec_init(CPUState *cpu, Error **errp)
{
    CPUClass *cc ATTRIBUTE_UNUSED = CPU_GET_CLASS(cpu);
    Error *local_err ATTRIBUTE_UNUSED = NULL;

#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();

    /* This is a softmmu CPU object, so create a property for it
     * so users can wire up its memory. (This can't go in qom/cpu.c
     * because that file is compiled only once for both user-mode
     * and system builds.) The default if no link is set up is to use
     * the system address space.
     */
    object_property_add_link(OBJECT(cpu), "memory", TYPE_MEMORY_REGION,
                             (Object **)&cpu->memory,
                             qdev_prop_allow_set_link_before_realize,
                             OBJ_PROP_LINK_UNREF_ON_RELEASE,
                             &error_abort);
    cpu->memory = system_memory;
    object_ref(OBJECT(cpu->memory));
#endif

#ifndef CONFIG_USER_ONLY
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu->cpu_index, &vmstate_cpu_common, cpu);
    }
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu->cpu_index, cc->vmsd, cpu);
    }
#endif
}
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    MemTxAttrs attrs;
    hwaddr phys = cpu_get_phys_page_attrs_debug(cpu, pc, &attrs);
    int asidx = cpu_asidx_from_attrs(cpu, attrs);

    tb_invalidate_phys_addr(cpu->cpu_ases[asidx].as,
                            phys | (pc & ~TARGET_PAGE_MASK));
}
#endif
#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
}

int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    return -ENOSYS;
}

void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
}

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    CPUWatchpoint *wp;

    /* forbid ranges which are empty or run off the end of the address space */
    if (len == 0 || (addr + len - 1) < addr) {
        error_report("tried to set invalid watchpoint at %"
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len = len;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
    }

    tlb_flush_page(cpu, addr);

    if (watchpoint) {
        *watchpoint = wp;
    }
    return 0;
}
/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (addr == wp->vaddr && len == wp->len
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
            return 0;
        }
    }
    return -ENOENT;
}
/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);

    tlb_flush_page(cpu, watchpoint->vaddr);

    g_free(watchpoint);
}
/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
        }
    }
}
/* Return true if this watchpoint address matches the specified
 * access (ie the address range covered by the watchpoint overlaps
 * partially or completely with the address range covered by the
 * access).
 */
static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
                                                  vaddr addr,
                                                  vaddr len)
{
    /* We know the lengths are non-zero, but a little caution is
     * required to avoid errors in the case where the range ends
     * exactly at the top of the address space and so addr + len
     * wraps round to zero.
     */
    vaddr wpend = wp->vaddr + wp->len - 1;
    vaddr addrend = addr + len - 1;

    return !(addr > wpend || wp->vaddr > addrend);
}
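/* Worked example (illustrative numbers, imagining a 32-bit vaddr): a
 * watchpoint at 0xfffffffc with len 4 gives wpend = 0xffffffff, whereas
 * wp->vaddr + wp->len would wrap to 0; comparing inclusive range ends
 * therefore lets an access at 0xfffffffe match correctly instead of being
 * lost to the overflow.
 */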
/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
{
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
    }

    breakpoint_invalidate(cpu, pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
}
/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
{
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
            return 0;
        }
    }
    return -ENOENT;
}
/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
{
    QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);

    breakpoint_invalidate(cpu, breakpoint->pc);

    g_free(breakpoint);
}
/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
{
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
        }
    }
}
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(cpu);
        }
    }
}
void cpu_abort(CPUState *cpu, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_separate()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}
#if !defined(CONFIG_USER_ONLY)
/* Called from RCU critical section */
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && addr - block->offset < block->max_length) {
        return block;
    }
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->max_length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    /* It is safe to write mru_block outside the iothread lock.  This
     * is what happens:
     *
     *     mru_block = xxx
     *     rcu_read_unlock()
     *                                        xxx removed from list
     *                  rcu_read_lock()
     *                  read mru_block
     *                                        mru_block = NULL;
     *                                        call_rcu(reclaim_ramblock, xxx);
     *                  rcu_read_unlock()
     *
     * atomic_rcu_set is not needed here.  The block was already published
     * when it was placed into the list.  Here we're just making an extra
     * copy of the pointer.
     */
    ram_list.mru_block = block;
    return block;
}
static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
    CPUState *cpu;
    ram_addr_t start1;
    RAMBlock *block;
    ram_addr_t end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    rcu_read_lock();
    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
    CPU_FOREACH(cpu) {
        tlb_reset_dirty(cpu, start1, length);
    }
    rcu_read_unlock();
}
/* Note: start and end must be within the same ram block.  */
bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
                                              ram_addr_t length,
                                              unsigned client)
{
    DirtyMemoryBlocks *blocks;
    unsigned long end, page;
    bool dirty = false;

    if (length == 0) {
        return false;
    }

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    rcu_read_lock();

    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);

    while (page < end) {
        unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long num = MIN(end - page, DIRTY_MEMORY_BLOCK_SIZE - offset);

        dirty |= bitmap_test_and_clear_atomic(blocks->blocks[idx],
                                              offset, num);
        page += num;
    }

    rcu_read_unlock();

    if (dirty && tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
    }

    return dirty;
}
/* Called from RCU critical section */
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = memory_region_get_ram_addr(section->mr) + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        AddressSpaceDispatch *d;

        d = atomic_rcu_read(&section->address_space->dispatch);
        iotlb = section - d->map.sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
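/* The returned iotlb value is an encoding rather than a plain address: for
 * RAM it is the page's ram_addr with one of the special section indexes
 * (NOTDIRTY or ROM) OR-ed into the low bits, while for MMIO it is the index
 * of the MemoryRegionSection within d->map.sections.  iotlb_to_region()
 * below recovers the MMIO section by masking with ~TARGET_PAGE_MASK.
 */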
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
                               qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this.  Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
{
    phys_mem_alloc = alloc;
}
static uint16_t phys_section_add(PhysPageMap *map,
                                 MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(map->sections_nb < TARGET_PAGE_SIZE);

    if (map->sections_nb == map->sections_nb_alloc) {
        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
        map->sections = g_renew(MemoryRegionSection, map->sections,
                                map->sections_nb_alloc);
    }
    map->sections[map->sections_nb] = *section;
    memory_region_ref(section->mr);
    return map->sections_nb++;
}
static void phys_section_destroy(MemoryRegion *mr)
{
    bool have_sub_page = mr->subpage;

    memory_region_unref(mr);

    if (have_sub_page) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        object_unref(OBJECT(&subpage->iomem));
        g_free(subpage);
    }
}

static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->sections);
}
static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
                                                   d->map.nodes, d->map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.address_space = d->as;
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&d->map, &subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end,
                     phys_section_add(&d->map, section));
}
static void register_multipage(AddressSpaceDispatch *d,
                               MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(&d->map, section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,
                                                    TARGET_PAGE_BITS));

    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
}
static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = as->next_dispatch;
    MemoryRegionSection now = *section, remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
                        - now.offset_within_address_space;

        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(d, &now);
    } else {
        now.size = int128_zero();
    }
    while (int128_ne(remain.size, now.size)) {
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);
        now = remain;
        if (int128_lt(remain.size, page_size)) {
            register_subpage(d, &now);
        } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
            now.size = page_size;
            register_subpage(d, &now);
        } else {
            now.size = int128_and(now.size, int128_neg(page_size));
            register_multipage(d, &now);
        }
    }
}
void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled()) {
        kvm_flush_coalesced_mmio_buffer();
    }
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}
static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path,
                            Error **errp)
{
    bool unlink_on_error = false;
    char *filename;
    char *sanitized_name;
    char *c;
    void *area = MAP_FAILED;
    int fd = -1;

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        error_setg(errp,
                   "host lacks kvm mmu notifiers, -mem-path unsupported");
        return NULL;
    }

    for (;;) {
        fd = open(path, O_RDWR);
        if (fd >= 0) {
            /* @path names an existing file, use it */
            break;
        }
        if (errno == ENOENT) {
            /* @path names a file that doesn't exist, create it */
            fd = open(path, O_RDWR | O_CREAT | O_EXCL, 0644);
            if (fd >= 0) {
                unlink_on_error = true;
                break;
            }
        } else if (errno == EISDIR) {
            /* @path names a directory, create a file there */
            /* Make name safe to use with mkstemp by replacing '/' with '_'. */
            sanitized_name = g_strdup(memory_region_name(block->mr));
            for (c = sanitized_name; *c != '\0'; c++) {
                if (*c == '/') {
                    *c = '_';
                }
            }

            filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
                                       sanitized_name);
            g_free(sanitized_name);

            fd = mkstemp(filename);
            if (fd >= 0) {
                unlink(filename);
                g_free(filename);
                break;
            }
            g_free(filename);
        }
        if (errno != EEXIST && errno != EINTR) {
            error_setg_errno(errp, errno,
                             "can't open backing store %s for guest RAM",
                             path);
            goto error;
        }
        /*
         * Try again on EINTR and EEXIST.  The latter happens when
         * something else creates the file between our two open().
         */
    }

    block->page_size = qemu_fd_getpagesize(fd);
    block->mr->align = block->page_size;
#if defined(__s390x__)
    if (kvm_enabled()) {
        block->mr->align = MAX(block->mr->align, QEMU_VMALLOC_ALIGN);
    }
#endif

    if (memory < block->page_size) {
        error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
                   "or larger than page size 0x%zx",
                   memory, block->page_size);
        goto error;
    }

    memory = ROUND_UP(memory, block->page_size);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory)) {
        perror("ftruncate");
    }

    area = qemu_ram_mmap(fd, memory, block->mr->align,
                         block->flags & RAM_SHARED);
    if (area == MAP_FAILED) {
        error_setg_errno(errp, errno,
                         "unable to map backing store for guest RAM");
        goto error;
    }

    os_mem_prealloc(fd, area, memory, errp);
    if (errp && *errp) {
        goto error;
    }

    block->fd = fd;
    return area;

error:
    if (area != MAP_FAILED) {
        qemu_ram_munmap(area, memory);
    }
    if (unlink_on_error) {
        unlink(path);
    }
    if (fd != -1) {
        close(fd);
    }
    return NULL;
}
1316 static ram_addr_t
find_ram_offset(ram_addr_t size
)
1318 RAMBlock
*block
, *next_block
;
1319 ram_addr_t offset
= RAM_ADDR_MAX
, mingap
= RAM_ADDR_MAX
;
1321 assert(size
!= 0); /* it would hand out same offset multiple times */
1323 if (QLIST_EMPTY_RCU(&ram_list
.blocks
)) {
1327 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1328 ram_addr_t end
, next
= RAM_ADDR_MAX
;
1330 end
= block
->offset
+ block
->max_length
;
1332 QLIST_FOREACH_RCU(next_block
, &ram_list
.blocks
, next
) {
1333 if (next_block
->offset
>= end
) {
1334 next
= MIN(next
, next_block
->offset
);
1337 if (next
- end
>= size
&& next
- end
< mingap
) {
1339 mingap
= next
- end
;
1343 if (offset
== RAM_ADDR_MAX
) {
1344 fprintf(stderr
, "Failed to find gap of requested size: %" PRIu64
"\n",
1352 ram_addr_t
last_ram_offset(void)
1355 ram_addr_t last
= 0;
1358 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1359 last
= MAX(last
, block
->offset
+ block
->max_length
);
1365 static void qemu_ram_setup_dump(void *addr
, ram_addr_t size
)
1369 /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
1370 if (!machine_dump_guest_core(current_machine
)) {
1371 ret
= qemu_madvise(addr
, size
, QEMU_MADV_DONTDUMP
);
1373 perror("qemu_madvise");
1374 fprintf(stderr
, "madvise doesn't support MADV_DONTDUMP, "
1375 "but dump_guest_core=off specified\n");
1380 const char *qemu_ram_get_idstr(RAMBlock
*rb
)
/* Called with iothread lock held.  */
void qemu_ram_set_idstr(RAMBlock *new_block, const char *name, DeviceState *dev)
{
    RAMBlock *block;

    assert(!new_block->idstr[0]);

    if (dev) {
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    rcu_read_lock();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (block != new_block &&
            !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
    rcu_read_unlock();
}
1415 void qemu_ram_unset_idstr(RAMBlock
*block
)
1417 /* FIXME: arch_init.c assumes that this is not called throughout
1418 * migration. Ignore the problem since hot-unplug during migration
1419 * does not work anyway.
1422 memset(block
->idstr
, 0, sizeof(block
->idstr
));
1426 size_t qemu_ram_pagesize(RAMBlock
*rb
)
1428 return rb
->page_size
;
1431 static int memory_try_enable_merging(void *addr
, size_t len
)
1433 if (!machine_mem_merge(current_machine
)) {
1434 /* disabled by the user */
1438 return qemu_madvise(addr
, len
, QEMU_MADV_MERGEABLE
);
/* Only legal before guest might have detected the memory size: e.g. on
 * incoming migration, or right after reset.
 *
 * As the memory core doesn't know how memory is accessed, it is up to the
 * resize callback to update device state and/or add assertions to detect
 * misuse, if necessary.
 */
int qemu_ram_resize(RAMBlock *block, ram_addr_t newsize, Error **errp)
{
    assert(block);

    newsize = HOST_PAGE_ALIGN(newsize);

    if (block->used_length == newsize) {
        return 0;
    }

    if (!(block->flags & RAM_RESIZEABLE)) {
        error_setg_errno(errp, EINVAL,
                         "Length mismatch: %s: 0x" RAM_ADDR_FMT
                         " in != 0x" RAM_ADDR_FMT, block->idstr,
                         newsize, block->used_length);
        return -EINVAL;
    }

    if (block->max_length < newsize) {
        error_setg_errno(errp, EINVAL,
                         "Length too large: %s: 0x" RAM_ADDR_FMT
                         " > 0x" RAM_ADDR_FMT, block->idstr,
                         newsize, block->max_length);
        return -EINVAL;
    }

    cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
    block->used_length = newsize;
    cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
                                        DIRTY_CLIENTS_ALL);
    memory_region_set_size(block->mr, newsize);
    if (block->resized) {
        block->resized(block->idstr, newsize, block->host);
    }
    return 0;
}
/* Called with ram_list.mutex held */
static void dirty_memory_extend(ram_addr_t old_ram_size,
                                ram_addr_t new_ram_size)
{
    ram_addr_t old_num_blocks = DIV_ROUND_UP(old_ram_size,
                                             DIRTY_MEMORY_BLOCK_SIZE);
    ram_addr_t new_num_blocks = DIV_ROUND_UP(new_ram_size,
                                             DIRTY_MEMORY_BLOCK_SIZE);
    int i;

    /* Only need to extend if block count increased */
    if (new_num_blocks <= old_num_blocks) {
        return;
    }

    for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
        DirtyMemoryBlocks *old_blocks;
        DirtyMemoryBlocks *new_blocks;
        int j;

        old_blocks = atomic_rcu_read(&ram_list.dirty_memory[i]);
        new_blocks = g_malloc(sizeof(*new_blocks) +
                              sizeof(new_blocks->blocks[0]) * new_num_blocks);

        if (old_num_blocks) {
            memcpy(new_blocks->blocks, old_blocks->blocks,
                   old_num_blocks * sizeof(old_blocks->blocks[0]));
        }

        for (j = old_num_blocks; j < new_num_blocks; j++) {
            new_blocks->blocks[j] = bitmap_new(DIRTY_MEMORY_BLOCK_SIZE);
        }

        atomic_rcu_set(&ram_list.dirty_memory[i], new_blocks);

        if (old_blocks) {
            g_free_rcu(old_blocks, rcu);
        }
    }
}
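/* Sizing note (illustrative, exact constants live in ram_addr.h): growing
 * guest RAM only appends new bitmap blocks for the added range; the memcpy
 * above copies the *pointers* to the existing blocks, so they are shared
 * with the old array, and the old array itself is released only after an
 * RCU grace period via g_free_rcu().
 */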
static void ram_block_add(RAMBlock *new_block, Error **errp)
{
    RAMBlock *block;
    RAMBlock *last_block = NULL;
    ram_addr_t old_ram_size, new_ram_size;
    Error *err = NULL;

    old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;

    qemu_mutex_lock_ramlist();
    new_block->offset = find_ram_offset(new_block->max_length);

    if (!new_block->host) {
        if (xen_enabled()) {
            xen_ram_alloc(new_block->offset, new_block->max_length,
                          new_block->mr, &err);
            if (err) {
                error_propagate(errp, err);
                qemu_mutex_unlock_ramlist();
                return;
            }
        } else {
            new_block->host = phys_mem_alloc(new_block->max_length,
                                             &new_block->mr->align);
            if (!new_block->host) {
                error_setg_errno(errp, errno,
                                 "cannot set up guest memory '%s'",
                                 memory_region_name(new_block->mr));
                qemu_mutex_unlock_ramlist();
                return;
            }
            memory_try_enable_merging(new_block->host, new_block->max_length);
        }
    }

    new_ram_size = MAX(old_ram_size,
              (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS);
    if (new_ram_size > old_ram_size) {
        migration_bitmap_extend(old_ram_size, new_ram_size);
        dirty_memory_extend(old_ram_size, new_ram_size);
    }
    /* Keep the list sorted from biggest to smallest block.  Unlike QTAILQ,
     * QLIST (which has an RCU-friendly variant) does not have insertion at
     * tail, so save the last element in last_block.
     */
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        last_block = block;
        if (block->max_length < new_block->max_length) {
            break;
        }
    }
    if (block) {
        QLIST_INSERT_BEFORE_RCU(block, new_block, next);
    } else if (last_block) {
        QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
    } else { /* list is empty */
        QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
    }
    ram_list.mru_block = NULL;

    /* Write list before version */
    smp_wmb();
    ram_list.version++;
    qemu_mutex_unlock_ramlist();

    cpu_physical_memory_set_dirty_range(new_block->offset,
                                        new_block->used_length,
                                        DIRTY_CLIENTS_ALL);

    if (new_block->host) {
        qemu_ram_setup_dump(new_block->host, new_block->max_length);
        qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
        /* MADV_DONTFORK is also needed by KVM in absence of synchronous MMU */
        qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
    }
}
RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
                                   bool share, const char *mem_path,
                                   Error **errp)
{
    RAMBlock *new_block;
    Error *local_err = NULL;

    if (xen_enabled()) {
        error_setg(errp, "-mem-path not supported with Xen");
        return NULL;
    }

    if (phys_mem_alloc != qemu_anon_ram_alloc) {
        /*
         * file_ram_alloc() needs to allocate just like
         * phys_mem_alloc, but we haven't bothered to provide
         * a hook there.
         */
        error_setg(errp,
                   "-mem-path not supported with this accelerator");
        return NULL;
    }

    size = HOST_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));
    new_block->mr = mr;
    new_block->used_length = size;
    new_block->max_length = size;
    new_block->flags = share ? RAM_SHARED : 0;
    new_block->host = file_ram_alloc(new_block, size,
                                     mem_path, errp);
    if (!new_block->host) {
        g_free(new_block);
        return NULL;
    }

    ram_block_add(new_block, &local_err);
    if (local_err) {
        g_free(new_block);
        error_propagate(errp, local_err);
        return NULL;
    }
    return new_block;
}
static
RAMBlock *qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
                                  void (*resized)(const char*,
                                                  uint64_t length,
                                                  void *host),
                                  void *host, bool resizeable,
                                  MemoryRegion *mr, Error **errp)
{
    RAMBlock *new_block;
    Error *local_err = NULL;

    size = HOST_PAGE_ALIGN(size);
    max_size = HOST_PAGE_ALIGN(max_size);
    new_block = g_malloc0(sizeof(*new_block));
    new_block->mr = mr;
    new_block->resized = resized;
    new_block->used_length = size;
    new_block->max_length = max_size;
    assert(max_size >= size);
    new_block->fd = -1;
    new_block->page_size = getpagesize();
    new_block->host = host;
    if (host) {
        new_block->flags |= RAM_PREALLOC;
    }
    if (resizeable) {
        new_block->flags |= RAM_RESIZEABLE;
    }
    ram_block_add(new_block, &local_err);
    if (local_err) {
        g_free(new_block);
        error_propagate(errp, local_err);
        return NULL;
    }
    return new_block;
}
RAMBlock *qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                  MemoryRegion *mr, Error **errp)
{
    return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
}

RAMBlock *qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
{
    return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
}

RAMBlock *qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
                                    void (*resized)(const char*,
                                                    uint64_t length,
                                                    void *host),
                                    MemoryRegion *mr, Error **errp)
{
    return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
}
static void reclaim_ramblock(RAMBlock *block)
{
    if (block->flags & RAM_PREALLOC) {
        ;
    } else if (xen_enabled()) {
        xen_invalidate_map_cache_entry(block->host);
    } else if (block->fd >= 0) {
        qemu_ram_munmap(block->host, block->max_length);
        close(block->fd);
    } else {
        qemu_anon_ram_free(block->host, block->max_length);
    }
    g_free(block);
}

void qemu_ram_free(RAMBlock *block)
{
    if (!block) {
        return;
    }

    qemu_mutex_lock_ramlist();
    QLIST_REMOVE_RCU(block, next);
    ram_list.mru_block = NULL;
    /* Write list before version */
    smp_wmb();
    ram_list.version++;
    call_rcu(block, reclaim_ramblock, rcu);
    qemu_mutex_unlock_ramlist();
}
#ifndef _WIN32
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->max_length) {
            vaddr = ramblock_ptr(block, offset);
            if (block->flags & RAM_PREALLOC) {
                ;
            } else if (xen_enabled()) {
                abort();
            } else {
                flags = MAP_FIXED;
                if (block->fd >= 0) {
                    flags |= (block->flags & RAM_SHARED ?
                              MAP_SHARED : MAP_PRIVATE);
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, block->fd, offset);
                } else {
                    /*
                     * Remap needs to match alloc.  Accelerators that
                     * set phys_mem_alloc never remap.  If they did,
                     * we'd need a remap hook here.
                     */
                    assert(phys_mem_alloc == qemu_anon_ram_alloc);

                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                memory_try_enable_merging(vaddr, length);
                qemu_ram_setup_dump(vaddr, length);
            }
        }
    }
}
#endif /* !_WIN32 */
/* Return a host pointer to ram allocated with qemu_ram_alloc.
 * This should not be used for general purpose DMA.  Use address_space_map
 * or address_space_rw instead. For local memory (e.g. video ram) that the
 * device owns, use memory_region_get_ram_ptr.
 *
 * Called within RCU critical section.
 */
void *qemu_map_ram_ptr(RAMBlock *ram_block, ram_addr_t addr)
{
    RAMBlock *block = ram_block;

    if (block == NULL) {
        block = qemu_get_ram_block(addr);
        addr -= block->offset;
    }

    if (xen_enabled() && block->host == NULL) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map until the end of the page.
         */
        if (block->offset == 0) {
            return xen_map_cache(addr, 0, 0);
        }

        block->host = xen_map_cache(block->offset, block->max_length, 1);
    }
    return ramblock_ptr(block, addr);
}
/* Return a host pointer to guest's ram. Similar to qemu_map_ram_ptr
 * but takes a size argument.
 *
 * Called within RCU critical section.
 */
static void *qemu_ram_ptr_length(RAMBlock *ram_block, ram_addr_t addr,
                                 hwaddr *size)
{
    RAMBlock *block = ram_block;

    if (block == NULL) {
        block = qemu_get_ram_block(addr);
        addr -= block->offset;
    }
    *size = MIN(*size, block->max_length - addr);

    if (xen_enabled() && block->host == NULL) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map the requested area.
         */
        if (block->offset == 0) {
            return xen_map_cache(addr, *size, 1);
        }

        block->host = xen_map_cache(block->offset, block->max_length, 1);
    }

    return ramblock_ptr(block, addr);
}
/*
 * Translates a host ptr back to a RAMBlock, a ram_addr and an offset
 * in that RAMBlock.
 *
 * ptr: Host pointer to look up
 * round_offset: If true round the result offset down to a page boundary
 * *ram_addr: set to result ram_addr
 * *offset: set to result offset within the RAMBlock
 *
 * Returns: RAMBlock (or NULL if not found)
 *
 * By the time this function returns, the returned pointer is not protected
 * by RCU anymore.  If the caller is not within an RCU critical section and
 * does not hold the iothread lock, it must have other means of protecting the
 * pointer, such as a reference to the region that includes the incoming
 * ram_addr_t.
 */
RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
                                   ram_addr_t *offset)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_enabled()) {
        ram_addr_t ram_addr;
        rcu_read_lock();
        ram_addr = xen_ram_addr_from_mapcache(ptr);
        block = qemu_get_ram_block(ram_addr);
        if (block) {
            *offset = ram_addr - block->offset;
        }
        rcu_read_unlock();
        return block;
    }

    rcu_read_lock();
    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && block->host && host - block->host < block->max_length) {
        goto found;
    }

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        /* This can happen when the block is not mapped. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->max_length) {
            goto found;
        }
    }

    rcu_read_unlock();
    return NULL;

found:
    *offset = (host - block->host);
    if (round_offset) {
        *offset &= TARGET_PAGE_MASK;
    }
    rcu_read_unlock();
    return block;
}
/*
 * Finds the named RAMBlock
 *
 * name: The name of RAMBlock to find
 *
 * Returns: RAMBlock (or NULL if not found)
 */
RAMBlock *qemu_ram_block_by_name(const char *name)
{
    RAMBlock *block;

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (!strcmp(name, block->idstr)) {
            return block;
        }
    }

    return NULL;
}

/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
ram_addr_t qemu_ram_addr_from_host(void *ptr)
{
    RAMBlock *block;
    ram_addr_t offset;

    block = qemu_ram_block_from_host(ptr, false, &offset);
    if (!block) {
        return RAM_ADDR_INVALID;
    }

    return block->offset + offset;
}
/* Called within RCU critical section.  */
static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
                               uint64_t val, unsigned size)
{
    if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
        tb_invalidate_phys_page_fast(ram_addr, size);
    }
    switch (size) {
    case 1:
        stb_p(qemu_map_ram_ptr(NULL, ram_addr), val);
        break;
    case 2:
        stw_p(qemu_map_ram_ptr(NULL, ram_addr), val);
        break;
    case 4:
        stl_p(qemu_map_ram_ptr(NULL, ram_addr), val);
        break;
    default:
        abort();
    }
    /* Set both VGA and migration bits for simplicity and to remove
     * the notdirty callback faster.
     */
    cpu_physical_memory_set_dirty_range(ram_addr, size,
                                        DIRTY_CLIENTS_NOCODE);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (!cpu_physical_memory_is_clean(ram_addr)) {
        tlb_set_dirty(current_cpu, current_cpu->mem_io_vaddr);
    }
}
static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
                                 unsigned size, bool is_write)
{
    return is_write;
}

static const MemoryRegionOps notdirty_mem_ops = {
    .write = notdirty_mem_write,
    .valid.accepts = notdirty_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
{
    CPUState *cpu = current_cpu;
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUArchState *env = cpu->env_ptr;
    target_ulong pc, cs_base;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    uint32_t cpu_flags;

    if (cpu->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, len)
            && (wp->flags & flags)) {
            if (flags == BP_MEM_READ) {
                wp->flags |= BP_WATCHPOINT_HIT_READ;
            } else {
                wp->flags |= BP_WATCHPOINT_HIT_WRITE;
            }
            wp->hitaddr = vaddr;
            wp->hitattrs = attrs;
            if (!cpu->watchpoint_hit) {
                if (wp->flags & BP_CPU &&
                    !cc->debug_check_watchpoint(cpu, wp)) {
                    wp->flags &= ~BP_WATCHPOINT_HIT;
                    continue;
                }
                cpu->watchpoint_hit = wp;
                tb_check_watchpoint(cpu);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    cpu->exception_index = EXCP_DEBUG;
                    cpu_loop_exit(cpu);
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
                    cpu_loop_exit_noexc(cpu);
                }
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata,
                                  unsigned size, MemTxAttrs attrs)
{
    MemTxResult res;
    uint64_t data;
    int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
    AddressSpace *as = current_cpu->cpu_ases[asidx].as;

    check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ);
    switch (size) {
    case 1:
        data = address_space_ldub(as, addr, attrs, &res);
        break;
    case 2:
        data = address_space_lduw(as, addr, attrs, &res);
        break;
    case 4:
        data = address_space_ldl(as, addr, attrs, &res);
        break;
    default:
        abort();
    }
    *pdata = data;
    return res;
}
static MemTxResult watch_mem_write(void *opaque, hwaddr addr,
                                   uint64_t val, unsigned size,
                                   MemTxAttrs attrs)
{
    MemTxResult res;
    int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
    AddressSpace *as = current_cpu->cpu_ases[asidx].as;

    check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE);
    switch (size) {
    case 1:
        address_space_stb(as, addr, val, attrs, &res);
        break;
    case 2:
        address_space_stw(as, addr, val, attrs, &res);
        break;
    case 4:
        address_space_stl(as, addr, val, attrs, &res);
        break;
    default:
        abort();
    }
    return res;
}

static const MemoryRegionOps watch_mem_ops = {
    .read_with_attrs = watch_mem_read,
    .write_with_attrs = watch_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
                                unsigned len, MemTxAttrs attrs)
{
    subpage_t *subpage = opaque;
    uint8_t buf[8];
    MemTxResult res;

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
           subpage, len, addr);
#endif
    res = address_space_read(subpage->as, addr + subpage->base,
                             attrs, buf, len);
    if (res) {
        return res;
    }
    switch (len) {
    case 1:
        *data = ldub_p(buf);
        return MEMTX_OK;
    case 2:
        *data = lduw_p(buf);
        return MEMTX_OK;
    case 4:
        *data = ldl_p(buf);
        return MEMTX_OK;
    case 8:
        *data = ldq_p(buf);
        return MEMTX_OK;
    default:
        abort();
    }
}
static MemTxResult subpage_write(void *opaque, hwaddr addr,
                                 uint64_t value, unsigned len, MemTxAttrs attrs)
{
    subpage_t *subpage = opaque;
    uint8_t buf[8];

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx
           " value %"PRIx64"\n",
           __func__, subpage, len, addr, value);
#endif
    switch (len) {
    case 1:
        stb_p(buf, value);
        break;
    case 2:
        stw_p(buf, value);
        break;
    case 4:
        stl_p(buf, value);
        break;
    case 8:
        stq_p(buf, value);
        break;
    default:
        abort();
    }
    return address_space_write(subpage->as, addr + subpage->base,
                               attrs, buf, len);
}
static bool subpage_accepts(void *opaque, hwaddr addr,
                            unsigned len, bool is_write)
{
    subpage_t *subpage = opaque;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
           __func__, subpage, is_write ? 'w' : 'r', len, addr);
#endif

    return address_space_access_valid(subpage->as, addr + subpage->base,
                                      len, is_write);
}

static const MemoryRegionOps subpage_ops = {
    .read_with_attrs = subpage_read,
    .write_with_attrs = subpage_write,
    .impl.min_access_size = 1,
    .impl.max_access_size = 8,
    .valid.min_access_size = 1,
    .valid.max_access_size = 8,
    .valid.accepts = subpage_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE) {
        return -1;
    }
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
           __func__, mmio, start, end, idx, eidx, section);
#endif
    for (; idx <= eidx; idx++) {
        mmio->sub_section[idx] = section;
    }

    return 0;
}
static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
{
    subpage_t *mmio;

    mmio = g_malloc0(sizeof(subpage_t));

    mmio->as = as;
    mmio->base = base;
    memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
                          NULL, TARGET_PAGE_SIZE);
    mmio->iomem.subpage = true;
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
           mmio, base, TARGET_PAGE_SIZE);
#endif
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);

    return mmio;
}
static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
                              MemoryRegion *mr)
{
    assert(as);
    MemoryRegionSection section = {
        .address_space = as,
        .mr = mr,
        .offset_within_address_space = 0,
        .offset_within_region = 0,
        .size = int128_2_64(),
    };

    return phys_section_add(map, &section);
}
MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index, MemTxAttrs attrs)
{
    int asidx = cpu_asidx_from_attrs(cpu, attrs);
    CPUAddressSpace *cpuas = &cpu->cpu_ases[asidx];
    AddressSpaceDispatch *d = atomic_rcu_read(&cpuas->memory_dispatch);
    MemoryRegionSection *sections = d->map.sections;

    return sections[index & ~TARGET_PAGE_MASK].mr;
}
static void io_mem_init(void)
{
    memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
    memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
                          NULL, UINT64_MAX);
    memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
                          NULL, UINT64_MAX);
    memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
                          NULL, UINT64_MAX);
}
static void mem_begin(MemoryListener *listener)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
    uint16_t n;

    n = dummy_section(&d->map, as, &io_mem_unassigned);
    assert(n == PHYS_SECTION_UNASSIGNED);
    n = dummy_section(&d->map, as, &io_mem_notdirty);
    assert(n == PHYS_SECTION_NOTDIRTY);
    n = dummy_section(&d->map, as, &io_mem_rom);
    assert(n == PHYS_SECTION_ROM);
    n = dummy_section(&d->map, as, &io_mem_watch);
    assert(n == PHYS_SECTION_WATCH);

    d->phys_map  = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
    d->as = as;
    as->next_dispatch = d;
}

static void address_space_dispatch_free(AddressSpaceDispatch *d)
{
    phys_sections_free(&d->map);
    g_free(d);
}
static void mem_commit(MemoryListener *listener)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *cur = as->dispatch;
    AddressSpaceDispatch *next = as->next_dispatch;

    phys_page_compact_all(next, next->map.nodes_nb);

    atomic_rcu_set(&as->dispatch, next);
    if (cur) {
        call_rcu(cur, address_space_dispatch_free, rcu);
    }
}
static void tcg_commit(MemoryListener *listener)
{
    CPUAddressSpace *cpuas;
    AddressSpaceDispatch *d;

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener);
    cpu_reloading_memory_map();
    /* The CPU and TLB are protected by the iothread lock.
     * We reload the dispatch pointer now because cpu_reloading_memory_map()
     * may have split the RCU critical section.
     */
    d = atomic_rcu_read(&cpuas->as->dispatch);
    cpuas->memory_dispatch = d;
    tlb_flush(cpuas->cpu, 1);
}
void address_space_init_dispatch(AddressSpace *as)
{
    as->dispatch = NULL;
    as->dispatch_listener = (MemoryListener) {
        .begin = mem_begin,
        .commit = mem_commit,
        .region_add = mem_add,
        .region_nop = mem_add,
    };
    memory_listener_register(&as->dispatch_listener, as);
}

void address_space_unregister(AddressSpace *as)
{
    memory_listener_unregister(&as->dispatch_listener);
}

void address_space_destroy_dispatch(AddressSpace *as)
{
    AddressSpaceDispatch *d = as->dispatch;

    atomic_rcu_set(&as->dispatch, NULL);
    if (d) {
        call_rcu(d, address_space_dispatch_free, rcu);
    }
}
static void memory_map_init(void)
{
    system_memory = g_malloc(sizeof(*system_memory));

    memory_region_init(system_memory, NULL, "system", UINT64_MAX);
    address_space_init(&address_space_memory, system_memory, "memory");

    system_io = g_malloc(sizeof(*system_io));
    memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
                          65536);
    address_space_init(&address_space_io, system_io, "I/O");
}

MemoryRegion *get_system_memory(void)
{
    return system_memory;
}

MemoryRegion *get_system_io(void)
{
    return system_io;
}

#endif /* !defined(CONFIG_USER_ONLY) */
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
#endif
static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
                                     hwaddr length)
{
    uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
    addr += memory_region_get_ram_addr(mr);

    /* No early return if dirty_log_mask is or becomes 0, because
     * cpu_physical_memory_set_dirty_range will still call
     * xen_modified_memory.
     */
    if (dirty_log_mask) {
        dirty_log_mask =
            cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
    }
    if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
        tb_invalidate_phys_range(addr, addr + length);
        dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
    }
    cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
}
static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
{
    unsigned access_size_max = mr->ops->valid.max_access_size;

    /* Regions are assumed to support 1-4 byte accesses unless
       otherwise specified.  */
    if (access_size_max == 0) {
        access_size_max = 4;
    }

    /* Bound the maximum access by the alignment of the address.  */
    if (!mr->ops->impl.unaligned) {
        unsigned align_size_max = addr & -addr;
        if (align_size_max != 0 && align_size_max < access_size_max) {
            access_size_max = align_size_max;
        }
    }

    /* Don't attempt accesses larger than the maximum.  */
    if (l > access_size_max) {
        l = access_size_max;
    }
    l = pow2floor(l);

    return l;
}
static bool prepare_mmio_access(MemoryRegion *mr)
{
    bool unlocked = !qemu_mutex_iothread_locked();
    bool release_lock = false;

    if (unlocked && mr->global_locking) {
        qemu_mutex_lock_iothread();
        unlocked = false;
        release_lock = true;
    }
    if (mr->flush_coalesced_mmio) {
        if (unlocked) {
            qemu_mutex_lock_iothread();
        }
        qemu_flush_coalesced_mmio_buffer();
        if (unlocked) {
            qemu_mutex_unlock_iothread();
        }
    }

    return release_lock;
}
/* Called within RCU critical section.  */
static MemTxResult address_space_write_continue(AddressSpace *as, hwaddr addr,
                                                MemTxAttrs attrs,
                                                const uint8_t *buf,
                                                int len, hwaddr addr1,
                                                hwaddr l, MemoryRegion *mr)
{
    uint8_t *ptr;
    uint64_t val;
    MemTxResult result = MEMTX_OK;
    bool release_lock = false;

    for (;;) {
        if (!memory_access_is_direct(mr, true)) {
            release_lock |= prepare_mmio_access(mr);
            l = memory_access_size(mr, l, addr1);
            /* XXX: could force current_cpu to NULL to avoid
               potential bugs */
            switch (l) {
            case 8:
                /* 64 bit write access */
                val = ldq_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 8,
                                                       attrs);
                break;
            case 4:
                /* 32 bit write access */
                val = ldl_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 4,
                                                       attrs);
                break;
            case 2:
                /* 16 bit write access */
                val = lduw_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 2,
                                                       attrs);
                break;
            case 1:
                /* 8 bit write access */
                val = ldub_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 1,
                                                       attrs);
                break;
            default:
                abort();
            }
        } else {
            /* RAM case */
            ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
            memcpy(ptr, buf, l);
            invalidate_and_set_dirty(mr, addr1, l);
        }

        if (release_lock) {
            qemu_mutex_unlock_iothread();
            release_lock = false;
        }

        len -= l;
        buf += l;
        addr += l;

        if (!len) {
            break;
        }

        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);
    }

    return result;
}
MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
                                const uint8_t *buf, int len)
{
    hwaddr l;
    hwaddr addr1;
    MemoryRegion *mr;
    MemTxResult result = MEMTX_OK;

    if (len > 0) {
        rcu_read_lock();
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);
        result = address_space_write_continue(as, addr, attrs, buf, len,
                                              addr1, l, mr);
        rcu_read_unlock();
    }

    return result;
}
/* Called within RCU critical section.  */
MemTxResult address_space_read_continue(AddressSpace *as, hwaddr addr,
                                        MemTxAttrs attrs, uint8_t *buf,
                                        int len, hwaddr addr1, hwaddr l,
                                        MemoryRegion *mr)
{
    uint8_t *ptr;
    uint64_t val;
    MemTxResult result = MEMTX_OK;
    bool release_lock = false;

    for (;;) {
        if (!memory_access_is_direct(mr, false)) {
            /* I/O case */
            release_lock |= prepare_mmio_access(mr);
            l = memory_access_size(mr, l, addr1);
            switch (l) {
            case 8:
                /* 64 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 8,
                                                      attrs);
                stq_p(buf, val);
                break;
            case 4:
                /* 32 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 4,
                                                      attrs);
                stl_p(buf, val);
                break;
            case 2:
                /* 16 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 2,
                                                      attrs);
                stw_p(buf, val);
                break;
            case 1:
                /* 8 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 1,
                                                      attrs);
                stb_p(buf, val);
                break;
            default:
                abort();
            }
        } else {
            /* RAM case */
            ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
            memcpy(buf, ptr, l);
        }

        if (release_lock) {
            qemu_mutex_unlock_iothread();
            release_lock = false;
        }

        len -= l;
        buf += l;
        addr += l;

        if (!len) {
            break;
        }

        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, false);
    }

    return result;
}
MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
                                    MemTxAttrs attrs, uint8_t *buf, int len)
{
    hwaddr l;
    hwaddr addr1;
    MemoryRegion *mr;
    MemTxResult result = MEMTX_OK;

    if (len > 0) {
        rcu_read_lock();
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, false);
        result = address_space_read_continue(as, addr, attrs, buf, len,
                                             addr1, l, mr);
        rcu_read_unlock();
    }

    return result;
}
MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
                             uint8_t *buf, int len, bool is_write)
{
    if (is_write) {
        return address_space_write(as, addr, attrs, (uint8_t *)buf, len);
    } else {
        return address_space_read(as, addr, attrs, (uint8_t *)buf, len);
    }
}
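
/*
 * Illustrative sketch (not part of the original exec.c): a caller that wants
 * to honour MemTx failures can go through address_space_rw() directly.  The
 * helper name and the use of address_space_memory are assumptions made only
 * for this example; note the bytes land in guest memory order, so use the
 * ldl_le_phys()/ldl_be_phys() style helpers below when a fixed endianness is
 * wanted.
 *
 *     static bool example_read_guest_u32(hwaddr gpa, uint32_t *out)
 *     {
 *         MemTxResult res;
 *
 *         res = address_space_rw(&address_space_memory, gpa,
 *                                MEMTXATTRS_UNSPECIFIED,
 *                                (uint8_t *)out, sizeof(*out), false);
 *         return res == MEMTX_OK;
 *     }
 */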
void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
                            int len, int is_write)
{
    address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
                     buf, len, is_write);
}
enum write_rom_type {
    WRITE_DATA,
    FLUSH_CACHE,
};

static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
    hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
{
    hwaddr l;
    uint8_t *ptr;
    hwaddr addr1;
    MemoryRegion *mr;

    rcu_read_lock();
    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);

        if (!(memory_region_is_ram(mr) ||
              memory_region_is_romd(mr))) {
            l = memory_access_size(mr, l, addr1);
        } else {
            /* ROM/RAM case */
            ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
            switch (type) {
            case WRITE_DATA:
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(mr, addr1, l);
                break;
            case FLUSH_CACHE:
                flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
                break;
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
    rcu_read_unlock();
}

/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
                                   const uint8_t *buf, int len)
{
    cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
}
void cpu_flush_icache_range(hwaddr start, int len)
{
    /*
     * This function should do the same thing as an icache flush that was
     * triggered from within the guest. For TCG we are always cache coherent,
     * so there is no need to flush anything. For KVM / Xen we need to flush
     * the host's instruction cache at least.
     */
    if (tcg_enabled()) {
        return;
    }

    cpu_physical_memory_write_rom_internal(&address_space_memory,
                                           start, NULL, len, FLUSH_CACHE);
}
typedef struct {
    MemoryRegion *mr;
    void *buffer;
    hwaddr addr;
    hwaddr len;
    bool in_use;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    QEMUBH *bh;
    QLIST_ENTRY(MapClient) link;
} MapClient;

QemuMutex map_client_list_lock;
static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);
static void cpu_unregister_map_client_do(MapClient *client)
{
    QLIST_REMOVE(client, link);
    g_free(client);
}

static void cpu_notify_map_clients_locked(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        qemu_bh_schedule(client->bh);
        cpu_unregister_map_client_do(client);
    }
}

void cpu_register_map_client(QEMUBH *bh)
{
    MapClient *client = g_malloc(sizeof(*client));

    qemu_mutex_lock(&map_client_list_lock);
    client->bh = bh;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    if (!atomic_read(&bounce.in_use)) {
        cpu_notify_map_clients_locked();
    }
    qemu_mutex_unlock(&map_client_list_lock);
}
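
/*
 * Illustrative sketch (not part of the original exec.c): a device model that
 * sees address_space_map() fail because the single bounce buffer is busy can
 * register a bottom half here and retry from it.  ExampleDev,
 * example_dev_start_dma() and the map_retry_bh field are hypothetical names
 * used only for this sketch.
 *
 *     static void example_map_retry_bh(void *opaque)
 *     {
 *         ExampleDev *dev = opaque;
 *
 *         example_dev_start_dma(dev);        retry the mapping from here
 *     }
 *
 *     ... and in the DMA path, when address_space_map() returns NULL:
 *
 *     if (!dev->map_retry_bh) {
 *         dev->map_retry_bh = qemu_bh_new(example_map_retry_bh, dev);
 *     }
 *     cpu_register_map_client(dev->map_retry_bh);
 */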
void cpu_exec_init_all(void)
{
    qemu_mutex_init(&ram_list.mutex);
    io_mem_init();
    memory_map_init();
    qemu_mutex_init(&map_client_list_lock);
}

void cpu_unregister_map_client(QEMUBH *bh)
{
    MapClient *client;

    qemu_mutex_lock(&map_client_list_lock);
    QLIST_FOREACH(client, &map_client_list, link) {
        if (client->bh == bh) {
            cpu_unregister_map_client_do(client);
            break;
        }
    }
    qemu_mutex_unlock(&map_client_list_lock);
}

static void cpu_notify_map_clients(void)
{
    qemu_mutex_lock(&map_client_list_lock);
    cpu_notify_map_clients_locked();
    qemu_mutex_unlock(&map_client_list_lock);
}
bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
{
    MemoryRegion *mr;
    hwaddr l, xlat;

    rcu_read_lock();
    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (!memory_access_is_direct(mr, is_write)) {
            l = memory_access_size(mr, l, addr);
            if (!memory_region_access_valid(mr, xlat, l, is_write)) {
                return false;
            }
        }

        len -= l;
        addr += l;
    }
    rcu_read_unlock();
    return true;
}
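
/*
 * Illustrative sketch (not part of the original exec.c): a device model can
 * probe a guest-supplied buffer before committing to a transfer.  The names
 * gpa and size are placeholders for this example only.
 *
 *     if (!address_space_access_valid(&address_space_memory, gpa, size,
 *                                     true)) {
 *         raise a device-specific error instead of issuing the DMA
 *     }
 */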
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *address_space_map(AddressSpace *as,
                        hwaddr addr,
                        hwaddr *plen,
                        bool is_write)
{
    hwaddr len = *plen;
    hwaddr done = 0;
    hwaddr l, xlat, base;
    MemoryRegion *mr, *this_mr;
    void *ptr;

    if (len == 0) {
        return NULL;
    }

    l = len;
    rcu_read_lock();
    mr = address_space_translate(as, addr, &xlat, &l, is_write);

    if (!memory_access_is_direct(mr, is_write)) {
        if (atomic_xchg(&bounce.in_use, true)) {
            rcu_read_unlock();
            return NULL;
        }
        /* Avoid unbounded allocations */
        l = MIN(l, TARGET_PAGE_SIZE);
        bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
        bounce.addr = addr;
        bounce.len = l;

        memory_region_ref(mr);
        bounce.mr = mr;
        if (!is_write) {
            address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
                               bounce.buffer, l);
        }

        rcu_read_unlock();
        *plen = l;
        return bounce.buffer;
    }

    base = xlat;

    for (;;) {
        len -= l;
        addr += l;
        done += l;
        if (len == 0) {
            break;
        }

        l = len;
        this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (this_mr != mr || xlat != base + done) {
            break;
        }
    }

    memory_region_ref(mr);
    *plen = done;
    ptr = qemu_ram_ptr_length(mr->ram_block, base, plen);
    rcu_read_unlock();

    return ptr;
}
/* Unmaps a memory region previously mapped by address_space_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         int is_write, hwaddr access_len)
{
    if (buffer != bounce.buffer) {
        MemoryRegion *mr;
        ram_addr_t addr1;

        mr = memory_region_from_host(buffer, &addr1);
        assert(mr != NULL);
        if (is_write) {
            invalidate_and_set_dirty(mr, addr1, access_len);
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        memory_region_unref(mr);
        return;
    }
    if (is_write) {
        address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
                            bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    memory_region_unref(bounce.mr);
    atomic_mb_set(&bounce.in_use, false);
    cpu_notify_map_clients();
}
void *cpu_physical_memory_map(hwaddr addr,
                              hwaddr *plen,
                              int is_write)
{
    return address_space_map(&address_space_memory, addr, plen, is_write);
}

void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                               int is_write, hwaddr access_len)
{
    return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
}
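
/*
 * Illustrative sketch (not part of the original exec.c): the usual zero-copy
 * DMA pattern built on the two wrappers above.  The helper name and direction
 * flags are assumptions for this example; note that the mapped length may
 * come back smaller than requested, so a real device would loop.
 *
 *     static void example_dma_to_guest(hwaddr gpa, const void *src,
 *                                      hwaddr size)
 *     {
 *         hwaddr plen = size;
 *         void *host = cpu_physical_memory_map(gpa, &plen, 1);
 *
 *         if (!host) {
 *             bounce buffer busy: see cpu_register_map_client() above
 *             return;
 *         }
 *         memcpy(host, src, plen);
 *         cpu_physical_memory_unmap(host, plen, 1, plen);
 *     }
 */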
/* warning: addr must be aligned */
static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
                                                  MemTxAttrs attrs,
                                                  MemTxResult *result,
                                                  enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, false);
    if (l < 4 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}
uint32_t address_space_ldl(AddressSpace *as, hwaddr addr,
                           MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_NATIVE_ENDIAN);
}

uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_LITTLE_ENDIAN);
}

uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_BIG_ENDIAN);
}

uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}
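
/*
 * Illustrative sketch (not part of the original exec.c): device emulation
 * that parses guest structures with a fixed layout normally uses the
 * endian-specific helpers rather than the target-native ldl_phys().
 * desc_gpa is a placeholder for a guest-physical descriptor address.
 *
 *     uint32_t status = ldl_le_phys(&address_space_memory, desc_gpa + 8);
 */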
/* warning: addr must be aligned */
static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
                                                  MemTxAttrs attrs,
                                                  MemTxResult *result,
                                                  enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 8;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 false);
    if (l < 8 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap64(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap64(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}
uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
                           MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_NATIVE_ENDIAN);
}

uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_LITTLE_ENDIAN);
}

uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_BIG_ENDIAN);
}

uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}
uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
                            MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t val;
    MemTxResult r;

    r = address_space_rw(as, addr, attrs, &val, 1, 0);
    if (result) {
        *result = r;
    }
    return val;
}

uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}
/* warning: addr must be aligned */
static inline uint32_t address_space_lduw_internal(AddressSpace *as,
                                                   hwaddr addr,
                                                   MemTxAttrs attrs,
                                                   MemTxResult *result,
                                                   enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 false);
    if (l < 2 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}
uint32_t address_space_lduw(AddressSpace *as, hwaddr addr,
                            MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_NATIVE_ENDIAN);
}

uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
                               MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_LITTLE_ENDIAN);
}

uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
                               MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_BIG_ENDIAN);
}

uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}
/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
                                MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    uint8_t dirty_log_mask;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

        r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
    } else {
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        stl_p(ptr, val);

        dirty_log_mask = memory_region_get_dirty_log_mask(mr);
        dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
        cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
                                            4, dirty_log_mask);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}

void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_notdirty(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
/* warning: addr must be aligned */
static inline void address_space_stl_internal(AddressSpace *as,
                                              hwaddr addr, uint32_t val,
                                              MemTxAttrs attrs,
                                              MemTxResult *result,
                                              enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(mr, addr1, 4);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}
void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stl_internal(as, addr, val, attrs, result,
                               DEVICE_NATIVE_ENDIAN);
}

void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stl_internal(as, addr, val, attrs, result,
                               DEVICE_LITTLE_ENDIAN);
}

void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stl_internal(as, addr, val, attrs, result,
                               DEVICE_BIG_ENDIAN);
}

void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t v = val;
    MemTxResult r;

    r = address_space_rw(as, addr, attrs, &v, 1, 1);
    if (result) {
        *result = r;
    }
}

void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
/* warning: addr must be aligned */
static inline void address_space_stw_internal(AddressSpace *as,
                                              hwaddr addr, uint32_t val,
                                              MemTxAttrs attrs,
                                              MemTxResult *result,
                                              enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, true);
    if (l < 2 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(mr, addr1, 2);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}
void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_NATIVE_ENDIAN);
}

void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_LITTLE_ENDIAN);
}

void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_BIG_ENDIAN);
}

void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;
    val = tswap64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;
    val = cpu_to_le64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;
    val = cpu_to_be64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
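
/*
 * Illustrative sketch (not part of the original exec.c): the store helpers
 * above are the write-side counterparts of ldl_le_phys() and friends, for
 * example when filling in a little-endian descriptor ring entry.  desc_gpa,
 * buf_gpa and buf_len are placeholders for this example only.
 *
 *     stq_le_phys(&address_space_memory, desc_gpa + 0, buf_gpa);
 *     stl_le_phys(&address_space_memory, desc_gpa + 8, buf_len);
 */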
/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

    while (len > 0) {
        int asidx;
        MemTxAttrs attrs;

        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_attrs_debug(cpu, page, &attrs);
        asidx = cpu_asidx_from_attrs(cpu, attrs);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write) {
            cpu_physical_memory_write_rom(cpu->cpu_ases[asidx].as,
                                          phys_addr, buf, l);
        } else {
            address_space_rw(cpu->cpu_ases[asidx].as, phys_addr,
                             MEMTXATTRS_UNSPECIFIED,
                             buf, l, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
/*
 * Allows code that needs to deal with migration bitmaps etc to still be built
 * target independent.
 */
size_t qemu_target_page_bits(void)
{
    return TARGET_PAGE_BITS;
}

#endif

/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool target_words_bigendian(void);
bool target_words_bigendian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}
#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegion *mr;
    hwaddr l = 1;
    bool res;

    rcu_read_lock();
    mr = address_space_translate(&address_space_memory,
                                 phys_addr, &phys_addr, &l, false);

    res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
    rcu_read_unlock();
    return res;
}
int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
{
    RAMBlock *block;
    int ret = 0;

    rcu_read_lock();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        ret = func(block->idstr, block->host, block->offset,
                   block->used_length, opaque);