/*
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/cutils.h"
#include "exec/exec-all.h"
#include "hw/qdev-core.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/boards.h"
#include "hw/xen/xen.h"
#endif
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#if defined(CONFIG_USER_ONLY)
#include "qemu.h"
#else /* !CONFIG_USER_ONLY */
#include "exec/memory.h"
#include "exec/ioport.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#include "sysemu/xen-mapcache.h"
#endif
#include "exec/cpu-all.h"
#include "qemu/rcu_queue.h"
#include "qemu/main-loop.h"
#include "translate-all.h"
#include "sysemu/replay.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"

#include "migration/vmstate.h"

#include "qemu/range.h"
#include "qemu/mmap-alloc.h"

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* ram_list is read under rcu_read_lock()/rcu_read_unlock().  Writes
 * are protected by the ramlist lock.
 */
RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC   (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED     (1 << 1)

/* Only a portion of RAM (used_length) is actually used, and migrated.
 * This used_length size can change across reboots.
 */
#define RAM_RESIZEABLE (1 << 2)

#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
__thread CPUState *current_cpu;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables.  */
#define ADDR_SPACE_BITS 64

#define P_L2_BITS 9
#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)

typedef PhysPageEntry Node[P_L2_SIZE];
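/*
 * A worked example of the level arithmetic (assuming a 4 KiB target page,
 * i.e. TARGET_PAGE_BITS == 12): each level resolves P_L2_BITS == 9 bits of
 * the page number, so P_L2_LEVELS == ((64 - 12 - 1) / 9) + 1 == 6 levels
 * are enough to cover the whole 64-bit address space.
 */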
typedef struct PhysPageMap {
    struct rcu_head rcu;

    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    struct rcu_head rcu;

    MemoryRegionSection *mru_section;
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;
};
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3
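/*
 * SUBPAGE_IDX just keeps the offset-within-page bits.  For example, with
 * 4 KiB target pages (TARGET_PAGE_MASK == ~0xfff), SUBPAGE_IDX(0x12345)
 * evaluates to 0x345, which indexes the sub_section[] table above.
 */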
static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;

/**
 * CPUAddressSpace: all the information a CPU needs about an AddressSpace
 * @cpu: the CPU whose AddressSpace this is
 * @as: the AddressSpace itself
 * @memory_dispatch: its dispatch pointer (cached, RCU protected)
 * @tcg_as_listener: listener for tracking changes to the AddressSpace
 */
struct CPUAddressSpace {
    CPUState *cpu;
    AddressSpace *as;
    struct AddressSpaceDispatch *memory_dispatch;
    MemoryListener tcg_as_listener;
};
#endif

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    static unsigned alloc_hint = 16;
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, alloc_hint);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
        alloc_hint = map->nodes_nb_alloc;
    }
}
static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
{
    unsigned i;
    uint32_t ret;
    PhysPageEntry e;
    PhysPageEntry *p;

    ret = map->nodes_nb++;
    p = map->nodes[ret];
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);

    e.skip = leaf ? 0 : 1;
    e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
    for (i = 0; i < P_L2_SIZE; ++i) {
        memcpy(&p[i], &e, sizeof(e));
    }
    return ret;
}
static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map, level == 0);
    }
    p = map->nodes[lp->ptr];
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}
static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}
/* Compact a non leaf page entry. Simply detect that the entry has a single child,
 * and update our entry so we can skip it and go directly to the destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}

static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes);
    }
}
static inline bool section_covers_addr(const MemoryRegionSection *section,
                                       hwaddr addr)
{
    /* Memory topology clips a memory region to [0, 2^64); size.hi > 0 means
     * the section must cover the entire address space.
     */
    return section->size.hi ||
           range_covers_byte(section->offset_within_address_space,
                             section->size.lo, addr);
}
static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (section_covers_addr(&sections[lp.ptr], addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}
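/*
 * An illustrative sketch of a lookup (not part of the code): starting at the
 * root with i == P_L2_LEVELS, each iteration consumes lp.skip levels at once
 * (a chain compacted by phys_page_compact() counts as one hop), picks the
 * next P_L2_BITS-wide slice of the page number, and descends:
 *
 *     MemoryRegionSection *s = phys_page_find(d->phys_map, addr,
 *                                             d->map.nodes, d->map.sections);
 *
 * Note that a miss yields the unassigned section rather than NULL.
 */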
bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}
/* Called from RCU critical section */
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section = atomic_read(&d->mru_section);
    subpage_t *subpage;
    bool update;

    if (section && section != &d->map.sections[PHYS_SECTION_UNASSIGNED] &&
        section_covers_addr(section, addr)) {
        update = false;
    } else {
        section = phys_page_find(d->phys_map, addr, d->map.nodes,
                                 d->map.sections);
        update = true;
    }
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    if (update) {
        atomic_set(&d->mru_section, section);
    }
    return section;
}
/* Called from RCU critical section */
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    MemoryRegion *mr;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    mr = section->mr;

    /* MMIO registers can be expected to perform full-width accesses based only
     * on their address, without considering adjacent registers that could
     * decode to completely different MemoryRegions.  When such registers
     * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
     * regions overlap wildly.  For this reason we cannot clamp the accesses
     * here.
     *
     * If the length is small (as is the case for address_space_ldl/stl),
     * everything works fine.  If the incoming length is large, however,
     * the caller really has to do the clamping through memory_access_size.
     */
    if (memory_region_is_ram(mr)) {
        diff = int128_sub(section->size, int128_make64(addr));
        *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    }
    return section;
}
/* Called from RCU critical section */
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;

    for (;;) {
        AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
        section = address_space_translate_internal(d, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        *plen = MIN(page, *plen);
    }

    *xlat = addr;
    return mr;
}
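/*
 * Typical caller pattern (a minimal sketch; the real callers are the
 * address_space_read/write family below): translation must happen inside
 * an RCU critical section because the returned MemoryRegion is only
 * guaranteed to stay alive as long as the dispatch it came from.
 *
 *     hwaddr xlat, plen = len;
 *     rcu_read_lock();
 *     MemoryRegion *mr = address_space_translate(as, addr, &xlat, &plen,
 *                                                is_write);
 *     ... access mr at offset xlat, at most plen bytes ...
 *     rcu_read_unlock();
 */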
/* Called from RCU critical section */
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen)
{
    MemoryRegionSection *section;
    AddressSpaceDispatch *d = cpu->cpu_ases[asidx].memory_dispatch;

    section = address_space_translate_internal(d, addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif
#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu, 1);

    return 0;
}

static int cpu_common_pre_load(void *opaque)
{
    CPUState *cpu = opaque;

    cpu->exception_index = -1;

    return 0;
}
static bool cpu_common_exception_index_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return tcg_enabled() && cpu->exception_index != -1;
}

static const VMStateDescription vmstate_cpu_common_exception_index = {
    .name = "cpu_common/exception_index",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_exception_index_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(exception_index, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

static bool cpu_common_crash_occurred_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return cpu->crash_occurred;
}

static const VMStateDescription vmstate_cpu_common_crash_occurred = {
    .name = "cpu_common/crash_occurred",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_crash_occurred_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(crash_occurred, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_cpu_common_exception_index,
        &vmstate_cpu_common_crash_occurred,
        NULL
    }
};

#endif
CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}
#if !defined(CONFIG_USER_ONLY)
void cpu_address_space_init(CPUState *cpu, AddressSpace *as, int asidx)
{
    CPUAddressSpace *newas;

    /* Target code should have set num_ases before calling us */
    assert(asidx < cpu->num_ases);

    if (asidx == 0) {
        /* address space 0 gets the convenience alias */
        cpu->as = as;
    }

    /* KVM cannot currently support multiple address spaces. */
    assert(asidx == 0 || !kvm_enabled());

    if (!cpu->cpu_ases) {
        cpu->cpu_ases = g_new0(CPUAddressSpace, cpu->num_ases);
    }

    newas = &cpu->cpu_ases[asidx];
    newas->cpu = cpu;
    newas->as = as;
    if (tcg_enabled()) {
        newas->tcg_as_listener.commit = tcg_commit;
        memory_listener_register(&newas->tcg_as_listener, as);
    }
}

AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx)
{
    /* Return the AddressSpace corresponding to the specified index */
    return cpu->cpu_ases[asidx].as;
}
#endif
void cpu_exec_exit(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    cpu_list_remove(cpu);

    if (cc->vmsd != NULL) {
        vmstate_unregister(NULL, cc->vmsd, cpu);
    }
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_unregister(NULL, &vmstate_cpu_common, cpu);
    }
}
void cpu_exec_init(CPUState *cpu, Error **errp)
{
    CPUClass *cc ATTRIBUTE_UNUSED = CPU_GET_CLASS(cpu);
    Error *local_err ATTRIBUTE_UNUSED = NULL;

#ifdef TARGET_WORDS_BIGENDIAN
    cpu->bigendian = true;
#else
    cpu->bigendian = false;
#endif

#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();

    /* This is a softmmu CPU object, so create a property for it
     * so users can wire up its memory. (This can't go in qom/cpu.c
     * because that file is compiled only once for both user-mode
     * and system builds.) The default if no link is set up is to use
     * the system address space.
     */
    object_property_add_link(OBJECT(cpu), "memory", TYPE_MEMORY_REGION,
                             (Object **)&cpu->memory,
                             qdev_prop_allow_set_link_before_realize,
                             OBJ_PROP_LINK_UNREF_ON_RELEASE,
                             &error_abort);
    cpu->memory = system_memory;
    object_ref(OBJECT(cpu->memory));
#endif

    cpu_list_add(cpu);

#ifndef CONFIG_USER_ONLY
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu->cpu_index, &vmstate_cpu_common, cpu);
    }
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu->cpu_index, cc->vmsd, cpu);
    }
#endif
}
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    MemTxAttrs attrs;
    hwaddr phys = cpu_get_phys_page_attrs_debug(cpu, pc, &attrs);
    int asidx = cpu_asidx_from_attrs(cpu, attrs);
    if (phys != -1) {
        tb_invalidate_phys_addr(cpu->cpu_ases[asidx].as,
                                phys | (pc & ~TARGET_PAGE_MASK));
    }
}
#endif
#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
}

int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    return -ENOSYS;
}

void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
}

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    CPUWatchpoint *wp;

    /* forbid ranges which are empty or run off the end of the address space */
    if (len == 0 || (addr + len - 1) < addr) {
        error_report("tried to set invalid watchpoint at %"
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len = len;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
    }

    tlb_flush_page(cpu, addr);

    if (watchpoint) {
        *watchpoint = wp;
    }
    return 0;
}
/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (addr == wp->vaddr && len == wp->len
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);

    tlb_flush_page(cpu, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
        }
    }
}
/* Return true if this watchpoint address matches the specified
 * access (ie the address range covered by the watchpoint overlaps
 * partially or completely with the address range covered by the
 * access).
 */
static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
                                                  vaddr addr,
                                                  vaddr len)
{
    /* We know the lengths are non-zero, but a little caution is
     * required to avoid errors in the case where the range ends
     * exactly at the top of the address space and so addr + len
     * wraps round to zero.
     */
    vaddr wpend = wp->vaddr + wp->len - 1;
    vaddr addrend = addr + len - 1;

    return !(addr > wpend || wp->vaddr > addrend);
}
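/*
 * For example (hypothetical numbers): a watchpoint at vaddr 0x1000 with
 * len 4 covers 0x1000..0x1003, so wpend == 0x1003.  A 2-byte access at
 * 0x1002 gives addrend == 0x1003, and !(0x1002 > 0x1003 || 0x1000 > 0x1003)
 * is true: the ranges overlap.  The -1 arithmetic is what keeps a range
 * ending at the very top of the address space from wrapping to zero.
 */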
/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
{
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
    }

    breakpoint_invalidate(cpu, pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
{
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
{
    QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);

    breakpoint_invalidate(cpu, breakpoint->pc);

    g_free(breakpoint);
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
{
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
        }
    }
}
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(cpu);
        }
    }
}
void QEMU_NORETURN cpu_abort(CPUState *cpu, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_separate()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}
#if !defined(CONFIG_USER_ONLY)
/* Called from RCU critical section */
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && addr - block->offset < block->max_length) {
        return block;
    }
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->max_length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    /* It is safe to write mru_block outside the iothread lock.  This
     * is what happens:
     *
     *     mru_block = xxx
     *     rcu_read_unlock()
     *                                        xxx removed from list
     *                  rcu_read_lock()
     *                  read mru_block
     *                                        mru_block = NULL;
     *                                        call_rcu(reclaim_ramblock, xxx);
     *                  rcu_read_unlock()
     *
     * atomic_rcu_set is not needed here.  The block was already published
     * when it was placed into the list.  Here we're just making an extra
     * copy of the pointer.
     */
    ram_list.mru_block = block;
    return block;
}
static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
    CPUState *cpu;
    ram_addr_t start1;
    RAMBlock *block;
    ram_addr_t end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    rcu_read_lock();
    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
    CPU_FOREACH(cpu) {
        tlb_reset_dirty(cpu, start1, length);
    }
    rcu_read_unlock();
}
/* Note: start and end must be within the same ram block.  */
bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
                                              ram_addr_t length,
                                              unsigned client)
{
    DirtyMemoryBlocks *blocks;
    unsigned long end, page;
    bool dirty = false;

    if (length == 0) {
        return false;
    }

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    rcu_read_lock();

    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);

    while (page < end) {
        unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long num = MIN(end - page, DIRTY_MEMORY_BLOCK_SIZE - offset);

        dirty |= bitmap_test_and_clear_atomic(blocks->blocks[idx],
                                              offset, num);
        page += num;
    }

    rcu_read_unlock();

    if (dirty && tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
    }

    return dirty;
}
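/*
 * Illustration of the block arithmetic above (hypothetical numbers): a page
 * number N lives in bitmap fragment N / DIRTY_MEMORY_BLOCK_SIZE at bit
 * N % DIRTY_MEMORY_BLOCK_SIZE, and each loop iteration clears at most the
 * remainder of one fragment, so a range that straddles a fragment boundary
 * is handled by two bitmap_test_and_clear_atomic() calls.
 */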
/* Called from RCU critical section */
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = memory_region_get_ram_addr(section->mr) + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        AddressSpaceDispatch *d;

        d = atomic_rcu_read(&section->address_space->dispatch);
        iotlb = section - d->map.sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* defined(CONFIG_USER_ONLY) */
#if !defined(CONFIG_USER_ONLY)

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
                               qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this.  Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
{
    phys_mem_alloc = alloc;
}
static uint16_t phys_section_add(PhysPageMap *map,
                                 MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(map->sections_nb < TARGET_PAGE_SIZE);

    if (map->sections_nb == map->sections_nb_alloc) {
        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
        map->sections = g_renew(MemoryRegionSection, map->sections,
                                map->sections_nb_alloc);
    }
    map->sections[map->sections_nb] = *section;
    memory_region_ref(section->mr);
    return map->sections_nb++;
}
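/*
 * Example of the ORing described above (hypothetical address): for a
 * page-aligned RAM address such as 0x7f0000, the TLB entry of a writable
 * page becomes 0x7f0000 | PHYS_SECTION_NOTDIRTY == 0x7f0001.  The assert
 * guarantees a section number can never spill into the address bits.
 */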
static void phys_section_destroy(MemoryRegion *mr)
{
    bool have_sub_page = mr->subpage;

    memory_region_unref(mr);

    if (have_sub_page) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        object_unref(OBJECT(&subpage->iomem));
        g_free(subpage);
    }
}

static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->nodes);
    g_free(map->sections);
}
static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
                                                   d->map.nodes, d->map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.address_space = d->as;
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&d->map, &subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end,
                     phys_section_add(&d->map, section));
}
static void register_multipage(AddressSpaceDispatch *d,
                               MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(&d->map, section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,
                                                    TARGET_PAGE_BITS));

    assert(num_pages);
    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
}
static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = as->next_dispatch;
    MemoryRegionSection now = *section, remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
                        - now.offset_within_address_space;

        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(d, &now);
    } else {
        now.size = int128_zero();
    }
    while (int128_ne(remain.size, now.size)) {
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);
        now = remain;
        if (int128_lt(remain.size, page_size)) {
            register_subpage(d, &now);
        } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
            now.size = page_size;
            register_subpage(d, &now);
        } else {
            now.size = int128_and(now.size, int128_neg(page_size));
            register_multipage(d, &now);
        }
    }
}
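/*
 * Example of the splitting above (hypothetical section, 4 KiB pages): a
 * section covering [0x1800, 0x4400) is registered as a subpage for the
 * unaligned head [0x1800, 0x2000), one multipage run for the page-aligned
 * middle [0x2000, 0x4000), and another subpage for the tail
 * [0x4000, 0x4400).
 */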
void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled()) {
        kvm_flush_coalesced_mmio_buffer();
    }
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}
#ifdef __linux__
static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path,
                            Error **errp)
{
    bool unlink_on_error = false;
    char *filename;
    char *sanitized_name;
    char *c;
    void * volatile area = MAP_FAILED;
    int fd = -1;
    int64_t page_size;

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        error_setg(errp,
                   "host lacks kvm mmu notifiers, -mem-path unsupported");
        return NULL;
    }

    for (;;) {
        fd = open(path, O_RDWR);
        if (fd >= 0) {
            /* @path names an existing file, use it */
            break;
        }
        if (errno == ENOENT) {
            /* @path names a file that doesn't exist, create it */
            fd = open(path, O_RDWR | O_CREAT | O_EXCL, 0644);
            if (fd >= 0) {
                unlink_on_error = true;
                break;
            }
        } else if (errno == EISDIR) {
            /* @path names a directory, create a file there */
            /* Make name safe to use with mkstemp by replacing '/' with '_'. */
            sanitized_name = g_strdup(memory_region_name(block->mr));
            for (c = sanitized_name; *c != '\0'; c++) {
                if (*c == '/') {
                    *c = '_';
                }
            }

            filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
                                       sanitized_name);
            g_free(sanitized_name);

            fd = mkstemp(filename);
            if (fd >= 0) {
                unlink_on_error = true;
                g_free(filename);
                break;
            }
            g_free(filename);
        }
        if (errno != EEXIST && errno != EINTR) {
            error_setg_errno(errp, errno,
                             "can't open backing store %s for guest RAM",
                             path);
            goto error;
        }
        /*
         * Try again on EINTR and EEXIST.  The latter happens when
         * something else creates the file between our two open().
         */
    }

    page_size = qemu_fd_getpagesize(fd);
    block->mr->align = MAX(page_size, QEMU_VMALLOC_ALIGN);

    if (memory < page_size) {
        error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
                   "or larger than page size 0x%" PRIx64,
                   memory, page_size);
        goto error;
    }

    memory = ROUND_UP(memory, page_size);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory)) {
        perror("ftruncate");
    }

    area = qemu_ram_mmap(fd, memory, block->mr->align,
                         block->flags & RAM_SHARED);
    if (area == MAP_FAILED) {
        error_setg_errno(errp, errno,
                         "unable to map backing store for guest RAM");
        goto error;
    }

    if (mem_prealloc) {
        os_mem_prealloc(fd, area, memory, errp);
        if (errp && *errp) {
            goto error;
        }
    }

    block->fd = fd;
    return area;

error:
    if (area != MAP_FAILED) {
        qemu_ram_munmap(area, memory);
    }
    if (unlink_on_error) {
        unlink(path);
    }
    if (fd != -1) {
        close(fd);
    }
    return NULL;
}
1317 static ram_addr_t
find_ram_offset(ram_addr_t size
)
1319 RAMBlock
*block
, *next_block
;
1320 ram_addr_t offset
= RAM_ADDR_MAX
, mingap
= RAM_ADDR_MAX
;
1322 assert(size
!= 0); /* it would hand out same offset multiple times */
1324 if (QLIST_EMPTY_RCU(&ram_list
.blocks
)) {
1328 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1329 ram_addr_t end
, next
= RAM_ADDR_MAX
;
1331 end
= block
->offset
+ block
->max_length
;
1333 QLIST_FOREACH_RCU(next_block
, &ram_list
.blocks
, next
) {
1334 if (next_block
->offset
>= end
) {
1335 next
= MIN(next
, next_block
->offset
);
1338 if (next
- end
>= size
&& next
- end
< mingap
) {
1340 mingap
= next
- end
;
1344 if (offset
== RAM_ADDR_MAX
) {
1345 fprintf(stderr
, "Failed to find gap of requested size: %" PRIu64
"\n",
1353 ram_addr_t
last_ram_offset(void)
1356 ram_addr_t last
= 0;
1359 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1360 last
= MAX(last
, block
->offset
+ block
->max_length
);
1366 static void qemu_ram_setup_dump(void *addr
, ram_addr_t size
)
1370 /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
1371 if (!machine_dump_guest_core(current_machine
)) {
1372 ret
= qemu_madvise(addr
, size
, QEMU_MADV_DONTDUMP
);
1374 perror("qemu_madvise");
1375 fprintf(stderr
, "madvise doesn't support MADV_DONTDUMP, "
1376 "but dump_guest_core=off specified\n");
1381 const char *qemu_ram_get_idstr(RAMBlock
*rb
)
1386 /* Called with iothread lock held. */
1387 void qemu_ram_set_idstr(RAMBlock
*new_block
, const char *name
, DeviceState
*dev
)
1392 assert(!new_block
->idstr
[0]);
1395 char *id
= qdev_get_dev_path(dev
);
1397 snprintf(new_block
->idstr
, sizeof(new_block
->idstr
), "%s/", id
);
1401 pstrcat(new_block
->idstr
, sizeof(new_block
->idstr
), name
);
1404 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1405 if (block
!= new_block
&&
1406 !strcmp(block
->idstr
, new_block
->idstr
)) {
1407 fprintf(stderr
, "RAMBlock \"%s\" already registered, abort!\n",
1415 /* Called with iothread lock held. */
1416 void qemu_ram_unset_idstr(RAMBlock
*block
)
1418 /* FIXME: arch_init.c assumes that this is not called throughout
1419 * migration. Ignore the problem since hot-unplug during migration
1420 * does not work anyway.
1423 memset(block
->idstr
, 0, sizeof(block
->idstr
));
1427 static int memory_try_enable_merging(void *addr
, size_t len
)
1429 if (!machine_mem_merge(current_machine
)) {
1430 /* disabled by the user */
1434 return qemu_madvise(addr
, len
, QEMU_MADV_MERGEABLE
);
/* Only legal before guest might have detected the memory size: e.g. on
 * incoming migration, or right after reset.
 *
 * As the memory core doesn't know how memory is accessed, it is up to the
 * resize callback to update device state and/or add assertions to detect
 * misuse, if necessary.
 */
int qemu_ram_resize(RAMBlock *block, ram_addr_t newsize, Error **errp)
{
    assert(block);

    newsize = HOST_PAGE_ALIGN(newsize);

    if (block->used_length == newsize) {
        return 0;
    }

    if (!(block->flags & RAM_RESIZEABLE)) {
        error_setg_errno(errp, EINVAL,
                         "Length mismatch: %s: 0x" RAM_ADDR_FMT
                         " in != 0x" RAM_ADDR_FMT, block->idstr,
                         newsize, block->used_length);
        return -EINVAL;
    }

    if (block->max_length < newsize) {
        error_setg_errno(errp, EINVAL,
                         "Length too large: %s: 0x" RAM_ADDR_FMT
                         " > 0x" RAM_ADDR_FMT, block->idstr,
                         newsize, block->max_length);
        return -EINVAL;
    }

    cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
    block->used_length = newsize;
    cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
                                        DIRTY_CLIENTS_ALL);
    memory_region_set_size(block->mr, newsize);
    if (block->resized) {
        block->resized(block->idstr, newsize, block->host);
    }
    return 0;
}
/* Called with ram_list.mutex held */
static void dirty_memory_extend(ram_addr_t old_ram_size,
                                ram_addr_t new_ram_size)
{
    ram_addr_t old_num_blocks = DIV_ROUND_UP(old_ram_size,
                                             DIRTY_MEMORY_BLOCK_SIZE);
    ram_addr_t new_num_blocks = DIV_ROUND_UP(new_ram_size,
                                             DIRTY_MEMORY_BLOCK_SIZE);
    int i;

    /* Only need to extend if block count increased */
    if (new_num_blocks <= old_num_blocks) {
        return;
    }

    for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
        DirtyMemoryBlocks *old_blocks;
        DirtyMemoryBlocks *new_blocks;
        int j;

        old_blocks = atomic_rcu_read(&ram_list.dirty_memory[i]);
        new_blocks = g_malloc(sizeof(*new_blocks) +
                              sizeof(new_blocks->blocks[0]) * new_num_blocks);

        if (old_num_blocks) {
            memcpy(new_blocks->blocks, old_blocks->blocks,
                   old_num_blocks * sizeof(old_blocks->blocks[0]));
        }

        for (j = old_num_blocks; j < new_num_blocks; j++) {
            new_blocks->blocks[j] = bitmap_new(DIRTY_MEMORY_BLOCK_SIZE);
        }

        atomic_rcu_set(&ram_list.dirty_memory[i], new_blocks);

        if (old_blocks) {
            g_free_rcu(old_blocks, rcu);
        }
    }
}
static void ram_block_add(RAMBlock *new_block, Error **errp)
{
    RAMBlock *block;
    RAMBlock *last_block = NULL;
    ram_addr_t old_ram_size, new_ram_size;
    Error *err = NULL;

    old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;

    qemu_mutex_lock_ramlist();
    new_block->offset = find_ram_offset(new_block->max_length);

    if (!new_block->host) {
        if (xen_enabled()) {
            xen_ram_alloc(new_block->offset, new_block->max_length,
                          new_block->mr, &err);
            if (err) {
                error_propagate(errp, err);
                qemu_mutex_unlock_ramlist();
                return;
            }
        } else {
            new_block->host = phys_mem_alloc(new_block->max_length,
                                             &new_block->mr->align);
            if (!new_block->host) {
                error_setg_errno(errp, errno,
                                 "cannot set up guest memory '%s'",
                                 memory_region_name(new_block->mr));
                qemu_mutex_unlock_ramlist();
                return;
            }
            memory_try_enable_merging(new_block->host, new_block->max_length);
        }
    }

    new_ram_size = MAX(old_ram_size,
              (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS);
    if (new_ram_size > old_ram_size) {
        migration_bitmap_extend(old_ram_size, new_ram_size);
        dirty_memory_extend(old_ram_size, new_ram_size);
    }
    /* Keep the list sorted from biggest to smallest block.  Unlike QTAILQ,
     * QLIST (which has an RCU-friendly variant) does not have insertion at
     * tail, so save the last element in last_block.
     */
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        last_block = block;
        if (block->max_length < new_block->max_length) {
            break;
        }
    }
    if (block) {
        QLIST_INSERT_BEFORE_RCU(block, new_block, next);
    } else if (last_block) {
        QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
    } else { /* list is empty */
        QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
    }
    ram_list.mru_block = NULL;

    /* Write list before version */
    smp_wmb();
    ram_list.version++;
    qemu_mutex_unlock_ramlist();

    cpu_physical_memory_set_dirty_range(new_block->offset,
                                        new_block->used_length,
                                        DIRTY_CLIENTS_ALL);

    if (new_block->host) {
        qemu_ram_setup_dump(new_block->host, new_block->max_length);
        qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
        /* MADV_DONTFORK is also needed by KVM in absence of synchronous MMU */
        qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
    }
}
RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
                                   bool share, const char *mem_path,
                                   Error **errp)
{
    RAMBlock *new_block;
    Error *local_err = NULL;

    if (xen_enabled()) {
        error_setg(errp, "-mem-path not supported with Xen");
        return NULL;
    }

    if (phys_mem_alloc != qemu_anon_ram_alloc) {
        /*
         * file_ram_alloc() needs to allocate just like
         * phys_mem_alloc, but we haven't bothered to provide
         * a hook there.
         */
        error_setg(errp,
                   "-mem-path not supported with this accelerator");
        return NULL;
    }

    size = HOST_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));
    new_block->mr = mr;
    new_block->used_length = size;
    new_block->max_length = size;
    new_block->flags = share ? RAM_SHARED : 0;
    new_block->host = file_ram_alloc(new_block, size,
                                     mem_path, errp);
    if (!new_block->host) {
        g_free(new_block);
        return NULL;
    }

    ram_block_add(new_block, &local_err);
    if (local_err) {
        g_free(new_block);
        error_propagate(errp, local_err);
        return NULL;
    }
    return new_block;
}
#endif
static
RAMBlock *qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
                                  void (*resized)(const char*,
                                                  uint64_t length,
                                                  void *host),
                                  void *host, bool resizeable,
                                  MemoryRegion *mr, Error **errp)
{
    RAMBlock *new_block;
    Error *local_err = NULL;

    size = HOST_PAGE_ALIGN(size);
    max_size = HOST_PAGE_ALIGN(max_size);
    new_block = g_malloc0(sizeof(*new_block));
    new_block->mr = mr;
    new_block->resized = resized;
    new_block->used_length = size;
    new_block->max_length = max_size;
    assert(max_size >= size);
    new_block->fd = -1;
    new_block->host = host;
    if (host) {
        new_block->flags |= RAM_PREALLOC;
    }
    if (resizeable) {
        new_block->flags |= RAM_RESIZEABLE;
    }
    ram_block_add(new_block, &local_err);
    if (local_err) {
        g_free(new_block);
        error_propagate(errp, local_err);
        return NULL;
    }
    return new_block;
}

RAMBlock *qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                  MemoryRegion *mr, Error **errp)
{
    return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
}

RAMBlock *qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
{
    return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
}

RAMBlock *qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
                                    void (*resized)(const char*,
                                                    uint64_t length,
                                                    void *host),
                                    MemoryRegion *mr, Error **errp)
{
    return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
}
static void reclaim_ramblock(RAMBlock *block)
{
    if (block->flags & RAM_PREALLOC) {
        ;
    } else if (xen_enabled()) {
        xen_invalidate_map_cache_entry(block->host);
#ifndef _WIN32
    } else if (block->fd >= 0) {
        qemu_ram_munmap(block->host, block->max_length);
        close(block->fd);
#endif
    } else {
        qemu_anon_ram_free(block->host, block->max_length);
    }
    g_free(block);
}

void qemu_ram_free(RAMBlock *block)
{
    if (!block) {
        return;
    }

    qemu_mutex_lock_ramlist();
    QLIST_REMOVE_RCU(block, next);
    ram_list.mru_block = NULL;
    /* Write list before version */
    smp_wmb();
    ram_list.version++;
    call_rcu(block, reclaim_ramblock, rcu);
    qemu_mutex_unlock_ramlist();
}
#ifndef _WIN32
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->max_length) {
            vaddr = ramblock_ptr(block, offset);
            if (block->flags & RAM_PREALLOC) {
                ;
            } else if (xen_enabled()) {
                abort();
            } else {
                flags = MAP_FIXED;
                if (block->fd >= 0) {
                    flags |= (block->flags & RAM_SHARED ?
                              MAP_SHARED : MAP_PRIVATE);
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, block->fd, offset);
                } else {
                    /*
                     * Remap needs to match alloc.  Accelerators that
                     * set phys_mem_alloc never remap.  If they did,
                     * we'd need a remap hook here.
                     */
                    assert(phys_mem_alloc == qemu_anon_ram_alloc);

                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                memory_try_enable_merging(vaddr, length);
                qemu_ram_setup_dump(vaddr, length);
            }
        }
    }
}
#endif /* !_WIN32 */
/* Return a host pointer to ram allocated with qemu_ram_alloc.
 * This should not be used for general purpose DMA.  Use address_space_map
 * or address_space_rw instead. For local memory (e.g. video ram) that the
 * device owns, use memory_region_get_ram_ptr.
 *
 * Called within RCU critical section.
 */
void *qemu_map_ram_ptr(RAMBlock *ram_block, ram_addr_t addr)
{
    RAMBlock *block = ram_block;

    if (block == NULL) {
        block = qemu_get_ram_block(addr);
        addr -= block->offset;
    }

    if (xen_enabled() && block->host == NULL) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map until the end of the page.
         */
        if (block->offset == 0) {
            return xen_map_cache(addr, 0, 0);
        }

        block->host = xen_map_cache(block->offset, block->max_length, 1);
    }
    return ramblock_ptr(block, addr);
}
/* Return a host pointer to guest's ram. Similar to qemu_map_ram_ptr
 * but takes a size argument.
 *
 * Called within RCU critical section.
 */
static void *qemu_ram_ptr_length(RAMBlock *ram_block, ram_addr_t addr,
                                 hwaddr *size)
{
    RAMBlock *block = ram_block;
    if (*size == 0) {
        return NULL;
    }

    if (block == NULL) {
        block = qemu_get_ram_block(addr);
        addr -= block->offset;
    }
    *size = MIN(*size, block->max_length - addr);

    if (xen_enabled() && block->host == NULL) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map the requested area.
         */
        if (block->offset == 0) {
            return xen_map_cache(addr, *size, 1);
        }

        block->host = xen_map_cache(block->offset, block->max_length, 1);
    }

    return ramblock_ptr(block, addr);
}
/*
 * Translates a host ptr back to a RAMBlock, a ram_addr and an offset
 * in that RAMBlock.
 *
 * ptr: Host pointer to look up
 * round_offset: If true round the result offset down to a page boundary
 * *ram_addr: set to result ram_addr
 * *offset: set to result offset within the RAMBlock
 *
 * Returns: RAMBlock (or NULL if not found)
 *
 * By the time this function returns, the returned pointer is not protected
 * by RCU anymore.  If the caller is not within an RCU critical section and
 * does not hold the iothread lock, it must have other means of protecting the
 * pointer, such as a reference to the region that includes the incoming
 * ram_addr.
 */
RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
                                   ram_addr_t *offset)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_enabled()) {
        ram_addr_t ram_addr;
        rcu_read_lock();
        ram_addr = xen_ram_addr_from_mapcache(ptr);
        block = qemu_get_ram_block(ram_addr);
        if (block) {
            *offset = ram_addr - block->offset;
        }
        rcu_read_unlock();
        return block;
    }

    rcu_read_lock();
    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && block->host && host - block->host < block->max_length) {
        goto found;
    }

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        /* This case happens when the block is not mapped. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->max_length) {
            goto found;
        }
    }

    rcu_read_unlock();
    return NULL;

found:
    *offset = (host - block->host);
    if (round_offset) {
        *offset &= TARGET_PAGE_MASK;
    }
    rcu_read_unlock();
    return block;
}
/*
 * Finds the named RAMBlock
 *
 * name: The name of RAMBlock to find
 *
 * Returns: RAMBlock (or NULL if not found)
 */
RAMBlock *qemu_ram_block_by_name(const char *name)
{
    RAMBlock *block;

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (!strcmp(name, block->idstr)) {
            return block;
        }
    }

    return NULL;
}

/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
ram_addr_t qemu_ram_addr_from_host(void *ptr)
{
    RAMBlock *block;
    ram_addr_t offset;

    block = qemu_ram_block_from_host(ptr, false, &offset);
    if (!block) {
        return RAM_ADDR_INVALID;
    }

    return block->offset + offset;
}
1947 static void notdirty_mem_write(void *opaque
, hwaddr ram_addr
,
1948 uint64_t val
, unsigned size
)
1950 if (!cpu_physical_memory_get_dirty_flag(ram_addr
, DIRTY_MEMORY_CODE
)) {
1951 tb_invalidate_phys_page_fast(ram_addr
, size
);
1955 stb_p(qemu_map_ram_ptr(NULL
, ram_addr
), val
);
1958 stw_p(qemu_map_ram_ptr(NULL
, ram_addr
), val
);
1961 stl_p(qemu_map_ram_ptr(NULL
, ram_addr
), val
);
1966 /* Set both VGA and migration bits for simplicity and to remove
1967 * the notdirty callback faster.
1969 cpu_physical_memory_set_dirty_range(ram_addr
, size
,
1970 DIRTY_CLIENTS_NOCODE
);
1971 /* we remove the notdirty callback only if the code has been
1973 if (!cpu_physical_memory_is_clean(ram_addr
)) {
1974 tlb_set_dirty(current_cpu
, current_cpu
->mem_io_vaddr
);
1978 static bool notdirty_mem_accepts(void *opaque
, hwaddr addr
,
1979 unsigned size
, bool is_write
)
1984 static const MemoryRegionOps notdirty_mem_ops
= {
1985 .write
= notdirty_mem_write
,
1986 .valid
.accepts
= notdirty_mem_accepts
,
1987 .endianness
= DEVICE_NATIVE_ENDIAN
,
/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
{
    CPUState *cpu = current_cpu;
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUArchState *env = cpu->env_ptr;
    target_ulong pc, cs_base;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    uint32_t cpu_flags;

    if (cpu->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, len)
            && (wp->flags & flags)) {
            if (flags == BP_MEM_READ) {
                wp->flags |= BP_WATCHPOINT_HIT_READ;
            } else {
                wp->flags |= BP_WATCHPOINT_HIT_WRITE;
            }
            wp->hitaddr = vaddr;
            wp->hitattrs = attrs;
            if (!cpu->watchpoint_hit) {
                if (wp->flags & BP_CPU &&
                    !cc->debug_check_watchpoint(cpu, wp)) {
                    wp->flags &= ~BP_WATCHPOINT_HIT;
                    continue;
                }
                cpu->watchpoint_hit = wp;
                tb_check_watchpoint(cpu);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    cpu->exception_index = EXCP_DEBUG;
                    cpu_loop_exit(cpu);
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
                    cpu_loop_exit_noexc(cpu);
                }
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   memory routines.  */
static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata,
                                  unsigned size, MemTxAttrs attrs)
{
    MemTxResult res;
    uint64_t data;
    int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
    AddressSpace *as = current_cpu->cpu_ases[asidx].as;

    check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ);
    switch (size) {
    case 1:
        data = address_space_ldub(as, addr, attrs, &res);
        break;
    case 2:
        data = address_space_lduw(as, addr, attrs, &res);
        break;
    case 4:
        data = address_space_ldl(as, addr, attrs, &res);
        break;
    default:
        abort();
    }
    *pdata = data;
    return res;
}
static MemTxResult watch_mem_write(void *opaque, hwaddr addr,
                                   uint64_t val, unsigned size,
                                   MemTxAttrs attrs)
{
    MemTxResult res;
    int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
    AddressSpace *as = current_cpu->cpu_ases[asidx].as;

    check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE);
    switch (size) {
    case 1:
        address_space_stb(as, addr, val, attrs, &res);
        break;
    case 2:
        address_space_stw(as, addr, val, attrs, &res);
        break;
    case 4:
        address_space_stl(as, addr, val, attrs, &res);
        break;
    default:
        abort();
    }
    return res;
}

static const MemoryRegionOps watch_mem_ops = {
    .read_with_attrs = watch_mem_read,
    .write_with_attrs = watch_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
                                unsigned len, MemTxAttrs attrs)
{
    subpage_t *subpage = opaque;
    uint8_t buf[8];
    MemTxResult res;

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
           subpage, len, addr);
#endif
    res = address_space_read(subpage->as, addr + subpage->base,
                             attrs, buf, len);
    if (res) {
        return res;
    }
    switch (len) {
    case 1:
        *data = ldub_p(buf);
        return MEMTX_OK;
    case 2:
        *data = lduw_p(buf);
        return MEMTX_OK;
    case 4:
        *data = ldl_p(buf);
        return MEMTX_OK;
    case 8:
        *data = ldq_p(buf);
        return MEMTX_OK;
    default:
        abort();
    }
}
static MemTxResult subpage_write(void *opaque, hwaddr addr,
                                 uint64_t value, unsigned len, MemTxAttrs attrs)
{
    subpage_t *subpage = opaque;
    uint8_t buf[8];

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx
           " value %"PRIx64"\n",
           __func__, subpage, len, addr, value);
#endif
    switch (len) {
    case 1:
        stb_p(buf, value);
        break;
    case 2:
        stw_p(buf, value);
        break;
    case 4:
        stl_p(buf, value);
        break;
    case 8:
        stq_p(buf, value);
        break;
    default:
        abort();
    }
    return address_space_write(subpage->as, addr + subpage->base,
                               attrs, buf, len);
}
static bool subpage_accepts(void *opaque, hwaddr addr,
                            unsigned len, bool is_write)
{
    subpage_t *subpage = opaque;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
           __func__, subpage, is_write ? 'w' : 'r', len, addr);
#endif

    return address_space_access_valid(subpage->as, addr + subpage->base,
                                      len, is_write);
}

static const MemoryRegionOps subpage_ops = {
    .read_with_attrs = subpage_read,
    .write_with_attrs = subpage_write,
    .impl.min_access_size = 1,
    .impl.max_access_size = 8,
    .valid.min_access_size = 1,
    .valid.max_access_size = 8,
    .valid.accepts = subpage_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE) {
        return -1;
    }
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
           __func__, mmio, start, end, idx, eidx, section);
#endif
    for (; idx <= eidx; idx++) {
        mmio->sub_section[idx] = section;
    }

    return 0;
}
static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
{
    subpage_t *mmio;

    mmio = g_malloc0(sizeof(subpage_t));

    mmio->as = as;
    mmio->base = base;
    memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
                          NULL, TARGET_PAGE_SIZE);
    mmio->iomem.subpage = true;
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
           mmio, base, TARGET_PAGE_SIZE);
#endif
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);

    return mmio;
}
static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
                              MemoryRegion *mr)
{
    assert(as);
    MemoryRegionSection section = {
        .address_space = as,
        .mr = mr,
        .offset_within_address_space = 0,
        .offset_within_region = 0,
        .size = int128_2_64(),
    };

    return phys_section_add(map, &section);
}
MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index, MemTxAttrs attrs)
{
    int asidx = cpu_asidx_from_attrs(cpu, attrs);
    CPUAddressSpace *cpuas = &cpu->cpu_ases[asidx];
    AddressSpaceDispatch *d = atomic_rcu_read(&cpuas->memory_dispatch);
    MemoryRegionSection *sections = d->map.sections;

    return sections[index & ~TARGET_PAGE_MASK].mr;
}
static void io_mem_init(void)
{
    memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
    memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
                          NULL, UINT64_MAX);
    memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
                          NULL, UINT64_MAX);
    memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
                          NULL, UINT64_MAX);
}
static void mem_begin(MemoryListener *listener)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
    uint16_t n;

    n = dummy_section(&d->map, as, &io_mem_unassigned);
    assert(n == PHYS_SECTION_UNASSIGNED);
    n = dummy_section(&d->map, as, &io_mem_notdirty);
    assert(n == PHYS_SECTION_NOTDIRTY);
    n = dummy_section(&d->map, as, &io_mem_rom);
    assert(n == PHYS_SECTION_ROM);
    n = dummy_section(&d->map, as, &io_mem_watch);
    assert(n == PHYS_SECTION_WATCH);

    d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
    d->as = as;
    as->next_dispatch = d;
}

static void address_space_dispatch_free(AddressSpaceDispatch *d)
{
    phys_sections_free(&d->map);
    g_free(d);
}
static void mem_commit(MemoryListener *listener)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *cur = as->dispatch;
    AddressSpaceDispatch *next = as->next_dispatch;

    phys_page_compact_all(next, next->map.nodes_nb);

    atomic_rcu_set(&as->dispatch, next);
    if (cur) {
        call_rcu(cur, address_space_dispatch_free, rcu);
    }
}
static void tcg_commit(MemoryListener *listener)
{
    CPUAddressSpace *cpuas;
    AddressSpaceDispatch *d;

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener);
    cpu_reloading_memory_map();
    /* The CPU and TLB are protected by the iothread lock.
     * We reload the dispatch pointer now because cpu_reloading_memory_map()
     * may have split the RCU critical section.
     */
    d = atomic_rcu_read(&cpuas->as->dispatch);
    cpuas->memory_dispatch = d;
    tlb_flush(cpuas->cpu, 1);
}
void address_space_init_dispatch(AddressSpace *as)
{
    as->dispatch = NULL;
    as->dispatch_listener = (MemoryListener) {
        .begin = mem_begin,
        .commit = mem_commit,
        .region_add = mem_add,
        .region_nop = mem_add,
        .priority = 0,
    };
    memory_listener_register(&as->dispatch_listener, as);
}

void address_space_unregister(AddressSpace *as)
{
    memory_listener_unregister(&as->dispatch_listener);
}

void address_space_destroy_dispatch(AddressSpace *as)
{
    AddressSpaceDispatch *d = as->dispatch;

    atomic_rcu_set(&as->dispatch, NULL);
    if (d) {
        call_rcu(d, address_space_dispatch_free, rcu);
    }
}
static void memory_map_init(void)
{
    system_memory = g_malloc(sizeof(*system_memory));

    memory_region_init(system_memory, NULL, "system", UINT64_MAX);
    address_space_init(&address_space_memory, system_memory, "memory");

    system_io = g_malloc(sizeof(*system_io));
    memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
                          65536);
    address_space_init(&address_space_io, system_io, "I/O");
}

MemoryRegion *get_system_memory(void)
{
    return system_memory;
}

MemoryRegion *get_system_io(void)
{
    return system_io;
}

#endif /* !defined(CONFIG_USER_ONLY) */
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
#else
static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
                                     hwaddr length)
{
    uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
    addr += memory_region_get_ram_addr(mr);

    /* No early return if dirty_log_mask is or becomes 0, because
     * cpu_physical_memory_set_dirty_range will still call
     * xen_modified_memory.
     */
    if (dirty_log_mask) {
        dirty_log_mask =
            cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
    }
    if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
        tb_invalidate_phys_range(addr, addr + length);
        dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
    }
    cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
}
static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
{
    unsigned access_size_max = mr->ops->valid.max_access_size;

    /* Regions are assumed to support 1-4 byte accesses unless
       otherwise specified.  */
    if (access_size_max == 0) {
        access_size_max = 4;
    }

    /* Bound the maximum access by the alignment of the address.  */
    if (!mr->ops->impl.unaligned) {
        unsigned align_size_max = addr & -addr;
        if (align_size_max != 0 && align_size_max < access_size_max) {
            access_size_max = align_size_max;
        }
    }

    /* Don't attempt accesses larger than the maximum.  */
    if (l > access_size_max) {
        l = access_size_max;
    }
    l = pow2floor(l);

    return l;
}
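
/* Illustrative note (not QEMU code): "addr & -addr" above isolates the
 * lowest set bit of the address, i.e. the largest natural alignment the
 * address satisfies. For example 0x1006 & -0x1006 == 0x2, so a region that
 * does not tolerate unaligned accesses is limited to 2-byte accesses at
 * that address; a result of 0 (addr == 0) leaves the maximum unchanged.
 * The helper below is hypothetical and only demonstrates the arithmetic.
 */
static inline hwaddr example_natural_alignment(hwaddr addr)
{
    return addr & -addr;
}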
static bool prepare_mmio_access(MemoryRegion *mr)
{
    bool unlocked = !qemu_mutex_iothread_locked();
    bool release_lock = false;

    if (unlocked && mr->global_locking) {
        qemu_mutex_lock_iothread();
        unlocked = false;
        release_lock = true;
    }
    if (mr->flush_coalesced_mmio) {
        if (unlocked) {
            qemu_mutex_lock_iothread();
        }
        qemu_flush_coalesced_mmio_buffer();
        if (unlocked) {
            qemu_mutex_unlock_iothread();
        }
    }

    return release_lock;
}
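
/* Illustrative sketch (not QEMU code): the contract of the helper above.
 * Callers accumulate its result and drop the big lock once at the end of
 * the access, exactly as the dispatch loops below do.
 */
static void __attribute__((unused)) example_mmio_access(MemoryRegion *mr)
{
    bool release_lock = prepare_mmio_access(mr);

    /* ... perform memory_region_dispatch_read/write() here ... */

    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
}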
/* Called within RCU critical section.  */
static MemTxResult address_space_write_continue(AddressSpace *as, hwaddr addr,
                                                MemTxAttrs attrs,
                                                const uint8_t *buf,
                                                int len, hwaddr addr1,
                                                hwaddr l, MemoryRegion *mr)
{
    uint8_t *ptr;
    uint64_t val;
    MemTxResult result = MEMTX_OK;
    bool release_lock = false;

    for (;;) {
        if (!memory_access_is_direct(mr, true)) {
            release_lock |= prepare_mmio_access(mr);
            l = memory_access_size(mr, l, addr1);
            /* XXX: could force current_cpu to NULL to avoid
               potential bugs */
            switch (l) {
            case 8:
                /* 64 bit write access */
                val = ldq_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 8,
                                                       attrs);
                break;
            case 4:
                /* 32 bit write access */
                val = ldl_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 4,
                                                       attrs);
                break;
            case 2:
                /* 16 bit write access */
                val = lduw_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 2,
                                                       attrs);
                break;
            case 1:
                /* 8 bit write access */
                val = ldub_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 1,
                                                       attrs);
                break;
            default:
                abort();
            }
        } else {
            /* RAM case */
            ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
            memcpy(ptr, buf, l);
            invalidate_and_set_dirty(mr, addr1, l);
        }

        if (release_lock) {
            qemu_mutex_unlock_iothread();
            release_lock = false;
        }

        len -= l;
        buf += l;
        addr += l;

        if (!len) {
            break;
        }

        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);
    }

    return result;
}
MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
                                const uint8_t *buf, int len)
{
    hwaddr l;
    hwaddr addr1;
    MemoryRegion *mr;
    MemTxResult result = MEMTX_OK;

    if (len > 0) {
        rcu_read_lock();
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);
        result = address_space_write_continue(as, addr, attrs, buf, len,
                                              addr1, l, mr);
        rcu_read_unlock();
    }

    return result;
}
/* Called within RCU critical section.  */
MemTxResult address_space_read_continue(AddressSpace *as, hwaddr addr,
                                        MemTxAttrs attrs, uint8_t *buf,
                                        int len, hwaddr addr1, hwaddr l,
                                        MemoryRegion *mr)
{
    uint8_t *ptr;
    uint64_t val;
    MemTxResult result = MEMTX_OK;
    bool release_lock = false;

    for (;;) {
        if (!memory_access_is_direct(mr, false)) {
            /* I/O case */
            release_lock |= prepare_mmio_access(mr);
            l = memory_access_size(mr, l, addr1);
            switch (l) {
            case 8:
                /* 64 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 8,
                                                      attrs);
                stq_p(buf, val);
                break;
            case 4:
                /* 32 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 4,
                                                      attrs);
                stl_p(buf, val);
                break;
            case 2:
                /* 16 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 2,
                                                      attrs);
                stw_p(buf, val);
                break;
            case 1:
                /* 8 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 1,
                                                      attrs);
                stb_p(buf, val);
                break;
            default:
                abort();
            }
        } else {
            /* RAM case */
            ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
            memcpy(buf, ptr, l);
        }

        if (release_lock) {
            qemu_mutex_unlock_iothread();
            release_lock = false;
        }

        len -= l;
        buf += l;
        addr += l;

        if (!len) {
            break;
        }

        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, false);
    }

    return result;
}
MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
                                    MemTxAttrs attrs, uint8_t *buf, int len)
{
    hwaddr l;
    hwaddr addr1;
    MemoryRegion *mr;
    MemTxResult result = MEMTX_OK;

    if (len > 0) {
        rcu_read_lock();
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, false);
        result = address_space_read_continue(as, addr, attrs, buf, len,
                                             addr1, l, mr);
        rcu_read_unlock();
    }

    return result;
}
MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
                             uint8_t *buf, int len, bool is_write)
{
    if (is_write) {
        return address_space_write(as, addr, attrs, (uint8_t *)buf, len);
    } else {
        return address_space_read(as, addr, attrs, (uint8_t *)buf, len);
    }
}
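
/* Illustrative sketch (not QEMU code): a typical caller of the slow-path
 * accessors above, reading four bytes of guest-physical memory and checking
 * the transaction result. The example_* name and the 0x1000 address are
 * arbitrary.
 */
static void __attribute__((unused)) example_read_guest_word(AddressSpace *as)
{
    uint8_t data[4];
    MemTxResult res;

    res = address_space_rw(as, 0x1000, MEMTXATTRS_UNSPECIFIED,
                           data, sizeof(data), false);
    if (res != MEMTX_OK) {
        /* the access hit unassigned memory or a device that failed it */
    }
}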
void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
                            int len, int is_write)
{
    address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
                     buf, len, is_write);
}
enum write_rom_type {
    WRITE_DATA,
    FLUSH_CACHE,
};
static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
    hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
{
    hwaddr l;
    uint8_t *ptr;
    hwaddr addr1;
    MemoryRegion *mr;

    rcu_read_lock();
    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);

        if (!(memory_region_is_ram(mr) ||
              memory_region_is_romd(mr))) {
            l = memory_access_size(mr, l, addr1);
        } else {
            /* ROM/RAM case */
            ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
            switch (type) {
            case WRITE_DATA:
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(mr, addr1, l);
                break;
            case FLUSH_CACHE:
                flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
                break;
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
    rcu_read_unlock();
}
/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
                                   const uint8_t *buf, int len)
{
    cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
}
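
/* Illustrative sketch (not QEMU code): ROM loaders use the helper above
 * because a plain address_space_write() would be refused by read-only
 * regions. The blob contents and the load address are made up.
 */
static void __attribute__((unused)) example_load_firmware(AddressSpace *as)
{
    static const uint8_t blob[] = { 0xde, 0xad, 0xbe, 0xef };

    cpu_physical_memory_write_rom(as, 0xfffc0000, blob, sizeof(blob));
}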
void cpu_flush_icache_range(hwaddr start, int len)
{
    /*
     * This function should do the same thing as an icache flush that was
     * triggered from within the guest. For TCG we are always cache coherent,
     * so there is no need to flush anything. For KVM / Xen we need to flush
     * the host's instruction cache at least.
     */
    if (tcg_enabled()) {
        return;
    }

    cpu_physical_memory_write_rom_internal(&address_space_memory,
                                           start, NULL, len, FLUSH_CACHE);
}
typedef struct {
    MemoryRegion *mr;
    void *buffer;
    hwaddr addr;
    hwaddr len;
    bool in_use;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    QEMUBH *bh;
    QLIST_ENTRY(MapClient) link;
} MapClient;

QemuMutex map_client_list_lock;
static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);
static void cpu_unregister_map_client_do(MapClient *client)
{
    QLIST_REMOVE(client, link);
    g_free(client);
}
static void cpu_notify_map_clients_locked(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        qemu_bh_schedule(client->bh);
        cpu_unregister_map_client_do(client);
    }
}
void cpu_register_map_client(QEMUBH *bh)
{
    MapClient *client = g_malloc(sizeof(*client));

    qemu_mutex_lock(&map_client_list_lock);
    client->bh = bh;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    if (!atomic_read(&bounce.in_use)) {
        cpu_notify_map_clients_locked();
    }
    qemu_mutex_unlock(&map_client_list_lock);
}
void cpu_exec_init_all(void)
{
    qemu_mutex_init(&ram_list.mutex);
    io_mem_init();
    memory_map_init();
    qemu_mutex_init(&map_client_list_lock);
}
void cpu_unregister_map_client(QEMUBH *bh)
{
    MapClient *client;

    qemu_mutex_lock(&map_client_list_lock);
    QLIST_FOREACH(client, &map_client_list, link) {
        if (client->bh == bh) {
            cpu_unregister_map_client_do(client);
            break;
        }
    }
    qemu_mutex_unlock(&map_client_list_lock);
}
static void cpu_notify_map_clients(void)
{
    qemu_mutex_lock(&map_client_list_lock);
    cpu_notify_map_clients_locked();
    qemu_mutex_unlock(&map_client_list_lock);
}
bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
{
    MemoryRegion *mr;
    hwaddr l, xlat;

    rcu_read_lock();
    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (!memory_access_is_direct(mr, is_write)) {
            l = memory_access_size(mr, l, addr);
            if (!memory_region_access_valid(mr, xlat, l, is_write)) {
                return false;
            }
        }

        len -= l;
        addr += l;
    }
    rcu_read_unlock();
    return true;
}
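
/* Illustrative sketch (not QEMU code): device emulation can probe a DMA
 * window with address_space_access_valid() before committing to a transfer,
 * instead of discovering a failure halfway through. The example_* helper is
 * hypothetical.
 */
static bool __attribute__((unused)) example_dma_window_ok(AddressSpace *as,
                                                          hwaddr base, int size)
{
    return address_space_access_valid(as, base, size, true) &&
           address_space_access_valid(as, base, size, false);
}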
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *address_space_map(AddressSpace *as,
                        hwaddr addr,
                        hwaddr *plen,
                        bool is_write)
{
    hwaddr len = *plen;
    hwaddr done = 0;
    hwaddr l, xlat, base;
    MemoryRegion *mr, *this_mr;
    void *ptr;

    if (len == 0) {
        return NULL;
    }

    l = len;
    rcu_read_lock();
    mr = address_space_translate(as, addr, &xlat, &l, is_write);

    if (!memory_access_is_direct(mr, is_write)) {
        if (atomic_xchg(&bounce.in_use, true)) {
            rcu_read_unlock();
            return NULL;
        }
        /* Avoid unbounded allocations */
        l = MIN(l, TARGET_PAGE_SIZE);
        bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
        bounce.addr = addr;
        bounce.len = l;

        memory_region_ref(mr);
        bounce.mr = mr;
        if (!is_write) {
            address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
                               bounce.buffer, l);
        }

        rcu_read_unlock();
        *plen = l;
        return bounce.buffer;
    }

    base = xlat;

    for (;;) {
        len -= l;
        addr += l;
        done += l;
        if (len == 0) {
            break;
        }

        l = len;
        this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (this_mr != mr || xlat != base + done) {
            break;
        }
    }

    memory_region_ref(mr);
    *plen = done;
    ptr = qemu_ram_ptr_length(mr->ram_block, base, plen);
    rcu_read_unlock();

    return ptr;
}
/* Unmaps a memory region previously mapped by address_space_map().
 * Will also mark the memory as dirty if is_write == 1. access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         int is_write, hwaddr access_len)
{
    if (buffer != bounce.buffer) {
        MemoryRegion *mr;
        ram_addr_t addr1;

        mr = memory_region_from_host(buffer, &addr1);
        assert(mr != NULL);
        if (is_write) {
            invalidate_and_set_dirty(mr, addr1, access_len);
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        memory_region_unref(mr);
        return;
    }
    if (is_write) {
        address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
                            bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    memory_region_unref(bounce.mr);
    atomic_mb_set(&bounce.in_use, false);
    cpu_notify_map_clients();
}
void *cpu_physical_memory_map(hwaddr addr,
                              hwaddr *plen,
                              int is_write)
{
    return address_space_map(&address_space_memory, addr, plen, is_write);
}
void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                               int is_write, hwaddr access_len)
{
    return address_space_unmap(&address_space_memory, buffer, len,
                               is_write, access_len);
}
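
/* Illustrative sketch (not QEMU code): the canonical map/modify/unmap
 * sequence for zero-copy DMA. If the translation lands on MMIO, the mapping
 * comes from the single bounce buffer and can fail while that buffer is in
 * use, in which case a real caller would register a map client and retry.
 * The example_* helper is hypothetical.
 */
static void __attribute__((unused)) example_dma_fill(AddressSpace *as,
                                                     hwaddr addr, hwaddr size)
{
    hwaddr plen = size;
    void *host = address_space_map(as, addr, &plen, true);

    if (!host) {
        return; /* resources exhausted: retry via cpu_register_map_client() */
    }
    memset(host, 0, plen);                    /* plen may be less than size */
    address_space_unmap(as, host, plen, true, plen);
}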
/* warning: addr must be aligned */
static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
                                                  MemTxAttrs attrs,
                                                  MemTxResult *result,
                                                  enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, false);
    if (l < 4 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}
uint32_t address_space_ldl(AddressSpace *as, hwaddr addr,
                           MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_NATIVE_ENDIAN);
}

uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_LITTLE_ENDIAN);
}

uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_BIG_ENDIAN);
}

uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}
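
/* Illustrative sketch (not QEMU code): reading a 32-bit little-endian
 * device register through the explicit-endian helper above, with the
 * optional MemTxResult out-parameter used to detect failed transactions.
 * The example_* helper is hypothetical.
 */
static uint32_t __attribute__((unused)) example_read_le_reg(AddressSpace *as,
                                                            hwaddr reg)
{
    MemTxResult res;
    uint32_t val = address_space_ldl_le(as, reg, MEMTXATTRS_UNSPECIFIED, &res);

    return res == MEMTX_OK ? val : 0;
}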
/* warning: addr must be aligned */
static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
                                                  MemTxAttrs attrs,
                                                  MemTxResult *result,
                                                  enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 8;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 false);
    if (l < 8 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap64(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap64(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}
uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
                           MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_NATIVE_ENDIAN);
}

uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_LITTLE_ENDIAN);
}

uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_BIG_ENDIAN);
}

uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}
uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
                            MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t val;
    MemTxResult r;

    r = address_space_rw(as, addr, attrs, &val, 1, 0);
    if (result) {
        *result = r;
    }
    return val;
}

uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}
/* warning: addr must be aligned */
static inline uint32_t address_space_lduw_internal(AddressSpace *as,
                                                   hwaddr addr,
                                                   MemTxAttrs attrs,
                                                   MemTxResult *result,
                                                   enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 false);
    if (l < 2 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}
uint32_t address_space_lduw(AddressSpace *as, hwaddr addr,
                            MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_NATIVE_ENDIAN);
}

uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
                               MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_LITTLE_ENDIAN);
}

uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
                               MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_BIG_ENDIAN);
}

uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}
/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
                                MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    uint8_t dirty_log_mask;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

        r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
    } else {
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        stl_p(ptr, val);

        dirty_log_mask = memory_region_get_dirty_log_mask(mr);
        dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
        cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
                                            4, dirty_log_mask);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}

void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_notdirty(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
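
/* Illustrative sketch (not QEMU code): target MMU helpers use the notdirty
 * store above when hardware-style page table updates (e.g. setting an
 * accessed/dirty bit in a PTE) must not be mistaken for guest self-modifying
 * code. The example_* helper and the PTE bit layout are hypothetical.
 */
static void __attribute__((unused)) example_set_pte_dirty(AddressSpace *as,
                                                          hwaddr pte_addr)
{
    uint32_t pte = ldl_phys(as, pte_addr);

    stl_phys_notdirty(as, pte_addr, pte | (1u << 6)); /* hypothetical D bit */
}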
/* warning: addr must be aligned */
static inline void address_space_stl_internal(AddressSpace *as,
                                              hwaddr addr, uint32_t val,
                                              MemTxAttrs attrs,
                                              MemTxResult *result,
                                              enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(mr, addr1, 4);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}
void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stl_internal(as, addr, val, attrs, result,
                               DEVICE_NATIVE_ENDIAN);
}

void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stl_internal(as, addr, val, attrs, result,
                               DEVICE_LITTLE_ENDIAN);
}

void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stl_internal(as, addr, val, attrs, result,
                               DEVICE_BIG_ENDIAN);
}

void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t v = val;
    MemTxResult r;

    r = address_space_rw(as, addr, attrs, &v, 1, 1);
    if (result) {
        *result = r;
    }
}

void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
/* warning: addr must be aligned */
static inline void address_space_stw_internal(AddressSpace *as,
                                              hwaddr addr, uint32_t val,
                                              MemTxAttrs attrs,
                                              MemTxResult *result,
                                              enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, true);
    if (l < 2 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(mr, addr1, 2);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}
void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_NATIVE_ENDIAN);
}

void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_LITTLE_ENDIAN);
}

void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_BIG_ENDIAN);
}

void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;
    val = tswap64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;
    val = cpu_to_le64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;
    val = cpu_to_be64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

    while (len > 0) {
        int asidx;
        MemTxAttrs attrs;

        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_attrs_debug(cpu, page, &attrs);
        asidx = cpu_asidx_from_attrs(cpu, attrs);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write) {
            cpu_physical_memory_write_rom(cpu->cpu_ases[asidx].as,
                                          phys_addr, buf, l);
        } else {
            address_space_rw(cpu->cpu_ases[asidx].as, phys_addr,
                             MEMTXATTRS_UNSPECIFIED,
                             buf, l, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
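
/* Illustrative sketch (not QEMU code): this is the path the gdbstub takes
 * when the debugger peeks at guest virtual memory; is_write == 0 requests
 * a read into the caller's buffer. The example_* helper is hypothetical.
 */
static int __attribute__((unused)) example_gdb_peek(CPUState *cpu,
                                                    target_ulong vaddr,
                                                    uint8_t *out, int len)
{
    return cpu_memory_rw_debug(cpu, vaddr, out, len, 0);
}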
/*
 * Allows code that needs to deal with migration bitmaps etc to still be built
 * target independent.
 */
size_t qemu_target_page_bits(void)
{
    return TARGET_PAGE_BITS;
}

#endif
/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool target_words_bigendian(void);
bool target_words_bigendian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}
#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegion *mr;
    hwaddr l = 1;
    bool res;

    rcu_read_lock();
    mr = address_space_translate(&address_space_memory,
                                 phys_addr, &phys_addr, &l, false);

    res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
    rcu_read_unlock();
    return res;
}
int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
{
    RAMBlock *block;
    int ret = 0;

    rcu_read_lock();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        ret = func(block->idstr, block->host, block->offset,
                   block->used_length, opaque);
        if (ret) {
            break;
        }
    }
    rcu_read_unlock();
    return ret;
}