 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <sys/types.h>
#include "qemu-common.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/boards.h"
#endif
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#endif
#include "exec/cpu-all.h"
#include "qemu/rcu_queue.h"
#include "qemu/main-loop.h"
#include "translate-all.h"
#include "sysemu/replay.h"
#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "qemu/range.h"
#include "qemu/mmap-alloc.h"

//#define DEBUG_SUBPAGE
#if !defined(CONFIG_USER_ONLY)
/* ram_list is read under rcu_read_lock()/rcu_read_unlock().  Writes
 * are protected by the ramlist lock.
 */
RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };
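/* Illustrative sketch (added for clarity, not part of the original source):
 * a reader is expected to walk ram_list under the RCU read lock, e.g.
 *
 *     rcu_read_lock();
 *     QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
 *         // inspect block; do not modify the list here
 *     }
 *     rcu_read_unlock();
 *
 * Writers serialize on the ramlist mutex via qemu_mutex_lock_ramlist().
 */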
static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC   (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED     (1 << 1)

/* Only a portion of RAM (used_length) is actually used, and migrated.
 * This used_length size can change across reboots.
 */
#define RAM_RESIZEABLE (1 << 2)

/* RAM is backed by an mmapped file. */
#define RAM_FILE (1 << 3)

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside cpu_exec() */
__thread CPUState *current_cpu;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables.  */
#define ADDR_SPACE_BITS 64

#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
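/* Illustrative note (added for clarity, not in the original source): assuming
 * the usual values P_L2_BITS == 9 and TARGET_PAGE_BITS == 12 (4 KiB pages),
 * each node holds P_L2_SIZE == 512 entries and
 *
 *     P_L2_LEVELS = ((64 - 12 - 1) / 9) + 1 = 6
 *
 * so the physical page map is a radix tree of at most six levels.
 */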
typedef PhysPageEntry Node[P_L2_SIZE];

typedef struct PhysPageMap {
    struct rcu_head rcu;

    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    struct rcu_head rcu;

    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3
static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;

/**
 * CPUAddressSpace: all the information a CPU needs about an AddressSpace
 * @cpu: the CPU whose AddressSpace this is
 * @as: the AddressSpace itself
 * @memory_dispatch: its dispatch pointer (cached, RCU protected)
 * @tcg_as_listener: listener for tracking changes to the AddressSpace
 */
struct CPUAddressSpace {
    CPUState *cpu;
    AddressSpace *as;
    struct AddressSpaceDispatch *memory_dispatch;
    MemoryListener tcg_as_listener;
};

#endif
#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
    }
}
static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
{
    unsigned i;
    uint32_t ret;
    PhysPageEntry e;
    PhysPageEntry *p;

    ret = map->nodes_nb++;
    p = map->nodes[ret];
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);

    e.skip = leaf ? 0 : 1;
    e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
    for (i = 0; i < P_L2_SIZE; ++i) {
        memcpy(&p[i], &e, sizeof(e));
    }
    return ret;
}
static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map, level == 0);
    }
    p = map->nodes[lp->ptr];
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}
static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}
/* Compact a non leaf page entry. Simply detect that the entry has a single child,
 * and update our entry so we can skip it and go directly to the destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
{
    unsigned valid_ptr = P_L2_SIZE;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }
        valid_ptr = i;
        phys_page_compact(&p[i], nodes, compacted);
    }

    /* We can only compress if there's only one child. */
    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}

static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    DECLARE_BITMAP(compacted, nodes_nb);

    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes, compacted);
    }
}
static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (sections[lp.ptr].size.hi ||
        range_covers_byte(sections[lp.ptr].offset_within_address_space,
                          sections[lp.ptr].size.lo, addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}
bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}
/* Called from RCU critical section */
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section;
    subpage_t *subpage;

    section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    return section;
}
/* Called from RCU critical section */
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    MemoryRegion *mr;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    mr = section->mr;

    /* MMIO registers can be expected to perform full-width accesses based only
     * on their address, without considering adjacent registers that could
     * decode to completely different MemoryRegions.  When such registers
     * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
     * regions overlap wildly.  For this reason we cannot clamp the accesses
     * here.
     *
     * If the length is small (as is the case for address_space_ldl/stl),
     * everything works fine.  If the incoming length is large, however,
     * the caller really has to do the clamping through memory_access_size.
     */
    if (memory_region_is_ram(mr)) {
        diff = int128_sub(section->size, int128_make64(addr));
        *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    }
    return section;
}
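/* Illustrative example (added for clarity, not in the original source): if a
 * caller asks to translate 0x10000 bytes starting 0x40 bytes before the end
 * of a RAM MemoryRegionSection, the clamp above shrinks *plen to 0x40 and the
 * caller loops over the remainder.  For MMIO sections *plen is left alone here
 * and memory_access_size() later bounds each individual access.
 */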
static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
{
    if (memory_region_is_ram(mr)) {
        return !(is_write && mr->readonly);
    }
    if (memory_region_is_romd(mr)) {
        return !is_write;
    }

    return false;
}
/* Called from RCU critical section */
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;

    for (;;) {
        AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
        section = address_space_translate_internal(d, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        *plen = MIN(page, *plen);
    }

    *xlat = addr;
    return mr;
}
/* Called from RCU critical section */
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen)
{
    MemoryRegionSection *section;
    section = address_space_translate_internal(cpu->cpu_ases[0].memory_dispatch,
                                               addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif
#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;

    return 0;
}

static int cpu_common_pre_load(void *opaque)
{
    CPUState *cpu = opaque;

    cpu->exception_index = -1;

    return 0;
}

static bool cpu_common_exception_index_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return tcg_enabled() && cpu->exception_index != -1;
}

static const VMStateDescription vmstate_cpu_common_exception_index = {
    .name = "cpu_common/exception_index",
    .minimum_version_id = 1,
    .needed = cpu_common_exception_index_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(exception_index, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

static bool cpu_common_crash_occurred_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return cpu->crash_occurred;
}

static const VMStateDescription vmstate_cpu_common_crash_occurred = {
    .name = "cpu_common/crash_occurred",
    .minimum_version_id = 1,
    .needed = cpu_common_crash_occurred_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(crash_occurred, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .minimum_version_id = 1,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_cpu_common_exception_index,
        &vmstate_cpu_common_crash_occurred,
        NULL
    }
};

#endif
CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}
#if !defined(CONFIG_USER_ONLY)
void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as)
{
    /* We only support one address space per cpu at the moment.  */
    assert(cpu->as == as);

    if (cpu->cpu_ases) {
        /* We've already registered the listener for our only AS */
        return;
    }

    cpu->cpu_ases = g_new0(CPUAddressSpace, 1);
    cpu->cpu_ases[0].cpu = cpu;
    cpu->cpu_ases[0].as = as;
    cpu->cpu_ases[0].tcg_as_listener.commit = tcg_commit;
    memory_listener_register(&cpu->cpu_ases[0].tcg_as_listener, as);
}
#endif
#ifndef CONFIG_USER_ONLY
static DECLARE_BITMAP(cpu_index_map, MAX_CPUMASK_BITS);

static int cpu_get_free_index(Error **errp)
{
    int cpu = find_first_zero_bit(cpu_index_map, MAX_CPUMASK_BITS);

    if (cpu >= MAX_CPUMASK_BITS) {
        error_setg(errp, "Trying to use more CPUs than max of %d",
                   MAX_CPUMASK_BITS);
        return -1;
    }

    bitmap_set(cpu_index_map, cpu, 1);
    return cpu;
}

void cpu_exec_exit(CPUState *cpu)
{
    if (cpu->cpu_index == -1) {
        /* cpu_index was never allocated by this @cpu or was already freed. */
        return;
    }

    bitmap_clear(cpu_index_map, cpu->cpu_index, 1);
    cpu->cpu_index = -1;
}
#else

static int cpu_get_free_index(Error **errp)
{
    CPUState *some_cpu;
    int cpu_index = 0;

    CPU_FOREACH(some_cpu) {
        cpu_index++;
    }
    return cpu_index;
}

void cpu_exec_exit(CPUState *cpu)
{
}
#endif
void cpu_exec_init(CPUState *cpu, Error **errp)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    int cpu_index;
    Error *local_err = NULL;

#ifndef CONFIG_USER_ONLY
    cpu->as = &address_space_memory;
    cpu->thread_id = qemu_get_thread_id();
#endif

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    cpu_index = cpu->cpu_index = cpu_get_free_index(&local_err);
    if (local_err) {
        error_propagate(errp, local_err);
#if defined(CONFIG_USER_ONLY)
        cpu_list_unlock();
#endif
        return;
    }
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
    }
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, cpu->env_ptr);
    assert(cc->vmsd == NULL);
    assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
#else
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
#endif
}
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    hwaddr phys = cpu_get_phys_page_debug(cpu, pc);
    if (phys != -1) {
        tb_invalidate_phys_addr(cpu->as,
                                phys | (pc & ~TARGET_PAGE_MASK));
    }
}
#endif
#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
}

int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    return -ENOSYS;
}

void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
}

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    CPUWatchpoint *wp;

    /* forbid ranges which are empty or run off the end of the address space */
    if (len == 0 || (addr + len - 1) < addr) {
        error_report("tried to set invalid watchpoint at %"
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len = len;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
    }

    tlb_flush_page(cpu, addr);

    if (watchpoint) {
        *watchpoint = wp;
    }
    return 0;
}
/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (addr == wp->vaddr && len == wp->len
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);

    tlb_flush_page(cpu, watchpoint->vaddr);

    g_free(watchpoint);
}
/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
        }
    }
}
/* Return true if this watchpoint address matches the specified
 * access (ie the address range covered by the watchpoint overlaps
 * partially or completely with the address range covered by the
 * access).
 */
static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
                                                  vaddr addr,
                                                  vaddr len)
{
    /* We know the lengths are non-zero, but a little caution is
     * required to avoid errors in the case where the range ends
     * exactly at the top of the address space and so addr + len
     * wraps round to zero.
     */
    vaddr wpend = wp->vaddr + wp->len - 1;
    vaddr addrend = addr + len - 1;

    return !(addr > wpend || wp->vaddr > addrend);
}

#endif
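/* Illustrative example (added for clarity, not in the original source): for a
 * 16-byte watchpoint at the very top of the address space, wp->vaddr == ~0 - 15
 * and wp->len == 16, wpend becomes ~0 instead of wrapping to 15 as
 * wp->vaddr + wp->len would.  Two ranges overlap exactly when neither starts
 * after the other ends, which is what the negated comparison above checks.
 */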
/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
{
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
    }

    breakpoint_invalidate(cpu, pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
}
/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
{
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
{
    QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);

    breakpoint_invalidate(cpu, breakpoint->pc);

    g_free(breakpoint);
}
/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
{
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
        }
    }
}
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(cpu);
        }
    }
}
void cpu_abort(CPUState *cpu, const char *fmt, ...)
{
    va_list ap, ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}
#if !defined(CONFIG_USER_ONLY)
/* Called from RCU critical section */
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && addr - block->offset < block->max_length) {
        return block;
    }
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->max_length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    /* It is safe to write mru_block outside the iothread lock.  This
     * is what happens:
     *
     *                                        xxx removed from list
     *                                        call_rcu(reclaim_ramblock, xxx);
     *
     * atomic_rcu_set is not needed here.  The block was already published
     * when it was placed into the list.  Here we're just making an extra
     * copy of the pointer.
     */
    ram_list.mru_block = block;
    return block;
}
938 static void tlb_reset_dirty_range_all(ram_addr_t start
, ram_addr_t length
)
945 end
= TARGET_PAGE_ALIGN(start
+ length
);
946 start
&= TARGET_PAGE_MASK
;
949 block
= qemu_get_ram_block(start
);
950 assert(block
== qemu_get_ram_block(end
- 1));
951 start1
= (uintptr_t)ramblock_ptr(block
, start
- block
->offset
);
953 tlb_reset_dirty(cpu
, start1
, length
);
958 /* Note: start and end must be within the same ram block. */
959 bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start
,
963 unsigned long end
, page
;
970 end
= TARGET_PAGE_ALIGN(start
+ length
) >> TARGET_PAGE_BITS
;
971 page
= start
>> TARGET_PAGE_BITS
;
972 dirty
= bitmap_test_and_clear_atomic(ram_list
.dirty_memory
[client
],
975 if (dirty
&& tcg_enabled()) {
976 tlb_reset_dirty_range_all(start
, length
);
982 /* Called from RCU critical section */
983 hwaddr
memory_region_section_get_iotlb(CPUState
*cpu
,
984 MemoryRegionSection
*section
,
986 hwaddr paddr
, hwaddr xlat
,
988 target_ulong
*address
)
993 if (memory_region_is_ram(section
->mr
)) {
995 iotlb
= (memory_region_get_ram_addr(section
->mr
) & TARGET_PAGE_MASK
)
997 if (!section
->readonly
) {
998 iotlb
|= PHYS_SECTION_NOTDIRTY
;
1000 iotlb
|= PHYS_SECTION_ROM
;
1003 AddressSpaceDispatch
*d
;
1005 d
= atomic_rcu_read(§ion
->address_space
->dispatch
);
1006 iotlb
= section
- d
->map
.sections
;
1010 /* Make accesses to pages with watchpoints go via the
1011 watchpoint trap routines. */
1012 QTAILQ_FOREACH(wp
, &cpu
->watchpoints
, entry
) {
1013 if (cpu_watchpoint_address_matches(wp
, vaddr
, TARGET_PAGE_SIZE
)) {
1014 /* Avoid trapping reads of pages with a write breakpoint. */
1015 if ((prot
& PAGE_WRITE
) || (wp
->flags
& BP_MEM_READ
)) {
1016 iotlb
= PHYS_SECTION_WATCH
+ paddr
;
1017 *address
|= TLB_MMIO
;
1025 #endif /* defined(CONFIG_USER_ONLY) */
1027 #if !defined(CONFIG_USER_ONLY)
1029 static int subpage_register (subpage_t
*mmio
, uint32_t start
, uint32_t end
,
1031 static subpage_t
*subpage_init(AddressSpace
*as
, hwaddr base
);
1033 static void *(*phys_mem_alloc
)(size_t size
, uint64_t *align
) =
1034 qemu_anon_ram_alloc
;
1037 * Set a custom physical guest memory alloator.
1038 * Accelerators with unusual needs may need this. Hopefully, we can
1039 * get rid of it eventually.
1041 void phys_mem_set_alloc(void *(*alloc
)(size_t, uint64_t *align
))
1043 phys_mem_alloc
= alloc
;
1046 static uint16_t phys_section_add(PhysPageMap
*map
,
1047 MemoryRegionSection
*section
)
1049 /* The physical section number is ORed with a page-aligned
1050 * pointer to produce the iotlb entries. Thus it should
1051 * never overflow into the page-aligned value.
1053 assert(map
->sections_nb
< TARGET_PAGE_SIZE
);
1055 if (map
->sections_nb
== map
->sections_nb_alloc
) {
1056 map
->sections_nb_alloc
= MAX(map
->sections_nb_alloc
* 2, 16);
1057 map
->sections
= g_renew(MemoryRegionSection
, map
->sections
,
1058 map
->sections_nb_alloc
);
1060 map
->sections
[map
->sections_nb
] = *section
;
1061 memory_region_ref(section
->mr
);
1062 return map
->sections_nb
++;
1065 static void phys_section_destroy(MemoryRegion
*mr
)
1067 bool have_sub_page
= mr
->subpage
;
1069 memory_region_unref(mr
);
1071 if (have_sub_page
) {
1072 subpage_t
*subpage
= container_of(mr
, subpage_t
, iomem
);
1073 object_unref(OBJECT(&subpage
->iomem
));
1078 static void phys_sections_free(PhysPageMap
*map
)
1080 while (map
->sections_nb
> 0) {
1081 MemoryRegionSection
*section
= &map
->sections
[--map
->sections_nb
];
1082 phys_section_destroy(section
->mr
);
1084 g_free(map
->sections
);
1088 static void register_subpage(AddressSpaceDispatch
*d
, MemoryRegionSection
*section
)
1091 hwaddr base
= section
->offset_within_address_space
1093 MemoryRegionSection
*existing
= phys_page_find(d
->phys_map
, base
,
1094 d
->map
.nodes
, d
->map
.sections
);
1095 MemoryRegionSection subsection
= {
1096 .offset_within_address_space
= base
,
1097 .size
= int128_make64(TARGET_PAGE_SIZE
),
1101 assert(existing
->mr
->subpage
|| existing
->mr
== &io_mem_unassigned
);
1103 if (!(existing
->mr
->subpage
)) {
1104 subpage
= subpage_init(d
->as
, base
);
1105 subsection
.address_space
= d
->as
;
1106 subsection
.mr
= &subpage
->iomem
;
1107 phys_page_set(d
, base
>> TARGET_PAGE_BITS
, 1,
1108 phys_section_add(&d
->map
, &subsection
));
1110 subpage
= container_of(existing
->mr
, subpage_t
, iomem
);
1112 start
= section
->offset_within_address_space
& ~TARGET_PAGE_MASK
;
1113 end
= start
+ int128_get64(section
->size
) - 1;
1114 subpage_register(subpage
, start
, end
,
1115 phys_section_add(&d
->map
, section
));
1119 static void register_multipage(AddressSpaceDispatch
*d
,
1120 MemoryRegionSection
*section
)
1122 hwaddr start_addr
= section
->offset_within_address_space
;
1123 uint16_t section_index
= phys_section_add(&d
->map
, section
);
1124 uint64_t num_pages
= int128_get64(int128_rshift(section
->size
,
1128 phys_page_set(d
, start_addr
>> TARGET_PAGE_BITS
, num_pages
, section_index
);
1131 static void mem_add(MemoryListener
*listener
, MemoryRegionSection
*section
)
1133 AddressSpace
*as
= container_of(listener
, AddressSpace
, dispatch_listener
);
1134 AddressSpaceDispatch
*d
= as
->next_dispatch
;
1135 MemoryRegionSection now
= *section
, remain
= *section
;
1136 Int128 page_size
= int128_make64(TARGET_PAGE_SIZE
);
1138 if (now
.offset_within_address_space
& ~TARGET_PAGE_MASK
) {
1139 uint64_t left
= TARGET_PAGE_ALIGN(now
.offset_within_address_space
)
1140 - now
.offset_within_address_space
;
1142 now
.size
= int128_min(int128_make64(left
), now
.size
);
1143 register_subpage(d
, &now
);
1145 now
.size
= int128_zero();
1147 while (int128_ne(remain
.size
, now
.size
)) {
1148 remain
.size
= int128_sub(remain
.size
, now
.size
);
1149 remain
.offset_within_address_space
+= int128_get64(now
.size
);
1150 remain
.offset_within_region
+= int128_get64(now
.size
);
1152 if (int128_lt(remain
.size
, page_size
)) {
1153 register_subpage(d
, &now
);
1154 } else if (remain
.offset_within_address_space
& ~TARGET_PAGE_MASK
) {
1155 now
.size
= page_size
;
1156 register_subpage(d
, &now
);
1158 now
.size
= int128_and(now
.size
, int128_neg(page_size
));
1159 register_multipage(d
, &now
);
1164 void qemu_flush_coalesced_mmio_buffer(void)
1167 kvm_flush_coalesced_mmio_buffer();
1170 void qemu_mutex_lock_ramlist(void)
1172 qemu_mutex_lock(&ram_list
.mutex
);
1175 void qemu_mutex_unlock_ramlist(void)
1177 qemu_mutex_unlock(&ram_list
.mutex
);
1182 #include <sys/vfs.h>
1184 #define HUGETLBFS_MAGIC 0x958458f6
1186 static long gethugepagesize(const char *path
, Error
**errp
)
1192 ret
= statfs(path
, &fs
);
1193 } while (ret
!= 0 && errno
== EINTR
);
1196 error_setg_errno(errp
, errno
, "failed to get page size of file %s",
1204 static void *file_ram_alloc(RAMBlock
*block
,
1211 char *sanitized_name
;
1216 Error
*local_err
= NULL
;
1218 hpagesize
= gethugepagesize(path
, &local_err
);
1220 error_propagate(errp
, local_err
);
1223 block
->mr
->align
= hpagesize
;
1225 if (memory
< hpagesize
) {
1226 error_setg(errp
, "memory size 0x" RAM_ADDR_FMT
" must be equal to "
1227 "or larger than huge page size 0x%" PRIx64
,
1232 if (kvm_enabled() && !kvm_has_sync_mmu()) {
1234 "host lacks kvm mmu notifiers, -mem-path unsupported");
1238 if (!stat(path
, &st
) && S_ISDIR(st
.st_mode
)) {
1239 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
1240 sanitized_name
= g_strdup(memory_region_name(block
->mr
));
1241 for (c
= sanitized_name
; *c
!= '\0'; c
++) {
1247 filename
= g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path
,
1249 g_free(sanitized_name
);
1251 fd
= mkstemp(filename
);
1257 fd
= open(path
, O_RDWR
| O_CREAT
, 0644);
1261 error_setg_errno(errp
, errno
,
1262 "unable to create backing store for hugepages");
1266 memory
= ROUND_UP(memory
, hpagesize
);
1269 * ftruncate is not supported by hugetlbfs in older
1270 * hosts, so don't bother bailing out on errors.
1271 * If anything goes wrong with it under other filesystems,
1274 if (ftruncate(fd
, memory
)) {
1275 perror("ftruncate");
1278 area
= qemu_ram_mmap(fd
, memory
, hpagesize
, block
->flags
& RAM_SHARED
);
1279 if (area
== MAP_FAILED
) {
1280 error_setg_errno(errp
, errno
,
1281 "unable to map backing store for hugepages");
1287 os_mem_prealloc(fd
, area
, memory
);
1298 /* Called with the ramlist lock held. */
1299 static ram_addr_t
find_ram_offset(ram_addr_t size
)
1301 RAMBlock
*block
, *next_block
;
1302 ram_addr_t offset
= RAM_ADDR_MAX
, mingap
= RAM_ADDR_MAX
;
1304 assert(size
!= 0); /* it would hand out same offset multiple times */
1306 if (QLIST_EMPTY_RCU(&ram_list
.blocks
)) {
1310 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1311 ram_addr_t end
, next
= RAM_ADDR_MAX
;
1313 end
= block
->offset
+ block
->max_length
;
1315 QLIST_FOREACH_RCU(next_block
, &ram_list
.blocks
, next
) {
1316 if (next_block
->offset
>= end
) {
1317 next
= MIN(next
, next_block
->offset
);
1320 if (next
- end
>= size
&& next
- end
< mingap
) {
1322 mingap
= next
- end
;
1326 if (offset
== RAM_ADDR_MAX
) {
1327 fprintf(stderr
, "Failed to find gap of requested size: %" PRIu64
"\n",
1335 ram_addr_t
last_ram_offset(void)
1338 ram_addr_t last
= 0;
1341 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1342 last
= MAX(last
, block
->offset
+ block
->max_length
);
1348 static void qemu_ram_setup_dump(void *addr
, ram_addr_t size
)
1352 /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
1353 if (!machine_dump_guest_core(current_machine
)) {
1354 ret
= qemu_madvise(addr
, size
, QEMU_MADV_DONTDUMP
);
1356 perror("qemu_madvise");
1357 fprintf(stderr
, "madvise doesn't support MADV_DONTDUMP, "
1358 "but dump_guest_core=off specified\n");
1363 /* Called within an RCU critical section, or while the ramlist lock
1366 static RAMBlock
*find_ram_block(ram_addr_t addr
)
1370 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1371 if (block
->offset
== addr
) {
1379 const char *qemu_ram_get_idstr(RAMBlock
*rb
)
1384 /* Called with iothread lock held. */
1385 void qemu_ram_set_idstr(ram_addr_t addr
, const char *name
, DeviceState
*dev
)
1387 RAMBlock
*new_block
, *block
;
1390 new_block
= find_ram_block(addr
);
1392 assert(!new_block
->idstr
[0]);
1395 char *id
= qdev_get_dev_path(dev
);
1397 snprintf(new_block
->idstr
, sizeof(new_block
->idstr
), "%s/", id
);
1401 pstrcat(new_block
->idstr
, sizeof(new_block
->idstr
), name
);
1403 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1404 if (block
!= new_block
&& !strcmp(block
->idstr
, new_block
->idstr
)) {
1405 fprintf(stderr
, "RAMBlock \"%s\" already registered, abort!\n",
1413 /* Called with iothread lock held. */
1414 void qemu_ram_unset_idstr(ram_addr_t addr
)
1418 /* FIXME: arch_init.c assumes that this is not called throughout
1419 * migration. Ignore the problem since hot-unplug during migration
1420 * does not work anyway.
1424 block
= find_ram_block(addr
);
1426 memset(block
->idstr
, 0, sizeof(block
->idstr
));
1431 static int memory_try_enable_merging(void *addr
, size_t len
)
1433 if (!machine_mem_merge(current_machine
)) {
1434 /* disabled by the user */
1438 return qemu_madvise(addr
, len
, QEMU_MADV_MERGEABLE
);
1441 /* Only legal before guest might have detected the memory size: e.g. on
1442 * incoming migration, or right after reset.
1444 * As memory core doesn't know how is memory accessed, it is up to
1445 * resize callback to update device state and/or add assertions to detect
1446 * misuse, if necessary.
1448 int qemu_ram_resize(ram_addr_t base
, ram_addr_t newsize
, Error
**errp
)
1450 RAMBlock
*block
= find_ram_block(base
);
1454 newsize
= HOST_PAGE_ALIGN(newsize
);
1456 if (block
->used_length
== newsize
) {
1460 if (!(block
->flags
& RAM_RESIZEABLE
)) {
1461 error_setg_errno(errp
, EINVAL
,
1462 "Length mismatch: %s: 0x" RAM_ADDR_FMT
1463 " in != 0x" RAM_ADDR_FMT
, block
->idstr
,
1464 newsize
, block
->used_length
);
1468 if (block
->max_length
< newsize
) {
1469 error_setg_errno(errp
, EINVAL
,
1470 "Length too large: %s: 0x" RAM_ADDR_FMT
1471 " > 0x" RAM_ADDR_FMT
, block
->idstr
,
1472 newsize
, block
->max_length
);
1476 cpu_physical_memory_clear_dirty_range(block
->offset
, block
->used_length
);
1477 block
->used_length
= newsize
;
1478 cpu_physical_memory_set_dirty_range(block
->offset
, block
->used_length
,
1480 memory_region_set_size(block
->mr
, newsize
);
1481 if (block
->resized
) {
1482 block
->resized(block
->idstr
, newsize
, block
->host
);
1487 static ram_addr_t
ram_block_add(RAMBlock
*new_block
, Error
**errp
)
1490 RAMBlock
*last_block
= NULL
;
1491 ram_addr_t old_ram_size
, new_ram_size
;
1493 old_ram_size
= last_ram_offset() >> TARGET_PAGE_BITS
;
1495 qemu_mutex_lock_ramlist();
1496 new_block
->offset
= find_ram_offset(new_block
->max_length
);
1498 if (!new_block
->host
) {
1499 if (xen_enabled()) {
1500 xen_ram_alloc(new_block
->offset
, new_block
->max_length
,
1503 new_block
->host
= phys_mem_alloc(new_block
->max_length
,
1504 &new_block
->mr
->align
);
1505 if (!new_block
->host
) {
1506 error_setg_errno(errp
, errno
,
1507 "cannot set up guest memory '%s'",
1508 memory_region_name(new_block
->mr
));
1509 qemu_mutex_unlock_ramlist();
1512 memory_try_enable_merging(new_block
->host
, new_block
->max_length
);
1516 new_ram_size
= MAX(old_ram_size
,
1517 (new_block
->offset
+ new_block
->max_length
) >> TARGET_PAGE_BITS
);
1518 if (new_ram_size
> old_ram_size
) {
1519 migration_bitmap_extend(old_ram_size
, new_ram_size
);
1521 /* Keep the list sorted from biggest to smallest block. Unlike QTAILQ,
1522 * QLIST (which has an RCU-friendly variant) does not have insertion at
1523 * tail, so save the last element in last_block.
1525 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1527 if (block
->max_length
< new_block
->max_length
) {
1532 QLIST_INSERT_BEFORE_RCU(block
, new_block
, next
);
1533 } else if (last_block
) {
1534 QLIST_INSERT_AFTER_RCU(last_block
, new_block
, next
);
1535 } else { /* list is empty */
1536 QLIST_INSERT_HEAD_RCU(&ram_list
.blocks
, new_block
, next
);
1538 ram_list
.mru_block
= NULL
;
1540 /* Write list before version */
1543 qemu_mutex_unlock_ramlist();
1545 new_ram_size
= last_ram_offset() >> TARGET_PAGE_BITS
;
1547 if (new_ram_size
> old_ram_size
) {
1550 /* ram_list.dirty_memory[] is protected by the iothread lock. */
1551 for (i
= 0; i
< DIRTY_MEMORY_NUM
; i
++) {
1552 ram_list
.dirty_memory
[i
] =
1553 bitmap_zero_extend(ram_list
.dirty_memory
[i
],
1554 old_ram_size
, new_ram_size
);
1557 cpu_physical_memory_set_dirty_range(new_block
->offset
,
1558 new_block
->used_length
,
1561 if (new_block
->host
) {
1562 qemu_ram_setup_dump(new_block
->host
, new_block
->max_length
);
1563 qemu_madvise(new_block
->host
, new_block
->max_length
, QEMU_MADV_HUGEPAGE
);
1564 qemu_madvise(new_block
->host
, new_block
->max_length
, QEMU_MADV_DONTFORK
);
1565 if (kvm_enabled()) {
1566 kvm_setup_guest_memory(new_block
->host
, new_block
->max_length
);
1570 return new_block
->offset
;
1574 ram_addr_t
qemu_ram_alloc_from_file(ram_addr_t size
, MemoryRegion
*mr
,
1575 bool share
, const char *mem_path
,
1578 RAMBlock
*new_block
;
1580 Error
*local_err
= NULL
;
1582 if (xen_enabled()) {
1583 error_setg(errp
, "-mem-path not supported with Xen");
1587 if (phys_mem_alloc
!= qemu_anon_ram_alloc
) {
1589 * file_ram_alloc() needs to allocate just like
1590 * phys_mem_alloc, but we haven't bothered to provide
1594 "-mem-path not supported with this accelerator");
1598 size
= HOST_PAGE_ALIGN(size
);
1599 new_block
= g_malloc0(sizeof(*new_block
));
1601 new_block
->used_length
= size
;
1602 new_block
->max_length
= size
;
1603 new_block
->flags
= share
? RAM_SHARED
: 0;
1604 new_block
->flags
|= RAM_FILE
;
1605 new_block
->host
= file_ram_alloc(new_block
, size
,
1607 if (!new_block
->host
) {
1612 addr
= ram_block_add(new_block
, &local_err
);
1615 error_propagate(errp
, local_err
);
1623 ram_addr_t
qemu_ram_alloc_internal(ram_addr_t size
, ram_addr_t max_size
,
1624 void (*resized
)(const char*,
1627 void *host
, bool resizeable
,
1628 MemoryRegion
*mr
, Error
**errp
)
1630 RAMBlock
*new_block
;
1632 Error
*local_err
= NULL
;
1634 size
= HOST_PAGE_ALIGN(size
);
1635 max_size
= HOST_PAGE_ALIGN(max_size
);
1636 new_block
= g_malloc0(sizeof(*new_block
));
1638 new_block
->resized
= resized
;
1639 new_block
->used_length
= size
;
1640 new_block
->max_length
= max_size
;
1641 assert(max_size
>= size
);
1643 new_block
->host
= host
;
1645 new_block
->flags
|= RAM_PREALLOC
;
1648 new_block
->flags
|= RAM_RESIZEABLE
;
1650 addr
= ram_block_add(new_block
, &local_err
);
1653 error_propagate(errp
, local_err
);
1659 ram_addr_t
qemu_ram_alloc_from_ptr(ram_addr_t size
, void *host
,
1660 MemoryRegion
*mr
, Error
**errp
)
1662 return qemu_ram_alloc_internal(size
, size
, NULL
, host
, false, mr
, errp
);
1665 ram_addr_t
qemu_ram_alloc(ram_addr_t size
, MemoryRegion
*mr
, Error
**errp
)
1667 return qemu_ram_alloc_internal(size
, size
, NULL
, NULL
, false, mr
, errp
);
1670 ram_addr_t
qemu_ram_alloc_resizeable(ram_addr_t size
, ram_addr_t maxsz
,
1671 void (*resized
)(const char*,
1674 MemoryRegion
*mr
, Error
**errp
)
1676 return qemu_ram_alloc_internal(size
, maxsz
, resized
, NULL
, true, mr
, errp
);
1679 void qemu_ram_free_from_ptr(ram_addr_t addr
)
1683 qemu_mutex_lock_ramlist();
1684 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1685 if (addr
== block
->offset
) {
1686 QLIST_REMOVE_RCU(block
, next
);
1687 ram_list
.mru_block
= NULL
;
1688 /* Write list before version */
1691 g_free_rcu(block
, rcu
);
1695 qemu_mutex_unlock_ramlist();
1698 static void reclaim_ramblock(RAMBlock
*block
)
1700 if (block
->flags
& RAM_PREALLOC
) {
1702 } else if (xen_enabled()) {
1703 xen_invalidate_map_cache_entry(block
->host
);
1705 } else if (block
->fd
>= 0) {
1706 if (block
->flags
& RAM_FILE
) {
1707 qemu_ram_munmap(block
->host
, block
->max_length
);
1709 munmap(block
->host
, block
->max_length
);
1714 qemu_anon_ram_free(block
->host
, block
->max_length
);
1719 void qemu_ram_free(ram_addr_t addr
)
1723 qemu_mutex_lock_ramlist();
1724 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1725 if (addr
== block
->offset
) {
1726 QLIST_REMOVE_RCU(block
, next
);
1727 ram_list
.mru_block
= NULL
;
1728 /* Write list before version */
1731 call_rcu(block
, reclaim_ramblock
, rcu
);
1735 qemu_mutex_unlock_ramlist();
1739 void qemu_ram_remap(ram_addr_t addr
, ram_addr_t length
)
1746 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1747 offset
= addr
- block
->offset
;
1748 if (offset
< block
->max_length
) {
1749 vaddr
= ramblock_ptr(block
, offset
);
1750 if (block
->flags
& RAM_PREALLOC
) {
1752 } else if (xen_enabled()) {
1756 if (block
->fd
>= 0) {
1757 flags
|= (block
->flags
& RAM_SHARED
?
1758 MAP_SHARED
: MAP_PRIVATE
);
1759 area
= mmap(vaddr
, length
, PROT_READ
| PROT_WRITE
,
1760 flags
, block
->fd
, offset
);
1763 * Remap needs to match alloc. Accelerators that
1764 * set phys_mem_alloc never remap. If they did,
1765 * we'd need a remap hook here.
1767 assert(phys_mem_alloc
== qemu_anon_ram_alloc
);
1769 flags
|= MAP_PRIVATE
| MAP_ANONYMOUS
;
1770 area
= mmap(vaddr
, length
, PROT_READ
| PROT_WRITE
,
1773 if (area
!= vaddr
) {
1774 fprintf(stderr
, "Could not remap addr: "
1775 RAM_ADDR_FMT
"@" RAM_ADDR_FMT
"\n",
1779 memory_try_enable_merging(vaddr
, length
);
1780 qemu_ram_setup_dump(vaddr
, length
);
1785 #endif /* !_WIN32 */
1787 int qemu_get_ram_fd(ram_addr_t addr
)
1793 block
= qemu_get_ram_block(addr
);
1799 void *qemu_get_ram_block_host_ptr(ram_addr_t addr
)
1805 block
= qemu_get_ram_block(addr
);
1806 ptr
= ramblock_ptr(block
, 0);
1811 /* Return a host pointer to ram allocated with qemu_ram_alloc.
1812 * This should not be used for general purpose DMA. Use address_space_map
1813 * or address_space_rw instead. For local memory (e.g. video ram) that the
1814 * device owns, use memory_region_get_ram_ptr.
1816 * By the time this function returns, the returned pointer is not protected
1817 * by RCU anymore. If the caller is not within an RCU critical section and
1818 * does not hold the iothread lock, it must have other means of protecting the
1819 * pointer, such as a reference to the region that includes the incoming
1822 void *qemu_get_ram_ptr(ram_addr_t addr
)
1828 block
= qemu_get_ram_block(addr
);
1830 if (xen_enabled() && block
->host
== NULL
) {
1831 /* We need to check if the requested address is in the RAM
1832 * because we don't want to map the entire memory in QEMU.
1833 * In that case just map until the end of the page.
1835 if (block
->offset
== 0) {
1836 ptr
= xen_map_cache(addr
, 0, 0);
1840 block
->host
= xen_map_cache(block
->offset
, block
->max_length
, 1);
1842 ptr
= ramblock_ptr(block
, addr
- block
->offset
);
1849 /* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
1850 * but takes a size argument.
1852 * By the time this function returns, the returned pointer is not protected
1853 * by RCU anymore. If the caller is not within an RCU critical section and
1854 * does not hold the iothread lock, it must have other means of protecting the
1855 * pointer, such as a reference to the region that includes the incoming
1858 static void *qemu_ram_ptr_length(ram_addr_t addr
, hwaddr
*size
)
1864 if (xen_enabled()) {
1865 return xen_map_cache(addr
, *size
, 1);
1869 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1870 if (addr
- block
->offset
< block
->max_length
) {
1871 if (addr
- block
->offset
+ *size
> block
->max_length
)
1872 *size
= block
->max_length
- addr
+ block
->offset
;
1873 ptr
= ramblock_ptr(block
, addr
- block
->offset
);
1879 fprintf(stderr
, "Bad ram offset %" PRIx64
"\n", (uint64_t)addr
);
1885 * Translates a host ptr back to a RAMBlock, a ram_addr and an offset
1888 * ptr: Host pointer to look up
1889 * round_offset: If true round the result offset down to a page boundary
1890 * *ram_addr: set to result ram_addr
1891 * *offset: set to result offset within the RAMBlock
1893 * Returns: RAMBlock (or NULL if not found)
1895 * By the time this function returns, the returned pointer is not protected
1896 * by RCU anymore. If the caller is not within an RCU critical section and
1897 * does not hold the iothread lock, it must have other means of protecting the
1898 * pointer, such as a reference to the region that includes the incoming
1901 RAMBlock
*qemu_ram_block_from_host(void *ptr
, bool round_offset
,
1902 ram_addr_t
*ram_addr
,
1906 uint8_t *host
= ptr
;
1908 if (xen_enabled()) {
1910 *ram_addr
= xen_ram_addr_from_mapcache(ptr
);
1911 block
= qemu_get_ram_block(*ram_addr
);
1913 *offset
= (host
- block
->host
);
1920 block
= atomic_rcu_read(&ram_list
.mru_block
);
1921 if (block
&& block
->host
&& host
- block
->host
< block
->max_length
) {
1925 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1926 /* This case append when the block is not mapped. */
1927 if (block
->host
== NULL
) {
1930 if (host
- block
->host
< block
->max_length
) {
1939 *offset
= (host
- block
->host
);
1941 *offset
&= TARGET_PAGE_MASK
;
1943 *ram_addr
= block
->offset
+ *offset
;
1949 * Finds the named RAMBlock
1951 * name: The name of RAMBlock to find
1953 * Returns: RAMBlock (or NULL if not found)
1955 RAMBlock
*qemu_ram_block_by_name(const char *name
)
1959 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1960 if (!strcmp(name
, block
->idstr
)) {
1968 /* Some of the softmmu routines need to translate from a host pointer
1969 (typically a TLB entry) back to a ram offset. */
1970 MemoryRegion
*qemu_ram_addr_from_host(void *ptr
, ram_addr_t
*ram_addr
)
1973 ram_addr_t offset
; /* Not used */
1975 block
= qemu_ram_block_from_host(ptr
, false, ram_addr
, &offset
);
1984 static void notdirty_mem_write(void *opaque
, hwaddr ram_addr
,
1985 uint64_t val
, unsigned size
)
1987 if (!cpu_physical_memory_get_dirty_flag(ram_addr
, DIRTY_MEMORY_CODE
)) {
1988 tb_invalidate_phys_page_fast(ram_addr
, size
);
1992 stb_p(qemu_get_ram_ptr(ram_addr
), val
);
1995 stw_p(qemu_get_ram_ptr(ram_addr
), val
);
1998 stl_p(qemu_get_ram_ptr(ram_addr
), val
);
2003 /* Set both VGA and migration bits for simplicity and to remove
2004 * the notdirty callback faster.
2006 cpu_physical_memory_set_dirty_range(ram_addr
, size
,
2007 DIRTY_CLIENTS_NOCODE
);
2008 /* we remove the notdirty callback only if the code has been
2010 if (!cpu_physical_memory_is_clean(ram_addr
)) {
2011 tlb_set_dirty(current_cpu
, current_cpu
->mem_io_vaddr
);
2015 static bool notdirty_mem_accepts(void *opaque
, hwaddr addr
,
2016 unsigned size
, bool is_write
)
2021 static const MemoryRegionOps notdirty_mem_ops
= {
2022 .write
= notdirty_mem_write
,
2023 .valid
.accepts
= notdirty_mem_accepts
,
2024 .endianness
= DEVICE_NATIVE_ENDIAN
,
2027 /* Generate a debug exception if a watchpoint has been hit. */
2028 static void check_watchpoint(int offset
, int len
, MemTxAttrs attrs
, int flags
)
2030 CPUState
*cpu
= current_cpu
;
2031 CPUArchState
*env
= cpu
->env_ptr
;
2032 target_ulong pc
, cs_base
;
2037 if (cpu
->watchpoint_hit
) {
2038 /* We re-entered the check after replacing the TB. Now raise
2039 * the debug interrupt so that is will trigger after the
2040 * current instruction. */
2041 cpu_interrupt(cpu
, CPU_INTERRUPT_DEBUG
);
2044 vaddr
= (cpu
->mem_io_vaddr
& TARGET_PAGE_MASK
) + offset
;
2045 QTAILQ_FOREACH(wp
, &cpu
->watchpoints
, entry
) {
2046 if (cpu_watchpoint_address_matches(wp
, vaddr
, len
)
2047 && (wp
->flags
& flags
)) {
2048 if (flags
== BP_MEM_READ
) {
2049 wp
->flags
|= BP_WATCHPOINT_HIT_READ
;
2051 wp
->flags
|= BP_WATCHPOINT_HIT_WRITE
;
2053 wp
->hitaddr
= vaddr
;
2054 wp
->hitattrs
= attrs
;
2055 if (!cpu
->watchpoint_hit
) {
2056 cpu
->watchpoint_hit
= wp
;
2057 tb_check_watchpoint(cpu
);
2058 if (wp
->flags
& BP_STOP_BEFORE_ACCESS
) {
2059 cpu
->exception_index
= EXCP_DEBUG
;
2062 cpu_get_tb_cpu_state(env
, &pc
, &cs_base
, &cpu_flags
);
2063 tb_gen_code(cpu
, pc
, cs_base
, cpu_flags
, 1);
2064 cpu_resume_from_signal(cpu
, NULL
);
2068 wp
->flags
&= ~BP_WATCHPOINT_HIT
;
2073 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2074 so these check for a hit then pass through to the normal out-of-line
2076 static MemTxResult
watch_mem_read(void *opaque
, hwaddr addr
, uint64_t *pdata
,
2077 unsigned size
, MemTxAttrs attrs
)
2082 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, size
, attrs
, BP_MEM_READ
);
2085 data
= address_space_ldub(&address_space_memory
, addr
, attrs
, &res
);
2088 data
= address_space_lduw(&address_space_memory
, addr
, attrs
, &res
);
2091 data
= address_space_ldl(&address_space_memory
, addr
, attrs
, &res
);
2099 static MemTxResult
watch_mem_write(void *opaque
, hwaddr addr
,
2100 uint64_t val
, unsigned size
,
2105 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, size
, attrs
, BP_MEM_WRITE
);
2108 address_space_stb(&address_space_memory
, addr
, val
, attrs
, &res
);
2111 address_space_stw(&address_space_memory
, addr
, val
, attrs
, &res
);
2114 address_space_stl(&address_space_memory
, addr
, val
, attrs
, &res
);
2121 static const MemoryRegionOps watch_mem_ops
= {
2122 .read_with_attrs
= watch_mem_read
,
2123 .write_with_attrs
= watch_mem_write
,
2124 .endianness
= DEVICE_NATIVE_ENDIAN
,
2127 static MemTxResult
subpage_read(void *opaque
, hwaddr addr
, uint64_t *data
,
2128 unsigned len
, MemTxAttrs attrs
)
2130 subpage_t
*subpage
= opaque
;
2134 #if defined(DEBUG_SUBPAGE)
2135 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
"\n", __func__
,
2136 subpage
, len
, addr
);
2138 res
= address_space_read(subpage
->as
, addr
+ subpage
->base
,
2145 *data
= ldub_p(buf
);
2148 *data
= lduw_p(buf
);
2161 static MemTxResult
subpage_write(void *opaque
, hwaddr addr
,
2162 uint64_t value
, unsigned len
, MemTxAttrs attrs
)
2164 subpage_t
*subpage
= opaque
;
2167 #if defined(DEBUG_SUBPAGE)
2168 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
2169 " value %"PRIx64
"\n",
2170 __func__
, subpage
, len
, addr
, value
);
2188 return address_space_write(subpage
->as
, addr
+ subpage
->base
,
2192 static bool subpage_accepts(void *opaque
, hwaddr addr
,
2193 unsigned len
, bool is_write
)
2195 subpage_t
*subpage
= opaque
;
2196 #if defined(DEBUG_SUBPAGE)
2197 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx
"\n",
2198 __func__
, subpage
, is_write
? 'w' : 'r', len
, addr
);
2201 return address_space_access_valid(subpage
->as
, addr
+ subpage
->base
,
2205 static const MemoryRegionOps subpage_ops
= {
2206 .read_with_attrs
= subpage_read
,
2207 .write_with_attrs
= subpage_write
,
2208 .impl
.min_access_size
= 1,
2209 .impl
.max_access_size
= 8,
2210 .valid
.min_access_size
= 1,
2211 .valid
.max_access_size
= 8,
2212 .valid
.accepts
= subpage_accepts
,
2213 .endianness
= DEVICE_NATIVE_ENDIAN
,
2216 static int subpage_register (subpage_t
*mmio
, uint32_t start
, uint32_t end
,
2221 if (start
>= TARGET_PAGE_SIZE
|| end
>= TARGET_PAGE_SIZE
)
2223 idx
= SUBPAGE_IDX(start
);
2224 eidx
= SUBPAGE_IDX(end
);
2225 #if defined(DEBUG_SUBPAGE)
2226 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
2227 __func__
, mmio
, start
, end
, idx
, eidx
, section
);
2229 for (; idx
<= eidx
; idx
++) {
2230 mmio
->sub_section
[idx
] = section
;
2236 static subpage_t
*subpage_init(AddressSpace
*as
, hwaddr base
)
2240 mmio
= g_malloc0(sizeof(subpage_t
));
2244 memory_region_init_io(&mmio
->iomem
, NULL
, &subpage_ops
, mmio
,
2245 NULL
, TARGET_PAGE_SIZE
);
2246 mmio
->iomem
.subpage
= true;
2247 #if defined(DEBUG_SUBPAGE)
2248 printf("%s: %p base " TARGET_FMT_plx
" len %08x\n", __func__
,
2249 mmio
, base
, TARGET_PAGE_SIZE
);
2251 subpage_register(mmio
, 0, TARGET_PAGE_SIZE
-1, PHYS_SECTION_UNASSIGNED
);
2256 static uint16_t dummy_section(PhysPageMap
*map
, AddressSpace
*as
,
2260 MemoryRegionSection section
= {
2261 .address_space
= as
,
2263 .offset_within_address_space
= 0,
2264 .offset_within_region
= 0,
2265 .size
= int128_2_64(),
2268 return phys_section_add(map
, §ion
);
2271 MemoryRegion
*iotlb_to_region(CPUState
*cpu
, hwaddr index
)
2273 CPUAddressSpace
*cpuas
= &cpu
->cpu_ases
[0];
2274 AddressSpaceDispatch
*d
= atomic_rcu_read(&cpuas
->memory_dispatch
);
2275 MemoryRegionSection
*sections
= d
->map
.sections
;
2277 return sections
[index
& ~TARGET_PAGE_MASK
].mr
;
2280 static void io_mem_init(void)
2282 memory_region_init_io(&io_mem_rom
, NULL
, &unassigned_mem_ops
, NULL
, NULL
, UINT64_MAX
);
2283 memory_region_init_io(&io_mem_unassigned
, NULL
, &unassigned_mem_ops
, NULL
,
2285 memory_region_init_io(&io_mem_notdirty
, NULL
, ¬dirty_mem_ops
, NULL
,
2287 memory_region_init_io(&io_mem_watch
, NULL
, &watch_mem_ops
, NULL
,
2291 static void mem_begin(MemoryListener
*listener
)
2293 AddressSpace
*as
= container_of(listener
, AddressSpace
, dispatch_listener
);
2294 AddressSpaceDispatch
*d
= g_new0(AddressSpaceDispatch
, 1);
2297 n
= dummy_section(&d
->map
, as
, &io_mem_unassigned
);
2298 assert(n
== PHYS_SECTION_UNASSIGNED
);
2299 n
= dummy_section(&d
->map
, as
, &io_mem_notdirty
);
2300 assert(n
== PHYS_SECTION_NOTDIRTY
);
2301 n
= dummy_section(&d
->map
, as
, &io_mem_rom
);
2302 assert(n
== PHYS_SECTION_ROM
);
2303 n
= dummy_section(&d
->map
, as
, &io_mem_watch
);
2304 assert(n
== PHYS_SECTION_WATCH
);
2306 d
->phys_map
= (PhysPageEntry
) { .ptr
= PHYS_MAP_NODE_NIL
, .skip
= 1 };
2308 as
->next_dispatch
= d
;
2311 static void address_space_dispatch_free(AddressSpaceDispatch
*d
)
2313 phys_sections_free(&d
->map
);
2317 static void mem_commit(MemoryListener
*listener
)
2319 AddressSpace
*as
= container_of(listener
, AddressSpace
, dispatch_listener
);
2320 AddressSpaceDispatch
*cur
= as
->dispatch
;
2321 AddressSpaceDispatch
*next
= as
->next_dispatch
;
2323 phys_page_compact_all(next
, next
->map
.nodes_nb
);
2325 atomic_rcu_set(&as
->dispatch
, next
);
2327 call_rcu(cur
, address_space_dispatch_free
, rcu
);
2331 static void tcg_commit(MemoryListener
*listener
)
2333 CPUAddressSpace
*cpuas
;
2334 AddressSpaceDispatch
*d
;
2336 /* since each CPU stores ram addresses in its TLB cache, we must
2337 reset the modified entries */
2338 cpuas
= container_of(listener
, CPUAddressSpace
, tcg_as_listener
);
2339 cpu_reloading_memory_map();
2340 /* The CPU and TLB are protected by the iothread lock.
2341 * We reload the dispatch pointer now because cpu_reloading_memory_map()
2342 * may have split the RCU critical section.
2344 d
= atomic_rcu_read(&cpuas
->as
->dispatch
);
2345 cpuas
->memory_dispatch
= d
;
2346 tlb_flush(cpuas
->cpu
, 1);
2349 void address_space_init_dispatch(AddressSpace
*as
)
2351 as
->dispatch
= NULL
;
2352 as
->dispatch_listener
= (MemoryListener
) {
2354 .commit
= mem_commit
,
2355 .region_add
= mem_add
,
2356 .region_nop
= mem_add
,
2359 memory_listener_register(&as
->dispatch_listener
, as
);
2362 void address_space_unregister(AddressSpace
*as
)
2364 memory_listener_unregister(&as
->dispatch_listener
);
2367 void address_space_destroy_dispatch(AddressSpace
*as
)
2369 AddressSpaceDispatch
*d
= as
->dispatch
;
2371 atomic_rcu_set(&as
->dispatch
, NULL
);
2373 call_rcu(d
, address_space_dispatch_free
, rcu
);
2377 static void memory_map_init(void)
2379 system_memory
= g_malloc(sizeof(*system_memory
));
2381 memory_region_init(system_memory
, NULL
, "system", UINT64_MAX
);
2382 address_space_init(&address_space_memory
, system_memory
, "memory");
2384 system_io
= g_malloc(sizeof(*system_io
));
2385 memory_region_init_io(system_io
, NULL
, &unassigned_io_ops
, NULL
, "io",
2387 address_space_init(&address_space_io
, system_io
, "I/O");
2390 MemoryRegion
*get_system_memory(void)
2392 return system_memory
;
2395 MemoryRegion
*get_system_io(void)
2400 #endif /* !defined(CONFIG_USER_ONLY) */
2402 /* physical memory access (slow version, mainly for debug) */
2403 #if defined(CONFIG_USER_ONLY)
2404 int cpu_memory_rw_debug(CPUState
*cpu
, target_ulong addr
,
2405 uint8_t *buf
, int len
, int is_write
)
2412 page
= addr
& TARGET_PAGE_MASK
;
2413 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
2416 flags
= page_get_flags(page
);
2417 if (!(flags
& PAGE_VALID
))
2420 if (!(flags
& PAGE_WRITE
))
2422 /* XXX: this code should not depend on lock_user */
2423 if (!(p
= lock_user(VERIFY_WRITE
, addr
, l
, 0)))
2426 unlock_user(p
, addr
, l
);
2428 if (!(flags
& PAGE_READ
))
2430 /* XXX: this code should not depend on lock_user */
2431 if (!(p
= lock_user(VERIFY_READ
, addr
, l
, 1)))
2434 unlock_user(p
, addr
, 0);
2445 static void invalidate_and_set_dirty(MemoryRegion
*mr
, hwaddr addr
,
2448 uint8_t dirty_log_mask
= memory_region_get_dirty_log_mask(mr
);
2449 /* No early return if dirty_log_mask is or becomes 0, because
2450 * cpu_physical_memory_set_dirty_range will still call
2451 * xen_modified_memory.
2453 if (dirty_log_mask
) {
2455 cpu_physical_memory_range_includes_clean(addr
, length
, dirty_log_mask
);
2457 if (dirty_log_mask
& (1 << DIRTY_MEMORY_CODE
)) {
2458 tb_invalidate_phys_range(addr
, addr
+ length
);
2459 dirty_log_mask
&= ~(1 << DIRTY_MEMORY_CODE
);
2461 cpu_physical_memory_set_dirty_range(addr
, length
, dirty_log_mask
);
static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
{
    unsigned access_size_max = mr->ops->valid.max_access_size;

    /* Regions are assumed to support 1-4 byte accesses unless
       otherwise specified.  */
    if (access_size_max == 0) {
        access_size_max = 4;
    }

    /* Bound the maximum access by the alignment of the address.  */
    if (!mr->ops->impl.unaligned) {
        unsigned align_size_max = addr & -addr;
        if (align_size_max != 0 && align_size_max < access_size_max) {
            access_size_max = align_size_max;
        }
    }

    /* Don't attempt accesses larger than the maximum.  */
    if (l > access_size_max) {
        l = access_size_max;
    }

    return l;
}
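/* Worked example (added for clarity, not in the original source): for an MMIO
 * region whose ops report valid.max_access_size == 4, a 6-byte request at
 * address 0x1002 is first capped at 4, then the alignment bound
 * addr & -addr == 2 shrinks it to 2, so the caller splits the transfer into
 * 2-byte accesses from that point on.
 */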
static bool prepare_mmio_access(MemoryRegion *mr)
{
    bool unlocked = !qemu_mutex_iothread_locked();
    bool release_lock = false;

    if (unlocked && mr->global_locking) {
        qemu_mutex_lock_iothread();
        unlocked = false;
        release_lock = true;
    }
    if (mr->flush_coalesced_mmio) {
        if (unlocked) {
            qemu_mutex_lock_iothread();
        }
        qemu_flush_coalesced_mmio_buffer();
        if (unlocked) {
            qemu_mutex_unlock_iothread();
        }
    }

    return release_lock;
}

MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
                             uint8_t *buf, int len, bool is_write)
{
    hwaddr l;
    uint8_t *ptr;
    uint64_t val;
    hwaddr addr1;
    MemoryRegion *mr;
    MemTxResult result = MEMTX_OK;
    bool release_lock = false;

    rcu_read_lock();
    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, is_write);

        if (is_write) {
            if (!memory_access_is_direct(mr, is_write)) {
                release_lock |= prepare_mmio_access(mr);
                l = memory_access_size(mr, l, addr1);
                /* XXX: could force current_cpu to NULL to avoid
                   potential bugs */
                switch (l) {
                case 8:
                    /* 64 bit write access */
                    val = ldq_p(buf);
                    result |= memory_region_dispatch_write(mr, addr1, val, 8,
                                                           attrs);
                    break;
                case 4:
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    result |= memory_region_dispatch_write(mr, addr1, val, 4,
                                                           attrs);
                    break;
                case 2:
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    result |= memory_region_dispatch_write(mr, addr1, val, 2,
                                                           attrs);
                    break;
                case 1:
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    result |= memory_region_dispatch_write(mr, addr1, val, 1,
                                                           attrs);
                    break;
                default:
                    abort();
                }
            } else {
                addr1 += memory_region_get_ram_addr(mr);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(mr, addr1, l);
            }
        } else {
            if (!memory_access_is_direct(mr, is_write)) {
                /* I/O case */
                release_lock |= prepare_mmio_access(mr);
                l = memory_access_size(mr, l, addr1);
                switch (l) {
                case 8:
                    /* 64 bit read access */
                    result |= memory_region_dispatch_read(mr, addr1, &val, 8,
                                                          attrs);
                    stq_p(buf, val);
                    break;
                case 4:
                    /* 32 bit read access */
                    result |= memory_region_dispatch_read(mr, addr1, &val, 4,
                                                          attrs);
                    stl_p(buf, val);
                    break;
                case 2:
                    /* 16 bit read access */
                    result |= memory_region_dispatch_read(mr, addr1, &val, 2,
                                                          attrs);
                    stw_p(buf, val);
                    break;
                case 1:
                    /* 8 bit read access */
                    result |= memory_region_dispatch_read(mr, addr1, &val, 1,
                                                          attrs);
                    stb_p(buf, val);
                    break;
                default:
                    abort();
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
                memcpy(buf, ptr, l);
            }
        }

        if (release_lock) {
            qemu_mutex_unlock_iothread();
            release_lock = false;
        }

        len -= l;
        buf += l;
        addr += l;
    }
    rcu_read_unlock();

    return result;
}

MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
                                const uint8_t *buf, int len)
{
    return address_space_rw(as, addr, attrs, (uint8_t *)buf, len, true);
}

MemTxResult address_space_read(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
                               uint8_t *buf, int len)
{
    return address_space_rw(as, addr, attrs, buf, len, false);
}

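/* Illustrative sketch (not part of the original code): how a caller might use
 * the two wrappers above to copy a small guest-physical buffer.  The function
 * name, addresses and buffer size are made up for the example.
 */
#if 0
static void example_copy_guest_buffer(AddressSpace *as, hwaddr src, hwaddr dst)
{
    uint8_t tmp[64];

    if (address_space_read(as, src, MEMTXATTRS_UNSPECIFIED,
                           tmp, sizeof(tmp)) == MEMTX_OK) {
        address_space_write(as, dst, MEMTXATTRS_UNSPECIFIED,
                            tmp, sizeof(tmp));
    }
}
#endif
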
void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
                            int len, int is_write)
{
    address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
                     buf, len, is_write);
}

enum write_rom_type {
    WRITE_DATA,
    FLUSH_CACHE,
};

static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
    hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
{
    hwaddr l;
    uint8_t *ptr;
    hwaddr addr1;
    MemoryRegion *mr;

    rcu_read_lock();
    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);

        if (!(memory_region_is_ram(mr) ||
              memory_region_is_romd(mr))) {
            l = memory_access_size(mr, l, addr1);
        } else {
            addr1 += memory_region_get_ram_addr(mr);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            switch (type) {
            case WRITE_DATA:
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(mr, addr1, l);
                break;
            case FLUSH_CACHE:
                flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
                break;
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
    rcu_read_unlock();
}

/* used for ROM loading: can write in RAM and ROM */
void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
                                   const uint8_t *buf, int len)
{
    cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
}

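/* Illustrative sketch (not part of the original code): firmware loaders use
 * cpu_physical_memory_write_rom() rather than address_space_write() so that
 * read-only (ROM) regions still get their RAM backing updated.  The address
 * and blob below are made up for the example.
 */
#if 0
static void example_load_firmware(AddressSpace *as)
{
    static const uint8_t blob[4] = { 0xde, 0xad, 0xbe, 0xef };

    cpu_physical_memory_write_rom(as, 0xfffc0000, blob, sizeof(blob));
}
#endif
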
void cpu_flush_icache_range(hwaddr start, int len)
{
    /*
     * This function should do the same thing as an icache flush that was
     * triggered from within the guest. For TCG we are always cache coherent,
     * so there is no need to flush anything. For KVM / Xen we need to flush
     * the host's instruction cache at least.
     */
    if (tcg_enabled()) {
        return;
    }

    cpu_physical_memory_write_rom_internal(&address_space_memory,
                                           start, NULL, len, FLUSH_CACHE);
}

typedef struct {
    MemoryRegion *mr;
    void *buffer;
    hwaddr addr;
    hwaddr len;
    bool in_use;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    QEMUBH *bh;
    QLIST_ENTRY(MapClient) link;
} MapClient;

QemuMutex map_client_list_lock;
static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

static void cpu_unregister_map_client_do(MapClient *client)
{
    QLIST_REMOVE(client, link);
    g_free(client);
}

static void cpu_notify_map_clients_locked(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        qemu_bh_schedule(client->bh);
        cpu_unregister_map_client_do(client);
    }
}

void cpu_register_map_client(QEMUBH *bh)
{
    MapClient *client = g_malloc(sizeof(*client));

    qemu_mutex_lock(&map_client_list_lock);
    client->bh = bh;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    if (!atomic_read(&bounce.in_use)) {
        cpu_notify_map_clients_locked();
    }
    qemu_mutex_unlock(&map_client_list_lock);
}

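/* Illustrative sketch (not part of the original code): a caller whose
 * address_space_map() failed (for example because the single bounce buffer
 * was busy) can register a bottom half to be told when a retry is likely to
 * succeed.  The names below are made up for the example.
 */
#if 0
static void example_map_retry_bh(void *opaque)
{
    /* runs in the main loop once the bounce buffer has been released;
     * the caller would retry its address_space_map() from here */
}

static void example_wait_for_bounce_buffer(void *state)
{
    QEMUBH *bh = qemu_bh_new(example_map_retry_bh, state);

    cpu_register_map_client(bh);
}
#endif
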
void cpu_exec_init_all(void)
{
    qemu_mutex_init(&ram_list.mutex);
    io_mem_init();
    memory_map_init();
    qemu_mutex_init(&map_client_list_lock);
}

void cpu_unregister_map_client(QEMUBH *bh)
{
    MapClient *client;

    qemu_mutex_lock(&map_client_list_lock);
    QLIST_FOREACH(client, &map_client_list, link) {
        if (client->bh == bh) {
            cpu_unregister_map_client_do(client);
            break;
        }
    }
    qemu_mutex_unlock(&map_client_list_lock);
}

static void cpu_notify_map_clients(void)
{
    qemu_mutex_lock(&map_client_list_lock);
    cpu_notify_map_clients_locked();
    qemu_mutex_unlock(&map_client_list_lock);
}

bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
{
    MemoryRegion *mr;
    hwaddr l, xlat;

    rcu_read_lock();
    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (!memory_access_is_direct(mr, is_write)) {
            l = memory_access_size(mr, l, addr);
            if (!memory_region_access_valid(mr, xlat, l, is_write)) {
                return false;
            }
        }

        len -= l;
        addr += l;
    }
    rcu_read_unlock();
    return true;
}

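/* Illustrative sketch (not part of the original code): a device model can
 * probe a DMA window before touching it and raise a device-level error
 * instead of issuing a partial access.  desc_addr and desc_len are made up
 * for the example.
 */
#if 0
static bool example_dma_window_ok(hwaddr desc_addr, int desc_len)
{
    return address_space_access_valid(&address_space_memory,
                                      desc_addr, desc_len, false);
}
#endif
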
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *address_space_map(AddressSpace *as,
                        hwaddr addr,
                        hwaddr *plen,
                        bool is_write)
{
    hwaddr len = *plen;
    hwaddr done = 0;
    hwaddr l, xlat, base;
    MemoryRegion *mr, *this_mr;
    ram_addr_t raddr;

    if (len == 0) {
        return NULL;
    }

    l = len;
    rcu_read_lock();
    mr = address_space_translate(as, addr, &xlat, &l, is_write);

    if (!memory_access_is_direct(mr, is_write)) {
        if (atomic_xchg(&bounce.in_use, true)) {
            rcu_read_unlock();
            return NULL;
        }
        /* Avoid unbounded allocations */
        l = MIN(l, TARGET_PAGE_SIZE);
        bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
        bounce.addr = addr;
        bounce.len = l;

        memory_region_ref(mr);
        bounce.mr = mr;
        if (!is_write) {
            address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
                               bounce.buffer, l);
        }

        rcu_read_unlock();
        *plen = l;
        return bounce.buffer;
    }

    base = xlat;
    raddr = memory_region_get_ram_addr(mr);

    for (;;) {
        len -= l;
        addr += l;
        done += l;
        if (len == 0) {
            break;
        }

        l = len;
        this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (this_mr != mr || xlat != base + done) {
            break;
        }
    }

    memory_region_ref(mr);
    rcu_read_unlock();
    *plen = done;
    return qemu_ram_ptr_length(raddr + base, plen);
}

/* Unmaps a memory region previously mapped by address_space_map().
 * Will also mark the memory as dirty if is_write == 1. access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         int is_write, hwaddr access_len)
{
    if (buffer != bounce.buffer) {
        MemoryRegion *mr;
        ram_addr_t addr1;

        mr = qemu_ram_addr_from_host(buffer, &addr1);
        assert(mr != NULL);
        if (is_write) {
            invalidate_and_set_dirty(mr, addr1, access_len);
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        memory_region_unref(mr);
        return;
    }
    if (is_write) {
        address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
                            bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    memory_region_unref(bounce.mr);
    atomic_mb_set(&bounce.in_use, false);
    cpu_notify_map_clients();
}

void *cpu_physical_memory_map(hwaddr addr,
                              hwaddr *plen,
                              int is_write)
{
    return address_space_map(&address_space_memory, addr, plen, is_write);
}

void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                               int is_write, hwaddr access_len)
{
    return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
}

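/* Illustrative sketch (not part of the original code): zero-copy access to a
 * guest page via the map/unmap pair above, with the documented fallback when
 * the mapping cannot be established.  The function name is made up.
 */
#if 0
static void example_zero_guest_page(hwaddr gpa)
{
    hwaddr plen = TARGET_PAGE_SIZE;
    void *host = cpu_physical_memory_map(gpa, &plen, 1 /* is_write */);

    if (host) {
        memset(host, 0, plen);                   /* plen may have shrunk */
        cpu_physical_memory_unmap(host, plen, 1, plen);
    } else {
        /* fall back to cpu_physical_memory_rw(), or register a map client
         * and retry later */
    }
}
#endif
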
/* warning: addr must be aligned */
static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
                                                  MemTxAttrs attrs,
                                                  MemTxResult *result,
                                                  enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, false);
    if (l < 4 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}

uint32_t address_space_ldl(AddressSpace *as, hwaddr addr,
                           MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_NATIVE_ENDIAN);
}

uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_LITTLE_ENDIAN);
}

uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_BIG_ENDIAN);
}

uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

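/* Illustrative sketch (not part of the original code): picking the accessor
 * that matches the layout of the data rather than the target's native
 * endianness.  The function name, descriptor address and field offset are
 * made up for the example.
 */
#if 0
static uint32_t example_read_desc_flags(hwaddr desc_addr)
{
    MemTxResult res;
    uint32_t flags;

    /* a field defined as little-endian regardless of the target: */
    flags = ldl_le_phys(&address_space_memory, desc_addr + 4);

    /* the same load with explicit attributes and error reporting: */
    flags = address_space_ldl_le(&address_space_memory, desc_addr + 4,
                                 MEMTXATTRS_UNSPECIFIED, &res);
    return res == MEMTX_OK ? flags : 0;
}
#endif
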
/* warning: addr must be aligned */
static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
                                                  MemTxAttrs attrs,
                                                  MemTxResult *result,
                                                  enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 8;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 false);
    if (l < 8 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap64(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap64(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}

uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
                           MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_NATIVE_ENDIAN);
}

uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_LITTLE_ENDIAN);
}

uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_BIG_ENDIAN);
}

uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
                            MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t val;
    MemTxResult r;

    r = address_space_rw(as, addr, attrs, &val, 1, 0);
    if (result) {
        *result = r;
    }
    return val;
}

uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* warning: addr must be aligned */
static inline uint32_t address_space_lduw_internal(AddressSpace *as,
                                                   hwaddr addr,
                                                   MemTxAttrs attrs,
                                                   MemTxResult *result,
                                                   enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 false);
    if (l < 2 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}

uint32_t address_space_lduw(AddressSpace *as, hwaddr addr,
                            MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_NATIVE_ENDIAN);
}

uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
                               MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_LITTLE_ENDIAN);
}

uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
                               MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_BIG_ENDIAN);
}

uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
                                MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    uint8_t dirty_log_mask;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

        r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
    } else {
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        dirty_log_mask = memory_region_get_dirty_log_mask(mr);
        dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
        cpu_physical_memory_set_dirty_range(addr1, 4, dirty_log_mask);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}

void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_notdirty(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

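/* Illustrative sketch (not part of the original code): a target MMU helper
 * setting an accessed bit in a guest PTE would use the notdirty variant so
 * that the store neither invalidates translated code in that page nor
 * perturbs dirty-bit based tracking of page-table writes.  cpu, pte_addr,
 * pte and the flag value are made up for the example.
 */
#if 0
static void example_set_pte_accessed(CPUState *cpu, hwaddr pte_addr,
                                     uint32_t pte)
{
    stl_phys_notdirty(cpu->as, pte_addr, pte | 0x20 /* hypothetical A bit */);
}
#endif
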
/* warning: addr must be aligned */
static inline void address_space_stl_internal(AddressSpace *as,
                                              hwaddr addr, uint32_t val,
                                              MemTxAttrs attrs,
                                              MemTxResult *result,
                                              enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(mr, addr1, 4);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}

void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stl_internal(as, addr, val, attrs, result,
                               DEVICE_NATIVE_ENDIAN);
}

void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stl_internal(as, addr, val, attrs, result,
                               DEVICE_LITTLE_ENDIAN);
}

void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stl_internal(as, addr, val, attrs, result,
                               DEVICE_BIG_ENDIAN);
}

void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t v = val;
    MemTxResult r;

    r = address_space_rw(as, addr, attrs, &v, 1, 1);
    if (result) {
        *result = r;
    }
}

void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* warning: addr must be aligned */
static inline void address_space_stw_internal(AddressSpace *as,
                                              hwaddr addr, uint32_t val,
                                              MemTxAttrs attrs,
                                              MemTxResult *result,
                                              enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, true);
    if (l < 2 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(mr, addr1, 2);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}

void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_NATIVE_ENDIAN);
}

void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_LITTLE_ENDIAN);
}

void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_BIG_ENDIAN);
}

void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;
    val = tswap64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;
    val = cpu_to_le64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;
    val = cpu_to_be64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(cpu, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write) {
            cpu_physical_memory_write_rom(cpu->as, phys_addr, buf, l);
        } else {
            address_space_rw(cpu->as, phys_addr, MEMTXATTRS_UNSPECIFIED,
                             buf, l, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

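/* Illustrative sketch (not part of the original code): this is the kind of
 * path the gdb stub takes when it peeks at guest virtual memory of a CPU.
 * The function name and variables are made up for the example.
 */
#if 0
static bool example_read_guest_insn(CPUState *cpu, target_ulong pc,
                                    uint8_t *insn, int size)
{
    return cpu_memory_rw_debug(cpu, pc, insn, size, 0) == 0;
}
#endif
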
/*
 * Allows code that needs to deal with migration bitmaps etc to still be built
 * target independent.
 */
size_t qemu_target_page_bits(void)
{
    return TARGET_PAGE_BITS;
}

#endif

/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool target_words_bigendian(void);
bool target_words_bigendian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}

#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegion *mr;
    hwaddr l = 1;
    bool res;

    rcu_read_lock();
    mr = address_space_translate(&address_space_memory,
                                 phys_addr, &phys_addr, &l, false);

    res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
    rcu_read_unlock();
    return res;
}

int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
{
    RAMBlock *block;
    int ret = 0;

    rcu_read_lock();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        ret = func(block->idstr, block->host, block->offset,
                   block->used_length, opaque);