/*
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <sys/types.h>
#include "qemu-common.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/boards.h"
#endif
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#endif
#include "exec/cpu-all.h"
#include "qemu/rcu_queue.h"
#include "qemu/main-loop.h"
#include "translate-all.h"
#include "sysemu/replay.h"
#include "sysemu/qtest.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"

#include "qemu/range.h"

#include "qemu/mmap-alloc.h"

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* ram_list is read under rcu_read_lock()/rcu_read_unlock().  Writes
 * are protected by the ramlist lock.
 */
RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC   (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED     (1 << 1)

/* Only a portion of RAM (used_length) is actually used, and migrated.
 * This used_length size can change across reboots.
 */
#define RAM_RESIZEABLE (1 << 2)

/* RAM is backed by an mmapped file.
 */
#define RAM_FILE (1 << 3)
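/*
 * Illustrative note (not part of the original file): these flag bits are
 * combined on RAMBlock::flags.  For example, a file-backed mapping created
 * with qemu_ram_alloc_from_file() carries RAM_FILE, plus RAM_SHARED when a
 * shared backing was requested, so later code can test e.g.
 * "block->flags & RAM_SHARED" to choose MAP_SHARED vs MAP_PRIVATE on remap.
 */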
#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
__thread CPUState *current_cpu;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */

#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */
    uint32_t skip : 6;
     /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables.  */
#define ADDR_SPACE_BITS 64

#define P_L2_SIZE     (1 << P_L2_BITS)

#define P_L2_LEVELS   (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)

typedef PhysPageEntry Node[P_L2_SIZE];
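/*
 * Illustrative arithmetic, not from the original file: with the usual
 * upstream values of P_L2_BITS == 9 and TARGET_PAGE_BITS == 12, each Node
 * resolves 9 address bits, so
 *     P_L2_SIZE   = 1 << 9 = 512 entries per node, and
 *     P_L2_LEVELS = ((64 - 12 - 1) / 9) + 1 = 6
 * levels are enough to cover the full 64-bit physical address space at
 * target-page granularity.
 */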
typedef struct PhysPageMap {
    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;

/**
 * CPUAddressSpace: all the information a CPU needs about an AddressSpace
 * @cpu: the CPU whose AddressSpace this is
 * @as: the AddressSpace itself
 * @memory_dispatch: its dispatch pointer (cached, RCU protected)
 * @tcg_as_listener: listener for tracking changes to the AddressSpace
 */
struct CPUAddressSpace {
    CPUState *cpu;
    AddressSpace *as;
    struct AddressSpaceDispatch *memory_dispatch;
    MemoryListener tcg_as_listener;
};

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
    }
}

static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
{
    ret = map->nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);

    e.skip = leaf ? 0 : 1;
    e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
    for (i = 0; i < P_L2_SIZE; ++i) {
        memcpy(&p[i], &e, sizeof(e));
    }
    return ret;
}

static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map, level == 0);
    }
    p = map->nodes[lp->ptr];
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

/* Compact a non leaf page entry. Simply detect that the entry has a single child,
 * and update our entry so we can skip it and go directly to the destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
{
    unsigned valid_ptr = P_L2_SIZE;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }
        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes, compacted);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}

static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    DECLARE_BITMAP(compacted, nodes_nb);

    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes, compacted);
    }
}

static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (sections[lp.ptr].size.hi ||
        range_covers_byte(sections[lp.ptr].offset_within_address_space,
                          sections[lp.ptr].size.lo, addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}
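/*
 * Illustrative walk-through, not from the original file: a lookup for
 * physical address A starts from the root entry with index = A >>
 * TARGET_PAGE_BITS and i = P_L2_LEVELS.  Each step subtracts the entry's
 * 'skip' from i (so a compacted chain is jumped over in one go) and picks
 * the child selected by bits [i * P_L2_BITS, (i + 1) * P_L2_BITS) of the
 * index, until a leaf is reached whose 'ptr' indexes the sections[] array.
 */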
bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

/* Called from RCU critical section */
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section;
    subpage_t *subpage;

    section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    return section;
}

/* Called from RCU critical section */
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    MemoryRegion *mr;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    mr = section->mr;

    /* MMIO registers can be expected to perform full-width accesses based only
     * on their address, without considering adjacent registers that could
     * decode to completely different MemoryRegions.  When such registers
     * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
     * regions overlap wildly.  For this reason we cannot clamp the accesses
     * here.
     *
     * If the length is small (as is the case for address_space_ldl/stl),
     * everything works fine.  If the incoming length is large, however,
     * the caller really has to do the clamping through memory_access_size.
     */
    if (memory_region_is_ram(mr)) {
        diff = int128_sub(section->size, int128_make64(addr));
        *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    }
    return section;
}
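/*
 * Worked example (added for illustration, not part of the original): for a
 * RAM section of size 0x1000 with addr already rebased to 0xf80 inside it,
 * diff = 0x1000 - 0xf80 = 0x80, so a caller asking for *plen = 0x200 is
 * clamped to 0x80 and must loop to translate the remainder separately.
 */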
static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
{
    if (memory_region_is_ram(mr)) {
        return !(is_write && mr->readonly);
    }
    if (memory_region_is_romd(mr)) {
        return !is_write;
    }

    return false;
}

/* Called from RCU critical section */
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;

    for (;;) {
        AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
        section = address_space_translate_internal(d, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        *plen = MIN(page, *plen);
    }

    *xlat = addr;
    return mr;
}

/* Called from RCU critical section */
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen)
{
    MemoryRegionSection *section;
    section = address_space_translate_internal(cpu->cpu_ases[0].memory_dispatch,
                                               addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;

    return 0;
}

static int cpu_common_pre_load(void *opaque)
{
    CPUState *cpu = opaque;

    cpu->exception_index = -1;

    return 0;
}

static bool cpu_common_exception_index_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return tcg_enabled() && cpu->exception_index != -1;
}

static const VMStateDescription vmstate_cpu_common_exception_index = {
    .name = "cpu_common/exception_index",
    .minimum_version_id = 1,
    .needed = cpu_common_exception_index_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(exception_index, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

static bool cpu_common_crash_occurred_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return cpu->crash_occurred;
}

static const VMStateDescription vmstate_cpu_common_crash_occurred = {
    .name = "cpu_common/crash_occurred",
    .minimum_version_id = 1,
    .needed = cpu_common_crash_occurred_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(crash_occurred, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .minimum_version_id = 1,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_cpu_common_exception_index,
        &vmstate_cpu_common_crash_occurred,
        NULL
    }
};

#endif

CPUState *qemu_get_cpu(int index)
{
    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}

#if !defined(CONFIG_USER_ONLY)
void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as)
{
    /* We only support one address space per cpu at the moment.  */
    assert(cpu->as == as);

    if (cpu->cpu_ases) {
        /* We've already registered the listener for our only AS */
        return;
    }

    cpu->cpu_ases = g_new0(CPUAddressSpace, 1);
    cpu->cpu_ases[0].cpu = cpu;
    cpu->cpu_ases[0].as = as;
    cpu->cpu_ases[0].tcg_as_listener.commit = tcg_commit;
    memory_listener_register(&cpu->cpu_ases[0].tcg_as_listener, as);
}
#endif

#ifndef CONFIG_USER_ONLY
static DECLARE_BITMAP(cpu_index_map, MAX_CPUMASK_BITS);

static int cpu_get_free_index(Error **errp)
{
    int cpu = find_first_zero_bit(cpu_index_map, MAX_CPUMASK_BITS);

    if (cpu >= MAX_CPUMASK_BITS) {
        error_setg(errp, "Trying to use more CPUs than max of %d",
                   MAX_CPUMASK_BITS);
        return -1;
    }

    bitmap_set(cpu_index_map, cpu, 1);
    return cpu;
}

void cpu_exec_exit(CPUState *cpu)
{
    if (cpu->cpu_index == -1) {
        /* cpu_index was never allocated by this @cpu or was already freed. */
        return;
    }

    bitmap_clear(cpu_index_map, cpu->cpu_index, 1);
}
#else

static int cpu_get_free_index(Error **errp)
{
    CPU_FOREACH(some_cpu) {
        cpu_index++;
    }
    return cpu_index;
}

void cpu_exec_exit(CPUState *cpu)
{
}
#endif

void cpu_exec_init(CPUState *cpu, Error **errp)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    int cpu_index;
    Error *local_err = NULL;

#ifdef TARGET_WORDS_BIGENDIAN
    cpu->bigendian = true;
#else
    cpu->bigendian = false;
#endif

#ifndef CONFIG_USER_ONLY
    cpu->as = &address_space_memory;
    cpu->thread_id = qemu_get_thread_id();
#endif

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    cpu_index = cpu->cpu_index = cpu_get_free_index(&local_err);
    if (local_err) {
        error_propagate(errp, local_err);
#if defined(CONFIG_USER_ONLY)
        cpu_list_unlock();
#endif
        return;
    }
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
    }
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, cpu->env_ptr);
    assert(cc->vmsd == NULL);
    assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}

#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    hwaddr phys = cpu_get_phys_page_debug(cpu, pc);
    if (phys != -1) {
        tb_invalidate_phys_addr(cpu->as,
                                phys | (pc & ~TARGET_PAGE_MASK));
    }
}
#endif

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
}

int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    return -ENOSYS;
}

void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
}

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    /* forbid ranges which are empty or run off the end of the address space */
    if (len == 0 || (addr + len - 1) < addr) {
        error_report("tried to set invalid watchpoint at %"
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
    }

    tlb_flush_page(cpu, addr);

    if (watchpoint) {
        *watchpoint = wp;
    }
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (addr == wp->vaddr && len == wp->len
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);

    tlb_flush_page(cpu, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
        }
    }
}

/* Return true if this watchpoint address matches the specified
 * access (ie the address range covered by the watchpoint overlaps
 * partially or completely with the address range covered by the
 * access).
 */
static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
                                                  vaddr addr,
                                                  vaddr len)
{
    /* We know the lengths are non-zero, but a little caution is
     * required to avoid errors in the case where the range ends
     * exactly at the top of the address space and so addr + len
     * wraps round to zero.
     */
    vaddr wpend = wp->vaddr + wp->len - 1;
    vaddr addrend = addr + len - 1;

    return !(addr > wpend || wp->vaddr > addrend);
}
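/*
 * Worked example (added for illustration, not part of the original): with
 * 64-bit vaddr values, a watchpoint at wp->vaddr = 0xfffffffffffff000 with
 * wp->len = 0x1000 gives wpend = 0xffffffffffffffff instead of the wrapped
 * value 0, so an access to that last page still compares as overlapping.
 */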
/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
{
    bp = g_malloc(sizeof(*bp));

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
    }

    breakpoint_invalidate(cpu, pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
{
    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
{
    QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);

    breakpoint_invalidate(cpu, breakpoint->pc);

    g_free(breakpoint);
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
{
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
        }
    }
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(cpu);
        }
    }
}

void QEMU_NORETURN cpu_abort(CPUState *cpu, const char *fmt, ...)
{
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    }
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

#if !defined(CONFIG_USER_ONLY)
/* Called from RCU critical section */
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && addr - block->offset < block->max_length) {
        goto found;
    }
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->max_length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    /* It is safe to write mru_block outside the iothread lock.  This
     * ...
     *                                        xxx removed from list
     * ...
     *                                        call_rcu(reclaim_ramblock, xxx);
     * ...
     * atomic_rcu_set is not needed here.  The block was already published
     * when it was placed into the list.  Here we're just making an extra
     * copy of the pointer.
     */
    ram_list.mru_block = block;
    return block;
}

static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
    CPU_FOREACH(cpu) {
        tlb_reset_dirty(cpu, start1, length);
    }
}

/* Note: start and end must be within the same ram block.  */
bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
                                              ram_addr_t length,
                                              unsigned client)
{
    unsigned long end, page;

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;
    dirty = bitmap_test_and_clear_atomic(ram_list.dirty_memory[client],
                                         page, end - page);

    if (dirty && tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
    }

    return dirty;
}

/* Called from RCU critical section */
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    if (memory_region_is_ram(section->mr)) {
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        AddressSpaceDispatch *d;

        d = atomic_rcu_read(&section->address_space->dispatch);
        iotlb = section - d->map.sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
                               qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this.  Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
{
    phys_mem_alloc = alloc;
}
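/*
 * Hypothetical usage sketch (not from the original file): an accelerator
 * that must place guest RAM in special host memory could install its own
 * allocator early during startup, e.g.
 *
 *     static void *my_accel_ram_alloc(size_t size, uint64_t *align)
 *     {
 *         return my_accel_map_region(size, align);   // assumed helper
 *     }
 *     ...
 *     phys_mem_set_alloc(my_accel_ram_alloc);
 *
 * after which ram_block_add() below calls it instead of
 * qemu_anon_ram_alloc() for blocks that are not file-backed.
 */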
static uint16_t phys_section_add(PhysPageMap *map,
                                 MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(map->sections_nb < TARGET_PAGE_SIZE);

    if (map->sections_nb == map->sections_nb_alloc) {
        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
        map->sections = g_renew(MemoryRegionSection, map->sections,
                                map->sections_nb_alloc);
    }
    map->sections[map->sections_nb] = *section;
    memory_region_ref(section->mr);
    return map->sections_nb++;
}
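/*
 * Illustrative note, not part of the original: because section numbers stay
 * below TARGET_PAGE_SIZE, memory_region_section_get_iotlb() above can fold a
 * section index into the low bits of a page-aligned ram_addr, and
 * iotlb_to_region() later recovers it with "index & ~TARGET_PAGE_MASK"
 * without the two fields ever colliding.
 */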
static void phys_section_destroy(MemoryRegion *mr)
{
    memory_region_unref(mr);

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        object_unref(OBJECT(&subpage->iomem));
        g_free(subpage);
    }
}

static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->sections);
    g_free(map->nodes);
}

static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
                                                   d->map.nodes, d->map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.address_space = d->as;
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&d->map, &subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end,
                     phys_section_add(&d->map, section));
}

static void register_multipage(AddressSpaceDispatch *d,
                               MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(&d->map, section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,
                                                    TARGET_PAGE_BITS));

    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
}

static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = as->next_dispatch;
    MemoryRegionSection now = *section, remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
            - now.offset_within_address_space;
        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(d, &now);
    } else {
        now.size = int128_zero();
    }
    while (int128_ne(remain.size, now.size)) {
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);
        now = remain;
        if (int128_lt(remain.size, page_size)) {
            register_subpage(d, &now);
        } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
            now.size = page_size;
            register_subpage(d, &now);
        } else {
            now.size = int128_and(now.size, int128_neg(page_size));
            register_multipage(d, &now);
        }
    }
}
1172 kvm_flush_coalesced_mmio_buffer();
1175 void qemu_mutex_lock_ramlist(void)
1177 qemu_mutex_lock(&ram_list
.mutex
);
1180 void qemu_mutex_unlock_ramlist(void)
1182 qemu_mutex_unlock(&ram_list
.mutex
);
1187 #include <sys/vfs.h>
1189 #define HUGETLBFS_MAGIC 0x958458f6
1191 static long gethugepagesize(const char *path
, Error
**errp
)
1197 ret
= statfs(path
, &fs
);
1198 } while (ret
!= 0 && errno
== EINTR
);
1201 error_setg_errno(errp
, errno
, "failed to get page size of file %s",
1206 if (!qtest_driver() &&
1207 fs
.f_type
!= HUGETLBFS_MAGIC
) {
1208 fprintf(stderr
, "Warning: path not on HugeTLBFS: %s\n", path
);
1214 static void *file_ram_alloc(RAMBlock
*block
,
1221 char *sanitized_name
;
1223 void * volatile area
= NULL
;
1226 Error
*local_err
= NULL
;
1228 hpagesize
= gethugepagesize(path
, &local_err
);
1230 error_propagate(errp
, local_err
);
1233 block
->mr
->align
= hpagesize
;
1235 if (memory
< hpagesize
) {
1236 error_setg(errp
, "memory size 0x" RAM_ADDR_FMT
" must be equal to "
1237 "or larger than huge page size 0x%" PRIx64
,
1242 if (kvm_enabled() && !kvm_has_sync_mmu()) {
1244 "host lacks kvm mmu notifiers, -mem-path unsupported");
1248 if (!stat(path
, &st
) && S_ISDIR(st
.st_mode
)) {
1249 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
1250 sanitized_name
= g_strdup(memory_region_name(block
->mr
));
1251 for (c
= sanitized_name
; *c
!= '\0'; c
++) {
1257 filename
= g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path
,
1259 g_free(sanitized_name
);
1261 fd
= mkstemp(filename
);
1267 fd
= open(path
, O_RDWR
| O_CREAT
, 0644);
1271 error_setg_errno(errp
, errno
,
1272 "unable to create backing store for hugepages");
1276 memory
= ROUND_UP(memory
, hpagesize
);
1279 * ftruncate is not supported by hugetlbfs in older
1280 * hosts, so don't bother bailing out on errors.
1281 * If anything goes wrong with it under other filesystems,
1284 if (ftruncate(fd
, memory
)) {
1285 perror("ftruncate");
1288 area
= qemu_ram_mmap(fd
, memory
, hpagesize
, block
->flags
& RAM_SHARED
);
1289 if (area
== MAP_FAILED
) {
1290 error_setg_errno(errp
, errno
,
1291 "unable to map backing store for hugepages");
1297 os_mem_prealloc(fd
, area
, memory
);
1308 /* Called with the ramlist lock held. */
1309 static ram_addr_t
find_ram_offset(ram_addr_t size
)
1311 RAMBlock
*block
, *next_block
;
1312 ram_addr_t offset
= RAM_ADDR_MAX
, mingap
= RAM_ADDR_MAX
;
1314 assert(size
!= 0); /* it would hand out same offset multiple times */
1316 if (QLIST_EMPTY_RCU(&ram_list
.blocks
)) {
1320 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1321 ram_addr_t end
, next
= RAM_ADDR_MAX
;
1323 end
= block
->offset
+ block
->max_length
;
1325 QLIST_FOREACH_RCU(next_block
, &ram_list
.blocks
, next
) {
1326 if (next_block
->offset
>= end
) {
1327 next
= MIN(next
, next_block
->offset
);
1330 if (next
- end
>= size
&& next
- end
< mingap
) {
1332 mingap
= next
- end
;
1336 if (offset
== RAM_ADDR_MAX
) {
1337 fprintf(stderr
, "Failed to find gap of requested size: %" PRIu64
"\n",
1345 ram_addr_t
last_ram_offset(void)
1348 ram_addr_t last
= 0;
1351 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1352 last
= MAX(last
, block
->offset
+ block
->max_length
);
1358 static void qemu_ram_setup_dump(void *addr
, ram_addr_t size
)
1362 /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
1363 if (!machine_dump_guest_core(current_machine
)) {
1364 ret
= qemu_madvise(addr
, size
, QEMU_MADV_DONTDUMP
);
1366 perror("qemu_madvise");
1367 fprintf(stderr
, "madvise doesn't support MADV_DONTDUMP, "
1368 "but dump_guest_core=off specified\n");
1373 /* Called within an RCU critical section, or while the ramlist lock
1376 static RAMBlock
*find_ram_block(ram_addr_t addr
)
1380 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1381 if (block
->offset
== addr
) {
1389 const char *qemu_ram_get_idstr(RAMBlock
*rb
)
1394 /* Called with iothread lock held. */
1395 void qemu_ram_set_idstr(ram_addr_t addr
, const char *name
, DeviceState
*dev
)
1397 RAMBlock
*new_block
, *block
;
1400 new_block
= find_ram_block(addr
);
1402 assert(!new_block
->idstr
[0]);
1405 char *id
= qdev_get_dev_path(dev
);
1407 snprintf(new_block
->idstr
, sizeof(new_block
->idstr
), "%s/", id
);
1411 pstrcat(new_block
->idstr
, sizeof(new_block
->idstr
), name
);
1413 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1414 if (block
!= new_block
&& !strcmp(block
->idstr
, new_block
->idstr
)) {
1415 fprintf(stderr
, "RAMBlock \"%s\" already registered, abort!\n",
1423 /* Called with iothread lock held. */
1424 void qemu_ram_unset_idstr(ram_addr_t addr
)
1428 /* FIXME: arch_init.c assumes that this is not called throughout
1429 * migration. Ignore the problem since hot-unplug during migration
1430 * does not work anyway.
1434 block
= find_ram_block(addr
);
1436 memset(block
->idstr
, 0, sizeof(block
->idstr
));
1441 static int memory_try_enable_merging(void *addr
, size_t len
)
1443 if (!machine_mem_merge(current_machine
)) {
1444 /* disabled by the user */
1448 return qemu_madvise(addr
, len
, QEMU_MADV_MERGEABLE
);
1451 /* Only legal before guest might have detected the memory size: e.g. on
1452 * incoming migration, or right after reset.
1454 * As memory core doesn't know how is memory accessed, it is up to
1455 * resize callback to update device state and/or add assertions to detect
1456 * misuse, if necessary.
1458 int qemu_ram_resize(ram_addr_t base
, ram_addr_t newsize
, Error
**errp
)
1460 RAMBlock
*block
= find_ram_block(base
);
1464 newsize
= HOST_PAGE_ALIGN(newsize
);
1466 if (block
->used_length
== newsize
) {
1470 if (!(block
->flags
& RAM_RESIZEABLE
)) {
1471 error_setg_errno(errp
, EINVAL
,
1472 "Length mismatch: %s: 0x" RAM_ADDR_FMT
1473 " in != 0x" RAM_ADDR_FMT
, block
->idstr
,
1474 newsize
, block
->used_length
);
1478 if (block
->max_length
< newsize
) {
1479 error_setg_errno(errp
, EINVAL
,
1480 "Length too large: %s: 0x" RAM_ADDR_FMT
1481 " > 0x" RAM_ADDR_FMT
, block
->idstr
,
1482 newsize
, block
->max_length
);
1486 cpu_physical_memory_clear_dirty_range(block
->offset
, block
->used_length
);
1487 block
->used_length
= newsize
;
1488 cpu_physical_memory_set_dirty_range(block
->offset
, block
->used_length
,
1490 memory_region_set_size(block
->mr
, newsize
);
1491 if (block
->resized
) {
1492 block
->resized(block
->idstr
, newsize
, block
->host
);
1497 static ram_addr_t
ram_block_add(RAMBlock
*new_block
, Error
**errp
)
1500 RAMBlock
*last_block
= NULL
;
1501 ram_addr_t old_ram_size
, new_ram_size
;
1503 old_ram_size
= last_ram_offset() >> TARGET_PAGE_BITS
;
1505 qemu_mutex_lock_ramlist();
1506 new_block
->offset
= find_ram_offset(new_block
->max_length
);
1508 if (!new_block
->host
) {
1509 if (xen_enabled()) {
1510 xen_ram_alloc(new_block
->offset
, new_block
->max_length
,
1513 new_block
->host
= phys_mem_alloc(new_block
->max_length
,
1514 &new_block
->mr
->align
);
1515 if (!new_block
->host
) {
1516 error_setg_errno(errp
, errno
,
1517 "cannot set up guest memory '%s'",
1518 memory_region_name(new_block
->mr
));
1519 qemu_mutex_unlock_ramlist();
1522 memory_try_enable_merging(new_block
->host
, new_block
->max_length
);
1526 new_ram_size
= MAX(old_ram_size
,
1527 (new_block
->offset
+ new_block
->max_length
) >> TARGET_PAGE_BITS
);
1528 if (new_ram_size
> old_ram_size
) {
1529 migration_bitmap_extend(old_ram_size
, new_ram_size
);
1531 /* Keep the list sorted from biggest to smallest block. Unlike QTAILQ,
1532 * QLIST (which has an RCU-friendly variant) does not have insertion at
1533 * tail, so save the last element in last_block.
1535 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1537 if (block
->max_length
< new_block
->max_length
) {
1542 QLIST_INSERT_BEFORE_RCU(block
, new_block
, next
);
1543 } else if (last_block
) {
1544 QLIST_INSERT_AFTER_RCU(last_block
, new_block
, next
);
1545 } else { /* list is empty */
1546 QLIST_INSERT_HEAD_RCU(&ram_list
.blocks
, new_block
, next
);
1548 ram_list
.mru_block
= NULL
;
1550 /* Write list before version */
1553 qemu_mutex_unlock_ramlist();
1555 new_ram_size
= last_ram_offset() >> TARGET_PAGE_BITS
;
1557 if (new_ram_size
> old_ram_size
) {
1560 /* ram_list.dirty_memory[] is protected by the iothread lock. */
1561 for (i
= 0; i
< DIRTY_MEMORY_NUM
; i
++) {
1562 ram_list
.dirty_memory
[i
] =
1563 bitmap_zero_extend(ram_list
.dirty_memory
[i
],
1564 old_ram_size
, new_ram_size
);
1567 cpu_physical_memory_set_dirty_range(new_block
->offset
,
1568 new_block
->used_length
,
1571 if (new_block
->host
) {
1572 qemu_ram_setup_dump(new_block
->host
, new_block
->max_length
);
1573 qemu_madvise(new_block
->host
, new_block
->max_length
, QEMU_MADV_HUGEPAGE
);
1574 qemu_madvise(new_block
->host
, new_block
->max_length
, QEMU_MADV_DONTFORK
);
1575 if (kvm_enabled()) {
1576 kvm_setup_guest_memory(new_block
->host
, new_block
->max_length
);
1580 return new_block
->offset
;
1584 ram_addr_t
qemu_ram_alloc_from_file(ram_addr_t size
, MemoryRegion
*mr
,
1585 bool share
, const char *mem_path
,
1588 RAMBlock
*new_block
;
1590 Error
*local_err
= NULL
;
1592 if (xen_enabled()) {
1593 error_setg(errp
, "-mem-path not supported with Xen");
1597 if (phys_mem_alloc
!= qemu_anon_ram_alloc
) {
1599 * file_ram_alloc() needs to allocate just like
1600 * phys_mem_alloc, but we haven't bothered to provide
1604 "-mem-path not supported with this accelerator");
1608 size
= HOST_PAGE_ALIGN(size
);
1609 new_block
= g_malloc0(sizeof(*new_block
));
1611 new_block
->used_length
= size
;
1612 new_block
->max_length
= size
;
1613 new_block
->flags
= share
? RAM_SHARED
: 0;
1614 new_block
->flags
|= RAM_FILE
;
1615 new_block
->host
= file_ram_alloc(new_block
, size
,
1617 if (!new_block
->host
) {
1622 addr
= ram_block_add(new_block
, &local_err
);
1625 error_propagate(errp
, local_err
);
1633 ram_addr_t
qemu_ram_alloc_internal(ram_addr_t size
, ram_addr_t max_size
,
1634 void (*resized
)(const char*,
1637 void *host
, bool resizeable
,
1638 MemoryRegion
*mr
, Error
**errp
)
1640 RAMBlock
*new_block
;
1642 Error
*local_err
= NULL
;
1644 size
= HOST_PAGE_ALIGN(size
);
1645 max_size
= HOST_PAGE_ALIGN(max_size
);
1646 new_block
= g_malloc0(sizeof(*new_block
));
1648 new_block
->resized
= resized
;
1649 new_block
->used_length
= size
;
1650 new_block
->max_length
= max_size
;
1651 assert(max_size
>= size
);
1653 new_block
->host
= host
;
1655 new_block
->flags
|= RAM_PREALLOC
;
1658 new_block
->flags
|= RAM_RESIZEABLE
;
1660 addr
= ram_block_add(new_block
, &local_err
);
1663 error_propagate(errp
, local_err
);
1669 ram_addr_t
qemu_ram_alloc_from_ptr(ram_addr_t size
, void *host
,
1670 MemoryRegion
*mr
, Error
**errp
)
1672 return qemu_ram_alloc_internal(size
, size
, NULL
, host
, false, mr
, errp
);
1675 ram_addr_t
qemu_ram_alloc(ram_addr_t size
, MemoryRegion
*mr
, Error
**errp
)
1677 return qemu_ram_alloc_internal(size
, size
, NULL
, NULL
, false, mr
, errp
);
1680 ram_addr_t
qemu_ram_alloc_resizeable(ram_addr_t size
, ram_addr_t maxsz
,
1681 void (*resized
)(const char*,
1684 MemoryRegion
*mr
, Error
**errp
)
1686 return qemu_ram_alloc_internal(size
, maxsz
, resized
, NULL
, true, mr
, errp
);
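/*
 * Hypothetical usage sketch (not part of the original file): board and
 * device code normally reaches these allocators through the MemoryRegion
 * wrappers rather than calling them directly, e.g.
 *
 *     memory_region_init_ram(mr, owner, "my-device.ram", 0x10000, &err);
 *
 * which ends up in qemu_ram_alloc(); handing in a pre-existing host buffer
 * instead ends up in qemu_ram_alloc_from_ptr() and marks the block
 * RAM_PREALLOC so reclaim_ramblock() below will not try to free it.
 */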
void qemu_ram_free_from_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    qemu_mutex_lock_ramlist();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE_RCU(block, next);
            ram_list.mru_block = NULL;
            /* Write list before version */
            smp_wmb();
            ram_list.version++;
            g_free_rcu(block, rcu);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}

static void reclaim_ramblock(RAMBlock *block)
{
    if (block->flags & RAM_PREALLOC) {
        ;
    } else if (xen_enabled()) {
        xen_invalidate_map_cache_entry(block->host);
#ifndef _WIN32
    } else if (block->fd >= 0) {
        if (block->flags & RAM_FILE) {
            qemu_ram_munmap(block->host, block->max_length);
        } else {
            munmap(block->host, block->max_length);
        }
        close(block->fd);
#endif
    } else {
        qemu_anon_ram_free(block->host, block->max_length);
    }
    g_free(block);
}

void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    qemu_mutex_lock_ramlist();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE_RCU(block, next);
            ram_list.mru_block = NULL;
            /* Write list before version */
            smp_wmb();
            ram_list.version++;
            call_rcu(block, reclaim_ramblock, rcu);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}

#ifndef _WIN32
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->max_length) {
            vaddr = ramblock_ptr(block, offset);
            if (block->flags & RAM_PREALLOC) {
                ;
            } else if (xen_enabled()) {
                abort();
            } else {
                flags = MAP_FIXED;
                if (block->fd >= 0) {
                    flags |= (block->flags & RAM_SHARED ?
                              MAP_SHARED : MAP_PRIVATE);
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, block->fd, offset);
                } else {
                    /*
                     * Remap needs to match alloc.  Accelerators that
                     * set phys_mem_alloc never remap.  If they did,
                     * we'd need a remap hook here.
                     */
                    assert(phys_mem_alloc == qemu_anon_ram_alloc);

                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                memory_try_enable_merging(vaddr, length);
                qemu_ram_setup_dump(vaddr, length);
            }
        }
    }
}
#endif /* !_WIN32 */

int qemu_get_ram_fd(ram_addr_t addr)
{
    RAMBlock *block;
    int fd;

    rcu_read_lock();
    block = qemu_get_ram_block(addr);
    fd = block->fd;
    rcu_read_unlock();
    return fd;
}

void *qemu_get_ram_block_host_ptr(ram_addr_t addr)
{
    RAMBlock *block;
    void *ptr;

    rcu_read_lock();
    block = qemu_get_ram_block(addr);
    ptr = ramblock_ptr(block, 0);
    rcu_read_unlock();
    return ptr;
}

/* Return a host pointer to ram allocated with qemu_ram_alloc.
 * This should not be used for general purpose DMA.  Use address_space_map
 * or address_space_rw instead. For local memory (e.g. video ram) that the
 * device owns, use memory_region_get_ram_ptr.
 *
 * By the time this function returns, the returned pointer is not protected
 * by RCU anymore.  If the caller is not within an RCU critical section and
 * does not hold the iothread lock, it must have other means of protecting the
 * pointer, such as a reference to the region that includes the incoming
 * ram_addr_t.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;
    void *ptr;

    rcu_read_lock();
    block = qemu_get_ram_block(addr);

    if (xen_enabled() && block->host == NULL) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map until the end of the page.
         */
        if (block->offset == 0) {
            ptr = xen_map_cache(addr, 0, 0);
            goto unlock;
        }

        block->host = xen_map_cache(block->offset, block->max_length, 1);
    }
    ptr = ramblock_ptr(block, addr - block->offset);

unlock:
    rcu_read_unlock();
    return ptr;
}

/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
 * but takes a size argument.
 *
 * By the time this function returns, the returned pointer is not protected
 * by RCU anymore.  If the caller is not within an RCU critical section and
 * does not hold the iothread lock, it must have other means of protecting the
 * pointer, such as a reference to the region that includes the incoming
 * ram_addr_t.
 */
static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
{
    if (xen_enabled()) {
        return xen_map_cache(addr, *size, 1);
    } else {
        RAMBlock *block;

        rcu_read_lock();
        QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
            if (addr - block->offset < block->max_length) {
                if (addr - block->offset + *size > block->max_length)
                    *size = block->max_length - addr + block->offset;
                ptr = ramblock_ptr(block, addr - block->offset);
                rcu_read_unlock();
                return ptr;
            }
        }

        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
        abort();
    }
}

/*
 * Translates a host ptr back to a RAMBlock, a ram_addr and an offset
 * in that RAMBlock.
 *
 * ptr: Host pointer to look up
 * round_offset: If true round the result offset down to a page boundary
 * *ram_addr: set to result ram_addr
 * *offset: set to result offset within the RAMBlock
 *
 * Returns: RAMBlock (or NULL if not found)
 *
 * By the time this function returns, the returned pointer is not protected
 * by RCU anymore.  If the caller is not within an RCU critical section and
 * does not hold the iothread lock, it must have other means of protecting the
 * pointer, such as a reference to the region that includes the incoming
 * ram_addr_t.
 */
RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
                                   ram_addr_t *ram_addr,
                                   ram_addr_t *offset)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_enabled()) {
        rcu_read_lock();
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
        block = qemu_get_ram_block(*ram_addr);
        if (block) {
            *offset = (host - block->host);
        }
        rcu_read_unlock();
        return block;
    }

    rcu_read_lock();
    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && block->host && host - block->host < block->max_length) {
        goto found;
    }

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        /* This case appears when the block is not mapped. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->max_length) {
            goto found;
        }
    }

    rcu_read_unlock();
    return NULL;

found:
    *offset = (host - block->host);
    if (round_offset) {
        *offset &= TARGET_PAGE_MASK;
    }
    *ram_addr = block->offset + *offset;
    rcu_read_unlock();
    return block;
}
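/*
 * Hypothetical usage sketch (not part of the original file): a caller that
 * only has a host pointer, e.g. taken from a TLB entry, can recover the
 * owning block and its ram_addr like this:
 *
 *     ram_addr_t ram_addr, offset;
 *     RAMBlock *rb = qemu_ram_block_from_host(host_ptr, false,
 *                                             &ram_addr, &offset);
 *     if (rb) {
 *         // ram_addr now indexes the global dirty bitmaps,
 *         // offset is the byte offset inside rb
 *     }
 *
 * qemu_ram_addr_from_host() below is a thin wrapper around exactly this.
 */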
/*
 * Finds the named RAMBlock
 *
 * name: The name of RAMBlock to find
 *
 * Returns: RAMBlock (or NULL if not found)
 */
RAMBlock *qemu_ram_block_by_name(const char *name)
{
    RAMBlock *block;

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (!strcmp(name, block->idstr)) {
            return block;
        }
    }

    return NULL;
}

/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    ram_addr_t offset; /* Not used */

    block = qemu_ram_block_from_host(ptr, false, ram_addr, &offset);

    if (!block) {
        return NULL;
    }

    return block->mr;
}

static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
                               uint64_t val, unsigned size)
{
    if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
        tb_invalidate_phys_page_fast(ram_addr, size);
    }
    switch (size) {
    case 1:
        stb_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 2:
        stw_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 4:
        stl_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    default:
        abort();
    }
    /* Set both VGA and migration bits for simplicity and to remove
     * the notdirty callback faster.
     */
    cpu_physical_memory_set_dirty_range(ram_addr, size,
                                        DIRTY_CLIENTS_NOCODE);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (!cpu_physical_memory_is_clean(ram_addr)) {
        tlb_set_dirty(current_cpu, current_cpu->mem_io_vaddr);
    }
}

static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
                                 unsigned size, bool is_write)
{
    return is_write;
}

static const MemoryRegionOps notdirty_mem_ops = {
    .write = notdirty_mem_write,
    .valid.accepts = notdirty_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
{
    CPUState *cpu = current_cpu;
    CPUArchState *env = cpu->env_ptr;
    target_ulong pc, cs_base;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (cpu->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, len)
                && (wp->flags & flags)) {
            if (flags == BP_MEM_READ) {
                wp->flags |= BP_WATCHPOINT_HIT_READ;
            } else {
                wp->flags |= BP_WATCHPOINT_HIT_WRITE;
            }
            wp->hitaddr = vaddr;
            wp->hitattrs = attrs;
            if (!cpu->watchpoint_hit) {
                cpu->watchpoint_hit = wp;
                tb_check_watchpoint(cpu);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    cpu->exception_index = EXCP_DEBUG;
                    cpu_loop_exit(cpu);
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
                    cpu_resume_from_signal(cpu, NULL);
                }
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}

/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata,
                                  unsigned size, MemTxAttrs attrs)
{
    MemTxResult res;
    uint64_t data;

    check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ);
    switch (size) {
    case 1:
        data = address_space_ldub(&address_space_memory, addr, attrs, &res);
        break;
    case 2:
        data = address_space_lduw(&address_space_memory, addr, attrs, &res);
        break;
    case 4:
        data = address_space_ldl(&address_space_memory, addr, attrs, &res);
        break;
    default:
        abort();
    }
    *pdata = data;
    return res;
}

static MemTxResult watch_mem_write(void *opaque, hwaddr addr,
                                   uint64_t val, unsigned size,
                                   MemTxAttrs attrs)
{
    MemTxResult res;

    check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE);
    switch (size) {
    case 1:
        address_space_stb(&address_space_memory, addr, val, attrs, &res);
        break;
    case 2:
        address_space_stw(&address_space_memory, addr, val, attrs, &res);
        break;
    case 4:
        address_space_stl(&address_space_memory, addr, val, attrs, &res);
        break;
    default:
        abort();
    }
    return res;
}

static const MemoryRegionOps watch_mem_ops = {
    .read_with_attrs = watch_mem_read,
    .write_with_attrs = watch_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
                                unsigned len, MemTxAttrs attrs)
{
    subpage_t *subpage = opaque;
    uint8_t buf[8];
    MemTxResult res;

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
           subpage, len, addr);
#endif
    res = address_space_read(subpage->as, addr + subpage->base,
                             attrs, buf, len);
    if (res) {
        return res;
    }
    switch (len) {
    case 1:
        *data = ldub_p(buf);
        return res;
    case 2:
        *data = lduw_p(buf);
        return res;
    case 4:
        *data = ldl_p(buf);
        return res;
    case 8:
        *data = ldq_p(buf);
        return res;
    default:
        abort();
    }
}

static MemTxResult subpage_write(void *opaque, hwaddr addr,
                                 uint64_t value, unsigned len, MemTxAttrs attrs)
{
    subpage_t *subpage = opaque;
    uint8_t buf[8];

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx
           " value %"PRIx64"\n",
           __func__, subpage, len, addr, value);
#endif
    switch (len) {
    case 1:
        stb_p(buf, value);
        break;
    case 2:
        stw_p(buf, value);
        break;
    case 4:
        stl_p(buf, value);
        break;
    case 8:
        stq_p(buf, value);
        break;
    default:
        abort();
    }
    return address_space_write(subpage->as, addr + subpage->base,
                               attrs, buf, len);
}

static bool subpage_accepts(void *opaque, hwaddr addr,
                            unsigned len, bool is_write)
{
    subpage_t *subpage = opaque;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
           __func__, subpage, is_write ? 'w' : 'r', len, addr);
#endif

    return address_space_access_valid(subpage->as, addr + subpage->base,
                                      len, is_write);
}

static const MemoryRegionOps subpage_ops = {
    .read_with_attrs = subpage_read,
    .write_with_attrs = subpage_write,
    .impl.min_access_size = 1,
    .impl.max_access_size = 8,
    .valid.min_access_size = 1,
    .valid.max_access_size = 8,
    .valid.accepts = subpage_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
           __func__, mmio, start, end, idx, eidx, section);
#endif
    for (; idx <= eidx; idx++) {
        mmio->sub_section[idx] = section;
    }

    return 0;
}

static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
{
    subpage_t *mmio;

    mmio = g_malloc0(sizeof(subpage_t));

    mmio->as = as;
    mmio->base = base;
    memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
                          NULL, TARGET_PAGE_SIZE);
    mmio->iomem.subpage = true;
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
           mmio, base, TARGET_PAGE_SIZE);
#endif
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);

    return mmio;
}

static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
                              MemoryRegion *mr)
{
    MemoryRegionSection section = {
        .address_space = as,
        .mr = mr,
        .offset_within_address_space = 0,
        .offset_within_region = 0,
        .size = int128_2_64(),
    };

    return phys_section_add(map, &section);
}

MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index)
{
    CPUAddressSpace *cpuas = &cpu->cpu_ases[0];
    AddressSpaceDispatch *d = atomic_rcu_read(&cpuas->memory_dispatch);
    MemoryRegionSection *sections = d->map.sections;

    return sections[index & ~TARGET_PAGE_MASK].mr;
}

static void io_mem_init(void)
{
    memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
    memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
                          NULL, UINT64_MAX);
    memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
                          NULL, UINT64_MAX);
    memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
                          NULL, UINT64_MAX);
}

static void mem_begin(MemoryListener *listener)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
    uint16_t n;

    n = dummy_section(&d->map, as, &io_mem_unassigned);
    assert(n == PHYS_SECTION_UNASSIGNED);
    n = dummy_section(&d->map, as, &io_mem_notdirty);
    assert(n == PHYS_SECTION_NOTDIRTY);
    n = dummy_section(&d->map, as, &io_mem_rom);
    assert(n == PHYS_SECTION_ROM);
    n = dummy_section(&d->map, as, &io_mem_watch);
    assert(n == PHYS_SECTION_WATCH);

    d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
    d->as = as;
    as->next_dispatch = d;
}

static void address_space_dispatch_free(AddressSpaceDispatch *d)
{
    phys_sections_free(&d->map);
    g_free(d);
}

static void mem_commit(MemoryListener *listener)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *cur = as->dispatch;
    AddressSpaceDispatch *next = as->next_dispatch;

    phys_page_compact_all(next, next->map.nodes_nb);

    atomic_rcu_set(&as->dispatch, next);
    if (cur) {
        call_rcu(cur, address_space_dispatch_free, rcu);
    }
}

static void tcg_commit(MemoryListener *listener)
{
    CPUAddressSpace *cpuas;
    AddressSpaceDispatch *d;

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener);
    cpu_reloading_memory_map();
    /* The CPU and TLB are protected by the iothread lock.
     * We reload the dispatch pointer now because cpu_reloading_memory_map()
     * may have split the RCU critical section.
     */
    d = atomic_rcu_read(&cpuas->as->dispatch);
    cpuas->memory_dispatch = d;
    tlb_flush(cpuas->cpu, 1);
}

void address_space_init_dispatch(AddressSpace *as)
{
    as->dispatch = NULL;
    as->dispatch_listener = (MemoryListener) {
        .begin = mem_begin,
        .commit = mem_commit,
        .region_add = mem_add,
        .region_nop = mem_add,
    };
    memory_listener_register(&as->dispatch_listener, as);
}

void address_space_unregister(AddressSpace *as)
{
    memory_listener_unregister(&as->dispatch_listener);
}

void address_space_destroy_dispatch(AddressSpace *as)
{
    AddressSpaceDispatch *d = as->dispatch;

    atomic_rcu_set(&as->dispatch, NULL);
    if (d) {
        call_rcu(d, address_space_dispatch_free, rcu);
    }
}

static void memory_map_init(void)
{
    system_memory = g_malloc(sizeof(*system_memory));

    memory_region_init(system_memory, NULL, "system", UINT64_MAX);
    address_space_init(&address_space_memory, system_memory, "memory");

    system_io = g_malloc(sizeof(*system_io));
    memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
                          65536);
    address_space_init(&address_space_io, system_io, "I/O");
}

MemoryRegion *get_system_memory(void)
{
    return system_memory;
}

MemoryRegion *get_system_io(void)
{
    return system_io;
}

#endif /* !defined(CONFIG_USER_ONLY) */

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

#else

static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
                                     hwaddr length)
{
    uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
    /* No early return if dirty_log_mask is or becomes 0, because
     * cpu_physical_memory_set_dirty_range will still call
     * xen_modified_memory.
     */
    if (dirty_log_mask) {
        dirty_log_mask =
            cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
    }
    if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
        tb_invalidate_phys_range(addr, addr + length);
        dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
    }
    cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
}

static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
{
    unsigned access_size_max = mr->ops->valid.max_access_size;

    /* Regions are assumed to support 1-4 byte accesses unless
       otherwise specified.  */
    if (access_size_max == 0) {
        access_size_max = 4;
    }

    /* Bound the maximum access by the alignment of the address.  */
    if (!mr->ops->impl.unaligned) {
        unsigned align_size_max = addr & -addr;
        if (align_size_max != 0 && align_size_max < access_size_max) {
            access_size_max = align_size_max;
        }
    }

    /* Don't attempt accesses larger than the maximum.  */
    if (l > access_size_max) {
        l = access_size_max;
    }

    return l;
}
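/*
 * Worked example (added for illustration, not in the original): for an MMIO
 * region whose ops report valid.max_access_size = 4 and impl.unaligned =
 * false, an 8-byte access at address 0x...1006 is first capped to 4 bytes,
 * then the alignment term (addr & -addr == 2) caps it further, so the
 * caller's loop in address_space_rw() issues a 2-byte dispatch at that
 * address and continues with the remainder.
 */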
static bool prepare_mmio_access(MemoryRegion *mr)
{
    bool unlocked = !qemu_mutex_iothread_locked();
    bool release_lock = false;

    if (unlocked && mr->global_locking) {
        qemu_mutex_lock_iothread();
        unlocked = false;
        release_lock = true;
    }
    if (mr->flush_coalesced_mmio) {
        if (unlocked) {
            qemu_mutex_lock_iothread();
        }
        qemu_flush_coalesced_mmio_buffer();
        if (unlocked) {
            qemu_mutex_unlock_iothread();
        }
    }

    return release_lock;
}

MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
                             uint8_t *buf, int len, bool is_write)
{
    hwaddr l;
    uint8_t *ptr;
    uint64_t val;
    hwaddr addr1;
    MemoryRegion *mr;
    MemTxResult result = MEMTX_OK;
    bool release_lock = false;

    rcu_read_lock();
    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, is_write);

        if (is_write) {
            if (!memory_access_is_direct(mr, is_write)) {
                release_lock |= prepare_mmio_access(mr);
                l = memory_access_size(mr, l, addr1);
                /* XXX: could force current_cpu to NULL to avoid
                   potential bugs */
                switch (l) {
                case 8:
                    /* 64 bit write access */
                    val = ldq_p(buf);
                    result |= memory_region_dispatch_write(mr, addr1, val, 8,
                                                           attrs);
                    break;
                case 4:
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    result |= memory_region_dispatch_write(mr, addr1, val, 4,
                                                           attrs);
                    break;
                case 2:
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    result |= memory_region_dispatch_write(mr, addr1, val, 2,
                                                           attrs);
                    break;
                case 1:
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    result |= memory_region_dispatch_write(mr, addr1, val, 1,
                                                           attrs);
                    break;
                default:
                    abort();
                }
            } else {
                addr1 += memory_region_get_ram_addr(mr);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(mr, addr1, l);
            }
        } else {
            if (!memory_access_is_direct(mr, is_write)) {
                /* I/O case */
                release_lock |= prepare_mmio_access(mr);
                l = memory_access_size(mr, l, addr1);
                switch (l) {
                case 8:
                    /* 64 bit read access */
                    result |= memory_region_dispatch_read(mr, addr1, &val, 8,
                                                          attrs);
                    stq_p(buf, val);
                    break;
                case 4:
                    /* 32 bit read access */
                    result |= memory_region_dispatch_read(mr, addr1, &val, 4,
                                                          attrs);
                    stl_p(buf, val);
                    break;
                case 2:
                    /* 16 bit read access */
                    result |= memory_region_dispatch_read(mr, addr1, &val, 2,
                                                          attrs);
                    stw_p(buf, val);
                    break;
                case 1:
                    /* 8 bit read access */
                    result |= memory_region_dispatch_read(mr, addr1, &val, 1,
                                                          attrs);
                    stb_p(buf, val);
                    break;
                default:
                    abort();
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
                memcpy(buf, ptr, l);
            }
        }

        if (release_lock) {
            qemu_mutex_unlock_iothread();
            release_lock = false;
        }

        len -= l;
        buf += l;
        addr += l;
    }
    rcu_read_unlock();

    return result;
}

MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
                                const uint8_t *buf, int len)
{
    return address_space_rw(as, addr, attrs, (uint8_t *)buf, len, true);
}

MemTxResult address_space_read(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
                               uint8_t *buf, int len)
{
    return address_space_rw(as, addr, attrs, buf, len, false);
}

void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
                            int len, int is_write)
{
    address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
                     buf, len, is_write);
}
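
/*
 * Illustrative sketch (not part of this file): a device model that needs to
 * copy a guest-physical buffer would normally go through address_space_rw()
 * or the cpu_physical_memory_rw() convenience wrapper above.  The address and
 * buffer name below are invented for the example.
 *
 *     uint8_t desc[16];
 *
 *     cpu_physical_memory_rw(0x40000000, desc, sizeof(desc), 0);  // read
 *     ...
 *     cpu_physical_memory_rw(0x40000000, desc, sizeof(desc), 1);  // write back
 */
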
enum write_rom_type {
    WRITE_DATA,
    FLUSH_CACHE,
};

static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
    hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
{
    hwaddr l;
    uint8_t *ptr;
    hwaddr addr1;
    MemoryRegion *mr;

    rcu_read_lock();
    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);

        if (!(memory_region_is_ram(mr) ||
              memory_region_is_romd(mr))) {
            l = memory_access_size(mr, l, addr1);
        } else {
            addr1 += memory_region_get_ram_addr(mr);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            switch (type) {
            case WRITE_DATA:
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(mr, addr1, l);
                break;
            case FLUSH_CACHE:
                flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
                break;
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
    rcu_read_unlock();
}

/* Used for ROM loading: can write to both RAM and ROM. */
void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
                                   const uint8_t *buf, int len)
{
    cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
}

void cpu_flush_icache_range(hwaddr start, int len)
{
    /*
     * This function should do the same thing as an icache flush that was
     * triggered from within the guest. For TCG we are always cache coherent,
     * so there is no need to flush anything. For KVM / Xen we need to flush
     * the host's instruction cache at least.
     */
    if (tcg_enabled()) {
        return;
    }

    cpu_physical_memory_write_rom_internal(&address_space_memory,
                                           start, NULL, len, FLUSH_CACHE);
}

static BounceBuffer bounce;

typedef struct MapClient {
    QEMUBH *bh;
    QLIST_ENTRY(MapClient) link;
} MapClient;

QemuMutex map_client_list_lock;
static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

static void cpu_unregister_map_client_do(MapClient *client)
{
    QLIST_REMOVE(client, link);
    g_free(client);
}

static void cpu_notify_map_clients_locked(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        qemu_bh_schedule(client->bh);
        cpu_unregister_map_client_do(client);
    }
}

void cpu_register_map_client(QEMUBH *bh)
{
    MapClient *client = g_malloc(sizeof(*client));

    qemu_mutex_lock(&map_client_list_lock);
    client->bh = bh;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    if (!atomic_read(&bounce.in_use)) {
        cpu_notify_map_clients_locked();
    }
    qemu_mutex_unlock(&map_client_list_lock);
}
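
/*
 * Illustrative sketch (not part of this file): a device that fails to map a
 * DMA buffer (because the single bounce buffer is busy) can register a bottom
 * half and retry once it is kicked.  The callback, state type and field names
 * are made up for the example.
 *
 *     static void mydev_dma_retry(void *opaque)
 *     {
 *         MyDevState *s = opaque;          // hypothetical device state
 *         mydev_start_dma(s);              // re-attempt address_space_map()
 *     }
 *
 *     ...
 *     s->dma_retry_bh = qemu_bh_new(mydev_dma_retry, s);
 *     cpu_register_map_client(s->dma_retry_bh);
 */
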
void cpu_exec_init_all(void)
{
    qemu_mutex_init(&ram_list.mutex);
    io_mem_init();
    memory_map_init();
    qemu_mutex_init(&map_client_list_lock);
}

void cpu_unregister_map_client(QEMUBH *bh)
{
    MapClient *client;

    qemu_mutex_lock(&map_client_list_lock);
    QLIST_FOREACH(client, &map_client_list, link) {
        if (client->bh == bh) {
            cpu_unregister_map_client_do(client);
            break;
        }
    }
    qemu_mutex_unlock(&map_client_list_lock);
}

static void cpu_notify_map_clients(void)
{
    qemu_mutex_lock(&map_client_list_lock);
    cpu_notify_map_clients_locked();
    qemu_mutex_unlock(&map_client_list_lock);
}

bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
{
    MemoryRegion *mr;
    hwaddr l, xlat;

    rcu_read_lock();
    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (!memory_access_is_direct(mr, is_write)) {
            l = memory_access_size(mr, l, addr);
            if (!memory_region_access_valid(mr, xlat, l, is_write)) {
                return false;
            }
        }

        len -= l;
        addr += l;
    }
    rcu_read_unlock();
    return true;
}

/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *address_space_map(AddressSpace *as,
                        hwaddr addr,
                        hwaddr *plen,
                        bool is_write)
{
    hwaddr len = *plen;
    hwaddr done = 0;
    hwaddr l, xlat, base;
    MemoryRegion *mr, *this_mr;
    ram_addr_t raddr;

    if (len == 0) {
        return NULL;
    }

    l = len;
    rcu_read_lock();
    mr = address_space_translate(as, addr, &xlat, &l, is_write);

    if (!memory_access_is_direct(mr, is_write)) {
        if (atomic_xchg(&bounce.in_use, true)) {
            rcu_read_unlock();
            return NULL;
        }
        /* Avoid unbounded allocations */
        l = MIN(l, TARGET_PAGE_SIZE);
        bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
        bounce.addr = addr;
        bounce.len = l;

        memory_region_ref(mr);
        bounce.mr = mr;
        if (!is_write) {
            address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
                               bounce.buffer, l);
        }

        rcu_read_unlock();
        *plen = l;
        return bounce.buffer;
    }

    base = xlat;
    raddr = memory_region_get_ram_addr(mr);

    for (;;) {
        len -= l;
        addr += l;
        done += l;
        if (len == 0) {
            break;
        }

        l = len;
        this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (this_mr != mr || xlat != base + done) {
            break;
        }
    }

    memory_region_ref(mr);
    rcu_read_unlock();
    *plen = done;
    return qemu_ram_ptr_length(raddr + base, plen);
}

/* Unmaps a memory region previously mapped by address_space_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         int is_write, hwaddr access_len)
{
    if (buffer != bounce.buffer) {
        MemoryRegion *mr;
        ram_addr_t addr1;

        mr = qemu_ram_addr_from_host(buffer, &addr1);
        assert(mr != NULL);
        if (is_write) {
            invalidate_and_set_dirty(mr, addr1, access_len);
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        memory_region_unref(mr);
        return;
    }
    if (is_write) {
        address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
                            bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    memory_region_unref(bounce.mr);
    atomic_mb_set(&bounce.in_use, false);
    cpu_notify_map_clients();
}

void *cpu_physical_memory_map(hwaddr addr,
                              hwaddr *plen,
                              int is_write)
{
    return address_space_map(&address_space_memory, addr, plen, is_write);
}

void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                               int is_write, hwaddr access_len)
{
    return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
}
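
/*
 * Illustrative sketch (not part of this file): the usual pattern for
 * zero-copy DMA is map -> access -> unmap, falling back (or registering a map
 * client) when the mapping comes back NULL or shorter than requested.  The
 * guest address and length are invented for the example.
 *
 *     hwaddr gpa = 0x80000000, got = 4096;
 *     void *host = address_space_map(&address_space_memory, gpa, &got, true);
 *
 *     if (host) {
 *         memset(host, 0, got);                       // touch the mapped bytes
 *         address_space_unmap(&address_space_memory, host, got, true, got);
 *     }
 */
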
/* warning: addr must be aligned */
static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
                                                  MemTxAttrs attrs,
                                                  MemTxResult *result,
                                                  enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, false);
    if (l < 4 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}

uint32_t address_space_ldl(AddressSpace *as, hwaddr addr,
                           MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_NATIVE_ENDIAN);
}

uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_LITTLE_ENDIAN);
}

uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_BIG_ENDIAN);
}

uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* warning: addr must be aligned */
static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
                                                  MemTxAttrs attrs,
                                                  MemTxResult *result,
                                                  enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 8;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 false);
    if (l < 8 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap64(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap64(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}

uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
                           MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_NATIVE_ENDIAN);
}

uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_LITTLE_ENDIAN);
}

uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_BIG_ENDIAN);
}

uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
                            MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t val;
    MemTxResult r;

    r = address_space_rw(as, addr, attrs, &val, 1, 0);
    if (result) {
        *result = r;
    }
    return val;
}

uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* warning: addr must be aligned */
static inline uint32_t address_space_lduw_internal(AddressSpace *as,
                                                   hwaddr addr,
                                                   MemTxAttrs attrs,
                                                   MemTxResult *result,
                                                   enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 false);
    if (l < 2 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}

uint32_t address_space_lduw(AddressSpace *as, hwaddr addr,
                            MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_NATIVE_ENDIAN);
}

uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
                               MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_LITTLE_ENDIAN);
}

uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
                               MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_BIG_ENDIAN);
}

uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* warning: addr must be aligned. The RAM page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
                                MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    uint8_t dirty_log_mask;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

        r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
    } else {
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        dirty_log_mask = memory_region_get_dirty_log_mask(mr);
        dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
        cpu_physical_memory_set_dirty_range(addr1, 4, dirty_log_mask);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}

void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_notdirty(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* warning: addr must be aligned */
static inline void address_space_stl_internal(AddressSpace *as,
                                              hwaddr addr, uint32_t val,
                                              MemTxAttrs attrs,
                                              MemTxResult *result,
                                              enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(mr, addr1, 4);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}

void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stl_internal(as, addr, val, attrs, result,
                               DEVICE_NATIVE_ENDIAN);
}

void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stl_internal(as, addr, val, attrs, result,
                               DEVICE_LITTLE_ENDIAN);
}

void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stl_internal(as, addr, val, attrs, result,
                               DEVICE_BIG_ENDIAN);
}

void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
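
/*
 * Illustrative sketch (not part of this file): device or firmware-setup code
 * typically uses these fixed-width helpers instead of raw buffers when it
 * needs a single, endian-explicit guest-physical load or store.  The address
 * and value are invented for the example.
 *
 *     uint32_t old = ldl_le_phys(&address_space_memory, 0x1000);
 *     stl_le_phys(&address_space_memory, 0x1000, old | 1);
 */
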
void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t v = val;
    MemTxResult r;

    r = address_space_rw(as, addr, attrs, &v, 1, 1);
    if (result) {
        *result = r;
    }
}

void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* warning: addr must be aligned */
static inline void address_space_stw_internal(AddressSpace *as,
                                              hwaddr addr, uint32_t val,
                                              MemTxAttrs attrs,
                                              MemTxResult *result,
                                              enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, true);
    if (l < 2 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(mr, addr1, 2);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}

void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_NATIVE_ENDIAN);
}

void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_LITTLE_ENDIAN);
}

void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_BIG_ENDIAN);
}

void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;

    val = tswap64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;

    val = cpu_to_le64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;

    val = cpu_to_be64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(cpu, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write) {
            cpu_physical_memory_write_rom(cpu->as, phys_addr, buf, l);
        } else {
            address_space_rw(cpu->as, phys_addr, MEMTXATTRS_UNSPECIFIED,
                             buf, l, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
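
/*
 * Illustrative sketch (not part of this file): debugger-style code (gdbstub,
 * monitor commands) reads guest virtual memory through cpu_memory_rw_debug(),
 * which walks the guest page tables itself.  The CPU pointer and address are
 * placeholders for the example.
 *
 *     uint8_t insn[4];
 *
 *     if (cpu_memory_rw_debug(cpu, 0x80000000, insn, sizeof(insn), 0) < 0) {
 *         // address not mapped in the guest
 *     }
 */
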
/*
 * Allows code that needs to deal with migration bitmaps etc. to still be
 * built target-independent.
 */
size_t qemu_target_page_bits(void)
{
    return TARGET_PAGE_BITS;
}

#endif

/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool target_words_bigendian(void);
bool target_words_bigendian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}

#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegion *mr;
    hwaddr l = 1;
    bool res;

    rcu_read_lock();
    mr = address_space_translate(&address_space_memory,
                                 phys_addr, &phys_addr, &l, false);

    res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
    rcu_read_unlock();
    return res;
}

int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
{
    RAMBlock *block;
    int ret = 0;

    rcu_read_lock();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        ret = func(block->idstr, block->host, block->offset,
                   block->used_length, opaque);
        if (ret) {
            break;
        }
    }
    rcu_read_unlock();
    return ret;
}
#endif