4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 #include <sys/types.h>
25 #include "qemu-common.h"
29 #if !defined(CONFIG_USER_ONLY)
30 #include "hw/boards.h"
33 #include "qemu/osdep.h"
34 #include "sysemu/kvm.h"
35 #include "sysemu/sysemu.h"
36 #include "hw/xen/xen.h"
37 #include "qemu/timer.h"
38 #include "qemu/config-file.h"
39 #include "qemu/error-report.h"
40 #include "exec/memory.h"
41 #include "sysemu/dma.h"
42 #include "exec/address-spaces.h"
43 #if defined(CONFIG_USER_ONLY)
45 #else /* !CONFIG_USER_ONLY */
46 #include "sysemu/xen-mapcache.h"
49 #include "exec/cpu-all.h"
50 #include "qemu/rcu_queue.h"
51 #include "qemu/main-loop.h"
52 #include "translate-all.h"
53 #include "sysemu/replay.h"
55 #include "exec/memory-internal.h"
56 #include "exec/ram_addr.h"
58 #include "qemu/range.h"
60 #include "qemu/mmap-alloc.h"
63 //#define DEBUG_SUBPAGE
65 #if !defined(CONFIG_USER_ONLY)
66 /* ram_list is read under rcu_read_lock()/rcu_read_unlock(). Writes
67 * are protected by the ramlist lock.
69 RAMList ram_list
= { .blocks
= QLIST_HEAD_INITIALIZER(ram_list
.blocks
) };
71 static MemoryRegion
*system_memory
;
72 static MemoryRegion
*system_io
;
74 AddressSpace address_space_io
;
75 AddressSpace address_space_memory
;
77 MemoryRegion io_mem_rom
, io_mem_notdirty
;
78 static MemoryRegion io_mem_unassigned
;
80 /* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
81 #define RAM_PREALLOC (1 << 0)
83 /* RAM is mmap-ed with MAP_SHARED */
84 #define RAM_SHARED (1 << 1)
86 /* Only a portion of RAM (used_length) is actually used, and migrated.
87 * This used_length size can change across reboots.
89 #define RAM_RESIZEABLE (1 << 2)
93 struct CPUTailQ cpus
= QTAILQ_HEAD_INITIALIZER(cpus
);
94 /* current CPU in the current thread. It is only valid inside
96 __thread CPUState
*current_cpu
;
97 /* 0 = Do not count executed instructions.
98 1 = Precise instruction counting.
99 2 = Adaptive rate instruction counting. */
102 #if !defined(CONFIG_USER_ONLY)
104 typedef struct PhysPageEntry PhysPageEntry
;
106 struct PhysPageEntry
{
107 /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */
109 /* index into phys_sections (!skip) or phys_map_nodes (skip) */
113 #define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)
115 /* Size of the L2 (and L3, etc) page tables. */
116 #define ADDR_SPACE_BITS 64
119 #define P_L2_SIZE (1 << P_L2_BITS)
121 #define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
123 typedef PhysPageEntry Node
[P_L2_SIZE
];
125 typedef struct PhysPageMap
{
128 unsigned sections_nb
;
129 unsigned sections_nb_alloc
;
131 unsigned nodes_nb_alloc
;
133 MemoryRegionSection
*sections
;
136 struct AddressSpaceDispatch
{
139 /* This is a multi-level map on the physical address space.
140 * The bottom level has pointers to MemoryRegionSections.
142 PhysPageEntry phys_map
;
147 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
148 typedef struct subpage_t
{
152 uint16_t sub_section
[TARGET_PAGE_SIZE
];
155 #define PHYS_SECTION_UNASSIGNED 0
156 #define PHYS_SECTION_NOTDIRTY 1
157 #define PHYS_SECTION_ROM 2
158 #define PHYS_SECTION_WATCH 3
160 static void io_mem_init(void);
161 static void memory_map_init(void);
162 static void tcg_commit(MemoryListener
*listener
);
164 static MemoryRegion io_mem_watch
;
167 * CPUAddressSpace: all the information a CPU needs about an AddressSpace
168 * @cpu: the CPU whose AddressSpace this is
169 * @as: the AddressSpace itself
170 * @memory_dispatch: its dispatch pointer (cached, RCU protected)
171 * @tcg_as_listener: listener for tracking changes to the AddressSpace
173 struct CPUAddressSpace
{
176 struct AddressSpaceDispatch
*memory_dispatch
;
177 MemoryListener tcg_as_listener
;
182 #if !defined(CONFIG_USER_ONLY)
184 static void phys_map_node_reserve(PhysPageMap
*map
, unsigned nodes
)
186 if (map
->nodes_nb
+ nodes
> map
->nodes_nb_alloc
) {
187 map
->nodes_nb_alloc
= MAX(map
->nodes_nb_alloc
* 2, 16);
188 map
->nodes_nb_alloc
= MAX(map
->nodes_nb_alloc
, map
->nodes_nb
+ nodes
);
189 map
->nodes
= g_renew(Node
, map
->nodes
, map
->nodes_nb_alloc
);
193 static uint32_t phys_map_node_alloc(PhysPageMap
*map
, bool leaf
)
200 ret
= map
->nodes_nb
++;
202 assert(ret
!= PHYS_MAP_NODE_NIL
);
203 assert(ret
!= map
->nodes_nb_alloc
);
205 e
.skip
= leaf
? 0 : 1;
206 e
.ptr
= leaf
? PHYS_SECTION_UNASSIGNED
: PHYS_MAP_NODE_NIL
;
207 for (i
= 0; i
< P_L2_SIZE
; ++i
) {
208 memcpy(&p
[i
], &e
, sizeof(e
));
213 static void phys_page_set_level(PhysPageMap
*map
, PhysPageEntry
*lp
,
214 hwaddr
*index
, hwaddr
*nb
, uint16_t leaf
,
218 hwaddr step
= (hwaddr
)1 << (level
* P_L2_BITS
);
220 if (lp
->skip
&& lp
->ptr
== PHYS_MAP_NODE_NIL
) {
221 lp
->ptr
= phys_map_node_alloc(map
, level
== 0);
223 p
= map
->nodes
[lp
->ptr
];
224 lp
= &p
[(*index
>> (level
* P_L2_BITS
)) & (P_L2_SIZE
- 1)];
226 while (*nb
&& lp
< &p
[P_L2_SIZE
]) {
227 if ((*index
& (step
- 1)) == 0 && *nb
>= step
) {
233 phys_page_set_level(map
, lp
, index
, nb
, leaf
, level
- 1);
239 static void phys_page_set(AddressSpaceDispatch
*d
,
240 hwaddr index
, hwaddr nb
,
243 /* Wildly overreserve - it doesn't matter much. */
244 phys_map_node_reserve(&d
->map
, 3 * P_L2_LEVELS
);
246 phys_page_set_level(&d
->map
, &d
->phys_map
, &index
, &nb
, leaf
, P_L2_LEVELS
- 1);
249 /* Compact a non leaf page entry. Simply detect that the entry has a single child,
250 * and update our entry so we can skip it and go directly to the destination.
252 static void phys_page_compact(PhysPageEntry
*lp
, Node
*nodes
, unsigned long *compacted
)
254 unsigned valid_ptr
= P_L2_SIZE
;
259 if (lp
->ptr
== PHYS_MAP_NODE_NIL
) {
264 for (i
= 0; i
< P_L2_SIZE
; i
++) {
265 if (p
[i
].ptr
== PHYS_MAP_NODE_NIL
) {
272 phys_page_compact(&p
[i
], nodes
, compacted
);
276 /* We can only compress if there's only one child. */
281 assert(valid_ptr
< P_L2_SIZE
);
283 /* Don't compress if it won't fit in the # of bits we have. */
284 if (lp
->skip
+ p
[valid_ptr
].skip
>= (1 << 3)) {
288 lp
->ptr
= p
[valid_ptr
].ptr
;
289 if (!p
[valid_ptr
].skip
) {
290 /* If our only child is a leaf, make this a leaf. */
291 /* By design, we should have made this node a leaf to begin with so we
292 * should never reach here.
293 * But since it's so simple to handle this, let's do it just in case we
298 lp
->skip
+= p
[valid_ptr
].skip
;
302 static void phys_page_compact_all(AddressSpaceDispatch
*d
, int nodes_nb
)
304 DECLARE_BITMAP(compacted
, nodes_nb
);
306 if (d
->phys_map
.skip
) {
307 phys_page_compact(&d
->phys_map
, d
->map
.nodes
, compacted
);
311 static MemoryRegionSection
*phys_page_find(PhysPageEntry lp
, hwaddr addr
,
312 Node
*nodes
, MemoryRegionSection
*sections
)
315 hwaddr index
= addr
>> TARGET_PAGE_BITS
;
318 for (i
= P_L2_LEVELS
; lp
.skip
&& (i
-= lp
.skip
) >= 0;) {
319 if (lp
.ptr
== PHYS_MAP_NODE_NIL
) {
320 return §ions
[PHYS_SECTION_UNASSIGNED
];
323 lp
= p
[(index
>> (i
* P_L2_BITS
)) & (P_L2_SIZE
- 1)];
326 if (sections
[lp
.ptr
].size
.hi
||
327 range_covers_byte(sections
[lp
.ptr
].offset_within_address_space
,
328 sections
[lp
.ptr
].size
.lo
, addr
)) {
329 return §ions
[lp
.ptr
];
331 return §ions
[PHYS_SECTION_UNASSIGNED
];
335 bool memory_region_is_unassigned(MemoryRegion
*mr
)
337 return mr
!= &io_mem_rom
&& mr
!= &io_mem_notdirty
&& !mr
->rom_device
338 && mr
!= &io_mem_watch
;
341 /* Called from RCU critical section */
342 static MemoryRegionSection
*address_space_lookup_region(AddressSpaceDispatch
*d
,
344 bool resolve_subpage
)
346 MemoryRegionSection
*section
;
349 section
= phys_page_find(d
->phys_map
, addr
, d
->map
.nodes
, d
->map
.sections
);
350 if (resolve_subpage
&& section
->mr
->subpage
) {
351 subpage
= container_of(section
->mr
, subpage_t
, iomem
);
352 section
= &d
->map
.sections
[subpage
->sub_section
[SUBPAGE_IDX(addr
)]];
357 /* Called from RCU critical section */
358 static MemoryRegionSection
*
359 address_space_translate_internal(AddressSpaceDispatch
*d
, hwaddr addr
, hwaddr
*xlat
,
360 hwaddr
*plen
, bool resolve_subpage
)
362 MemoryRegionSection
*section
;
366 section
= address_space_lookup_region(d
, addr
, resolve_subpage
);
367 /* Compute offset within MemoryRegionSection */
368 addr
-= section
->offset_within_address_space
;
370 /* Compute offset within MemoryRegion */
371 *xlat
= addr
+ section
->offset_within_region
;
375 /* MMIO registers can be expected to perform full-width accesses based only
376 * on their address, without considering adjacent registers that could
377 * decode to completely different MemoryRegions. When such registers
378 * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
379 * regions overlap wildly. For this reason we cannot clamp the accesses
382 * If the length is small (as is the case for address_space_ldl/stl),
383 * everything works fine. If the incoming length is large, however,
384 * the caller really has to do the clamping through memory_access_size.
386 if (memory_region_is_ram(mr
)) {
387 diff
= int128_sub(section
->size
, int128_make64(addr
));
388 *plen
= int128_get64(int128_min(diff
, int128_make64(*plen
)));
393 static inline bool memory_access_is_direct(MemoryRegion
*mr
, bool is_write
)
395 if (memory_region_is_ram(mr
)) {
396 return !(is_write
&& mr
->readonly
);
398 if (memory_region_is_romd(mr
)) {
405 /* Called from RCU critical section */
406 MemoryRegion
*address_space_translate(AddressSpace
*as
, hwaddr addr
,
407 hwaddr
*xlat
, hwaddr
*plen
,
411 MemoryRegionSection
*section
;
415 AddressSpaceDispatch
*d
= atomic_rcu_read(&as
->dispatch
);
416 section
= address_space_translate_internal(d
, addr
, &addr
, plen
, true);
419 if (!mr
->iommu_ops
) {
423 iotlb
= mr
->iommu_ops
->translate(mr
, addr
, is_write
);
424 addr
= ((iotlb
.translated_addr
& ~iotlb
.addr_mask
)
425 | (addr
& iotlb
.addr_mask
));
426 *plen
= MIN(*plen
, (addr
| iotlb
.addr_mask
) - addr
+ 1);
427 if (!(iotlb
.perm
& (1 << is_write
))) {
428 mr
= &io_mem_unassigned
;
432 as
= iotlb
.target_as
;
435 if (xen_enabled() && memory_access_is_direct(mr
, is_write
)) {
436 hwaddr page
= ((addr
& TARGET_PAGE_MASK
) + TARGET_PAGE_SIZE
) - addr
;
437 *plen
= MIN(page
, *plen
);
444 /* Called from RCU critical section */
445 MemoryRegionSection
*
446 address_space_translate_for_iotlb(CPUState
*cpu
, hwaddr addr
,
447 hwaddr
*xlat
, hwaddr
*plen
)
449 MemoryRegionSection
*section
;
450 section
= address_space_translate_internal(cpu
->cpu_ases
[0].memory_dispatch
,
451 addr
, xlat
, plen
, false);
453 assert(!section
->mr
->iommu_ops
);
458 #if !defined(CONFIG_USER_ONLY)
460 static int cpu_common_post_load(void *opaque
, int version_id
)
462 CPUState
*cpu
= opaque
;
464 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
465 version_id is increased. */
466 cpu
->interrupt_request
&= ~0x01;
472 static int cpu_common_pre_load(void *opaque
)
474 CPUState
*cpu
= opaque
;
476 cpu
->exception_index
= -1;
481 static bool cpu_common_exception_index_needed(void *opaque
)
483 CPUState
*cpu
= opaque
;
485 return tcg_enabled() && cpu
->exception_index
!= -1;
488 static const VMStateDescription vmstate_cpu_common_exception_index
= {
489 .name
= "cpu_common/exception_index",
491 .minimum_version_id
= 1,
492 .needed
= cpu_common_exception_index_needed
,
493 .fields
= (VMStateField
[]) {
494 VMSTATE_INT32(exception_index
, CPUState
),
495 VMSTATE_END_OF_LIST()
499 static bool cpu_common_crash_occurred_needed(void *opaque
)
501 CPUState
*cpu
= opaque
;
503 return cpu
->crash_occurred
;
506 static const VMStateDescription vmstate_cpu_common_crash_occurred
= {
507 .name
= "cpu_common/crash_occurred",
509 .minimum_version_id
= 1,
510 .needed
= cpu_common_crash_occurred_needed
,
511 .fields
= (VMStateField
[]) {
512 VMSTATE_BOOL(crash_occurred
, CPUState
),
513 VMSTATE_END_OF_LIST()
517 const VMStateDescription vmstate_cpu_common
= {
518 .name
= "cpu_common",
520 .minimum_version_id
= 1,
521 .pre_load
= cpu_common_pre_load
,
522 .post_load
= cpu_common_post_load
,
523 .fields
= (VMStateField
[]) {
524 VMSTATE_UINT32(halted
, CPUState
),
525 VMSTATE_UINT32(interrupt_request
, CPUState
),
526 VMSTATE_END_OF_LIST()
528 .subsections
= (const VMStateDescription
*[]) {
529 &vmstate_cpu_common_exception_index
,
530 &vmstate_cpu_common_crash_occurred
,
537 CPUState
*qemu_get_cpu(int index
)
542 if (cpu
->cpu_index
== index
) {
550 #if !defined(CONFIG_USER_ONLY)
551 void tcg_cpu_address_space_init(CPUState
*cpu
, AddressSpace
*as
)
553 /* We only support one address space per cpu at the moment. */
554 assert(cpu
->as
== as
);
557 /* We've already registered the listener for our only AS */
561 cpu
->cpu_ases
= g_new0(CPUAddressSpace
, 1);
562 cpu
->cpu_ases
[0].cpu
= cpu
;
563 cpu
->cpu_ases
[0].as
= as
;
564 cpu
->cpu_ases
[0].tcg_as_listener
.commit
= tcg_commit
;
565 memory_listener_register(&cpu
->cpu_ases
[0].tcg_as_listener
, as
);
569 #ifndef CONFIG_USER_ONLY
570 static DECLARE_BITMAP(cpu_index_map
, MAX_CPUMASK_BITS
);
572 static int cpu_get_free_index(Error
**errp
)
574 int cpu
= find_first_zero_bit(cpu_index_map
, MAX_CPUMASK_BITS
);
576 if (cpu
>= MAX_CPUMASK_BITS
) {
577 error_setg(errp
, "Trying to use more CPUs than max of %d",
582 bitmap_set(cpu_index_map
, cpu
, 1);
586 void cpu_exec_exit(CPUState
*cpu
)
588 if (cpu
->cpu_index
== -1) {
589 /* cpu_index was never allocated by this @cpu or was already freed. */
593 bitmap_clear(cpu_index_map
, cpu
->cpu_index
, 1);
598 static int cpu_get_free_index(Error
**errp
)
603 CPU_FOREACH(some_cpu
) {
609 void cpu_exec_exit(CPUState
*cpu
)
614 void cpu_exec_init(CPUState
*cpu
, Error
**errp
)
616 CPUClass
*cc
= CPU_GET_CLASS(cpu
);
618 Error
*local_err
= NULL
;
620 #ifndef CONFIG_USER_ONLY
621 cpu
->as
= &address_space_memory
;
622 cpu
->thread_id
= qemu_get_thread_id();
625 #if defined(CONFIG_USER_ONLY)
628 cpu_index
= cpu
->cpu_index
= cpu_get_free_index(&local_err
);
630 error_propagate(errp
, local_err
);
631 #if defined(CONFIG_USER_ONLY)
636 QTAILQ_INSERT_TAIL(&cpus
, cpu
, node
);
637 #if defined(CONFIG_USER_ONLY)
640 if (qdev_get_vmsd(DEVICE(cpu
)) == NULL
) {
641 vmstate_register(NULL
, cpu_index
, &vmstate_cpu_common
, cpu
);
643 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
644 register_savevm(NULL
, "cpu", cpu_index
, CPU_SAVE_VERSION
,
645 cpu_save
, cpu_load
, cpu
->env_ptr
);
646 assert(cc
->vmsd
== NULL
);
647 assert(qdev_get_vmsd(DEVICE(cpu
)) == NULL
);
649 if (cc
->vmsd
!= NULL
) {
650 vmstate_register(NULL
, cpu_index
, cc
->vmsd
, cpu
);
654 #if defined(CONFIG_USER_ONLY)
655 static void breakpoint_invalidate(CPUState
*cpu
, target_ulong pc
)
657 tb_invalidate_phys_page_range(pc
, pc
+ 1, 0);
660 static void breakpoint_invalidate(CPUState
*cpu
, target_ulong pc
)
662 hwaddr phys
= cpu_get_phys_page_debug(cpu
, pc
);
664 tb_invalidate_phys_addr(cpu
->as
,
665 phys
| (pc
& ~TARGET_PAGE_MASK
));
670 #if defined(CONFIG_USER_ONLY)
671 void cpu_watchpoint_remove_all(CPUState
*cpu
, int mask
)
676 int cpu_watchpoint_remove(CPUState
*cpu
, vaddr addr
, vaddr len
,
682 void cpu_watchpoint_remove_by_ref(CPUState
*cpu
, CPUWatchpoint
*watchpoint
)
686 int cpu_watchpoint_insert(CPUState
*cpu
, vaddr addr
, vaddr len
,
687 int flags
, CPUWatchpoint
**watchpoint
)
692 /* Add a watchpoint. */
693 int cpu_watchpoint_insert(CPUState
*cpu
, vaddr addr
, vaddr len
,
694 int flags
, CPUWatchpoint
**watchpoint
)
698 /* forbid ranges which are empty or run off the end of the address space */
699 if (len
== 0 || (addr
+ len
- 1) < addr
) {
700 error_report("tried to set invalid watchpoint at %"
701 VADDR_PRIx
", len=%" VADDR_PRIu
, addr
, len
);
704 wp
= g_malloc(sizeof(*wp
));
710 /* keep all GDB-injected watchpoints in front */
711 if (flags
& BP_GDB
) {
712 QTAILQ_INSERT_HEAD(&cpu
->watchpoints
, wp
, entry
);
714 QTAILQ_INSERT_TAIL(&cpu
->watchpoints
, wp
, entry
);
717 tlb_flush_page(cpu
, addr
);
724 /* Remove a specific watchpoint. */
725 int cpu_watchpoint_remove(CPUState
*cpu
, vaddr addr
, vaddr len
,
730 QTAILQ_FOREACH(wp
, &cpu
->watchpoints
, entry
) {
731 if (addr
== wp
->vaddr
&& len
== wp
->len
732 && flags
== (wp
->flags
& ~BP_WATCHPOINT_HIT
)) {
733 cpu_watchpoint_remove_by_ref(cpu
, wp
);
740 /* Remove a specific watchpoint by reference. */
741 void cpu_watchpoint_remove_by_ref(CPUState
*cpu
, CPUWatchpoint
*watchpoint
)
743 QTAILQ_REMOVE(&cpu
->watchpoints
, watchpoint
, entry
);
745 tlb_flush_page(cpu
, watchpoint
->vaddr
);
750 /* Remove all matching watchpoints. */
751 void cpu_watchpoint_remove_all(CPUState
*cpu
, int mask
)
753 CPUWatchpoint
*wp
, *next
;
755 QTAILQ_FOREACH_SAFE(wp
, &cpu
->watchpoints
, entry
, next
) {
756 if (wp
->flags
& mask
) {
757 cpu_watchpoint_remove_by_ref(cpu
, wp
);
762 /* Return true if this watchpoint address matches the specified
763 * access (ie the address range covered by the watchpoint overlaps
764 * partially or completely with the address range covered by the
767 static inline bool cpu_watchpoint_address_matches(CPUWatchpoint
*wp
,
771 /* We know the lengths are non-zero, but a little caution is
772 * required to avoid errors in the case where the range ends
773 * exactly at the top of the address space and so addr + len
774 * wraps round to zero.
776 vaddr wpend
= wp
->vaddr
+ wp
->len
- 1;
777 vaddr addrend
= addr
+ len
- 1;
779 return !(addr
> wpend
|| wp
->vaddr
> addrend
);
784 /* Add a breakpoint. */
785 int cpu_breakpoint_insert(CPUState
*cpu
, vaddr pc
, int flags
,
786 CPUBreakpoint
**breakpoint
)
790 bp
= g_malloc(sizeof(*bp
));
795 /* keep all GDB-injected breakpoints in front */
796 if (flags
& BP_GDB
) {
797 QTAILQ_INSERT_HEAD(&cpu
->breakpoints
, bp
, entry
);
799 QTAILQ_INSERT_TAIL(&cpu
->breakpoints
, bp
, entry
);
802 breakpoint_invalidate(cpu
, pc
);
810 /* Remove a specific breakpoint. */
811 int cpu_breakpoint_remove(CPUState
*cpu
, vaddr pc
, int flags
)
815 QTAILQ_FOREACH(bp
, &cpu
->breakpoints
, entry
) {
816 if (bp
->pc
== pc
&& bp
->flags
== flags
) {
817 cpu_breakpoint_remove_by_ref(cpu
, bp
);
824 /* Remove a specific breakpoint by reference. */
825 void cpu_breakpoint_remove_by_ref(CPUState
*cpu
, CPUBreakpoint
*breakpoint
)
827 QTAILQ_REMOVE(&cpu
->breakpoints
, breakpoint
, entry
);
829 breakpoint_invalidate(cpu
, breakpoint
->pc
);
834 /* Remove all matching breakpoints. */
835 void cpu_breakpoint_remove_all(CPUState
*cpu
, int mask
)
837 CPUBreakpoint
*bp
, *next
;
839 QTAILQ_FOREACH_SAFE(bp
, &cpu
->breakpoints
, entry
, next
) {
840 if (bp
->flags
& mask
) {
841 cpu_breakpoint_remove_by_ref(cpu
, bp
);
846 /* enable or disable single step mode. EXCP_DEBUG is returned by the
847 CPU loop after each instruction */
848 void cpu_single_step(CPUState
*cpu
, int enabled
)
850 if (cpu
->singlestep_enabled
!= enabled
) {
851 cpu
->singlestep_enabled
= enabled
;
853 kvm_update_guest_debug(cpu
, 0);
855 /* must flush all the translated code to avoid inconsistencies */
856 /* XXX: only flush what is necessary */
862 void cpu_abort(CPUState
*cpu
, const char *fmt
, ...)
869 fprintf(stderr
, "qemu: fatal: ");
870 vfprintf(stderr
, fmt
, ap
);
871 fprintf(stderr
, "\n");
872 cpu_dump_state(cpu
, stderr
, fprintf
, CPU_DUMP_FPU
| CPU_DUMP_CCOP
);
873 if (qemu_log_separate()) {
874 qemu_log("qemu: fatal: ");
875 qemu_log_vprintf(fmt
, ap2
);
877 log_cpu_state(cpu
, CPU_DUMP_FPU
| CPU_DUMP_CCOP
);
884 #if defined(CONFIG_USER_ONLY)
886 struct sigaction act
;
887 sigfillset(&act
.sa_mask
);
888 act
.sa_handler
= SIG_DFL
;
889 sigaction(SIGABRT
, &act
, NULL
);
895 #if !defined(CONFIG_USER_ONLY)
896 /* Called from RCU critical section */
897 static RAMBlock
*qemu_get_ram_block(ram_addr_t addr
)
901 block
= atomic_rcu_read(&ram_list
.mru_block
);
902 if (block
&& addr
- block
->offset
< block
->max_length
) {
905 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
906 if (addr
- block
->offset
< block
->max_length
) {
911 fprintf(stderr
, "Bad ram offset %" PRIx64
"\n", (uint64_t)addr
);
915 /* It is safe to write mru_block outside the iothread lock. This
920 * xxx removed from list
924 * call_rcu(reclaim_ramblock, xxx);
927 * atomic_rcu_set is not needed here. The block was already published
928 * when it was placed into the list. Here we're just making an extra
929 * copy of the pointer.
931 ram_list
.mru_block
= block
;
935 static void tlb_reset_dirty_range_all(ram_addr_t start
, ram_addr_t length
)
942 end
= TARGET_PAGE_ALIGN(start
+ length
);
943 start
&= TARGET_PAGE_MASK
;
946 block
= qemu_get_ram_block(start
);
947 assert(block
== qemu_get_ram_block(end
- 1));
948 start1
= (uintptr_t)ramblock_ptr(block
, start
- block
->offset
);
950 tlb_reset_dirty(cpu
, start1
, length
);
955 /* Note: start and end must be within the same ram block. */
956 bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start
,
960 unsigned long end
, page
;
967 end
= TARGET_PAGE_ALIGN(start
+ length
) >> TARGET_PAGE_BITS
;
968 page
= start
>> TARGET_PAGE_BITS
;
969 dirty
= bitmap_test_and_clear_atomic(ram_list
.dirty_memory
[client
],
972 if (dirty
&& tcg_enabled()) {
973 tlb_reset_dirty_range_all(start
, length
);
979 /* Called from RCU critical section */
980 hwaddr
memory_region_section_get_iotlb(CPUState
*cpu
,
981 MemoryRegionSection
*section
,
983 hwaddr paddr
, hwaddr xlat
,
985 target_ulong
*address
)
990 if (memory_region_is_ram(section
->mr
)) {
992 iotlb
= (memory_region_get_ram_addr(section
->mr
) & TARGET_PAGE_MASK
)
994 if (!section
->readonly
) {
995 iotlb
|= PHYS_SECTION_NOTDIRTY
;
997 iotlb
|= PHYS_SECTION_ROM
;
1000 AddressSpaceDispatch
*d
;
1002 d
= atomic_rcu_read(§ion
->address_space
->dispatch
);
1003 iotlb
= section
- d
->map
.sections
;
1007 /* Make accesses to pages with watchpoints go via the
1008 watchpoint trap routines. */
1009 QTAILQ_FOREACH(wp
, &cpu
->watchpoints
, entry
) {
1010 if (cpu_watchpoint_address_matches(wp
, vaddr
, TARGET_PAGE_SIZE
)) {
1011 /* Avoid trapping reads of pages with a write breakpoint. */
1012 if ((prot
& PAGE_WRITE
) || (wp
->flags
& BP_MEM_READ
)) {
1013 iotlb
= PHYS_SECTION_WATCH
+ paddr
;
1014 *address
|= TLB_MMIO
;
1022 #endif /* defined(CONFIG_USER_ONLY) */
1024 #if !defined(CONFIG_USER_ONLY)
1026 static int subpage_register (subpage_t
*mmio
, uint32_t start
, uint32_t end
,
1028 static subpage_t
*subpage_init(AddressSpace
*as
, hwaddr base
);
1030 static void *(*phys_mem_alloc
)(size_t size
, uint64_t *align
) =
1031 qemu_anon_ram_alloc
;
1034 * Set a custom physical guest memory allocator.
1035 * Accelerators with unusual needs may need this. Hopefully, we can
1036 * get rid of it eventually.
1038 void phys_mem_set_alloc(void *(*alloc
)(size_t, uint64_t *align
))
1040 phys_mem_alloc
= alloc
;
1043 static uint16_t phys_section_add(PhysPageMap
*map
,
1044 MemoryRegionSection
*section
)
1046 /* The physical section number is ORed with a page-aligned
1047 * pointer to produce the iotlb entries. Thus it should
1048 * never overflow into the page-aligned value.
1050 assert(map
->sections_nb
< TARGET_PAGE_SIZE
);
1052 if (map
->sections_nb
== map
->sections_nb_alloc
) {
1053 map
->sections_nb_alloc
= MAX(map
->sections_nb_alloc
* 2, 16);
1054 map
->sections
= g_renew(MemoryRegionSection
, map
->sections
,
1055 map
->sections_nb_alloc
);
1057 map
->sections
[map
->sections_nb
] = *section
;
1058 memory_region_ref(section
->mr
);
1059 return map
->sections_nb
++;
1062 static void phys_section_destroy(MemoryRegion
*mr
)
1064 bool have_sub_page
= mr
->subpage
;
1066 memory_region_unref(mr
);
1068 if (have_sub_page
) {
1069 subpage_t
*subpage
= container_of(mr
, subpage_t
, iomem
);
1070 object_unref(OBJECT(&subpage
->iomem
));
1075 static void phys_sections_free(PhysPageMap
*map
)
1077 while (map
->sections_nb
> 0) {
1078 MemoryRegionSection
*section
= &map
->sections
[--map
->sections_nb
];
1079 phys_section_destroy(section
->mr
);
1081 g_free(map
->sections
);
1085 static void register_subpage(AddressSpaceDispatch
*d
, MemoryRegionSection
*section
)
1088 hwaddr base
= section
->offset_within_address_space
1090 MemoryRegionSection
*existing
= phys_page_find(d
->phys_map
, base
,
1091 d
->map
.nodes
, d
->map
.sections
);
1092 MemoryRegionSection subsection
= {
1093 .offset_within_address_space
= base
,
1094 .size
= int128_make64(TARGET_PAGE_SIZE
),
1098 assert(existing
->mr
->subpage
|| existing
->mr
== &io_mem_unassigned
);
1100 if (!(existing
->mr
->subpage
)) {
1101 subpage
= subpage_init(d
->as
, base
);
1102 subsection
.address_space
= d
->as
;
1103 subsection
.mr
= &subpage
->iomem
;
1104 phys_page_set(d
, base
>> TARGET_PAGE_BITS
, 1,
1105 phys_section_add(&d
->map
, &subsection
));
1107 subpage
= container_of(existing
->mr
, subpage_t
, iomem
);
1109 start
= section
->offset_within_address_space
& ~TARGET_PAGE_MASK
;
1110 end
= start
+ int128_get64(section
->size
) - 1;
1111 subpage_register(subpage
, start
, end
,
1112 phys_section_add(&d
->map
, section
));
1116 static void register_multipage(AddressSpaceDispatch
*d
,
1117 MemoryRegionSection
*section
)
1119 hwaddr start_addr
= section
->offset_within_address_space
;
1120 uint16_t section_index
= phys_section_add(&d
->map
, section
);
1121 uint64_t num_pages
= int128_get64(int128_rshift(section
->size
,
1125 phys_page_set(d
, start_addr
>> TARGET_PAGE_BITS
, num_pages
, section_index
);
1128 static void mem_add(MemoryListener
*listener
, MemoryRegionSection
*section
)
1130 AddressSpace
*as
= container_of(listener
, AddressSpace
, dispatch_listener
);
1131 AddressSpaceDispatch
*d
= as
->next_dispatch
;
1132 MemoryRegionSection now
= *section
, remain
= *section
;
1133 Int128 page_size
= int128_make64(TARGET_PAGE_SIZE
);
1135 if (now
.offset_within_address_space
& ~TARGET_PAGE_MASK
) {
1136 uint64_t left
= TARGET_PAGE_ALIGN(now
.offset_within_address_space
)
1137 - now
.offset_within_address_space
;
1139 now
.size
= int128_min(int128_make64(left
), now
.size
);
1140 register_subpage(d
, &now
);
1142 now
.size
= int128_zero();
1144 while (int128_ne(remain
.size
, now
.size
)) {
1145 remain
.size
= int128_sub(remain
.size
, now
.size
);
1146 remain
.offset_within_address_space
+= int128_get64(now
.size
);
1147 remain
.offset_within_region
+= int128_get64(now
.size
);
1149 if (int128_lt(remain
.size
, page_size
)) {
1150 register_subpage(d
, &now
);
1151 } else if (remain
.offset_within_address_space
& ~TARGET_PAGE_MASK
) {
1152 now
.size
= page_size
;
1153 register_subpage(d
, &now
);
1155 now
.size
= int128_and(now
.size
, int128_neg(page_size
));
1156 register_multipage(d
, &now
);
1161 void qemu_flush_coalesced_mmio_buffer(void)
1164 kvm_flush_coalesced_mmio_buffer();
1167 void qemu_mutex_lock_ramlist(void)
1169 qemu_mutex_lock(&ram_list
.mutex
);
1172 void qemu_mutex_unlock_ramlist(void)
1174 qemu_mutex_unlock(&ram_list
.mutex
);
1179 #include <sys/vfs.h>
1181 #define HUGETLBFS_MAGIC 0x958458f6
1183 static long gethugepagesize(const char *path
, Error
**errp
)
1189 ret
= statfs(path
, &fs
);
1190 } while (ret
!= 0 && errno
== EINTR
);
1193 error_setg_errno(errp
, errno
, "failed to get page size of file %s",
1201 static void *file_ram_alloc(RAMBlock
*block
,
1208 char *sanitized_name
;
1213 Error
*local_err
= NULL
;
1215 hpagesize
= gethugepagesize(path
, &local_err
);
1217 error_propagate(errp
, local_err
);
1220 block
->mr
->align
= hpagesize
;
1222 if (memory
< hpagesize
) {
1223 error_setg(errp
, "memory size 0x" RAM_ADDR_FMT
" must be equal to "
1224 "or larger than huge page size 0x%" PRIx64
,
1229 if (kvm_enabled() && !kvm_has_sync_mmu()) {
1231 "host lacks kvm mmu notifiers, -mem-path unsupported");
1235 if (!stat(path
, &st
) && S_ISDIR(st
.st_mode
)) {
1236 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
1237 sanitized_name
= g_strdup(memory_region_name(block
->mr
));
1238 for (c
= sanitized_name
; *c
!= '\0'; c
++) {
1244 filename
= g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path
,
1246 g_free(sanitized_name
);
1248 fd
= mkstemp(filename
);
1254 fd
= open(path
, O_RDWR
| O_CREAT
, 0644);
1258 error_setg_errno(errp
, errno
,
1259 "unable to create backing store for hugepages");
1263 memory
= ROUND_UP(memory
, hpagesize
);
1266 * ftruncate is not supported by hugetlbfs in older
1267 * hosts, so don't bother bailing out on errors.
1268 * If anything goes wrong with it under other filesystems,
1271 if (ftruncate(fd
, memory
)) {
1272 perror("ftruncate");
1275 area
= qemu_ram_mmap(fd
, memory
, hpagesize
, block
->flags
& RAM_SHARED
);
1276 if (area
== MAP_FAILED
) {
1277 error_setg_errno(errp
, errno
,
1278 "unable to map backing store for hugepages");
1284 os_mem_prealloc(fd
, area
, memory
);
1295 /* Called with the ramlist lock held. */
1296 static ram_addr_t
find_ram_offset(ram_addr_t size
)
1298 RAMBlock
*block
, *next_block
;
1299 ram_addr_t offset
= RAM_ADDR_MAX
, mingap
= RAM_ADDR_MAX
;
1301 assert(size
!= 0); /* it would hand out same offset multiple times */
1303 if (QLIST_EMPTY_RCU(&ram_list
.blocks
)) {
1307 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1308 ram_addr_t end
, next
= RAM_ADDR_MAX
;
1310 end
= block
->offset
+ block
->max_length
;
1312 QLIST_FOREACH_RCU(next_block
, &ram_list
.blocks
, next
) {
1313 if (next_block
->offset
>= end
) {
1314 next
= MIN(next
, next_block
->offset
);
1317 if (next
- end
>= size
&& next
- end
< mingap
) {
1319 mingap
= next
- end
;
1323 if (offset
== RAM_ADDR_MAX
) {
1324 fprintf(stderr
, "Failed to find gap of requested size: %" PRIu64
"\n",
1332 ram_addr_t
last_ram_offset(void)
1335 ram_addr_t last
= 0;
1338 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1339 last
= MAX(last
, block
->offset
+ block
->max_length
);
1345 static void qemu_ram_setup_dump(void *addr
, ram_addr_t size
)
1349 /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
1350 if (!machine_dump_guest_core(current_machine
)) {
1351 ret
= qemu_madvise(addr
, size
, QEMU_MADV_DONTDUMP
);
1353 perror("qemu_madvise");
1354 fprintf(stderr
, "madvise doesn't support MADV_DONTDUMP, "
1355 "but dump_guest_core=off specified\n");
1360 /* Called within an RCU critical section, or while the ramlist lock
1363 static RAMBlock
*find_ram_block(ram_addr_t addr
)
1367 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1368 if (block
->offset
== addr
) {
1376 const char *qemu_ram_get_idstr(RAMBlock
*rb
)
1381 /* Called with iothread lock held. */
1382 void qemu_ram_set_idstr(ram_addr_t addr
, const char *name
, DeviceState
*dev
)
1384 RAMBlock
*new_block
, *block
;
1387 new_block
= find_ram_block(addr
);
1389 assert(!new_block
->idstr
[0]);
1392 char *id
= qdev_get_dev_path(dev
);
1394 snprintf(new_block
->idstr
, sizeof(new_block
->idstr
), "%s/", id
);
1398 pstrcat(new_block
->idstr
, sizeof(new_block
->idstr
), name
);
1400 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1401 if (block
!= new_block
&& !strcmp(block
->idstr
, new_block
->idstr
)) {
1402 fprintf(stderr
, "RAMBlock \"%s\" already registered, abort!\n",
1410 /* Called with iothread lock held. */
1411 void qemu_ram_unset_idstr(ram_addr_t addr
)
1415 /* FIXME: arch_init.c assumes that this is not called throughout
1416 * migration. Ignore the problem since hot-unplug during migration
1417 * does not work anyway.
1421 block
= find_ram_block(addr
);
1423 memset(block
->idstr
, 0, sizeof(block
->idstr
));
1428 static int memory_try_enable_merging(void *addr
, size_t len
)
1430 if (!machine_mem_merge(current_machine
)) {
1431 /* disabled by the user */
1435 return qemu_madvise(addr
, len
, QEMU_MADV_MERGEABLE
);
1438 /* Only legal before guest might have detected the memory size: e.g. on
1439 * incoming migration, or right after reset.
1441 * As memory core doesn't know how is memory accessed, it is up to
1442 * resize callback to update device state and/or add assertions to detect
1443 * misuse, if necessary.
1445 int qemu_ram_resize(ram_addr_t base
, ram_addr_t newsize
, Error
**errp
)
1447 RAMBlock
*block
= find_ram_block(base
);
1451 newsize
= HOST_PAGE_ALIGN(newsize
);
1453 if (block
->used_length
== newsize
) {
1457 if (!(block
->flags
& RAM_RESIZEABLE
)) {
1458 error_setg_errno(errp
, EINVAL
,
1459 "Length mismatch: %s: 0x" RAM_ADDR_FMT
1460 " in != 0x" RAM_ADDR_FMT
, block
->idstr
,
1461 newsize
, block
->used_length
);
1465 if (block
->max_length
< newsize
) {
1466 error_setg_errno(errp
, EINVAL
,
1467 "Length too large: %s: 0x" RAM_ADDR_FMT
1468 " > 0x" RAM_ADDR_FMT
, block
->idstr
,
1469 newsize
, block
->max_length
);
1473 cpu_physical_memory_clear_dirty_range(block
->offset
, block
->used_length
);
1474 block
->used_length
= newsize
;
1475 cpu_physical_memory_set_dirty_range(block
->offset
, block
->used_length
,
1477 memory_region_set_size(block
->mr
, newsize
);
1478 if (block
->resized
) {
1479 block
->resized(block
->idstr
, newsize
, block
->host
);
1484 static ram_addr_t
ram_block_add(RAMBlock
*new_block
, Error
**errp
)
1487 RAMBlock
*last_block
= NULL
;
1488 ram_addr_t old_ram_size
, new_ram_size
;
1490 old_ram_size
= last_ram_offset() >> TARGET_PAGE_BITS
;
1492 qemu_mutex_lock_ramlist();
1493 new_block
->offset
= find_ram_offset(new_block
->max_length
);
1495 if (!new_block
->host
) {
1496 if (xen_enabled()) {
1497 xen_ram_alloc(new_block
->offset
, new_block
->max_length
,
1500 new_block
->host
= phys_mem_alloc(new_block
->max_length
,
1501 &new_block
->mr
->align
);
1502 if (!new_block
->host
) {
1503 error_setg_errno(errp
, errno
,
1504 "cannot set up guest memory '%s'",
1505 memory_region_name(new_block
->mr
));
1506 qemu_mutex_unlock_ramlist();
1509 memory_try_enable_merging(new_block
->host
, new_block
->max_length
);
1513 new_ram_size
= MAX(old_ram_size
,
1514 (new_block
->offset
+ new_block
->max_length
) >> TARGET_PAGE_BITS
);
1515 if (new_ram_size
> old_ram_size
) {
1516 migration_bitmap_extend(old_ram_size
, new_ram_size
);
1518 /* Keep the list sorted from biggest to smallest block. Unlike QTAILQ,
1519 * QLIST (which has an RCU-friendly variant) does not have insertion at
1520 * tail, so save the last element in last_block.
1522 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1524 if (block
->max_length
< new_block
->max_length
) {
1529 QLIST_INSERT_BEFORE_RCU(block
, new_block
, next
);
1530 } else if (last_block
) {
1531 QLIST_INSERT_AFTER_RCU(last_block
, new_block
, next
);
1532 } else { /* list is empty */
1533 QLIST_INSERT_HEAD_RCU(&ram_list
.blocks
, new_block
, next
);
1535 ram_list
.mru_block
= NULL
;
1537 /* Write list before version */
1540 qemu_mutex_unlock_ramlist();
1542 new_ram_size
= last_ram_offset() >> TARGET_PAGE_BITS
;
1544 if (new_ram_size
> old_ram_size
) {
1547 /* ram_list.dirty_memory[] is protected by the iothread lock. */
1548 for (i
= 0; i
< DIRTY_MEMORY_NUM
; i
++) {
1549 ram_list
.dirty_memory
[i
] =
1550 bitmap_zero_extend(ram_list
.dirty_memory
[i
],
1551 old_ram_size
, new_ram_size
);
1554 cpu_physical_memory_set_dirty_range(new_block
->offset
,
1555 new_block
->used_length
,
1558 if (new_block
->host
) {
1559 qemu_ram_setup_dump(new_block
->host
, new_block
->max_length
);
1560 qemu_madvise(new_block
->host
, new_block
->max_length
, QEMU_MADV_HUGEPAGE
);
1561 qemu_madvise(new_block
->host
, new_block
->max_length
, QEMU_MADV_DONTFORK
);
1562 if (kvm_enabled()) {
1563 kvm_setup_guest_memory(new_block
->host
, new_block
->max_length
);
1567 return new_block
->offset
;
1571 ram_addr_t
qemu_ram_alloc_from_file(ram_addr_t size
, MemoryRegion
*mr
,
1572 bool share
, const char *mem_path
,
1575 RAMBlock
*new_block
;
1577 Error
*local_err
= NULL
;
1579 if (xen_enabled()) {
1580 error_setg(errp
, "-mem-path not supported with Xen");
1584 if (phys_mem_alloc
!= qemu_anon_ram_alloc
) {
1586 * file_ram_alloc() needs to allocate just like
1587 * phys_mem_alloc, but we haven't bothered to provide
1591 "-mem-path not supported with this accelerator");
1595 size
= HOST_PAGE_ALIGN(size
);
1596 new_block
= g_malloc0(sizeof(*new_block
));
1598 new_block
->used_length
= size
;
1599 new_block
->max_length
= size
;
1600 new_block
->flags
= share
? RAM_SHARED
: 0;
1601 new_block
->host
= file_ram_alloc(new_block
, size
,
1603 if (!new_block
->host
) {
1608 addr
= ram_block_add(new_block
, &local_err
);
1611 error_propagate(errp
, local_err
);
1619 ram_addr_t
qemu_ram_alloc_internal(ram_addr_t size
, ram_addr_t max_size
,
1620 void (*resized
)(const char*,
1623 void *host
, bool resizeable
,
1624 MemoryRegion
*mr
, Error
**errp
)
1626 RAMBlock
*new_block
;
1628 Error
*local_err
= NULL
;
1630 size
= HOST_PAGE_ALIGN(size
);
1631 max_size
= HOST_PAGE_ALIGN(max_size
);
1632 new_block
= g_malloc0(sizeof(*new_block
));
1634 new_block
->resized
= resized
;
1635 new_block
->used_length
= size
;
1636 new_block
->max_length
= max_size
;
1637 assert(max_size
>= size
);
1639 new_block
->host
= host
;
1641 new_block
->flags
|= RAM_PREALLOC
;
1644 new_block
->flags
|= RAM_RESIZEABLE
;
1646 addr
= ram_block_add(new_block
, &local_err
);
1649 error_propagate(errp
, local_err
);
1655 ram_addr_t
qemu_ram_alloc_from_ptr(ram_addr_t size
, void *host
,
1656 MemoryRegion
*mr
, Error
**errp
)
1658 return qemu_ram_alloc_internal(size
, size
, NULL
, host
, false, mr
, errp
);
1661 ram_addr_t
qemu_ram_alloc(ram_addr_t size
, MemoryRegion
*mr
, Error
**errp
)
1663 return qemu_ram_alloc_internal(size
, size
, NULL
, NULL
, false, mr
, errp
);
1666 ram_addr_t
qemu_ram_alloc_resizeable(ram_addr_t size
, ram_addr_t maxsz
,
1667 void (*resized
)(const char*,
1670 MemoryRegion
*mr
, Error
**errp
)
1672 return qemu_ram_alloc_internal(size
, maxsz
, resized
, NULL
, true, mr
, errp
);
1675 static void reclaim_ramblock(RAMBlock
*block
)
1677 if (block
->flags
& RAM_PREALLOC
) {
1679 } else if (xen_enabled()) {
1680 xen_invalidate_map_cache_entry(block
->host
);
1682 } else if (block
->fd
>= 0) {
1683 qemu_ram_munmap(block
->host
, block
->max_length
);
1687 qemu_anon_ram_free(block
->host
, block
->max_length
);
1692 void qemu_ram_free(ram_addr_t addr
)
1696 qemu_mutex_lock_ramlist();
1697 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1698 if (addr
== block
->offset
) {
1699 QLIST_REMOVE_RCU(block
, next
);
1700 ram_list
.mru_block
= NULL
;
1701 /* Write list before version */
1704 call_rcu(block
, reclaim_ramblock
, rcu
);
1708 qemu_mutex_unlock_ramlist();
1712 void qemu_ram_remap(ram_addr_t addr
, ram_addr_t length
)
1719 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1720 offset
= addr
- block
->offset
;
1721 if (offset
< block
->max_length
) {
1722 vaddr
= ramblock_ptr(block
, offset
);
1723 if (block
->flags
& RAM_PREALLOC
) {
1725 } else if (xen_enabled()) {
1729 if (block
->fd
>= 0) {
1730 flags
|= (block
->flags
& RAM_SHARED
?
1731 MAP_SHARED
: MAP_PRIVATE
);
1732 area
= mmap(vaddr
, length
, PROT_READ
| PROT_WRITE
,
1733 flags
, block
->fd
, offset
);
1736 * Remap needs to match alloc. Accelerators that
1737 * set phys_mem_alloc never remap. If they did,
1738 * we'd need a remap hook here.
1740 assert(phys_mem_alloc
== qemu_anon_ram_alloc
);
1742 flags
|= MAP_PRIVATE
| MAP_ANONYMOUS
;
1743 area
= mmap(vaddr
, length
, PROT_READ
| PROT_WRITE
,
1746 if (area
!= vaddr
) {
1747 fprintf(stderr
, "Could not remap addr: "
1748 RAM_ADDR_FMT
"@" RAM_ADDR_FMT
"\n",
1752 memory_try_enable_merging(vaddr
, length
);
1753 qemu_ram_setup_dump(vaddr
, length
);
1758 #endif /* !_WIN32 */
1760 int qemu_get_ram_fd(ram_addr_t addr
)
1766 block
= qemu_get_ram_block(addr
);
1772 void *qemu_get_ram_block_host_ptr(ram_addr_t addr
)
1778 block
= qemu_get_ram_block(addr
);
1779 ptr
= ramblock_ptr(block
, 0);
1784 /* Return a host pointer to ram allocated with qemu_ram_alloc.
1785 * This should not be used for general purpose DMA. Use address_space_map
1786 * or address_space_rw instead. For local memory (e.g. video ram) that the
1787 * device owns, use memory_region_get_ram_ptr.
1789 * By the time this function returns, the returned pointer is not protected
1790 * by RCU anymore. If the caller is not within an RCU critical section and
1791 * does not hold the iothread lock, it must have other means of protecting the
1792 * pointer, such as a reference to the region that includes the incoming
1795 void *qemu_get_ram_ptr(ram_addr_t addr
)
1801 block
= qemu_get_ram_block(addr
);
1803 if (xen_enabled() && block
->host
== NULL
) {
1804 /* We need to check if the requested address is in the RAM
1805 * because we don't want to map the entire memory in QEMU.
1806 * In that case just map until the end of the page.
1808 if (block
->offset
== 0) {
1809 ptr
= xen_map_cache(addr
, 0, 0);
1813 block
->host
= xen_map_cache(block
->offset
, block
->max_length
, 1);
1815 ptr
= ramblock_ptr(block
, addr
- block
->offset
);
1822 /* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
1823 * but takes a size argument.
1825 * By the time this function returns, the returned pointer is not protected
1826 * by RCU anymore. If the caller is not within an RCU critical section and
1827 * does not hold the iothread lock, it must have other means of protecting the
1828 * pointer, such as a reference to the region that includes the incoming
1831 static void *qemu_ram_ptr_length(ram_addr_t addr
, hwaddr
*size
)
1837 if (xen_enabled()) {
1838 return xen_map_cache(addr
, *size
, 1);
1842 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1843 if (addr
- block
->offset
< block
->max_length
) {
1844 if (addr
- block
->offset
+ *size
> block
->max_length
)
1845 *size
= block
->max_length
- addr
+ block
->offset
;
1846 ptr
= ramblock_ptr(block
, addr
- block
->offset
);
1852 fprintf(stderr
, "Bad ram offset %" PRIx64
"\n", (uint64_t)addr
);
1858 * Translates a host ptr back to a RAMBlock, a ram_addr and an offset
1861 * ptr: Host pointer to look up
1862 * round_offset: If true round the result offset down to a page boundary
1863 * *ram_addr: set to result ram_addr
1864 * *offset: set to result offset within the RAMBlock
1866 * Returns: RAMBlock (or NULL if not found)
1868 * By the time this function returns, the returned pointer is not protected
1869 * by RCU anymore. If the caller is not within an RCU critical section and
1870 * does not hold the iothread lock, it must have other means of protecting the
1871 * pointer, such as a reference to the region that includes the incoming
1874 RAMBlock
*qemu_ram_block_from_host(void *ptr
, bool round_offset
,
1875 ram_addr_t
*ram_addr
,
1879 uint8_t *host
= ptr
;
1881 if (xen_enabled()) {
1883 *ram_addr
= xen_ram_addr_from_mapcache(ptr
);
1884 block
= qemu_get_ram_block(*ram_addr
);
1886 *offset
= (host
- block
->host
);
1893 block
= atomic_rcu_read(&ram_list
.mru_block
);
1894 if (block
&& block
->host
&& host
- block
->host
< block
->max_length
) {
1898 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1899 /* This case append when the block is not mapped. */
1900 if (block
->host
== NULL
) {
1903 if (host
- block
->host
< block
->max_length
) {
1912 *offset
= (host
- block
->host
);
1914 *offset
&= TARGET_PAGE_MASK
;
1916 *ram_addr
= block
->offset
+ *offset
;
1922 * Finds the named RAMBlock
1924 * name: The name of RAMBlock to find
1926 * Returns: RAMBlock (or NULL if not found)
1928 RAMBlock
*qemu_ram_block_by_name(const char *name
)
1932 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1933 if (!strcmp(name
, block
->idstr
)) {
1941 /* Some of the softmmu routines need to translate from a host pointer
1942 (typically a TLB entry) back to a ram offset. */
1943 MemoryRegion
*qemu_ram_addr_from_host(void *ptr
, ram_addr_t
*ram_addr
)
1946 ram_addr_t offset
; /* Not used */
1948 block
= qemu_ram_block_from_host(ptr
, false, ram_addr
, &offset
);
1957 static void notdirty_mem_write(void *opaque
, hwaddr ram_addr
,
1958 uint64_t val
, unsigned size
)
1960 if (!cpu_physical_memory_get_dirty_flag(ram_addr
, DIRTY_MEMORY_CODE
)) {
1961 tb_invalidate_phys_page_fast(ram_addr
, size
);
1965 stb_p(qemu_get_ram_ptr(ram_addr
), val
);
1968 stw_p(qemu_get_ram_ptr(ram_addr
), val
);
1971 stl_p(qemu_get_ram_ptr(ram_addr
), val
);
1976 /* Set both VGA and migration bits for simplicity and to remove
1977 * the notdirty callback faster.
1979 cpu_physical_memory_set_dirty_range(ram_addr
, size
,
1980 DIRTY_CLIENTS_NOCODE
);
1981 /* we remove the notdirty callback only if the code has been
1983 if (!cpu_physical_memory_is_clean(ram_addr
)) {
1984 tlb_set_dirty(current_cpu
, current_cpu
->mem_io_vaddr
);
1988 static bool notdirty_mem_accepts(void *opaque
, hwaddr addr
,
1989 unsigned size
, bool is_write
)
1994 static const MemoryRegionOps notdirty_mem_ops
= {
1995 .write
= notdirty_mem_write
,
1996 .valid
.accepts
= notdirty_mem_accepts
,
1997 .endianness
= DEVICE_NATIVE_ENDIAN
,
2000 /* Generate a debug exception if a watchpoint has been hit. */
2001 static void check_watchpoint(int offset
, int len
, MemTxAttrs attrs
, int flags
)
2003 CPUState
*cpu
= current_cpu
;
2004 CPUArchState
*env
= cpu
->env_ptr
;
2005 target_ulong pc
, cs_base
;
2010 if (cpu
->watchpoint_hit
) {
2011 /* We re-entered the check after replacing the TB. Now raise
2012 * the debug interrupt so that is will trigger after the
2013 * current instruction. */
2014 cpu_interrupt(cpu
, CPU_INTERRUPT_DEBUG
);
2017 vaddr
= (cpu
->mem_io_vaddr
& TARGET_PAGE_MASK
) + offset
;
2018 QTAILQ_FOREACH(wp
, &cpu
->watchpoints
, entry
) {
2019 if (cpu_watchpoint_address_matches(wp
, vaddr
, len
)
2020 && (wp
->flags
& flags
)) {
2021 if (flags
== BP_MEM_READ
) {
2022 wp
->flags
|= BP_WATCHPOINT_HIT_READ
;
2024 wp
->flags
|= BP_WATCHPOINT_HIT_WRITE
;
2026 wp
->hitaddr
= vaddr
;
2027 wp
->hitattrs
= attrs
;
2028 if (!cpu
->watchpoint_hit
) {
2029 cpu
->watchpoint_hit
= wp
;
2030 tb_check_watchpoint(cpu
);
2031 if (wp
->flags
& BP_STOP_BEFORE_ACCESS
) {
2032 cpu
->exception_index
= EXCP_DEBUG
;
2035 cpu_get_tb_cpu_state(env
, &pc
, &cs_base
, &cpu_flags
);
2036 tb_gen_code(cpu
, pc
, cs_base
, cpu_flags
, 1);
2037 cpu_resume_from_signal(cpu
, NULL
);
2041 wp
->flags
&= ~BP_WATCHPOINT_HIT
;
2046 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2047 so these check for a hit then pass through to the normal out-of-line
2049 static MemTxResult
watch_mem_read(void *opaque
, hwaddr addr
, uint64_t *pdata
,
2050 unsigned size
, MemTxAttrs attrs
)
2055 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, size
, attrs
, BP_MEM_READ
);
2058 data
= address_space_ldub(&address_space_memory
, addr
, attrs
, &res
);
2061 data
= address_space_lduw(&address_space_memory
, addr
, attrs
, &res
);
2064 data
= address_space_ldl(&address_space_memory
, addr
, attrs
, &res
);
2072 static MemTxResult
watch_mem_write(void *opaque
, hwaddr addr
,
2073 uint64_t val
, unsigned size
,
2078 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, size
, attrs
, BP_MEM_WRITE
);
2081 address_space_stb(&address_space_memory
, addr
, val
, attrs
, &res
);
2084 address_space_stw(&address_space_memory
, addr
, val
, attrs
, &res
);
2087 address_space_stl(&address_space_memory
, addr
, val
, attrs
, &res
);
2094 static const MemoryRegionOps watch_mem_ops
= {
2095 .read_with_attrs
= watch_mem_read
,
2096 .write_with_attrs
= watch_mem_write
,
2097 .endianness
= DEVICE_NATIVE_ENDIAN
,
2100 static MemTxResult
subpage_read(void *opaque
, hwaddr addr
, uint64_t *data
,
2101 unsigned len
, MemTxAttrs attrs
)
2103 subpage_t
*subpage
= opaque
;
2107 #if defined(DEBUG_SUBPAGE)
2108 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
"\n", __func__
,
2109 subpage
, len
, addr
);
2111 res
= address_space_read(subpage
->as
, addr
+ subpage
->base
,
2118 *data
= ldub_p(buf
);
2121 *data
= lduw_p(buf
);
2134 static MemTxResult
subpage_write(void *opaque
, hwaddr addr
,
2135 uint64_t value
, unsigned len
, MemTxAttrs attrs
)
2137 subpage_t
*subpage
= opaque
;
2140 #if defined(DEBUG_SUBPAGE)
2141 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
2142 " value %"PRIx64
"\n",
2143 __func__
, subpage
, len
, addr
, value
);
2161 return address_space_write(subpage
->as
, addr
+ subpage
->base
,
2165 static bool subpage_accepts(void *opaque
, hwaddr addr
,
2166 unsigned len
, bool is_write
)
2168 subpage_t
*subpage
= opaque
;
2169 #if defined(DEBUG_SUBPAGE)
2170 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx
"\n",
2171 __func__
, subpage
, is_write
? 'w' : 'r', len
, addr
);
2174 return address_space_access_valid(subpage
->as
, addr
+ subpage
->base
,
2178 static const MemoryRegionOps subpage_ops
= {
2179 .read_with_attrs
= subpage_read
,
2180 .write_with_attrs
= subpage_write
,
2181 .impl
.min_access_size
= 1,
2182 .impl
.max_access_size
= 8,
2183 .valid
.min_access_size
= 1,
2184 .valid
.max_access_size
= 8,
2185 .valid
.accepts
= subpage_accepts
,
2186 .endianness
= DEVICE_NATIVE_ENDIAN
,
2189 static int subpage_register (subpage_t
*mmio
, uint32_t start
, uint32_t end
,
2194 if (start
>= TARGET_PAGE_SIZE
|| end
>= TARGET_PAGE_SIZE
)
2196 idx
= SUBPAGE_IDX(start
);
2197 eidx
= SUBPAGE_IDX(end
);
2198 #if defined(DEBUG_SUBPAGE)
2199 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
2200 __func__
, mmio
, start
, end
, idx
, eidx
, section
);
2202 for (; idx
<= eidx
; idx
++) {
2203 mmio
->sub_section
[idx
] = section
;
2209 static subpage_t
*subpage_init(AddressSpace
*as
, hwaddr base
)
2213 mmio
= g_malloc0(sizeof(subpage_t
));
2217 memory_region_init_io(&mmio
->iomem
, NULL
, &subpage_ops
, mmio
,
2218 NULL
, TARGET_PAGE_SIZE
);
2219 mmio
->iomem
.subpage
= true;
2220 #if defined(DEBUG_SUBPAGE)
2221 printf("%s: %p base " TARGET_FMT_plx
" len %08x\n", __func__
,
2222 mmio
, base
, TARGET_PAGE_SIZE
);
2224 subpage_register(mmio
, 0, TARGET_PAGE_SIZE
-1, PHYS_SECTION_UNASSIGNED
);
2229 static uint16_t dummy_section(PhysPageMap
*map
, AddressSpace
*as
,
2233 MemoryRegionSection section
= {
2234 .address_space
= as
,
2236 .offset_within_address_space
= 0,
2237 .offset_within_region
= 0,
2238 .size
= int128_2_64(),
2241 return phys_section_add(map
, §ion
);
2244 MemoryRegion
*iotlb_to_region(CPUState
*cpu
, hwaddr index
)
2246 CPUAddressSpace
*cpuas
= &cpu
->cpu_ases
[0];
2247 AddressSpaceDispatch
*d
= atomic_rcu_read(&cpuas
->memory_dispatch
);
2248 MemoryRegionSection
*sections
= d
->map
.sections
;
2250 return sections
[index
& ~TARGET_PAGE_MASK
].mr
;
2253 static void io_mem_init(void)
2255 memory_region_init_io(&io_mem_rom
, NULL
, &unassigned_mem_ops
, NULL
, NULL
, UINT64_MAX
);
2256 memory_region_init_io(&io_mem_unassigned
, NULL
, &unassigned_mem_ops
, NULL
,
2258 memory_region_init_io(&io_mem_notdirty
, NULL
, ¬dirty_mem_ops
, NULL
,
2260 memory_region_init_io(&io_mem_watch
, NULL
, &watch_mem_ops
, NULL
,
2264 static void mem_begin(MemoryListener
*listener
)
2266 AddressSpace
*as
= container_of(listener
, AddressSpace
, dispatch_listener
);
2267 AddressSpaceDispatch
*d
= g_new0(AddressSpaceDispatch
, 1);
2270 n
= dummy_section(&d
->map
, as
, &io_mem_unassigned
);
2271 assert(n
== PHYS_SECTION_UNASSIGNED
);
2272 n
= dummy_section(&d
->map
, as
, &io_mem_notdirty
);
2273 assert(n
== PHYS_SECTION_NOTDIRTY
);
2274 n
= dummy_section(&d
->map
, as
, &io_mem_rom
);
2275 assert(n
== PHYS_SECTION_ROM
);
2276 n
= dummy_section(&d
->map
, as
, &io_mem_watch
);
2277 assert(n
== PHYS_SECTION_WATCH
);
2279 d
->phys_map
= (PhysPageEntry
) { .ptr
= PHYS_MAP_NODE_NIL
, .skip
= 1 };
2281 as
->next_dispatch
= d
;
2284 static void address_space_dispatch_free(AddressSpaceDispatch
*d
)
2286 phys_sections_free(&d
->map
);
2290 static void mem_commit(MemoryListener
*listener
)
2292 AddressSpace
*as
= container_of(listener
, AddressSpace
, dispatch_listener
);
2293 AddressSpaceDispatch
*cur
= as
->dispatch
;
2294 AddressSpaceDispatch
*next
= as
->next_dispatch
;
2296 phys_page_compact_all(next
, next
->map
.nodes_nb
);
2298 atomic_rcu_set(&as
->dispatch
, next
);
2300 call_rcu(cur
, address_space_dispatch_free
, rcu
);
2304 static void tcg_commit(MemoryListener
*listener
)
2306 CPUAddressSpace
*cpuas
;
2307 AddressSpaceDispatch
*d
;
2309 /* since each CPU stores ram addresses in its TLB cache, we must
2310 reset the modified entries */
2311 cpuas
= container_of(listener
, CPUAddressSpace
, tcg_as_listener
);
2312 cpu_reloading_memory_map();
2313 /* The CPU and TLB are protected by the iothread lock.
2314 * We reload the dispatch pointer now because cpu_reloading_memory_map()
2315 * may have split the RCU critical section.
2317 d
= atomic_rcu_read(&cpuas
->as
->dispatch
);
2318 cpuas
->memory_dispatch
= d
;
2319 tlb_flush(cpuas
->cpu
, 1);
2322 void address_space_init_dispatch(AddressSpace
*as
)
2324 as
->dispatch
= NULL
;
2325 as
->dispatch_listener
= (MemoryListener
) {
2327 .commit
= mem_commit
,
2328 .region_add
= mem_add
,
2329 .region_nop
= mem_add
,
2332 memory_listener_register(&as
->dispatch_listener
, as
);
2335 void address_space_unregister(AddressSpace
*as
)
2337 memory_listener_unregister(&as
->dispatch_listener
);
2340 void address_space_destroy_dispatch(AddressSpace
*as
)
2342 AddressSpaceDispatch
*d
= as
->dispatch
;
2344 atomic_rcu_set(&as
->dispatch
, NULL
);
2346 call_rcu(d
, address_space_dispatch_free
, rcu
);
2350 static void memory_map_init(void)
2352 system_memory
= g_malloc(sizeof(*system_memory
));
2354 memory_region_init(system_memory
, NULL
, "system", UINT64_MAX
);
2355 address_space_init(&address_space_memory
, system_memory
, "memory");
2357 system_io
= g_malloc(sizeof(*system_io
));
2358 memory_region_init_io(system_io
, NULL
, &unassigned_io_ops
, NULL
, "io",
2360 address_space_init(&address_space_io
, system_io
, "I/O");
2363 MemoryRegion
*get_system_memory(void)
2365 return system_memory
;
2368 MemoryRegion
*get_system_io(void)
2373 #endif /* !defined(CONFIG_USER_ONLY) */
2375 /* physical memory access (slow version, mainly for debug) */
2376 #if defined(CONFIG_USER_ONLY)
2377 int cpu_memory_rw_debug(CPUState
*cpu
, target_ulong addr
,
2378 uint8_t *buf
, int len
, int is_write
)
2385 page
= addr
& TARGET_PAGE_MASK
;
2386 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
2389 flags
= page_get_flags(page
);
2390 if (!(flags
& PAGE_VALID
))
2393 if (!(flags
& PAGE_WRITE
))
2395 /* XXX: this code should not depend on lock_user */
2396 if (!(p
= lock_user(VERIFY_WRITE
, addr
, l
, 0)))
2399 unlock_user(p
, addr
, l
);
2401 if (!(flags
& PAGE_READ
))
2403 /* XXX: this code should not depend on lock_user */
2404 if (!(p
= lock_user(VERIFY_READ
, addr
, l
, 1)))
2407 unlock_user(p
, addr
, 0);
2418 static void invalidate_and_set_dirty(MemoryRegion
*mr
, hwaddr addr
,
2421 uint8_t dirty_log_mask
= memory_region_get_dirty_log_mask(mr
);
2422 /* No early return if dirty_log_mask is or becomes 0, because
2423 * cpu_physical_memory_set_dirty_range will still call
2424 * xen_modified_memory.
2426 if (dirty_log_mask
) {
2428 cpu_physical_memory_range_includes_clean(addr
, length
, dirty_log_mask
);
2430 if (dirty_log_mask
& (1 << DIRTY_MEMORY_CODE
)) {
2431 tb_invalidate_phys_range(addr
, addr
+ length
);
2432 dirty_log_mask
&= ~(1 << DIRTY_MEMORY_CODE
);
2434 cpu_physical_memory_set_dirty_range(addr
, length
, dirty_log_mask
);
2437 static int memory_access_size(MemoryRegion
*mr
, unsigned l
, hwaddr addr
)
2439 unsigned access_size_max
= mr
->ops
->valid
.max_access_size
;
2441 /* Regions are assumed to support 1-4 byte accesses unless
2442 otherwise specified. */
2443 if (access_size_max
== 0) {
2444 access_size_max
= 4;
2447 /* Bound the maximum access by the alignment of the address. */
2448 if (!mr
->ops
->impl
.unaligned
) {
2449 unsigned align_size_max
= addr
& -addr
;
2450 if (align_size_max
!= 0 && align_size_max
< access_size_max
) {
2451 access_size_max
= align_size_max
;
2455 /* Don't attempt accesses larger than the maximum. */
2456 if (l
> access_size_max
) {
2457 l
= access_size_max
;
2464 static bool prepare_mmio_access(MemoryRegion
*mr
)
2466 bool unlocked
= !qemu_mutex_iothread_locked();
2467 bool release_lock
= false;
2469 if (unlocked
&& mr
->global_locking
) {
2470 qemu_mutex_lock_iothread();
2472 release_lock
= true;
2474 if (mr
->flush_coalesced_mmio
) {
2476 qemu_mutex_lock_iothread();
2478 qemu_flush_coalesced_mmio_buffer();
2480 qemu_mutex_unlock_iothread();
2484 return release_lock
;
2487 MemTxResult
address_space_rw(AddressSpace
*as
, hwaddr addr
, MemTxAttrs attrs
,
2488 uint8_t *buf
, int len
, bool is_write
)
2495 MemTxResult result
= MEMTX_OK
;
2496 bool release_lock
= false;
2501 mr
= address_space_translate(as
, addr
, &addr1
, &l
, is_write
);
2504 if (!memory_access_is_direct(mr
, is_write
)) {
2505 release_lock
|= prepare_mmio_access(mr
);
2506 l
= memory_access_size(mr
, l
, addr1
);
2507 /* XXX: could force current_cpu to NULL to avoid
2511 /* 64 bit write access */
2513 result
|= memory_region_dispatch_write(mr
, addr1
, val
, 8,
2517 /* 32 bit write access */
2519 result
|= memory_region_dispatch_write(mr
, addr1
, val
, 4,
2523 /* 16 bit write access */
2525 result
|= memory_region_dispatch_write(mr
, addr1
, val
, 2,
2529 /* 8 bit write access */
2531 result
|= memory_region_dispatch_write(mr
, addr1
, val
, 1,
2538 addr1
+= memory_region_get_ram_addr(mr
);
2540 ptr
= qemu_get_ram_ptr(addr1
);
2541 memcpy(ptr
, buf
, l
);
2542 invalidate_and_set_dirty(mr
, addr1
, l
);
2545 if (!memory_access_is_direct(mr
, is_write
)) {
2547 release_lock
|= prepare_mmio_access(mr
);
2548 l
= memory_access_size(mr
, l
, addr1
);
2551 /* 64 bit read access */
2552 result
|= memory_region_dispatch_read(mr
, addr1
, &val
, 8,
2557 /* 32 bit read access */
2558 result
|= memory_region_dispatch_read(mr
, addr1
, &val
, 4,
2563 /* 16 bit read access */
2564 result
|= memory_region_dispatch_read(mr
, addr1
, &val
, 2,
2569 /* 8 bit read access */
2570 result
|= memory_region_dispatch_read(mr
, addr1
, &val
, 1,
2579 ptr
= qemu_get_ram_ptr(mr
->ram_addr
+ addr1
);
2580 memcpy(buf
, ptr
, l
);
2585 qemu_mutex_unlock_iothread();
2586 release_lock
= false;
2598 MemTxResult
address_space_write(AddressSpace
*as
, hwaddr addr
, MemTxAttrs attrs
,
2599 const uint8_t *buf
, int len
)
2601 return address_space_rw(as
, addr
, attrs
, (uint8_t *)buf
, len
, true);
2604 MemTxResult
address_space_read(AddressSpace
*as
, hwaddr addr
, MemTxAttrs attrs
,
2605 uint8_t *buf
, int len
)
2607 return address_space_rw(as
, addr
, attrs
, buf
, len
, false);
2611 void cpu_physical_memory_rw(hwaddr addr
, uint8_t *buf
,
2612 int len
, int is_write
)
2614 address_space_rw(&address_space_memory
, addr
, MEMTXATTRS_UNSPECIFIED
,
2615 buf
, len
, is_write
);
2618 enum write_rom_type
{
2623 static inline void cpu_physical_memory_write_rom_internal(AddressSpace
*as
,
2624 hwaddr addr
, const uint8_t *buf
, int len
, enum write_rom_type type
)
2634 mr
= address_space_translate(as
, addr
, &addr1
, &l
, true);
2636 if (!(memory_region_is_ram(mr
) ||
2637 memory_region_is_romd(mr
))) {
2638 l
= memory_access_size(mr
, l
, addr1
);
2640 addr1
+= memory_region_get_ram_addr(mr
);
2642 ptr
= qemu_get_ram_ptr(addr1
);
2645 memcpy(ptr
, buf
, l
);
2646 invalidate_and_set_dirty(mr
, addr1
, l
);
2649 flush_icache_range((uintptr_t)ptr
, (uintptr_t)ptr
+ l
);
2660 /* used for ROM loading : can write in RAM and ROM */
2661 void cpu_physical_memory_write_rom(AddressSpace
*as
, hwaddr addr
,
2662 const uint8_t *buf
, int len
)
2664 cpu_physical_memory_write_rom_internal(as
, addr
, buf
, len
, WRITE_DATA
);
2667 void cpu_flush_icache_range(hwaddr start
, int len
)
2670 * This function should do the same thing as an icache flush that was
2671 * triggered from within the guest. For TCG we are always cache coherent,
2672 * so there is no need to flush anything. For KVM / Xen we need to flush
2673 * the host's instruction cache at least.
2675 if (tcg_enabled()) {
2679 cpu_physical_memory_write_rom_internal(&address_space_memory
,
2680 start
, NULL
, len
, FLUSH_CACHE
);
2691 static BounceBuffer bounce
;
2693 typedef struct MapClient
{
2695 QLIST_ENTRY(MapClient
) link
;
2698 QemuMutex map_client_list_lock
;
2699 static QLIST_HEAD(map_client_list
, MapClient
) map_client_list
2700 = QLIST_HEAD_INITIALIZER(map_client_list
);
2702 static void cpu_unregister_map_client_do(MapClient
*client
)
2704 QLIST_REMOVE(client
, link
);
2708 static void cpu_notify_map_clients_locked(void)
2712 while (!QLIST_EMPTY(&map_client_list
)) {
2713 client
= QLIST_FIRST(&map_client_list
);
2714 qemu_bh_schedule(client
->bh
);
2715 cpu_unregister_map_client_do(client
);
2719 void cpu_register_map_client(QEMUBH
*bh
)
2721 MapClient
*client
= g_malloc(sizeof(*client
));
2723 qemu_mutex_lock(&map_client_list_lock
);
2725 QLIST_INSERT_HEAD(&map_client_list
, client
, link
);
2726 if (!atomic_read(&bounce
.in_use
)) {
2727 cpu_notify_map_clients_locked();
2729 qemu_mutex_unlock(&map_client_list_lock
);
2732 void cpu_exec_init_all(void)
2734 qemu_mutex_init(&ram_list
.mutex
);
2737 qemu_mutex_init(&map_client_list_lock
);
2740 void cpu_unregister_map_client(QEMUBH
*bh
)
2744 qemu_mutex_lock(&map_client_list_lock
);
2745 QLIST_FOREACH(client
, &map_client_list
, link
) {
2746 if (client
->bh
== bh
) {
2747 cpu_unregister_map_client_do(client
);
2751 qemu_mutex_unlock(&map_client_list_lock
);
2754 static void cpu_notify_map_clients(void)
2756 qemu_mutex_lock(&map_client_list_lock
);
2757 cpu_notify_map_clients_locked();
2758 qemu_mutex_unlock(&map_client_list_lock
);
2761 bool address_space_access_valid(AddressSpace
*as
, hwaddr addr
, int len
, bool is_write
)
2769 mr
= address_space_translate(as
, addr
, &xlat
, &l
, is_write
);
2770 if (!memory_access_is_direct(mr
, is_write
)) {
2771 l
= memory_access_size(mr
, l
, addr
);
2772 if (!memory_region_access_valid(mr
, xlat
, l
, is_write
)) {
2784 /* Map a physical memory region into a host virtual address.
2785 * May map a subset of the requested range, given by and returned in *plen.
2786 * May return NULL if resources needed to perform the mapping are exhausted.
2787 * Use only for reads OR writes - not for read-modify-write operations.
2788 * Use cpu_register_map_client() to know when retrying the map operation is
2789 * likely to succeed.
2791 void *address_space_map(AddressSpace
*as
,
2798 hwaddr l
, xlat
, base
;
2799 MemoryRegion
*mr
, *this_mr
;
2808 mr
= address_space_translate(as
, addr
, &xlat
, &l
, is_write
);
2810 if (!memory_access_is_direct(mr
, is_write
)) {
2811 if (atomic_xchg(&bounce
.in_use
, true)) {
2815 /* Avoid unbounded allocations */
2816 l
= MIN(l
, TARGET_PAGE_SIZE
);
2817 bounce
.buffer
= qemu_memalign(TARGET_PAGE_SIZE
, l
);
2821 memory_region_ref(mr
);
2824 address_space_read(as
, addr
, MEMTXATTRS_UNSPECIFIED
,
2830 return bounce
.buffer
;
2834 raddr
= memory_region_get_ram_addr(mr
);
2845 this_mr
= address_space_translate(as
, addr
, &xlat
, &l
, is_write
);
2846 if (this_mr
!= mr
|| xlat
!= base
+ done
) {
2851 memory_region_ref(mr
);
2854 return qemu_ram_ptr_length(raddr
+ base
, plen
);
2857 /* Unmaps a memory region previously mapped by address_space_map().
2858 * Will also mark the memory as dirty if is_write == 1. access_len gives
2859 * the amount of memory that was actually read or written by the caller.
2861 void address_space_unmap(AddressSpace
*as
, void *buffer
, hwaddr len
,
2862 int is_write
, hwaddr access_len
)
2864 if (buffer
!= bounce
.buffer
) {
2868 mr
= qemu_ram_addr_from_host(buffer
, &addr1
);
2871 invalidate_and_set_dirty(mr
, addr1
, access_len
);
2873 if (xen_enabled()) {
2874 xen_invalidate_map_cache_entry(buffer
);
2876 memory_region_unref(mr
);
2880 address_space_write(as
, bounce
.addr
, MEMTXATTRS_UNSPECIFIED
,
2881 bounce
.buffer
, access_len
);
2883 qemu_vfree(bounce
.buffer
);
2884 bounce
.buffer
= NULL
;
2885 memory_region_unref(bounce
.mr
);
2886 atomic_mb_set(&bounce
.in_use
, false);
2887 cpu_notify_map_clients();
2890 void *cpu_physical_memory_map(hwaddr addr
,
2894 return address_space_map(&address_space_memory
, addr
, plen
, is_write
);
2897 void cpu_physical_memory_unmap(void *buffer
, hwaddr len
,
2898 int is_write
, hwaddr access_len
)
2900 return address_space_unmap(&address_space_memory
, buffer
, len
, is_write
, access_len
);
2903 /* warning: addr must be aligned */
2904 static inline uint32_t address_space_ldl_internal(AddressSpace
*as
, hwaddr addr
,
2906 MemTxResult
*result
,
2907 enum device_endian endian
)
2915 bool release_lock
= false;
2918 mr
= address_space_translate(as
, addr
, &addr1
, &l
, false);
2919 if (l
< 4 || !memory_access_is_direct(mr
, false)) {
2920 release_lock
|= prepare_mmio_access(mr
);
2923 r
= memory_region_dispatch_read(mr
, addr1
, &val
, 4, attrs
);
2924 #if defined(TARGET_WORDS_BIGENDIAN)
2925 if (endian
== DEVICE_LITTLE_ENDIAN
) {
2929 if (endian
== DEVICE_BIG_ENDIAN
) {
2935 ptr
= qemu_get_ram_ptr((memory_region_get_ram_addr(mr
)
2939 case DEVICE_LITTLE_ENDIAN
:
2940 val
= ldl_le_p(ptr
);
2942 case DEVICE_BIG_ENDIAN
:
2943 val
= ldl_be_p(ptr
);
2955 qemu_mutex_unlock_iothread();
2961 uint32_t address_space_ldl(AddressSpace
*as
, hwaddr addr
,
2962 MemTxAttrs attrs
, MemTxResult
*result
)
2964 return address_space_ldl_internal(as
, addr
, attrs
, result
,
2965 DEVICE_NATIVE_ENDIAN
);
2968 uint32_t address_space_ldl_le(AddressSpace
*as
, hwaddr addr
,
2969 MemTxAttrs attrs
, MemTxResult
*result
)
2971 return address_space_ldl_internal(as
, addr
, attrs
, result
,
2972 DEVICE_LITTLE_ENDIAN
);
2975 uint32_t address_space_ldl_be(AddressSpace
*as
, hwaddr addr
,
2976 MemTxAttrs attrs
, MemTxResult
*result
)
2978 return address_space_ldl_internal(as
, addr
, attrs
, result
,
2982 uint32_t ldl_phys(AddressSpace
*as
, hwaddr addr
)
2984 return address_space_ldl(as
, addr
, MEMTXATTRS_UNSPECIFIED
, NULL
);
2987 uint32_t ldl_le_phys(AddressSpace
*as
, hwaddr addr
)
2989 return address_space_ldl_le(as
, addr
, MEMTXATTRS_UNSPECIFIED
, NULL
);
2992 uint32_t ldl_be_phys(AddressSpace
*as
, hwaddr addr
)
2994 return address_space_ldl_be(as
, addr
, MEMTXATTRS_UNSPECIFIED
, NULL
);
2997 /* warning: addr must be aligned */
2998 static inline uint64_t address_space_ldq_internal(AddressSpace
*as
, hwaddr addr
,
3000 MemTxResult
*result
,
3001 enum device_endian endian
)
3009 bool release_lock
= false;
3012 mr
= address_space_translate(as
, addr
, &addr1
, &l
,
3014 if (l
< 8 || !memory_access_is_direct(mr
, false)) {
3015 release_lock
|= prepare_mmio_access(mr
);
3018 r
= memory_region_dispatch_read(mr
, addr1
, &val
, 8, attrs
);
3019 #if defined(TARGET_WORDS_BIGENDIAN)
3020 if (endian
== DEVICE_LITTLE_ENDIAN
) {
3024 if (endian
== DEVICE_BIG_ENDIAN
) {
3030 ptr
= qemu_get_ram_ptr((memory_region_get_ram_addr(mr
)
3034 case DEVICE_LITTLE_ENDIAN
:
3035 val
= ldq_le_p(ptr
);
3037 case DEVICE_BIG_ENDIAN
:
3038 val
= ldq_be_p(ptr
);
3050 qemu_mutex_unlock_iothread();
3056 uint64_t address_space_ldq(AddressSpace
*as
, hwaddr addr
,
3057 MemTxAttrs attrs
, MemTxResult
*result
)
3059 return address_space_ldq_internal(as
, addr
, attrs
, result
,
3060 DEVICE_NATIVE_ENDIAN
);
3063 uint64_t address_space_ldq_le(AddressSpace
*as
, hwaddr addr
,
3064 MemTxAttrs attrs
, MemTxResult
*result
)
3066 return address_space_ldq_internal(as
, addr
, attrs
, result
,
3067 DEVICE_LITTLE_ENDIAN
);
3070 uint64_t address_space_ldq_be(AddressSpace
*as
, hwaddr addr
,
3071 MemTxAttrs attrs
, MemTxResult
*result
)
3073 return address_space_ldq_internal(as
, addr
, attrs
, result
,
3077 uint64_t ldq_phys(AddressSpace
*as
, hwaddr addr
)
3079 return address_space_ldq(as
, addr
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3082 uint64_t ldq_le_phys(AddressSpace
*as
, hwaddr addr
)
3084 return address_space_ldq_le(as
, addr
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3087 uint64_t ldq_be_phys(AddressSpace
*as
, hwaddr addr
)
3089 return address_space_ldq_be(as
, addr
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3093 uint32_t address_space_ldub(AddressSpace
*as
, hwaddr addr
,
3094 MemTxAttrs attrs
, MemTxResult
*result
)
3099 r
= address_space_rw(as
, addr
, attrs
, &val
, 1, 0);
3106 uint32_t ldub_phys(AddressSpace
*as
, hwaddr addr
)
3108 return address_space_ldub(as
, addr
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3111 /* warning: addr must be aligned */
3112 static inline uint32_t address_space_lduw_internal(AddressSpace
*as
,
3115 MemTxResult
*result
,
3116 enum device_endian endian
)
3124 bool release_lock
= false;
3127 mr
= address_space_translate(as
, addr
, &addr1
, &l
,
3129 if (l
< 2 || !memory_access_is_direct(mr
, false)) {
3130 release_lock
|= prepare_mmio_access(mr
);
3133 r
= memory_region_dispatch_read(mr
, addr1
, &val
, 2, attrs
);
3134 #if defined(TARGET_WORDS_BIGENDIAN)
3135 if (endian
== DEVICE_LITTLE_ENDIAN
) {
3139 if (endian
== DEVICE_BIG_ENDIAN
) {
3145 ptr
= qemu_get_ram_ptr((memory_region_get_ram_addr(mr
)
3149 case DEVICE_LITTLE_ENDIAN
:
3150 val
= lduw_le_p(ptr
);
3152 case DEVICE_BIG_ENDIAN
:
3153 val
= lduw_be_p(ptr
);
3165 qemu_mutex_unlock_iothread();
3171 uint32_t address_space_lduw(AddressSpace
*as
, hwaddr addr
,
3172 MemTxAttrs attrs
, MemTxResult
*result
)
3174 return address_space_lduw_internal(as
, addr
, attrs
, result
,
3175 DEVICE_NATIVE_ENDIAN
);
3178 uint32_t address_space_lduw_le(AddressSpace
*as
, hwaddr addr
,
3179 MemTxAttrs attrs
, MemTxResult
*result
)
3181 return address_space_lduw_internal(as
, addr
, attrs
, result
,
3182 DEVICE_LITTLE_ENDIAN
);
3185 uint32_t address_space_lduw_be(AddressSpace
*as
, hwaddr addr
,
3186 MemTxAttrs attrs
, MemTxResult
*result
)
3188 return address_space_lduw_internal(as
, addr
, attrs
, result
,
3192 uint32_t lduw_phys(AddressSpace
*as
, hwaddr addr
)
3194 return address_space_lduw(as
, addr
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3197 uint32_t lduw_le_phys(AddressSpace
*as
, hwaddr addr
)
3199 return address_space_lduw_le(as
, addr
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3202 uint32_t lduw_be_phys(AddressSpace
*as
, hwaddr addr
)
3204 return address_space_lduw_be(as
, addr
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3207 /* warning: addr must be aligned. The ram page is not masked as dirty
3208 and the code inside is not invalidated. It is useful if the dirty
3209 bits are used to track modified PTEs */
3210 void address_space_stl_notdirty(AddressSpace
*as
, hwaddr addr
, uint32_t val
,
3211 MemTxAttrs attrs
, MemTxResult
*result
)
3218 uint8_t dirty_log_mask
;
3219 bool release_lock
= false;
3222 mr
= address_space_translate(as
, addr
, &addr1
, &l
,
3224 if (l
< 4 || !memory_access_is_direct(mr
, true)) {
3225 release_lock
|= prepare_mmio_access(mr
);
3227 r
= memory_region_dispatch_write(mr
, addr1
, val
, 4, attrs
);
3229 addr1
+= memory_region_get_ram_addr(mr
) & TARGET_PAGE_MASK
;
3230 ptr
= qemu_get_ram_ptr(addr1
);
3233 dirty_log_mask
= memory_region_get_dirty_log_mask(mr
);
3234 dirty_log_mask
&= ~(1 << DIRTY_MEMORY_CODE
);
3235 cpu_physical_memory_set_dirty_range(addr1
, 4, dirty_log_mask
);
3242 qemu_mutex_unlock_iothread();
3247 void stl_phys_notdirty(AddressSpace
*as
, hwaddr addr
, uint32_t val
)
3249 address_space_stl_notdirty(as
, addr
, val
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3252 /* warning: addr must be aligned */
3253 static inline void address_space_stl_internal(AddressSpace
*as
,
3254 hwaddr addr
, uint32_t val
,
3256 MemTxResult
*result
,
3257 enum device_endian endian
)
3264 bool release_lock
= false;
3267 mr
= address_space_translate(as
, addr
, &addr1
, &l
,
3269 if (l
< 4 || !memory_access_is_direct(mr
, true)) {
3270 release_lock
|= prepare_mmio_access(mr
);
3272 #if defined(TARGET_WORDS_BIGENDIAN)
3273 if (endian
== DEVICE_LITTLE_ENDIAN
) {
3277 if (endian
== DEVICE_BIG_ENDIAN
) {
3281 r
= memory_region_dispatch_write(mr
, addr1
, val
, 4, attrs
);
3284 addr1
+= memory_region_get_ram_addr(mr
) & TARGET_PAGE_MASK
;
3285 ptr
= qemu_get_ram_ptr(addr1
);
3287 case DEVICE_LITTLE_ENDIAN
:
3290 case DEVICE_BIG_ENDIAN
:
3297 invalidate_and_set_dirty(mr
, addr1
, 4);
3304 qemu_mutex_unlock_iothread();
3309 void address_space_stl(AddressSpace
*as
, hwaddr addr
, uint32_t val
,
3310 MemTxAttrs attrs
, MemTxResult
*result
)
3312 address_space_stl_internal(as
, addr
, val
, attrs
, result
,
3313 DEVICE_NATIVE_ENDIAN
);
3316 void address_space_stl_le(AddressSpace
*as
, hwaddr addr
, uint32_t val
,
3317 MemTxAttrs attrs
, MemTxResult
*result
)
3319 address_space_stl_internal(as
, addr
, val
, attrs
, result
,
3320 DEVICE_LITTLE_ENDIAN
);
3323 void address_space_stl_be(AddressSpace
*as
, hwaddr addr
, uint32_t val
,
3324 MemTxAttrs attrs
, MemTxResult
*result
)
3326 address_space_stl_internal(as
, addr
, val
, attrs
, result
,
3330 void stl_phys(AddressSpace
*as
, hwaddr addr
, uint32_t val
)
3332 address_space_stl(as
, addr
, val
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3335 void stl_le_phys(AddressSpace
*as
, hwaddr addr
, uint32_t val
)
3337 address_space_stl_le(as
, addr
, val
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3340 void stl_be_phys(AddressSpace
*as
, hwaddr addr
, uint32_t val
)
3342 address_space_stl_be(as
, addr
, val
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3346 void address_space_stb(AddressSpace
*as
, hwaddr addr
, uint32_t val
,
3347 MemTxAttrs attrs
, MemTxResult
*result
)
3352 r
= address_space_rw(as
, addr
, attrs
, &v
, 1, 1);
3358 void stb_phys(AddressSpace
*as
, hwaddr addr
, uint32_t val
)
3360 address_space_stb(as
, addr
, val
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3363 /* warning: addr must be aligned */
3364 static inline void address_space_stw_internal(AddressSpace
*as
,
3365 hwaddr addr
, uint32_t val
,
3367 MemTxResult
*result
,
3368 enum device_endian endian
)
3375 bool release_lock
= false;
3378 mr
= address_space_translate(as
, addr
, &addr1
, &l
, true);
3379 if (l
< 2 || !memory_access_is_direct(mr
, true)) {
3380 release_lock
|= prepare_mmio_access(mr
);
3382 #if defined(TARGET_WORDS_BIGENDIAN)
3383 if (endian
== DEVICE_LITTLE_ENDIAN
) {
3387 if (endian
== DEVICE_BIG_ENDIAN
) {
3391 r
= memory_region_dispatch_write(mr
, addr1
, val
, 2, attrs
);
3394 addr1
+= memory_region_get_ram_addr(mr
) & TARGET_PAGE_MASK
;
3395 ptr
= qemu_get_ram_ptr(addr1
);
3397 case DEVICE_LITTLE_ENDIAN
:
3400 case DEVICE_BIG_ENDIAN
:
3407 invalidate_and_set_dirty(mr
, addr1
, 2);
3414 qemu_mutex_unlock_iothread();
3419 void address_space_stw(AddressSpace
*as
, hwaddr addr
, uint32_t val
,
3420 MemTxAttrs attrs
, MemTxResult
*result
)
3422 address_space_stw_internal(as
, addr
, val
, attrs
, result
,
3423 DEVICE_NATIVE_ENDIAN
);
3426 void address_space_stw_le(AddressSpace
*as
, hwaddr addr
, uint32_t val
,
3427 MemTxAttrs attrs
, MemTxResult
*result
)
3429 address_space_stw_internal(as
, addr
, val
, attrs
, result
,
3430 DEVICE_LITTLE_ENDIAN
);
3433 void address_space_stw_be(AddressSpace
*as
, hwaddr addr
, uint32_t val
,
3434 MemTxAttrs attrs
, MemTxResult
*result
)
3436 address_space_stw_internal(as
, addr
, val
, attrs
, result
,
3440 void stw_phys(AddressSpace
*as
, hwaddr addr
, uint32_t val
)
3442 address_space_stw(as
, addr
, val
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3445 void stw_le_phys(AddressSpace
*as
, hwaddr addr
, uint32_t val
)
3447 address_space_stw_le(as
, addr
, val
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3450 void stw_be_phys(AddressSpace
*as
, hwaddr addr
, uint32_t val
)
3452 address_space_stw_be(as
, addr
, val
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3456 void address_space_stq(AddressSpace
*as
, hwaddr addr
, uint64_t val
,
3457 MemTxAttrs attrs
, MemTxResult
*result
)
3461 r
= address_space_rw(as
, addr
, attrs
, (void *) &val
, 8, 1);
3467 void address_space_stq_le(AddressSpace
*as
, hwaddr addr
, uint64_t val
,
3468 MemTxAttrs attrs
, MemTxResult
*result
)
3471 val
= cpu_to_le64(val
);
3472 r
= address_space_rw(as
, addr
, attrs
, (void *) &val
, 8, 1);
3477 void address_space_stq_be(AddressSpace
*as
, hwaddr addr
, uint64_t val
,
3478 MemTxAttrs attrs
, MemTxResult
*result
)
3481 val
= cpu_to_be64(val
);
3482 r
= address_space_rw(as
, addr
, attrs
, (void *) &val
, 8, 1);
3488 void stq_phys(AddressSpace
*as
, hwaddr addr
, uint64_t val
)
3490 address_space_stq(as
, addr
, val
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3493 void stq_le_phys(AddressSpace
*as
, hwaddr addr
, uint64_t val
)
3495 address_space_stq_le(as
, addr
, val
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3498 void stq_be_phys(AddressSpace
*as
, hwaddr addr
, uint64_t val
)
3500 address_space_stq_be(as
, addr
, val
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3503 /* virtual memory access for debug (includes writing to ROM) */
3504 int cpu_memory_rw_debug(CPUState
*cpu
, target_ulong addr
,
3505 uint8_t *buf
, int len
, int is_write
)
3512 page
= addr
& TARGET_PAGE_MASK
;
3513 phys_addr
= cpu_get_phys_page_debug(cpu
, page
);
3514 /* if no physical page mapped, return an error */
3515 if (phys_addr
== -1)
3517 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
3520 phys_addr
+= (addr
& ~TARGET_PAGE_MASK
);
3522 cpu_physical_memory_write_rom(cpu
->as
, phys_addr
, buf
, l
);
3524 address_space_rw(cpu
->as
, phys_addr
, MEMTXATTRS_UNSPECIFIED
,
3535 * Allows code that needs to deal with migration bitmaps etc to still be built
3536 * target independent.
3538 size_t qemu_target_page_bits(void)
3540 return TARGET_PAGE_BITS
;
/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool target_words_bigendian(void);
bool target_words_bigendian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}
3559 #ifndef CONFIG_USER_ONLY
3560 bool cpu_physical_memory_is_io(hwaddr phys_addr
)
3567 mr
= address_space_translate(&address_space_memory
,
3568 phys_addr
, &phys_addr
, &l
, false);
3570 res
= !(memory_region_is_ram(mr
) || memory_region_is_romd(mr
));
3575 int qemu_ram_foreach_block(RAMBlockIterFunc func
, void *opaque
)
3581 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
3582 ret
= func(block
->idstr
, block
->host
, block
->offset
,
3583 block
->used_length
, opaque
);