 * Copyright (c) 2003 Fabrice Bellard
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
#include <sys/types.h>
#include "qemu-common.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/boards.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "exec/cpu-all.h"
#include "qemu/rcu_queue.h"
#include "qemu/main-loop.h"
#include "translate-all.h"
#include "sysemu/replay.h"
#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "qemu/range.h"
#include "qemu/mmap-alloc.h"

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* ram_list is read under rcu_read_lock()/rcu_read_unlock().  Writes
 * are protected by the ramlist lock.
RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC   (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED     (1 << 1)

/* Only a portion of RAM (used_length) is actually used, and migrated.
 * This used_length size can change across reboots.
#define RAM_RESIZEABLE (1 << 2)

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);

/* current CPU in the current thread. It is only valid inside
__thread CPUState *current_cpu;

/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */

#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables.  */
#define ADDR_SPACE_BITS 64

#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
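
/* Worked example (illustrative only, assuming the usual upstream values of
 * P_L2_BITS == 9 and TARGET_PAGE_BITS == 12, neither of which appears in
 * this excerpt): each map level resolves 9 address bits, so
 * P_L2_LEVELS = ((64 - 12 - 1) / 9) + 1 = 6 levels suffice to cover a
 * 64-bit physical address space at 4 KiB page granularity.
 */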
typedef PhysPageEntry Node[P_L2_SIZE];

typedef struct PhysPageMap {
    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb_alloc;
    MemoryRegionSection *sections;

struct AddressSpaceDispatch {
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
    PhysPageEntry phys_map;

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    uint16_t sub_section[TARGET_PAGE_SIZE];

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;

 * CPUAddressSpace: all the information a CPU needs about an AddressSpace
 * @cpu: the CPU whose AddressSpace this is
 * @as: the AddressSpace itself
 * @memory_dispatch: its dispatch pointer (cached, RCU protected)
 * @tcg_as_listener: listener for tracking changes to the AddressSpace
struct CPUAddressSpace {
    struct AddressSpaceDispatch *memory_dispatch;
    MemoryListener tcg_as_listener;

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);

static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
    ret = map->nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);
    e.skip = leaf ? 0 : 1;
    e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
    for (i = 0; i < P_L2_SIZE; ++i) {
        memcpy(&p[i], &e, sizeof(e));

static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map, level == 0);
    p = map->nodes[lp->ptr];
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
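
/* Minimal usage sketch (illustrative only): registering num_pages pages that
 * all map to one section index, which is what register_multipage() further
 * down effectively does:
 *
 *     phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
 *
 * The map is grown lazily by phys_map_node_reserve()/phys_map_node_alloc()
 * as the recursion in phys_page_set_level() descends.
 */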
/* Compact a non-leaf page entry. Simply detect that the entry has a single child,
 * and update our entry so we can skip it and go directly to the destination.
static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
    unsigned valid_ptr = P_L2_SIZE;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {

    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
        phys_page_compact(&p[i], nodes, compacted);

    /* We can only compress if there's only one child.  */
    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
        lp->skip += p[valid_ptr].skip;

static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
    DECLARE_BITMAP(compacted, nodes_nb);

    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes, compacted);

static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
    hwaddr index = addr >> TARGET_PAGE_BITS;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];

    if (sections[lp.ptr].size.hi ||
        range_covers_byte(sections[lp.ptr].offset_within_address_space,
                          sections[lp.ptr].size.lo, addr)) {
        return &sections[lp.ptr];
    return &sections[PHYS_SECTION_UNASSIGNED];

bool memory_region_is_unassigned(MemoryRegion *mr)
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;

/* Called from RCU critical section */
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        bool resolve_subpage)
    MemoryRegionSection *section;

    section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];

/* Called from RCU critical section */
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
    MemoryRegionSection *section;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    /* MMIO registers can be expected to perform full-width accesses based only
     * on their address, without considering adjacent registers that could
     * decode to completely different MemoryRegions.  When such registers
     * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
     * regions overlap wildly.  For this reason we cannot clamp the accesses
     * If the length is small (as is the case for address_space_ldl/stl),
     * everything works fine.  If the incoming length is large, however,
     * the caller really has to do the clamping through memory_access_size.
    if (memory_region_is_ram(mr)) {
        diff = int128_sub(section->size, int128_make64(addr));
        *plen = int128_get64(int128_min(diff, int128_make64(*plen)));

/* Called from RCU critical section */
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
    MemoryRegionSection *section;

        AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
        section = address_space_translate_internal(d, addr, &addr, plen, true);

        if (!mr->iommu_ops) {

        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;

        as = iotlb.target_as;

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        *plen = MIN(page, *plen);
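
/* Minimal caller sketch (illustrative, not a complete walkthrough of the
 * API): translation results are only stable inside an RCU critical section,
 * so a typical user, with as, addr and is_write supplied by the caller,
 * looks roughly like this:
 *
 *     hwaddr xlat, len = 4;
 *     rcu_read_lock();
 *     MemoryRegion *mr = address_space_translate(as, addr, &xlat, &len, is_write);
 *     if (memory_access_is_direct(mr, is_write)) {
 *         ... access guest RAM at xlat, at most len bytes ...
 *     }
 *     rcu_read_unlock();
 */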
/* Called from RCU critical section */
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen)
    MemoryRegionSection *section;
    section = address_space_translate_internal(cpu->cpu_ases[0].memory_dispatch,
                                                addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;

static int cpu_common_pre_load(void *opaque)
    CPUState *cpu = opaque;

    cpu->exception_index = -1;

static bool cpu_common_exception_index_needed(void *opaque)
    CPUState *cpu = opaque;

    return tcg_enabled() && cpu->exception_index != -1;

static const VMStateDescription vmstate_cpu_common_exception_index = {
    .name = "cpu_common/exception_index",
    .minimum_version_id = 1,
    .needed = cpu_common_exception_index_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(exception_index, CPUState),
        VMSTATE_END_OF_LIST()

static bool cpu_common_crash_occurred_needed(void *opaque)
    CPUState *cpu = opaque;

    return cpu->crash_occurred;

static const VMStateDescription vmstate_cpu_common_crash_occurred = {
    .name = "cpu_common/crash_occurred",
    .minimum_version_id = 1,
    .needed = cpu_common_crash_occurred_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(crash_occurred, CPUState),
        VMSTATE_END_OF_LIST()

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .minimum_version_id = 1,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    .subsections = (const VMStateDescription*[]) {
        &vmstate_cpu_common_exception_index,
        &vmstate_cpu_common_crash_occurred,

CPUState *qemu_get_cpu(int index)
        if (cpu->cpu_index == index) {

#if !defined(CONFIG_USER_ONLY)
void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as)
    /* We only support one address space per cpu at the moment.  */
    assert(cpu->as == as);

        /* We've already registered the listener for our only AS */

    cpu->cpu_ases = g_new0(CPUAddressSpace, 1);
    cpu->cpu_ases[0].cpu = cpu;
    cpu->cpu_ases[0].as = as;
    cpu->cpu_ases[0].tcg_as_listener.commit = tcg_commit;
    memory_listener_register(&cpu->cpu_ases[0].tcg_as_listener, as);

#ifndef CONFIG_USER_ONLY
static DECLARE_BITMAP(cpu_index_map, MAX_CPUMASK_BITS);

static int cpu_get_free_index(Error **errp)
    int cpu = find_first_zero_bit(cpu_index_map, MAX_CPUMASK_BITS);

    if (cpu >= MAX_CPUMASK_BITS) {
        error_setg(errp, "Trying to use more CPUs than max of %d",

    bitmap_set(cpu_index_map, cpu, 1);

void cpu_exec_exit(CPUState *cpu)
    if (cpu->cpu_index == -1) {
        /* cpu_index was never allocated by this @cpu or was already freed. */

    bitmap_clear(cpu_index_map, cpu->cpu_index, 1);

static int cpu_get_free_index(Error **errp)
    CPU_FOREACH(some_cpu) {

void cpu_exec_exit(CPUState *cpu)

void cpu_exec_init(CPUState *cpu, Error **errp)
    CPUClass *cc = CPU_GET_CLASS(cpu);
    Error *local_err = NULL;

#ifndef CONFIG_USER_ONLY
    cpu->as = &address_space_memory;
    cpu->thread_id = qemu_get_thread_id();

#if defined(CONFIG_USER_ONLY)
    cpu_index = cpu->cpu_index = cpu_get_free_index(&local_err);
        error_propagate(errp, local_err);
#if defined(CONFIG_USER_ONLY)
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, cpu->env_ptr);
    assert(cc->vmsd == NULL);
    assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);

#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
    tb_invalidate_phys_page_range(pc, pc + 1, 0);

static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
    hwaddr phys = cpu_get_phys_page_debug(cpu, pc);
        tb_invalidate_phys_addr(cpu->as,
                                phys | (pc & ~TARGET_PAGE_MASK));

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)

int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,

void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)

/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
    /* forbid ranges which are empty or run off the end of the address space */
    if (len == 0 || (addr + len - 1) < addr) {
        error_report("tried to set invalid watchpoint at %"
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);

    wp = g_malloc(sizeof(*wp));

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);

    tlb_flush_page(cpu, addr);
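
/* Usage sketch (illustrative): this is roughly how a debugger front end such
 * as the gdbstub would request a 4-byte write watchpoint; other flag
 * combinations are possible and the CPUWatchpoint out-pointer may be NULL
 * when the caller does not need the handle back:
 *
 *     CPUWatchpoint *wp;
 *     if (cpu_watchpoint_insert(cpu, addr, 4, BP_GDB | BP_MEM_WRITE, &wp) < 0) {
 *         ... report failure to the client ...
 *     }
 */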
/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (addr == wp->vaddr && len == wp->len
            && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(cpu, wp);

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
    QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);

    tlb_flush_page(cpu, watchpoint->vaddr);

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(cpu, wp);

/* Return true if this watchpoint address matches the specified
 * access (ie the address range covered by the watchpoint overlaps
 * partially or completely with the address range covered by the
static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
    /* We know the lengths are non-zero, but a little caution is
     * required to avoid errors in the case where the range ends
     * exactly at the top of the address space and so addr + len
     * wraps round to zero.
    vaddr wpend = wp->vaddr + wp->len - 1;
    vaddr addrend = addr + len - 1;

    return !(addr > wpend || wp->vaddr > addrend);
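
/* Worked example (illustrative, assuming a 64-bit vaddr): a watchpoint at
 * vaddr = 0xfffffffffffffff8 with len = 8 gives wpend = 0xffffffffffffffff
 * rather than the wrapped value 0, so an access at addr = 0xfffffffffffffffc
 * with len = 4 (addrend = 0xffffffffffffffff) still reports an overlap.
 * Comparing inclusive range ends is what makes the top-of-address-space case
 * safe.
 */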
/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
    bp = g_malloc(sizeof(*bp));

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);

    breakpoint_invalidate(cpu, pc);

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(cpu, bp);

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
    QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);

    breakpoint_invalidate(cpu, breakpoint->pc);

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(cpu, bp);

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
            kvm_update_guest_debug(cpu, 0);
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */

void cpu_abort(CPUState *cpu, const char *fmt, ...)
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_separate()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);

#if defined(CONFIG_USER_ONLY)
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);

#if !defined(CONFIG_USER_ONLY)
/* Called from RCU critical section */
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && addr - block->offset < block->max_length) {
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->max_length) {

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);

    /* It is safe to write mru_block outside the iothread lock.  This
     *     xxx removed from list
     *         call_rcu(reclaim_ramblock, xxx);
     * atomic_rcu_set is not needed here.  The block was already published
     * when it was placed into the list.  Here we're just making an extra
     * copy of the pointer.
    ram_list.mru_block = block;
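
/* Caller sketch (illustrative): lookups must happen under RCU (or with the
 * ramlist lock held), because the returned block may be reclaimed by a
 * concurrent qemu_ram_free() once the critical section ends:
 *
 *     rcu_read_lock();
 *     RAMBlock *block = qemu_get_ram_block(addr);
 *     void *host = ramblock_ptr(block, addr - block->offset);
 *     ... use host while still inside the critical section ...
 *     rcu_read_unlock();
 */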
static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
        tlb_reset_dirty(cpu, start1, length);

/* Note: start and end must be within the same ram block.  */
bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
    unsigned long end, page;

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;
    dirty = bitmap_test_and_clear_atomic(ram_list.dirty_memory[client],

    if (dirty && tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);

/* Called from RCU critical section */
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       hwaddr paddr, hwaddr xlat,
                                       target_ulong *address)
    if (memory_region_is_ram(section->mr)) {
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
            iotlb |= PHYS_SECTION_ROM;
        AddressSpaceDispatch *d;

        d = atomic_rcu_read(&section->address_space->dispatch);
        iotlb = section - d->map.sections;

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;

#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
    qemu_anon_ram_alloc;
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this.  Hopefully, we can
 * get rid of it eventually.
void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
    phys_mem_alloc = alloc;

static uint16_t phys_section_add(PhysPageMap *map,
                                 MemoryRegionSection *section)
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
    assert(map->sections_nb < TARGET_PAGE_SIZE);

    if (map->sections_nb == map->sections_nb_alloc) {
        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
        map->sections = g_renew(MemoryRegionSection, map->sections,
                                map->sections_nb_alloc);
    map->sections[map->sections_nb] = *section;
    memory_region_ref(section->mr);
    return map->sections_nb++;

static void phys_section_destroy(MemoryRegion *mr)
    bool have_sub_page = mr->subpage;

    memory_region_unref(mr);

    if (have_sub_page) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        object_unref(OBJECT(&subpage->iomem));

static void phys_sections_free(PhysPageMap *map)
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    g_free(map->sections);

static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
    hwaddr base = section->offset_within_address_space
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
                                                   d->map.nodes, d->map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.address_space = d->as;
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&d->map, &subsection));
        subpage = container_of(existing->mr, subpage_t, iomem);
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end,
                     phys_section_add(&d->map, section));

static void register_multipage(AddressSpaceDispatch *d,
                               MemoryRegionSection *section)
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(&d->map, section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,

    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);

static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = as->next_dispatch;
    MemoryRegionSection now = *section, remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
                        - now.offset_within_address_space;
        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(d, &now);
        now.size = int128_zero();
    while (int128_ne(remain.size, now.size)) {
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);
        if (int128_lt(remain.size, page_size)) {
            register_subpage(d, &now);
        } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
            now.size = page_size;
            register_subpage(d, &now);
            now.size = int128_and(now.size, int128_neg(page_size));
            register_multipage(d, &now);
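
/* Worked example (illustrative): suppose a section starts at guest physical
 * address 0x1800 and is 0x3000 bytes long, with TARGET_PAGE_SIZE == 0x1000.
 * mem_add() splits it into
 *   - a subpage piece   [0x1800, 0x2000)  (unaligned head),
 *   - a multipage piece [0x2000, 0x4000)  (whole pages), and
 *   - a subpage piece   [0x4000, 0x4800)  (partial tail),
 * so only page-unaligned fragments pay the cost of the subpage_t indirection.
 */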
void qemu_flush_coalesced_mmio_buffer(void)
        kvm_flush_coalesced_mmio_buffer();

void qemu_mutex_lock_ramlist(void)
    qemu_mutex_lock(&ram_list.mutex);

void qemu_mutex_unlock_ramlist(void)
    qemu_mutex_unlock(&ram_list.mutex);

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC 0x958458f6

static long gethugepagesize(const char *path, Error **errp)
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

        error_setg_errno(errp, errno, "failed to get page size of file %s",

static void *file_ram_alloc(RAMBlock *block,
    char *sanitized_name;
    Error *local_err = NULL;

    hpagesize = gethugepagesize(path, &local_err);
        error_propagate(errp, local_err);
    block->mr->align = hpagesize;

    if (memory < hpagesize) {
        error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
                   "or larger than huge page size 0x%" PRIx64,

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
                   "host lacks kvm mmu notifiers, -mem-path unsupported");

    if (!stat(path, &st) && S_ISDIR(st.st_mode)) {
        /* Make name safe to use with mkstemp by replacing '/' with '_'. */
        sanitized_name = g_strdup(memory_region_name(block->mr));
        for (c = sanitized_name; *c != '\0'; c++) {

        filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
        g_free(sanitized_name);

        fd = mkstemp(filename);
        fd = open(path, O_RDWR | O_CREAT, 0644);
        error_setg_errno(errp, errno,
                         "unable to create backing store for hugepages");

    memory = ROUND_UP(memory, hpagesize);

     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
    if (ftruncate(fd, memory)) {
        perror("ftruncate");

    area = qemu_ram_mmap(fd, memory, hpagesize, block->flags & RAM_SHARED);
    if (area == MAP_FAILED) {
        error_setg_errno(errp, errno,
                         "unable to map backing store for hugepages");

        os_mem_prealloc(fd, area, memory);

/* Called with the ramlist lock held.  */
static ram_addr_t find_ram_offset(ram_addr_t size)
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    assert(size != 0); /* it would hand out same offset multiple times */

    if (QLIST_EMPTY_RCU(&ram_list.blocks)) {

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->max_length;

        QLIST_FOREACH_RCU(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);

        if (next - end >= size && next - end < mingap) {
            mingap = next - end;

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",

ram_addr_t last_ram_offset(void)
    ram_addr_t last = 0;

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        last = MAX(last, block->offset + block->max_length);

static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
    /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
    if (!machine_dump_guest_core(current_machine)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
            perror("qemu_madvise");
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                    "but dump_guest_core=off specified\n");

/* Called within an RCU critical section, or while the ramlist lock
static RAMBlock *find_ram_block(ram_addr_t addr)
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (block->offset == addr) {

const char *qemu_ram_get_idstr(RAMBlock *rb)

/* Called with iothread lock held.  */
void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
    RAMBlock *new_block, *block;

    new_block = find_ram_block(addr);
    assert(!new_block->idstr[0]);
        char *id = qdev_get_dev_path(dev);
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",

/* Called with iothread lock held.  */
void qemu_ram_unset_idstr(ram_addr_t addr)
    /* FIXME: arch_init.c assumes that this is not called throughout
     * migration.  Ignore the problem since hot-unplug during migration
     * does not work anyway.
    block = find_ram_block(addr);
        memset(block->idstr, 0, sizeof(block->idstr));

static int memory_try_enable_merging(void *addr, size_t len)
    if (!machine_mem_merge(current_machine)) {
        /* disabled by the user */

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
/* Only legal before guest might have detected the memory size: e.g. on
 * incoming migration, or right after reset.
 * As the memory core doesn't know how memory is accessed, it is up to the
 * resize callback to update device state and/or add assertions to detect
 * misuse, if necessary.
int qemu_ram_resize(ram_addr_t base, ram_addr_t newsize, Error **errp)
    RAMBlock *block = find_ram_block(base);

    newsize = HOST_PAGE_ALIGN(newsize);

    if (block->used_length == newsize) {

    if (!(block->flags & RAM_RESIZEABLE)) {
        error_setg_errno(errp, EINVAL,
                         "Length mismatch: %s: 0x" RAM_ADDR_FMT
                         " in != 0x" RAM_ADDR_FMT, block->idstr,
                         newsize, block->used_length);

    if (block->max_length < newsize) {
        error_setg_errno(errp, EINVAL,
                         "Length too large: %s: 0x" RAM_ADDR_FMT
                         " > 0x" RAM_ADDR_FMT, block->idstr,
                         newsize, block->max_length);

    cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
    block->used_length = newsize;
    cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
    memory_region_set_size(block->mr, newsize);
    if (block->resized) {
        block->resized(block->idstr, newsize, block->host);

static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp)
    RAMBlock *last_block = NULL;
    ram_addr_t old_ram_size, new_ram_size;

    old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;

    qemu_mutex_lock_ramlist();
    new_block->offset = find_ram_offset(new_block->max_length);

    if (!new_block->host) {
        if (xen_enabled()) {
            xen_ram_alloc(new_block->offset, new_block->max_length,
            new_block->host = phys_mem_alloc(new_block->max_length,
                                             &new_block->mr->align);
            if (!new_block->host) {
                error_setg_errno(errp, errno,
                                 "cannot set up guest memory '%s'",
                                 memory_region_name(new_block->mr));
                qemu_mutex_unlock_ramlist();
            memory_try_enable_merging(new_block->host, new_block->max_length);

    new_ram_size = MAX(old_ram_size,
                       (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS);
    if (new_ram_size > old_ram_size) {
        migration_bitmap_extend(old_ram_size, new_ram_size);
    /* Keep the list sorted from biggest to smallest block.  Unlike QTAILQ,
     * QLIST (which has an RCU-friendly variant) does not have insertion at
     * tail, so save the last element in last_block.
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (block->max_length < new_block->max_length) {
        QLIST_INSERT_BEFORE_RCU(block, new_block, next);
    } else if (last_block) {
        QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
    } else { /* list is empty */
        QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
    ram_list.mru_block = NULL;

    /* Write list before version */
    qemu_mutex_unlock_ramlist();

    new_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;

    if (new_ram_size > old_ram_size) {
        /* ram_list.dirty_memory[] is protected by the iothread lock.  */
        for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
            ram_list.dirty_memory[i] =
                bitmap_zero_extend(ram_list.dirty_memory[i],
                                   old_ram_size, new_ram_size);
    cpu_physical_memory_set_dirty_range(new_block->offset,
                                        new_block->used_length,

    if (new_block->host) {
        qemu_ram_setup_dump(new_block->host, new_block->max_length);
        qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
        qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
        if (kvm_enabled()) {
            kvm_setup_guest_memory(new_block->host, new_block->max_length);

    return new_block->offset;

ram_addr_t qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
                                    bool share, const char *mem_path,
    RAMBlock *new_block;
    Error *local_err = NULL;

    if (xen_enabled()) {
        error_setg(errp, "-mem-path not supported with Xen");

    if (phys_mem_alloc != qemu_anon_ram_alloc) {
         * file_ram_alloc() needs to allocate just like
         * phys_mem_alloc, but we haven't bothered to provide
                   "-mem-path not supported with this accelerator");

    size = HOST_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));
    new_block->used_length = size;
    new_block->max_length = size;
    new_block->flags = share ? RAM_SHARED : 0;
    new_block->host = file_ram_alloc(new_block, size,
    if (!new_block->host) {

    addr = ram_block_add(new_block, &local_err);
        error_propagate(errp, local_err);

ram_addr_t qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
                                   void (*resized)(const char*,
                                   void *host, bool resizeable,
                                   MemoryRegion *mr, Error **errp)
    RAMBlock *new_block;
    Error *local_err = NULL;

    size = HOST_PAGE_ALIGN(size);
    max_size = HOST_PAGE_ALIGN(max_size);
    new_block = g_malloc0(sizeof(*new_block));
    new_block->resized = resized;
    new_block->used_length = size;
    new_block->max_length = max_size;
    assert(max_size >= size);
    new_block->host = host;
        new_block->flags |= RAM_PREALLOC;
        new_block->flags |= RAM_RESIZEABLE;
    addr = ram_block_add(new_block, &local_err);
        error_propagate(errp, local_err);

ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr, Error **errp)
    return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);

ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
    return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);

ram_addr_t qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
                                     void (*resized)(const char*,
                                     MemoryRegion *mr, Error **errp)
    return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
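
/* Usage sketch (illustrative only): the wrappers above differ solely in
 * which qemu_ram_alloc_internal() arguments they pin down.  A caller that
 * wants plain, fixed-size RAM backing a MemoryRegion might do something
 * like:
 *
 *     Error *err = NULL;
 *     ram_addr_t offset = qemu_ram_alloc(16 * 1024 * 1024, mr, &err);
 *     if (err) {
 *         error_report_err(err);
 *     }
 *
 * while qemu_ram_alloc_resizeable() additionally takes a maximum size and a
 * resize callback so qemu_ram_resize() can grow used_length later.
 */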
static void reclaim_ramblock(RAMBlock *block)
    if (block->flags & RAM_PREALLOC) {
    } else if (xen_enabled()) {
        xen_invalidate_map_cache_entry(block->host);
    } else if (block->fd >= 0) {
        qemu_ram_munmap(block->host, block->max_length);
        qemu_anon_ram_free(block->host, block->max_length);

void qemu_ram_free(ram_addr_t addr)
    qemu_mutex_lock_ramlist();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE_RCU(block, next);
            ram_list.mru_block = NULL;
            /* Write list before version */
            call_rcu(block, reclaim_ramblock, rcu);
    qemu_mutex_unlock_ramlist();

void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->max_length) {
            vaddr = ramblock_ptr(block, offset);
            if (block->flags & RAM_PREALLOC) {
            } else if (xen_enabled()) {
                if (block->fd >= 0) {
                    flags |= (block->flags & RAM_SHARED ?
                              MAP_SHARED : MAP_PRIVATE);
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, block->fd, offset);
                     * Remap needs to match alloc.  Accelerators that
                     * set phys_mem_alloc never remap.  If they did,
                     * we'd need a remap hook here.
                    assert(phys_mem_alloc == qemu_anon_ram_alloc);

                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                memory_try_enable_merging(vaddr, length);
                qemu_ram_setup_dump(vaddr, length);

#endif /* !_WIN32 */

int qemu_get_ram_fd(ram_addr_t addr)
    block = qemu_get_ram_block(addr);

void qemu_set_ram_fd(ram_addr_t addr, int fd)
    block = qemu_get_ram_block(addr);

void *qemu_get_ram_block_host_ptr(ram_addr_t addr)
    block = qemu_get_ram_block(addr);
    ptr = ramblock_ptr(block, 0);

/* Return a host pointer to ram allocated with qemu_ram_alloc.
 * This should not be used for general purpose DMA.  Use address_space_map
 * or address_space_rw instead. For local memory (e.g. video ram) that the
 * device owns, use memory_region_get_ram_ptr.
 * Called within RCU critical section.
void *qemu_get_ram_ptr(ram_addr_t addr)
    RAMBlock *block = qemu_get_ram_block(addr);

    if (xen_enabled() && block->host == NULL) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map until the end of the page.
        if (block->offset == 0) {
            return xen_map_cache(addr, 0, 0);

        block->host = xen_map_cache(block->offset, block->max_length, 1);
    return ramblock_ptr(block, addr - block->offset);

/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
 * but takes a size argument.
 * Called within RCU critical section.
static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
    ram_addr_t offset_inside_block;

    block = qemu_get_ram_block(addr);
    offset_inside_block = addr - block->offset;
    *size = MIN(*size, block->max_length - offset_inside_block);

    if (xen_enabled() && block->host == NULL) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map the requested area.
        if (block->offset == 0) {
            return xen_map_cache(addr, *size, 1);

        block->host = xen_map_cache(block->offset, block->max_length, 1);

    return ramblock_ptr(block, offset_inside_block);

 * Translates a host ptr back to a RAMBlock, a ram_addr and an offset
 * ptr: Host pointer to look up
 * round_offset: If true round the result offset down to a page boundary
 * *ram_addr: set to result ram_addr
 * *offset: set to result offset within the RAMBlock
 * Returns: RAMBlock (or NULL if not found)
 * By the time this function returns, the returned pointer is not protected
 * by RCU anymore.  If the caller is not within an RCU critical section and
 * does not hold the iothread lock, it must have other means of protecting the
 * pointer, such as a reference to the region that includes the incoming
RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
                                   ram_addr_t *ram_addr,
    uint8_t *host = ptr;

    if (xen_enabled()) {
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
        block = qemu_get_ram_block(*ram_addr);
            *offset = (host - block->host);

    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && block->host && host - block->host < block->max_length) {

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        /* This case appears when the block is not mapped. */
        if (block->host == NULL) {
        if (host - block->host < block->max_length) {

    *offset = (host - block->host);
        *offset &= TARGET_PAGE_MASK;
    *ram_addr = block->offset + *offset;

 * Finds the named RAMBlock
 * name: The name of RAMBlock to find
 * Returns: RAMBlock (or NULL if not found)
RAMBlock *qemu_ram_block_by_name(const char *name)
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (!strcmp(name, block->idstr)) {

/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
    ram_addr_t offset; /* Not used */

    block = qemu_ram_block_from_host(ptr, false, ram_addr, &offset);

/* Called within RCU critical section. */
static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
                               uint64_t val, unsigned size)
    if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
        tb_invalidate_phys_page_fast(ram_addr, size);
        stb_p(qemu_get_ram_ptr(ram_addr), val);
        stw_p(qemu_get_ram_ptr(ram_addr), val);
        stl_p(qemu_get_ram_ptr(ram_addr), val);
    /* Set both VGA and migration bits for simplicity and to remove
     * the notdirty callback faster.
    cpu_physical_memory_set_dirty_range(ram_addr, size,
                                        DIRTY_CLIENTS_NOCODE);
    /* we remove the notdirty callback only if the code has been
    if (!cpu_physical_memory_is_clean(ram_addr)) {
        tlb_set_dirty(current_cpu, current_cpu->mem_io_vaddr);

static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
                                 unsigned size, bool is_write)

static const MemoryRegionOps notdirty_mem_ops = {
    .write = notdirty_mem_write,
    .valid.accepts = notdirty_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,

/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
    CPUState *cpu = current_cpu;
    CPUArchState *env = cpu->env_ptr;
    target_ulong pc, cs_base;

    if (cpu->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);

    vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, len)
            && (wp->flags & flags)) {
            if (flags == BP_MEM_READ) {
                wp->flags |= BP_WATCHPOINT_HIT_READ;
                wp->flags |= BP_WATCHPOINT_HIT_WRITE;
            wp->hitaddr = vaddr;
            wp->hitattrs = attrs;
            if (!cpu->watchpoint_hit) {
                cpu->watchpoint_hit = wp;
                tb_check_watchpoint(cpu);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    cpu->exception_index = EXCP_DEBUG;
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
                    cpu_resume_from_signal(cpu, NULL);
            wp->flags &= ~BP_WATCHPOINT_HIT;

/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata,
                                  unsigned size, MemTxAttrs attrs)
    check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ);
        data = address_space_ldub(&address_space_memory, addr, attrs, &res);
        data = address_space_lduw(&address_space_memory, addr, attrs, &res);
        data = address_space_ldl(&address_space_memory, addr, attrs, &res);

static MemTxResult watch_mem_write(void *opaque, hwaddr addr,
                                   uint64_t val, unsigned size,
    check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE);
        address_space_stb(&address_space_memory, addr, val, attrs, &res);
        address_space_stw(&address_space_memory, addr, val, attrs, &res);
        address_space_stl(&address_space_memory, addr, val, attrs, &res);

static const MemoryRegionOps watch_mem_ops = {
    .read_with_attrs = watch_mem_read,
    .write_with_attrs = watch_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,

static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
                                unsigned len, MemTxAttrs attrs)
    subpage_t *subpage = opaque;

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
           subpage, len, addr);
    res = address_space_read(subpage->as, addr + subpage->base,
        *data = ldub_p(buf);
        *data = lduw_p(buf);

static MemTxResult subpage_write(void *opaque, hwaddr addr,
                                 uint64_t value, unsigned len, MemTxAttrs attrs)
    subpage_t *subpage = opaque;

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx
           " value %"PRIx64"\n",
           __func__, subpage, len, addr, value);
    return address_space_write(subpage->as, addr + subpage->base,

static bool subpage_accepts(void *opaque, hwaddr addr,
                            unsigned len, bool is_write)
    subpage_t *subpage = opaque;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
           __func__, subpage, is_write ? 'w' : 'r', len, addr);

    return address_space_access_valid(subpage->as, addr + subpage->base,

static const MemoryRegionOps subpage_ops = {
    .read_with_attrs = subpage_read,
    .write_with_attrs = subpage_write,
    .impl.min_access_size = 1,
    .impl.max_access_size = 8,
    .valid.min_access_size = 1,
    .valid.max_access_size = 8,
    .valid.accepts = subpage_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
           __func__, mmio, start, end, idx, eidx, section);
    for (; idx <= eidx; idx++) {
        mmio->sub_section[idx] = section;

static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
    mmio = g_malloc0(sizeof(subpage_t));

    memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
                          NULL, TARGET_PAGE_SIZE);
    mmio->iomem.subpage = true;
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
           mmio, base, TARGET_PAGE_SIZE);
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
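
/* Worked example (illustrative): with TARGET_PAGE_SIZE == 0x1000, a freshly
 * created subpage maps every sub_section[] slot to PHYS_SECTION_UNASSIGNED.
 * If a device region is later registered for offsets 0x800..0x9ff of that
 * page, register_subpage() ends up calling
 * subpage_register(mmio, 0x800, 0x9ff, section), so the SUBPAGE_IDX() slots
 * 0x800-0x9ff point at the device section while the rest of the page stays
 * unassigned.
 */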
static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
    MemoryRegionSection section = {
        .address_space = as,
        .offset_within_address_space = 0,
        .offset_within_region = 0,
        .size = int128_2_64(),

    return phys_section_add(map, &section);

MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index)
    CPUAddressSpace *cpuas = &cpu->cpu_ases[0];
    AddressSpaceDispatch *d = atomic_rcu_read(&cpuas->memory_dispatch);
    MemoryRegionSection *sections = d->map.sections;

    return sections[index & ~TARGET_PAGE_MASK].mr;

static void io_mem_init(void)
    memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
    memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
    memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
    memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,

static void mem_begin(MemoryListener *listener)
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);

    n = dummy_section(&d->map, as, &io_mem_unassigned);
    assert(n == PHYS_SECTION_UNASSIGNED);
    n = dummy_section(&d->map, as, &io_mem_notdirty);
    assert(n == PHYS_SECTION_NOTDIRTY);
    n = dummy_section(&d->map, as, &io_mem_rom);
    assert(n == PHYS_SECTION_ROM);
    n = dummy_section(&d->map, as, &io_mem_watch);
    assert(n == PHYS_SECTION_WATCH);

    d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
    as->next_dispatch = d;

static void address_space_dispatch_free(AddressSpaceDispatch *d)
    phys_sections_free(&d->map);

static void mem_commit(MemoryListener *listener)
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *cur = as->dispatch;
    AddressSpaceDispatch *next = as->next_dispatch;

    phys_page_compact_all(next, next->map.nodes_nb);

    atomic_rcu_set(&as->dispatch, next);
        call_rcu(cur, address_space_dispatch_free, rcu);

static void tcg_commit(MemoryListener *listener)
    CPUAddressSpace *cpuas;
    AddressSpaceDispatch *d;

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener);
    cpu_reloading_memory_map();
    /* The CPU and TLB are protected by the iothread lock.
     * We reload the dispatch pointer now because cpu_reloading_memory_map()
     * may have split the RCU critical section.
    d = atomic_rcu_read(&cpuas->as->dispatch);
    cpuas->memory_dispatch = d;
    tlb_flush(cpuas->cpu, 1);

void address_space_init_dispatch(AddressSpace *as)
    as->dispatch = NULL;
    as->dispatch_listener = (MemoryListener) {
        .commit = mem_commit,
        .region_add = mem_add,
        .region_nop = mem_add,
    memory_listener_register(&as->dispatch_listener, as);

void address_space_unregister(AddressSpace *as)
    memory_listener_unregister(&as->dispatch_listener);

void address_space_destroy_dispatch(AddressSpace *as)
    AddressSpaceDispatch *d = as->dispatch;

    atomic_rcu_set(&as->dispatch, NULL);
        call_rcu(d, address_space_dispatch_free, rcu);

static void memory_map_init(void)
    system_memory = g_malloc(sizeof(*system_memory));

    memory_region_init(system_memory, NULL, "system", UINT64_MAX);
    address_space_init(&address_space_memory, system_memory, "memory");

    system_io = g_malloc(sizeof(*system_io));
    memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
    address_space_init(&address_space_io, system_io, "I/O");

MemoryRegion *get_system_memory(void)
    return system_memory;

MemoryRegion *get_system_io(void)

#endif /* !defined(CONFIG_USER_ONLY) */

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            if (!(flags & PAGE_WRITE))
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
            unlock_user(p, addr, l);
            if (!(flags & PAGE_READ))
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
            unlock_user(p, addr, 0);

static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
    uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);

    /* No early return if dirty_log_mask is or becomes 0, because
     * cpu_physical_memory_set_dirty_range will still call
     * xen_modified_memory.
    if (dirty_log_mask) {
            cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
    if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
        tb_invalidate_phys_range(addr, addr + length);
        dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
    cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);

static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
    unsigned access_size_max = mr->ops->valid.max_access_size;

    /* Regions are assumed to support 1-4 byte accesses unless
       otherwise specified.  */
    if (access_size_max == 0) {
        access_size_max = 4;

    /* Bound the maximum access by the alignment of the address.  */
    if (!mr->ops->impl.unaligned) {
        unsigned align_size_max = addr & -addr;
        if (align_size_max != 0 && align_size_max < access_size_max) {
            access_size_max = align_size_max;

    /* Don't attempt accesses larger than the maximum.  */
    if (l > access_size_max) {
        l = access_size_max;
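
/* Worked example (illustrative): for a region whose ops declare
 * valid.max_access_size == 4 and no unaligned support, a request of l == 8
 * bytes at addr == 0x1006 is capped to 4 by the declared maximum and then to
 * 2, because addr & -addr == 2 is the largest natural alignment of that
 * address; the caller issues the remaining bytes as further accesses.
 */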
2447 static bool prepare_mmio_access(MemoryRegion
*mr
)
2449 bool unlocked
= !qemu_mutex_iothread_locked();
2450 bool release_lock
= false;
2452 if (unlocked
&& mr
->global_locking
) {
2453 qemu_mutex_lock_iothread();
2455 release_lock
= true;
2457 if (mr
->flush_coalesced_mmio
) {
2459 qemu_mutex_lock_iothread();
2461 qemu_flush_coalesced_mmio_buffer();
2463 qemu_mutex_unlock_iothread();
2467 return release_lock
;
/* Called within RCU critical section.  */
static MemTxResult address_space_write_continue(AddressSpace *as, hwaddr addr,
                                                MemTxAttrs attrs,
                                                const uint8_t *buf,
                                                int len, hwaddr addr1,
                                                hwaddr l, MemoryRegion *mr)
{
    uint8_t *ptr;
    uint64_t val;
    MemTxResult result = MEMTX_OK;
    bool release_lock = false;

    for (;;) {
        if (!memory_access_is_direct(mr, true)) {
            release_lock |= prepare_mmio_access(mr);
            l = memory_access_size(mr, l, addr1);
            /* XXX: could force current_cpu to NULL to avoid
               potential bugs */
            switch (l) {
            case 8:
                /* 64 bit write access */
                val = ldq_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 8,
                                                       attrs);
                break;
            case 4:
                /* 32 bit write access */
                val = ldl_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 4,
                                                       attrs);
                break;
            case 2:
                /* 16 bit write access */
                val = lduw_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 2,
                                                       attrs);
                break;
            case 1:
                /* 8 bit write access */
                val = ldub_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 1,
                                                       attrs);
                break;
            default:
                abort();
            }
        } else {
            addr1 += memory_region_get_ram_addr(mr);
            /* RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
            invalidate_and_set_dirty(mr, addr1, l);
        }

        if (release_lock) {
            qemu_mutex_unlock_iothread();
            release_lock = false;
        }

        len -= l;
        buf += l;
        addr += l;

        if (!len) {
            break;
        }

        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);
    }

    return result;
}
MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
                                const uint8_t *buf, int len)
{
    hwaddr l;
    hwaddr addr1;
    MemoryRegion *mr;
    MemTxResult result = MEMTX_OK;

    if (len > 0) {
        rcu_read_lock();
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);
        result = address_space_write_continue(as, addr, attrs, buf, len,
                                              addr1, l, mr);
        rcu_read_unlock();
    }

    return result;
}
/* Called within RCU critical section.  */
MemTxResult address_space_read_continue(AddressSpace *as, hwaddr addr,
                                        MemTxAttrs attrs, uint8_t *buf,
                                        int len, hwaddr addr1, hwaddr l,
                                        MemoryRegion *mr)
{
    uint8_t *ptr;
    uint64_t val;
    MemTxResult result = MEMTX_OK;
    bool release_lock = false;

    for (;;) {
        if (!memory_access_is_direct(mr, false)) {
            /* I/O case */
            release_lock |= prepare_mmio_access(mr);
            l = memory_access_size(mr, l, addr1);
            switch (l) {
            case 8:
                /* 64 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 8,
                                                      attrs);
                stq_p(buf, val);
                break;
            case 4:
                /* 32 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 4,
                                                      attrs);
                stl_p(buf, val);
                break;
            case 2:
                /* 16 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 2,
                                                      attrs);
                stw_p(buf, val);
                break;
            case 1:
                /* 8 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 1,
                                                      attrs);
                stb_p(buf, val);
                break;
            default:
                abort();
            }
        } else {
            /* RAM case */
            ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
            memcpy(buf, ptr, l);
        }

        if (release_lock) {
            qemu_mutex_unlock_iothread();
            release_lock = false;
        }

        len -= l;
        buf += l;
        addr += l;

        if (!len) {
            break;
        }

        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, false);
    }

    return result;
}
MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
                                    MemTxAttrs attrs, uint8_t *buf, int len)
{
    hwaddr l;
    hwaddr addr1;
    MemoryRegion *mr;
    MemTxResult result = MEMTX_OK;

    if (len > 0) {
        rcu_read_lock();
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, false);
        result = address_space_read_continue(as, addr, attrs, buf, len,
                                             addr1, l, mr);
        rcu_read_unlock();
    }

    return result;
}
MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
                             uint8_t *buf, int len, bool is_write)
{
    if (is_write) {
        return address_space_write(as, addr, attrs, (uint8_t *)buf, len);
    } else {
        return address_space_read(as, addr, attrs, (uint8_t *)buf, len);
    }
}

void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
                            int len, int is_write)
{
    address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
                     buf, len, is_write);
}
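/*
 * Illustrative sketch (not compiled): reading guest physical memory through
 * address_space_rw() and checking the transaction result.  The address and
 * length are made up for the example.
 */
#if 0
static void example_read_guest_ram(void)
{
    uint8_t data[64];
    MemTxResult r;

    r = address_space_rw(&address_space_memory, 0x1000,
                         MEMTXATTRS_UNSPECIFIED, data, sizeof(data),
                         false /* is_write */);
    if (r != MEMTX_OK) {
        /* the access hit unassigned memory or a device that failed it */
    }
}
#endif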
enum write_rom_type {
    WRITE_DATA,
    FLUSH_CACHE,
};

static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
    hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
{
    hwaddr l;
    uint8_t *ptr;
    hwaddr addr1;
    MemoryRegion *mr;

    rcu_read_lock();
    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);

        if (!(memory_region_is_ram(mr) ||
              memory_region_is_romd(mr))) {
            l = memory_access_size(mr, l, addr1);
        } else {
            addr1 += memory_region_get_ram_addr(mr);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            switch (type) {
            case WRITE_DATA:
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(mr, addr1, l);
                break;
            case FLUSH_CACHE:
                flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
                break;
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
    rcu_read_unlock();
}
/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
                                   const uint8_t *buf, int len)
{
    cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
}
void cpu_flush_icache_range(hwaddr start, int len)
{
    /*
     * This function should do the same thing as an icache flush that was
     * triggered from within the guest. For TCG we are always cache coherent,
     * so there is no need to flush anything. For KVM / Xen we need to flush
     * the host's instruction cache at least.
     */
    if (tcg_enabled()) {
        return;
    }
    cpu_physical_memory_write_rom_internal(&address_space_memory,
                                           start, NULL, len, FLUSH_CACHE);
}
typedef struct {
    MemoryRegion *mr;
    void *buffer;
    hwaddr addr;
    hwaddr len;
    bool in_use;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    QEMUBH *bh;
    QLIST_ENTRY(MapClient) link;
} MapClient;

QemuMutex map_client_list_lock;
static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);
static void cpu_unregister_map_client_do(MapClient *client)
{
    QLIST_REMOVE(client, link);
    g_free(client);
}

static void cpu_notify_map_clients_locked(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        qemu_bh_schedule(client->bh);
        cpu_unregister_map_client_do(client);
    }
}

void cpu_register_map_client(QEMUBH *bh)
{
    MapClient *client = g_malloc(sizeof(*client));

    qemu_mutex_lock(&map_client_list_lock);
    client->bh = bh;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    if (!atomic_read(&bounce.in_use)) {
        cpu_notify_map_clients_locked();
    }
    qemu_mutex_unlock(&map_client_list_lock);
}
void cpu_exec_init_all(void)
{
    qemu_mutex_init(&ram_list.mutex);
    io_mem_init();
    memory_map_init();
    qemu_mutex_init(&map_client_list_lock);
}
void cpu_unregister_map_client(QEMUBH *bh)
{
    MapClient *client;

    qemu_mutex_lock(&map_client_list_lock);
    QLIST_FOREACH(client, &map_client_list, link) {
        if (client->bh == bh) {
            cpu_unregister_map_client_do(client);
            break;
        }
    }
    qemu_mutex_unlock(&map_client_list_lock);
}

static void cpu_notify_map_clients(void)
{
    qemu_mutex_lock(&map_client_list_lock);
    cpu_notify_map_clients_locked();
    qemu_mutex_unlock(&map_client_list_lock);
}
bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
{
    MemoryRegion *mr;
    hwaddr l, xlat;

    rcu_read_lock();
    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (!memory_access_is_direct(mr, is_write)) {
            l = memory_access_size(mr, l, addr);
            if (!memory_region_access_valid(mr, xlat, l, is_write)) {
                return false;
            }
        }

        len -= l;
        addr += l;
    }
    rcu_read_unlock();
    return true;
}
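/*
 * Illustrative sketch (not compiled): a device model can probe a
 * guest-supplied DMA window with address_space_access_valid() before
 * committing to the transfer.  The helper name is made up.
 */
#if 0
static bool example_dma_window_ok(hwaddr base, int size)
{
    return address_space_access_valid(&address_space_memory, base, size,
                                      true /* is_write */);
}
#endif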
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *address_space_map(AddressSpace *as,
                        hwaddr addr,
                        hwaddr *plen,
                        bool is_write)
{
    hwaddr len = *plen;
    hwaddr done = 0;
    hwaddr l, xlat, base;
    MemoryRegion *mr, *this_mr;
    ram_addr_t raddr;
    void *ptr;

    if (len == 0) {
        return NULL;
    }

    l = len;
    rcu_read_lock();
    mr = address_space_translate(as, addr, &xlat, &l, is_write);

    if (!memory_access_is_direct(mr, is_write)) {
        if (atomic_xchg(&bounce.in_use, true)) {
            rcu_read_unlock();
            return NULL;
        }
        /* Avoid unbounded allocations */
        l = MIN(l, TARGET_PAGE_SIZE);
        bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
        bounce.addr = addr;
        bounce.len = l;

        memory_region_ref(mr);
        bounce.mr = mr;
        if (!is_write) {
            address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
                               bounce.buffer, l);
        }

        rcu_read_unlock();
        *plen = l;
        return bounce.buffer;
    }

    base = xlat;
    raddr = memory_region_get_ram_addr(mr);

    for (;;) {
        len -= l;
        addr += l;
        done += l;
        if (len == 0) {
            break;
        }

        l = len;
        this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (this_mr != mr || xlat != base + done) {
            break;
        }
    }

    memory_region_ref(mr);
    *plen = done;
    ptr = qemu_ram_ptr_length(raddr + base, plen);
    rcu_read_unlock();

    return ptr;
}
/* Unmaps a memory region previously mapped by address_space_map().
 * Will also mark the memory as dirty if is_write == 1. access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         int is_write, hwaddr access_len)
{
    if (buffer != bounce.buffer) {
        MemoryRegion *mr;
        ram_addr_t addr1;

        mr = qemu_ram_addr_from_host(buffer, &addr1);
        assert(mr != NULL);
        if (is_write) {
            invalidate_and_set_dirty(mr, addr1, access_len);
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        memory_region_unref(mr);
        return;
    }
    if (is_write) {
        address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
                            bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    memory_region_unref(bounce.mr);
    atomic_mb_set(&bounce.in_use, false);
    cpu_notify_map_clients();
}
void *cpu_physical_memory_map(hwaddr addr,
                              hwaddr *plen,
                              int is_write)
{
    return address_space_map(&address_space_memory, addr, plen, is_write);
}

void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                               int is_write, hwaddr access_len)
{
    return address_space_unmap(&address_space_memory, buffer, len,
                               is_write, access_len);
}
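/*
 * Illustrative sketch (not compiled): the usual map/use/unmap pattern for
 * zero-copy access to guest memory.  A NULL return means resources (e.g.
 * the single bounce buffer) are exhausted; callers then register a QEMUBH
 * with cpu_register_map_client() and retry when it fires.  Names and sizes
 * are made up for the example.
 */
#if 0
static void example_zero_fill_guest(hwaddr gpa, hwaddr size)
{
    hwaddr mapped = size;
    void *host = cpu_physical_memory_map(gpa, &mapped, 1 /* is_write */);

    if (!host) {
        return;                       /* retry later via a map client */
    }
    memset(host, 0, mapped);          /* note: mapped may be < size */
    cpu_physical_memory_unmap(host, mapped, 1, mapped);
}
#endif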
/* warning: addr must be aligned */
static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
                                                  MemTxAttrs attrs,
                                                  MemTxResult *result,
                                                  enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, false);
    if (l < 4 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}
uint32_t address_space_ldl(AddressSpace *as, hwaddr addr,
                           MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_NATIVE_ENDIAN);
}

uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_LITTLE_ENDIAN);
}

uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_BIG_ENDIAN);
}

uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}
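/*
 * Illustrative sketch (not compiled): loading a 32-bit little-endian value
 * from guest physical memory while checking the MemTxResult, instead of
 * using the NULL-result ldl_le_phys() convenience wrapper.  The error value
 * is made up for the example.
 */
#if 0
static uint32_t example_read_le32(hwaddr gpa)
{
    MemTxResult res;
    uint32_t v = address_space_ldl_le(&address_space_memory, gpa,
                                      MEMTXATTRS_UNSPECIFIED, &res);

    return (res == MEMTX_OK) ? v : 0xffffffff;
}
#endif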
/* warning: addr must be aligned */
static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
                                                  MemTxAttrs attrs,
                                                  MemTxResult *result,
                                                  enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 8;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 false);
    if (l < 8 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap64(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap64(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}
uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
                           MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_NATIVE_ENDIAN);
}

uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_LITTLE_ENDIAN);
}

uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_BIG_ENDIAN);
}

uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}
uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
                            MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t val;
    MemTxResult r;

    r = address_space_rw(as, addr, attrs, &val, 1, 0);
    if (result) {
        *result = r;
    }
    return val;
}

uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}
/* warning: addr must be aligned */
static inline uint32_t address_space_lduw_internal(AddressSpace *as,
                                                   hwaddr addr,
                                                   MemTxAttrs attrs,
                                                   MemTxResult *result,
                                                   enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 false);
    if (l < 2 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}
uint32_t address_space_lduw(AddressSpace *as, hwaddr addr,
                            MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_NATIVE_ENDIAN);
}

uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
                               MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_LITTLE_ENDIAN);
}

uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
                               MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_BIG_ENDIAN);
}

uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}
/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
                                MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    uint8_t dirty_log_mask;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

        r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
    } else {
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        dirty_log_mask = memory_region_get_dirty_log_mask(mr);
        dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
        cpu_physical_memory_set_dirty_range(addr1, 4, dirty_log_mask);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}

void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_notdirty(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
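/*
 * Illustrative sketch (not compiled): target MMU code typically uses
 * stl_phys_notdirty() when it rewrites a guest page table entry (for
 * example to set an accessed bit), since dirty tracking and TB invalidation
 * for that store would be wasted work.  The bit value is made up.
 */
#if 0
static void example_set_pte_accessed(CPUState *cpu, hwaddr pte_gpa,
                                     uint32_t pte)
{
    stl_phys_notdirty(cpu->as, pte_gpa, pte | 0x20 /* hypothetical A bit */);
}
#endif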
/* warning: addr must be aligned */
static inline void address_space_stl_internal(AddressSpace *as,
                                              hwaddr addr, uint32_t val,
                                              MemTxAttrs attrs,
                                              MemTxResult *result,
                                              enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(mr, addr1, 4);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}
void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stl_internal(as, addr, val, attrs, result,
                               DEVICE_NATIVE_ENDIAN);
}

void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stl_internal(as, addr, val, attrs, result,
                               DEVICE_LITTLE_ENDIAN);
}

void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stl_internal(as, addr, val, attrs, result,
                               DEVICE_BIG_ENDIAN);
}

void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t v = val;
    MemTxResult r;

    r = address_space_rw(as, addr, attrs, &v, 1, 1);
    if (result) {
        *result = r;
    }
}

void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
/* warning: addr must be aligned */
static inline void address_space_stw_internal(AddressSpace *as,
                                              hwaddr addr, uint32_t val,
                                              MemTxAttrs attrs,
                                              MemTxResult *result,
                                              enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, true);
    if (l < 2 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(mr, addr1, 2);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}
void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_NATIVE_ENDIAN);
}

void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_LITTLE_ENDIAN);
}

void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_BIG_ENDIAN);
}

void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;

    val = tswap64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;

    val = cpu_to_le64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;

    val = cpu_to_be64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(cpu, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write) {
            cpu_physical_memory_write_rom(cpu->as, phys_addr, buf, l);
        } else {
            address_space_rw(cpu->as, phys_addr, MEMTXATTRS_UNSPECIFIED,
                             buf, l, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
#endif

/*
 * Allows code that needs to deal with migration bitmaps etc to still be built
 * target independent.
 */
size_t qemu_target_page_bits(void)
{
    return TARGET_PAGE_BITS;
}

/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool target_words_bigendian(void);
bool target_words_bigendian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}
#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegion *mr;
    hwaddr l = 1;
    bool res;

    rcu_read_lock();
    mr = address_space_translate(&address_space_memory,
                                 phys_addr, &phys_addr, &l, false);

    res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
    rcu_read_unlock();
    return res;
}
int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
{
    RAMBlock *block;
    int ret = 0;

    rcu_read_lock();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        ret = func(block->idstr, block->host, block->offset,
                   block->used_length, opaque);