4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 #include "qemu/osdep.h"
20 #include "qapi/error.h"
25 #include "qemu/cutils.h"
27 #include "exec/exec-all.h"
29 #include "hw/qdev-core.h"
30 #if !defined(CONFIG_USER_ONLY)
31 #include "hw/boards.h"
32 #include "hw/xen/xen.h"
34 #include "sysemu/kvm.h"
35 #include "sysemu/sysemu.h"
36 #include "qemu/timer.h"
37 #include "qemu/config-file.h"
38 #include "qemu/error-report.h"
39 #if defined(CONFIG_USER_ONLY)
41 #else /* !CONFIG_USER_ONLY */
43 #include "exec/memory.h"
44 #include "exec/ioport.h"
45 #include "sysemu/dma.h"
46 #include "exec/address-spaces.h"
47 #include "sysemu/xen-mapcache.h"
50 #include "exec/cpu-all.h"
51 #include "qemu/rcu_queue.h"
52 #include "qemu/main-loop.h"
53 #include "translate-all.h"
54 #include "sysemu/replay.h"
56 #include "exec/memory-internal.h"
57 #include "exec/ram_addr.h"
60 #include "migration/vmstate.h"
62 #include "qemu/range.h"
64 #include "qemu/mmap-alloc.h"
67 //#define DEBUG_SUBPAGE
69 #if !defined(CONFIG_USER_ONLY)
70 /* ram_list is read under rcu_read_lock()/rcu_read_unlock(). Writes
71 * are protected by the ramlist lock.
73 RAMList ram_list
= { .blocks
= QLIST_HEAD_INITIALIZER(ram_list
.blocks
) };
75 static MemoryRegion
*system_memory
;
76 static MemoryRegion
*system_io
;
78 AddressSpace address_space_io
;
79 AddressSpace address_space_memory
;
81 MemoryRegion io_mem_rom
, io_mem_notdirty
;
82 static MemoryRegion io_mem_unassigned
;
84 /* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
85 #define RAM_PREALLOC (1 << 0)
87 /* RAM is mmap-ed with MAP_SHARED */
88 #define RAM_SHARED (1 << 1)
90 /* Only a portion of RAM (used_length) is actually used, and migrated.
91 * This used_length size can change across reboots.
93 #define RAM_RESIZEABLE (1 << 2)
97 struct CPUTailQ cpus
= QTAILQ_HEAD_INITIALIZER(cpus
);
98 /* current CPU in the current thread. It is only valid inside
100 __thread CPUState
*current_cpu
;
101 /* 0 = Do not count executed instructions.
102 1 = Precise instruction counting.
103 2 = Adaptive rate instruction counting. */
106 #if !defined(CONFIG_USER_ONLY)
108 typedef struct PhysPageEntry PhysPageEntry
;
110 struct PhysPageEntry
{
111 /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */
113 /* index into phys_sections (!skip) or phys_map_nodes (skip) */
117 #define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)
119 /* Size of the L2 (and L3, etc) page tables. */
120 #define ADDR_SPACE_BITS 64
123 #define P_L2_SIZE (1 << P_L2_BITS)
125 #define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
127 typedef PhysPageEntry Node
[P_L2_SIZE
];
129 typedef struct PhysPageMap
{
132 unsigned sections_nb
;
133 unsigned sections_nb_alloc
;
135 unsigned nodes_nb_alloc
;
137 MemoryRegionSection
*sections
;
140 struct AddressSpaceDispatch
{
143 MemoryRegionSection
*mru_section
;
144 /* This is a multi-level map on the physical address space.
145 * The bottom level has pointers to MemoryRegionSections.
147 PhysPageEntry phys_map
;
152 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
153 typedef struct subpage_t
{
157 uint16_t sub_section
[TARGET_PAGE_SIZE
];
160 #define PHYS_SECTION_UNASSIGNED 0
161 #define PHYS_SECTION_NOTDIRTY 1
162 #define PHYS_SECTION_ROM 2
163 #define PHYS_SECTION_WATCH 3
165 static void io_mem_init(void);
166 static void memory_map_init(void);
167 static void tcg_commit(MemoryListener
*listener
);
169 static MemoryRegion io_mem_watch
;
172 * CPUAddressSpace: all the information a CPU needs about an AddressSpace
173 * @cpu: the CPU whose AddressSpace this is
174 * @as: the AddressSpace itself
175 * @memory_dispatch: its dispatch pointer (cached, RCU protected)
176 * @tcg_as_listener: listener for tracking changes to the AddressSpace
178 struct CPUAddressSpace
{
181 struct AddressSpaceDispatch
*memory_dispatch
;
182 MemoryListener tcg_as_listener
;
187 #if !defined(CONFIG_USER_ONLY)
189 static void phys_map_node_reserve(PhysPageMap
*map
, unsigned nodes
)
191 if (map
->nodes_nb
+ nodes
> map
->nodes_nb_alloc
) {
192 map
->nodes_nb_alloc
= MAX(map
->nodes_nb_alloc
* 2, 16);
193 map
->nodes_nb_alloc
= MAX(map
->nodes_nb_alloc
, map
->nodes_nb
+ nodes
);
194 map
->nodes
= g_renew(Node
, map
->nodes
, map
->nodes_nb_alloc
);
198 static uint32_t phys_map_node_alloc(PhysPageMap
*map
, bool leaf
)
205 ret
= map
->nodes_nb
++;
207 assert(ret
!= PHYS_MAP_NODE_NIL
);
208 assert(ret
!= map
->nodes_nb_alloc
);
210 e
.skip
= leaf
? 0 : 1;
211 e
.ptr
= leaf
? PHYS_SECTION_UNASSIGNED
: PHYS_MAP_NODE_NIL
;
212 for (i
= 0; i
< P_L2_SIZE
; ++i
) {
213 memcpy(&p
[i
], &e
, sizeof(e
));
218 static void phys_page_set_level(PhysPageMap
*map
, PhysPageEntry
*lp
,
219 hwaddr
*index
, hwaddr
*nb
, uint16_t leaf
,
223 hwaddr step
= (hwaddr
)1 << (level
* P_L2_BITS
);
225 if (lp
->skip
&& lp
->ptr
== PHYS_MAP_NODE_NIL
) {
226 lp
->ptr
= phys_map_node_alloc(map
, level
== 0);
228 p
= map
->nodes
[lp
->ptr
];
229 lp
= &p
[(*index
>> (level
* P_L2_BITS
)) & (P_L2_SIZE
- 1)];
231 while (*nb
&& lp
< &p
[P_L2_SIZE
]) {
232 if ((*index
& (step
- 1)) == 0 && *nb
>= step
) {
238 phys_page_set_level(map
, lp
, index
, nb
, leaf
, level
- 1);
244 static void phys_page_set(AddressSpaceDispatch
*d
,
245 hwaddr index
, hwaddr nb
,
248 /* Wildly overreserve - it doesn't matter much. */
249 phys_map_node_reserve(&d
->map
, 3 * P_L2_LEVELS
);
251 phys_page_set_level(&d
->map
, &d
->phys_map
, &index
, &nb
, leaf
, P_L2_LEVELS
- 1);
254 /* Compact a non-leaf page entry. Simply detect that the entry has a single child,
255 * and update our entry so we can skip it and go directly to the destination.
257 static void phys_page_compact(PhysPageEntry
*lp
, Node
*nodes
, unsigned long *compacted
)
259 unsigned valid_ptr
= P_L2_SIZE
;
264 if (lp
->ptr
== PHYS_MAP_NODE_NIL
) {
269 for (i
= 0; i
< P_L2_SIZE
; i
++) {
270 if (p
[i
].ptr
== PHYS_MAP_NODE_NIL
) {
277 phys_page_compact(&p
[i
], nodes
, compacted
);
281 /* We can only compress if there's only one child. */
286 assert(valid_ptr
< P_L2_SIZE
);
288 /* Don't compress if it won't fit in the # of bits we have. */
289 if (lp
->skip
+ p
[valid_ptr
].skip
>= (1 << 3)) {
293 lp
->ptr
= p
[valid_ptr
].ptr
;
294 if (!p
[valid_ptr
].skip
) {
295 /* If our only child is a leaf, make this a leaf. */
296 /* By design, we should have made this node a leaf to begin with so we
297 * should never reach here.
298 * But since it's so simple to handle this, let's do it just in case we
303 lp
->skip
+= p
[valid_ptr
].skip
;
307 static void phys_page_compact_all(AddressSpaceDispatch
*d
, int nodes_nb
)
309 DECLARE_BITMAP(compacted
, nodes_nb
);
311 if (d
->phys_map
.skip
) {
312 phys_page_compact(&d
->phys_map
, d
->map
.nodes
, compacted
);
316 static inline bool section_covers_addr(const MemoryRegionSection
*section
,
319 /* Memory topology clips a memory region to [0, 2^64); size.hi > 0 means
320 * the section must cover the entire address space.
322 return section
->size
.hi
||
323 range_covers_byte(section
->offset_within_address_space
,
324 section
->size
.lo
, addr
);
327 static MemoryRegionSection
*phys_page_find(PhysPageEntry lp
, hwaddr addr
,
328 Node
*nodes
, MemoryRegionSection
*sections
)
331 hwaddr index
= addr
>> TARGET_PAGE_BITS
;
334 for (i
= P_L2_LEVELS
; lp
.skip
&& (i
-= lp
.skip
) >= 0;) {
335 if (lp
.ptr
== PHYS_MAP_NODE_NIL
) {
336 return §ions
[PHYS_SECTION_UNASSIGNED
];
339 lp
= p
[(index
>> (i
* P_L2_BITS
)) & (P_L2_SIZE
- 1)];
342 if (section_covers_addr(§ions
[lp
.ptr
], addr
)) {
343 return §ions
[lp
.ptr
];
345 return §ions
[PHYS_SECTION_UNASSIGNED
];
349 bool memory_region_is_unassigned(MemoryRegion
*mr
)
351 return mr
!= &io_mem_rom
&& mr
!= &io_mem_notdirty
&& !mr
->rom_device
352 && mr
!= &io_mem_watch
;
355 /* Called from RCU critical section */
356 static MemoryRegionSection
*address_space_lookup_region(AddressSpaceDispatch
*d
,
358 bool resolve_subpage
)
360 MemoryRegionSection
*section
= atomic_read(&d
->mru_section
);
364 if (section
&& section
!= &d
->map
.sections
[PHYS_SECTION_UNASSIGNED
] &&
365 section_covers_addr(section
, addr
)) {
368 section
= phys_page_find(d
->phys_map
, addr
, d
->map
.nodes
,
372 if (resolve_subpage
&& section
->mr
->subpage
) {
373 subpage
= container_of(section
->mr
, subpage_t
, iomem
);
374 section
= &d
->map
.sections
[subpage
->sub_section
[SUBPAGE_IDX(addr
)]];
377 atomic_set(&d
->mru_section
, section
);
382 /* Called from RCU critical section */
383 static MemoryRegionSection
*
384 address_space_translate_internal(AddressSpaceDispatch
*d
, hwaddr addr
, hwaddr
*xlat
,
385 hwaddr
*plen
, bool resolve_subpage
)
387 MemoryRegionSection
*section
;
391 section
= address_space_lookup_region(d
, addr
, resolve_subpage
);
392 /* Compute offset within MemoryRegionSection */
393 addr
-= section
->offset_within_address_space
;
395 /* Compute offset within MemoryRegion */
396 *xlat
= addr
+ section
->offset_within_region
;
400 /* MMIO registers can be expected to perform full-width accesses based only
401 * on their address, without considering adjacent registers that could
402 * decode to completely different MemoryRegions. When such registers
403 * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
404 * regions overlap wildly. For this reason we cannot clamp the accesses
407 * If the length is small (as is the case for address_space_ldl/stl),
408 * everything works fine. If the incoming length is large, however,
409 * the caller really has to do the clamping through memory_access_size.
411 if (memory_region_is_ram(mr
)) {
412 diff
= int128_sub(section
->size
, int128_make64(addr
));
413 *plen
= int128_get64(int128_min(diff
, int128_make64(*plen
)));
418 /* Called from RCU critical section */
419 MemoryRegion
*address_space_translate(AddressSpace
*as
, hwaddr addr
,
420 hwaddr
*xlat
, hwaddr
*plen
,
424 MemoryRegionSection
*section
;
428 AddressSpaceDispatch
*d
= atomic_rcu_read(&as
->dispatch
);
429 section
= address_space_translate_internal(d
, addr
, &addr
, plen
, true);
432 if (!mr
->iommu_ops
) {
436 iotlb
= mr
->iommu_ops
->translate(mr
, addr
, is_write
);
437 addr
= ((iotlb
.translated_addr
& ~iotlb
.addr_mask
)
438 | (addr
& iotlb
.addr_mask
));
439 *plen
= MIN(*plen
, (addr
| iotlb
.addr_mask
) - addr
+ 1);
440 if (!(iotlb
.perm
& (1 << is_write
))) {
441 mr
= &io_mem_unassigned
;
445 as
= iotlb
.target_as
;
448 if (xen_enabled() && memory_access_is_direct(mr
, is_write
)) {
449 hwaddr page
= ((addr
& TARGET_PAGE_MASK
) + TARGET_PAGE_SIZE
) - addr
;
450 *plen
= MIN(page
, *plen
);
457 /* Called from RCU critical section */
458 MemoryRegionSection
*
459 address_space_translate_for_iotlb(CPUState
*cpu
, int asidx
, hwaddr addr
,
460 hwaddr
*xlat
, hwaddr
*plen
)
462 MemoryRegionSection
*section
;
463 AddressSpaceDispatch
*d
= cpu
->cpu_ases
[asidx
].memory_dispatch
;
465 section
= address_space_translate_internal(d
, addr
, xlat
, plen
, false);
467 assert(!section
->mr
->iommu_ops
);
472 #if !defined(CONFIG_USER_ONLY)
474 static int cpu_common_post_load(void *opaque
, int version_id
)
476 CPUState
*cpu
= opaque
;
478 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
479 version_id is increased. */
480 cpu
->interrupt_request
&= ~0x01;
486 static int cpu_common_pre_load(void *opaque
)
488 CPUState
*cpu
= opaque
;
490 cpu
->exception_index
= -1;
495 static bool cpu_common_exception_index_needed(void *opaque
)
497 CPUState
*cpu
= opaque
;
499 return tcg_enabled() && cpu
->exception_index
!= -1;
502 static const VMStateDescription vmstate_cpu_common_exception_index
= {
503 .name
= "cpu_common/exception_index",
505 .minimum_version_id
= 1,
506 .needed
= cpu_common_exception_index_needed
,
507 .fields
= (VMStateField
[]) {
508 VMSTATE_INT32(exception_index
, CPUState
),
509 VMSTATE_END_OF_LIST()
513 static bool cpu_common_crash_occurred_needed(void *opaque
)
515 CPUState
*cpu
= opaque
;
517 return cpu
->crash_occurred
;
520 static const VMStateDescription vmstate_cpu_common_crash_occurred
= {
521 .name
= "cpu_common/crash_occurred",
523 .minimum_version_id
= 1,
524 .needed
= cpu_common_crash_occurred_needed
,
525 .fields
= (VMStateField
[]) {
526 VMSTATE_BOOL(crash_occurred
, CPUState
),
527 VMSTATE_END_OF_LIST()
531 const VMStateDescription vmstate_cpu_common
= {
532 .name
= "cpu_common",
534 .minimum_version_id
= 1,
535 .pre_load
= cpu_common_pre_load
,
536 .post_load
= cpu_common_post_load
,
537 .fields
= (VMStateField
[]) {
538 VMSTATE_UINT32(halted
, CPUState
),
539 VMSTATE_UINT32(interrupt_request
, CPUState
),
540 VMSTATE_END_OF_LIST()
542 .subsections
= (const VMStateDescription
*[]) {
543 &vmstate_cpu_common_exception_index
,
544 &vmstate_cpu_common_crash_occurred
,
551 CPUState
*qemu_get_cpu(int index
)
556 if (cpu
->cpu_index
== index
) {
564 #if !defined(CONFIG_USER_ONLY)
565 void cpu_address_space_init(CPUState
*cpu
, AddressSpace
*as
, int asidx
)
567 CPUAddressSpace
*newas
;
569 /* Target code should have set num_ases before calling us */
570 assert(asidx
< cpu
->num_ases
);
573 /* address space 0 gets the convenience alias */
577 /* KVM cannot currently support multiple address spaces. */
578 assert(asidx
== 0 || !kvm_enabled());
580 if (!cpu
->cpu_ases
) {
581 cpu
->cpu_ases
= g_new0(CPUAddressSpace
, cpu
->num_ases
);
584 newas
= &cpu
->cpu_ases
[asidx
];
588 newas
->tcg_as_listener
.commit
= tcg_commit
;
589 memory_listener_register(&newas
->tcg_as_listener
, as
);
593 AddressSpace
*cpu_get_address_space(CPUState
*cpu
, int asidx
)
595 /* Return the AddressSpace corresponding to the specified index */
596 return cpu
->cpu_ases
[asidx
].as
;
600 #ifndef CONFIG_USER_ONLY
601 static DECLARE_BITMAP(cpu_index_map
, MAX_CPUMASK_BITS
);
603 static int cpu_get_free_index(Error
**errp
)
605 int cpu
= find_first_zero_bit(cpu_index_map
, MAX_CPUMASK_BITS
);
607 if (cpu
>= MAX_CPUMASK_BITS
) {
608 error_setg(errp
, "Trying to use more CPUs than max of %d",
613 bitmap_set(cpu_index_map
, cpu
, 1);
617 static void cpu_release_index(CPUState
*cpu
)
619 bitmap_clear(cpu_index_map
, cpu
->cpu_index
, 1);
623 static int cpu_get_free_index(Error
**errp
)
628 CPU_FOREACH(some_cpu
) {
634 static void cpu_release_index(CPUState
*cpu
)
640 void cpu_exec_exit(CPUState
*cpu
)
642 CPUClass
*cc
= CPU_GET_CLASS(cpu
);
644 #if defined(CONFIG_USER_ONLY)
647 if (cpu
->cpu_index
== -1) {
648 /* cpu_index was never allocated by this @cpu or was already freed. */
649 #if defined(CONFIG_USER_ONLY)
655 QTAILQ_REMOVE(&cpus
, cpu
, node
);
656 cpu_release_index(cpu
);
658 #if defined(CONFIG_USER_ONLY)
662 if (cc
->vmsd
!= NULL
) {
663 vmstate_unregister(NULL
, cc
->vmsd
, cpu
);
665 if (qdev_get_vmsd(DEVICE(cpu
)) == NULL
) {
666 vmstate_unregister(NULL
, &vmstate_cpu_common
, cpu
);
670 void cpu_exec_init(CPUState
*cpu
, Error
**errp
)
672 CPUClass
*cc
= CPU_GET_CLASS(cpu
);
673 Error
*local_err
= NULL
;
678 #ifndef CONFIG_USER_ONLY
679 cpu
->thread_id
= qemu_get_thread_id();
681 /* This is a softmmu CPU object, so create a property for it
682 * so users can wire up its memory. (This can't go in qom/cpu.c
683 * because that file is compiled only once for both user-mode
684 * and system builds.) The default if no link is set up is to use
685 * the system address space.
687 object_property_add_link(OBJECT(cpu
), "memory", TYPE_MEMORY_REGION
,
688 (Object
**)&cpu
->memory
,
689 qdev_prop_allow_set_link_before_realize
,
690 OBJ_PROP_LINK_UNREF_ON_RELEASE
,
692 cpu
->memory
= system_memory
;
693 object_ref(OBJECT(cpu
->memory
));
696 #if defined(CONFIG_USER_ONLY)
699 cpu
->cpu_index
= cpu_get_free_index(&local_err
);
701 error_propagate(errp
, local_err
);
702 #if defined(CONFIG_USER_ONLY)
707 QTAILQ_INSERT_TAIL(&cpus
, cpu
, node
);
708 #if defined(CONFIG_USER_ONLY)
712 if (qdev_get_vmsd(DEVICE(cpu
)) == NULL
) {
713 vmstate_register(NULL
, cpu
->cpu_index
, &vmstate_cpu_common
, cpu
);
715 if (cc
->vmsd
!= NULL
) {
716 vmstate_register(NULL
, cpu
->cpu_index
, cc
->vmsd
, cpu
);
721 #if defined(CONFIG_USER_ONLY)
722 static void breakpoint_invalidate(CPUState
*cpu
, target_ulong pc
)
724 tb_invalidate_phys_page_range(pc
, pc
+ 1, 0);
727 static void breakpoint_invalidate(CPUState
*cpu
, target_ulong pc
)
730 hwaddr phys
= cpu_get_phys_page_attrs_debug(cpu
, pc
, &attrs
);
731 int asidx
= cpu_asidx_from_attrs(cpu
, attrs
);
733 tb_invalidate_phys_addr(cpu
->cpu_ases
[asidx
].as
,
734 phys
| (pc
& ~TARGET_PAGE_MASK
));
739 #if defined(CONFIG_USER_ONLY)
740 void cpu_watchpoint_remove_all(CPUState
*cpu
, int mask
)
745 int cpu_watchpoint_remove(CPUState
*cpu
, vaddr addr
, vaddr len
,
751 void cpu_watchpoint_remove_by_ref(CPUState
*cpu
, CPUWatchpoint
*watchpoint
)
755 int cpu_watchpoint_insert(CPUState
*cpu
, vaddr addr
, vaddr len
,
756 int flags
, CPUWatchpoint
**watchpoint
)
761 /* Add a watchpoint. */
762 int cpu_watchpoint_insert(CPUState
*cpu
, vaddr addr
, vaddr len
,
763 int flags
, CPUWatchpoint
**watchpoint
)
767 /* forbid ranges which are empty or run off the end of the address space */
768 if (len
== 0 || (addr
+ len
- 1) < addr
) {
769 error_report("tried to set invalid watchpoint at %"
770 VADDR_PRIx
", len=%" VADDR_PRIu
, addr
, len
);
773 wp
= g_malloc(sizeof(*wp
));
779 /* keep all GDB-injected watchpoints in front */
780 if (flags
& BP_GDB
) {
781 QTAILQ_INSERT_HEAD(&cpu
->watchpoints
, wp
, entry
);
783 QTAILQ_INSERT_TAIL(&cpu
->watchpoints
, wp
, entry
);
786 tlb_flush_page(cpu
, addr
);
793 /* Remove a specific watchpoint. */
794 int cpu_watchpoint_remove(CPUState
*cpu
, vaddr addr
, vaddr len
,
799 QTAILQ_FOREACH(wp
, &cpu
->watchpoints
, entry
) {
800 if (addr
== wp
->vaddr
&& len
== wp
->len
801 && flags
== (wp
->flags
& ~BP_WATCHPOINT_HIT
)) {
802 cpu_watchpoint_remove_by_ref(cpu
, wp
);
809 /* Remove a specific watchpoint by reference. */
810 void cpu_watchpoint_remove_by_ref(CPUState
*cpu
, CPUWatchpoint
*watchpoint
)
812 QTAILQ_REMOVE(&cpu
->watchpoints
, watchpoint
, entry
);
814 tlb_flush_page(cpu
, watchpoint
->vaddr
);
819 /* Remove all matching watchpoints. */
820 void cpu_watchpoint_remove_all(CPUState
*cpu
, int mask
)
822 CPUWatchpoint
*wp
, *next
;
824 QTAILQ_FOREACH_SAFE(wp
, &cpu
->watchpoints
, entry
, next
) {
825 if (wp
->flags
& mask
) {
826 cpu_watchpoint_remove_by_ref(cpu
, wp
);
831 /* Return true if this watchpoint address matches the specified
832 * access (ie the address range covered by the watchpoint overlaps
833 * partially or completely with the address range covered by the
836 static inline bool cpu_watchpoint_address_matches(CPUWatchpoint
*wp
,
840 /* We know the lengths are non-zero, but a little caution is
841 * required to avoid errors in the case where the range ends
842 * exactly at the top of the address space and so addr + len
843 * wraps round to zero.
845 vaddr wpend
= wp
->vaddr
+ wp
->len
- 1;
846 vaddr addrend
= addr
+ len
- 1;
848 return !(addr
> wpend
|| wp
->vaddr
> addrend
);
853 /* Add a breakpoint. */
854 int cpu_breakpoint_insert(CPUState
*cpu
, vaddr pc
, int flags
,
855 CPUBreakpoint
**breakpoint
)
859 bp
= g_malloc(sizeof(*bp
));
864 /* keep all GDB-injected breakpoints in front */
865 if (flags
& BP_GDB
) {
866 QTAILQ_INSERT_HEAD(&cpu
->breakpoints
, bp
, entry
);
868 QTAILQ_INSERT_TAIL(&cpu
->breakpoints
, bp
, entry
);
871 breakpoint_invalidate(cpu
, pc
);
879 /* Remove a specific breakpoint. */
880 int cpu_breakpoint_remove(CPUState
*cpu
, vaddr pc
, int flags
)
884 QTAILQ_FOREACH(bp
, &cpu
->breakpoints
, entry
) {
885 if (bp
->pc
== pc
&& bp
->flags
== flags
) {
886 cpu_breakpoint_remove_by_ref(cpu
, bp
);
893 /* Remove a specific breakpoint by reference. */
894 void cpu_breakpoint_remove_by_ref(CPUState
*cpu
, CPUBreakpoint
*breakpoint
)
896 QTAILQ_REMOVE(&cpu
->breakpoints
, breakpoint
, entry
);
898 breakpoint_invalidate(cpu
, breakpoint
->pc
);
903 /* Remove all matching breakpoints. */
904 void cpu_breakpoint_remove_all(CPUState
*cpu
, int mask
)
906 CPUBreakpoint
*bp
, *next
;
908 QTAILQ_FOREACH_SAFE(bp
, &cpu
->breakpoints
, entry
, next
) {
909 if (bp
->flags
& mask
) {
910 cpu_breakpoint_remove_by_ref(cpu
, bp
);
915 /* enable or disable single step mode. EXCP_DEBUG is returned by the
916 CPU loop after each instruction */
917 void cpu_single_step(CPUState
*cpu
, int enabled
)
919 if (cpu
->singlestep_enabled
!= enabled
) {
920 cpu
->singlestep_enabled
= enabled
;
922 kvm_update_guest_debug(cpu
, 0);
924 /* must flush all the translated code to avoid inconsistencies */
925 /* XXX: only flush what is necessary */
931 void cpu_abort(CPUState
*cpu
, const char *fmt
, ...)
938 fprintf(stderr
, "qemu: fatal: ");
939 vfprintf(stderr
, fmt
, ap
);
940 fprintf(stderr
, "\n");
941 cpu_dump_state(cpu
, stderr
, fprintf
, CPU_DUMP_FPU
| CPU_DUMP_CCOP
);
942 if (qemu_log_separate()) {
943 qemu_log("qemu: fatal: ");
944 qemu_log_vprintf(fmt
, ap2
);
946 log_cpu_state(cpu
, CPU_DUMP_FPU
| CPU_DUMP_CCOP
);
953 #if defined(CONFIG_USER_ONLY)
955 struct sigaction act
;
956 sigfillset(&act
.sa_mask
);
957 act
.sa_handler
= SIG_DFL
;
958 sigaction(SIGABRT
, &act
, NULL
);
964 #if !defined(CONFIG_USER_ONLY)
965 /* Called from RCU critical section */
966 static RAMBlock
*qemu_get_ram_block(ram_addr_t addr
)
970 block
= atomic_rcu_read(&ram_list
.mru_block
);
971 if (block
&& addr
- block
->offset
< block
->max_length
) {
974 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
975 if (addr
- block
->offset
< block
->max_length
) {
980 fprintf(stderr
, "Bad ram offset %" PRIx64
"\n", (uint64_t)addr
);
984 /* It is safe to write mru_block outside the iothread lock. This
989 * xxx removed from list
993 * call_rcu(reclaim_ramblock, xxx);
996 * atomic_rcu_set is not needed here. The block was already published
997 * when it was placed into the list. Here we're just making an extra
998 * copy of the pointer.
1000 ram_list
.mru_block
= block
;
1004 static void tlb_reset_dirty_range_all(ram_addr_t start
, ram_addr_t length
)
1011 end
= TARGET_PAGE_ALIGN(start
+ length
);
1012 start
&= TARGET_PAGE_MASK
;
1015 block
= qemu_get_ram_block(start
);
1016 assert(block
== qemu_get_ram_block(end
- 1));
1017 start1
= (uintptr_t)ramblock_ptr(block
, start
- block
->offset
);
1019 tlb_reset_dirty(cpu
, start1
, length
);
1024 /* Note: start and end must be within the same ram block. */
1025 bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start
,
1029 DirtyMemoryBlocks
*blocks
;
1030 unsigned long end
, page
;
1037 end
= TARGET_PAGE_ALIGN(start
+ length
) >> TARGET_PAGE_BITS
;
1038 page
= start
>> TARGET_PAGE_BITS
;
1042 blocks
= atomic_rcu_read(&ram_list
.dirty_memory
[client
]);
1044 while (page
< end
) {
1045 unsigned long idx
= page
/ DIRTY_MEMORY_BLOCK_SIZE
;
1046 unsigned long offset
= page
% DIRTY_MEMORY_BLOCK_SIZE
;
1047 unsigned long num
= MIN(end
- page
, DIRTY_MEMORY_BLOCK_SIZE
- offset
);
1049 dirty
|= bitmap_test_and_clear_atomic(blocks
->blocks
[idx
],
1056 if (dirty
&& tcg_enabled()) {
1057 tlb_reset_dirty_range_all(start
, length
);
1063 /* Called from RCU critical section */
1064 hwaddr
memory_region_section_get_iotlb(CPUState
*cpu
,
1065 MemoryRegionSection
*section
,
1067 hwaddr paddr
, hwaddr xlat
,
1069 target_ulong
*address
)
1074 if (memory_region_is_ram(section
->mr
)) {
1076 iotlb
= memory_region_get_ram_addr(section
->mr
) + xlat
;
1077 if (!section
->readonly
) {
1078 iotlb
|= PHYS_SECTION_NOTDIRTY
;
1080 iotlb
|= PHYS_SECTION_ROM
;
1083 AddressSpaceDispatch
*d
;
1085 d
= atomic_rcu_read(§ion
->address_space
->dispatch
);
1086 iotlb
= section
- d
->map
.sections
;
1090 /* Make accesses to pages with watchpoints go via the
1091 watchpoint trap routines. */
1092 QTAILQ_FOREACH(wp
, &cpu
->watchpoints
, entry
) {
1093 if (cpu_watchpoint_address_matches(wp
, vaddr
, TARGET_PAGE_SIZE
)) {
1094 /* Avoid trapping reads of pages with a write breakpoint. */
1095 if ((prot
& PAGE_WRITE
) || (wp
->flags
& BP_MEM_READ
)) {
1096 iotlb
= PHYS_SECTION_WATCH
+ paddr
;
1097 *address
|= TLB_MMIO
;
1105 #endif /* defined(CONFIG_USER_ONLY) */
1107 #if !defined(CONFIG_USER_ONLY)
1109 static int subpage_register (subpage_t
*mmio
, uint32_t start
, uint32_t end
,
1111 static subpage_t
*subpage_init(AddressSpace
*as
, hwaddr base
);
1113 static void *(*phys_mem_alloc
)(size_t size
, uint64_t *align
) =
1114 qemu_anon_ram_alloc
;
1117 * Set a custom physical guest memory allocator.
1118 * Accelerators with unusual needs may need this. Hopefully, we can
1119 * get rid of it eventually.
1121 void phys_mem_set_alloc(void *(*alloc
)(size_t, uint64_t *align
))
1123 phys_mem_alloc
= alloc
;
1126 static uint16_t phys_section_add(PhysPageMap
*map
,
1127 MemoryRegionSection
*section
)
1129 /* The physical section number is ORed with a page-aligned
1130 * pointer to produce the iotlb entries. Thus it should
1131 * never overflow into the page-aligned value.
1133 assert(map
->sections_nb
< TARGET_PAGE_SIZE
);
1135 if (map
->sections_nb
== map
->sections_nb_alloc
) {
1136 map
->sections_nb_alloc
= MAX(map
->sections_nb_alloc
* 2, 16);
1137 map
->sections
= g_renew(MemoryRegionSection
, map
->sections
,
1138 map
->sections_nb_alloc
);
1140 map
->sections
[map
->sections_nb
] = *section
;
1141 memory_region_ref(section
->mr
);
1142 return map
->sections_nb
++;
1145 static void phys_section_destroy(MemoryRegion
*mr
)
1147 bool have_sub_page
= mr
->subpage
;
1149 memory_region_unref(mr
);
1151 if (have_sub_page
) {
1152 subpage_t
*subpage
= container_of(mr
, subpage_t
, iomem
);
1153 object_unref(OBJECT(&subpage
->iomem
));
1158 static void phys_sections_free(PhysPageMap
*map
)
1160 while (map
->sections_nb
> 0) {
1161 MemoryRegionSection
*section
= &map
->sections
[--map
->sections_nb
];
1162 phys_section_destroy(section
->mr
);
1164 g_free(map
->sections
);
1168 static void register_subpage(AddressSpaceDispatch
*d
, MemoryRegionSection
*section
)
1171 hwaddr base
= section
->offset_within_address_space
1173 MemoryRegionSection
*existing
= phys_page_find(d
->phys_map
, base
,
1174 d
->map
.nodes
, d
->map
.sections
);
1175 MemoryRegionSection subsection
= {
1176 .offset_within_address_space
= base
,
1177 .size
= int128_make64(TARGET_PAGE_SIZE
),
1181 assert(existing
->mr
->subpage
|| existing
->mr
== &io_mem_unassigned
);
1183 if (!(existing
->mr
->subpage
)) {
1184 subpage
= subpage_init(d
->as
, base
);
1185 subsection
.address_space
= d
->as
;
1186 subsection
.mr
= &subpage
->iomem
;
1187 phys_page_set(d
, base
>> TARGET_PAGE_BITS
, 1,
1188 phys_section_add(&d
->map
, &subsection
));
1190 subpage
= container_of(existing
->mr
, subpage_t
, iomem
);
1192 start
= section
->offset_within_address_space
& ~TARGET_PAGE_MASK
;
1193 end
= start
+ int128_get64(section
->size
) - 1;
1194 subpage_register(subpage
, start
, end
,
1195 phys_section_add(&d
->map
, section
));
1199 static void register_multipage(AddressSpaceDispatch
*d
,
1200 MemoryRegionSection
*section
)
1202 hwaddr start_addr
= section
->offset_within_address_space
;
1203 uint16_t section_index
= phys_section_add(&d
->map
, section
);
1204 uint64_t num_pages
= int128_get64(int128_rshift(section
->size
,
1208 phys_page_set(d
, start_addr
>> TARGET_PAGE_BITS
, num_pages
, section_index
);
1211 static void mem_add(MemoryListener
*listener
, MemoryRegionSection
*section
)
1213 AddressSpace
*as
= container_of(listener
, AddressSpace
, dispatch_listener
);
1214 AddressSpaceDispatch
*d
= as
->next_dispatch
;
1215 MemoryRegionSection now
= *section
, remain
= *section
;
1216 Int128 page_size
= int128_make64(TARGET_PAGE_SIZE
);
1218 if (now
.offset_within_address_space
& ~TARGET_PAGE_MASK
) {
1219 uint64_t left
= TARGET_PAGE_ALIGN(now
.offset_within_address_space
)
1220 - now
.offset_within_address_space
;
1222 now
.size
= int128_min(int128_make64(left
), now
.size
);
1223 register_subpage(d
, &now
);
1225 now
.size
= int128_zero();
1227 while (int128_ne(remain
.size
, now
.size
)) {
1228 remain
.size
= int128_sub(remain
.size
, now
.size
);
1229 remain
.offset_within_address_space
+= int128_get64(now
.size
);
1230 remain
.offset_within_region
+= int128_get64(now
.size
);
1232 if (int128_lt(remain
.size
, page_size
)) {
1233 register_subpage(d
, &now
);
1234 } else if (remain
.offset_within_address_space
& ~TARGET_PAGE_MASK
) {
1235 now
.size
= page_size
;
1236 register_subpage(d
, &now
);
1238 now
.size
= int128_and(now
.size
, int128_neg(page_size
));
1239 register_multipage(d
, &now
);
1244 void qemu_flush_coalesced_mmio_buffer(void)
1247 kvm_flush_coalesced_mmio_buffer();
1250 void qemu_mutex_lock_ramlist(void)
1252 qemu_mutex_lock(&ram_list
.mutex
);
1255 void qemu_mutex_unlock_ramlist(void)
1257 qemu_mutex_unlock(&ram_list
.mutex
);
1261 static void *file_ram_alloc(RAMBlock
*block
,
1266 bool unlink_on_error
= false;
1268 char *sanitized_name
;
1274 if (kvm_enabled() && !kvm_has_sync_mmu()) {
1276 "host lacks kvm mmu notifiers, -mem-path unsupported");
1281 fd
= open(path
, O_RDWR
);
1283 /* @path names an existing file, use it */
1286 if (errno
== ENOENT
) {
1287 /* @path names a file that doesn't exist, create it */
1288 fd
= open(path
, O_RDWR
| O_CREAT
| O_EXCL
, 0644);
1290 unlink_on_error
= true;
1293 } else if (errno
== EISDIR
) {
1294 /* @path names a directory, create a file there */
1295 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
1296 sanitized_name
= g_strdup(memory_region_name(block
->mr
));
1297 for (c
= sanitized_name
; *c
!= '\0'; c
++) {
1303 filename
= g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path
,
1305 g_free(sanitized_name
);
1307 fd
= mkstemp(filename
);
1315 if (errno
!= EEXIST
&& errno
!= EINTR
) {
1316 error_setg_errno(errp
, errno
,
1317 "can't open backing store %s for guest RAM",
1322 * Try again on EINTR and EEXIST. The latter happens when
1323 * something else creates the file between our two open().
1327 page_size
= qemu_fd_getpagesize(fd
);
1328 block
->mr
->align
= MAX(page_size
, QEMU_VMALLOC_ALIGN
);
1330 if (memory
< page_size
) {
1331 error_setg(errp
, "memory size 0x" RAM_ADDR_FMT
" must be equal to "
1332 "or larger than page size 0x%" PRIx64
,
1337 memory
= ROUND_UP(memory
, page_size
);
1340 * ftruncate is not supported by hugetlbfs in older
1341 * hosts, so don't bother bailing out on errors.
1342 * If anything goes wrong with it under other filesystems,
1345 if (ftruncate(fd
, memory
)) {
1346 perror("ftruncate");
1349 area
= qemu_ram_mmap(fd
, memory
, block
->mr
->align
,
1350 block
->flags
& RAM_SHARED
);
1351 if (area
== MAP_FAILED
) {
1352 error_setg_errno(errp
, errno
,
1353 "unable to map backing store for guest RAM");
1358 os_mem_prealloc(fd
, area
, memory
);
1365 if (unlink_on_error
) {
1375 /* Called with the ramlist lock held. */
1376 static ram_addr_t
find_ram_offset(ram_addr_t size
)
1378 RAMBlock
*block
, *next_block
;
1379 ram_addr_t offset
= RAM_ADDR_MAX
, mingap
= RAM_ADDR_MAX
;
1381 assert(size
!= 0); /* it would hand out same offset multiple times */
1383 if (QLIST_EMPTY_RCU(&ram_list
.blocks
)) {
1387 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1388 ram_addr_t end
, next
= RAM_ADDR_MAX
;
1390 end
= block
->offset
+ block
->max_length
;
1392 QLIST_FOREACH_RCU(next_block
, &ram_list
.blocks
, next
) {
1393 if (next_block
->offset
>= end
) {
1394 next
= MIN(next
, next_block
->offset
);
1397 if (next
- end
>= size
&& next
- end
< mingap
) {
1399 mingap
= next
- end
;
1403 if (offset
== RAM_ADDR_MAX
) {
1404 fprintf(stderr
, "Failed to find gap of requested size: %" PRIu64
"\n",
1412 ram_addr_t
last_ram_offset(void)
1415 ram_addr_t last
= 0;
1418 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1419 last
= MAX(last
, block
->offset
+ block
->max_length
);
1425 static void qemu_ram_setup_dump(void *addr
, ram_addr_t size
)
1429 /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
1430 if (!machine_dump_guest_core(current_machine
)) {
1431 ret
= qemu_madvise(addr
, size
, QEMU_MADV_DONTDUMP
);
1433 perror("qemu_madvise");
1434 fprintf(stderr
, "madvise doesn't support MADV_DONTDUMP, "
1435 "but dump_guest_core=off specified\n");
1440 const char *qemu_ram_get_idstr(RAMBlock
*rb
)
1445 /* Called with iothread lock held. */
1446 void qemu_ram_set_idstr(RAMBlock
*new_block
, const char *name
, DeviceState
*dev
)
1451 assert(!new_block
->idstr
[0]);
1454 char *id
= qdev_get_dev_path(dev
);
1456 snprintf(new_block
->idstr
, sizeof(new_block
->idstr
), "%s/", id
);
1460 pstrcat(new_block
->idstr
, sizeof(new_block
->idstr
), name
);
1463 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1464 if (block
!= new_block
&&
1465 !strcmp(block
->idstr
, new_block
->idstr
)) {
1466 fprintf(stderr
, "RAMBlock \"%s\" already registered, abort!\n",
1474 /* Called with iothread lock held. */
1475 void qemu_ram_unset_idstr(RAMBlock
*block
)
1477 /* FIXME: arch_init.c assumes that this is not called throughout
1478 * migration. Ignore the problem since hot-unplug during migration
1479 * does not work anyway.
1482 memset(block
->idstr
, 0, sizeof(block
->idstr
));
1486 static int memory_try_enable_merging(void *addr
, size_t len
)
1488 if (!machine_mem_merge(current_machine
)) {
1489 /* disabled by the user */
1493 return qemu_madvise(addr
, len
, QEMU_MADV_MERGEABLE
);
1496 /* Only legal before guest might have detected the memory size: e.g. on
1497 * incoming migration, or right after reset.
1499 * As memory core doesn't know how is memory accessed, it is up to
1500 * resize callback to update device state and/or add assertions to detect
1501 * misuse, if necessary.
1503 int qemu_ram_resize(RAMBlock
*block
, ram_addr_t newsize
, Error
**errp
)
1507 newsize
= HOST_PAGE_ALIGN(newsize
);
1509 if (block
->used_length
== newsize
) {
1513 if (!(block
->flags
& RAM_RESIZEABLE
)) {
1514 error_setg_errno(errp
, EINVAL
,
1515 "Length mismatch: %s: 0x" RAM_ADDR_FMT
1516 " in != 0x" RAM_ADDR_FMT
, block
->idstr
,
1517 newsize
, block
->used_length
);
1521 if (block
->max_length
< newsize
) {
1522 error_setg_errno(errp
, EINVAL
,
1523 "Length too large: %s: 0x" RAM_ADDR_FMT
1524 " > 0x" RAM_ADDR_FMT
, block
->idstr
,
1525 newsize
, block
->max_length
);
1529 cpu_physical_memory_clear_dirty_range(block
->offset
, block
->used_length
);
1530 block
->used_length
= newsize
;
1531 cpu_physical_memory_set_dirty_range(block
->offset
, block
->used_length
,
1533 memory_region_set_size(block
->mr
, newsize
);
1534 if (block
->resized
) {
1535 block
->resized(block
->idstr
, newsize
, block
->host
);
1540 /* Called with ram_list.mutex held */
1541 static void dirty_memory_extend(ram_addr_t old_ram_size
,
1542 ram_addr_t new_ram_size
)
1544 ram_addr_t old_num_blocks
= DIV_ROUND_UP(old_ram_size
,
1545 DIRTY_MEMORY_BLOCK_SIZE
);
1546 ram_addr_t new_num_blocks
= DIV_ROUND_UP(new_ram_size
,
1547 DIRTY_MEMORY_BLOCK_SIZE
);
1550 /* Only need to extend if block count increased */
1551 if (new_num_blocks
<= old_num_blocks
) {
1555 for (i
= 0; i
< DIRTY_MEMORY_NUM
; i
++) {
1556 DirtyMemoryBlocks
*old_blocks
;
1557 DirtyMemoryBlocks
*new_blocks
;
1560 old_blocks
= atomic_rcu_read(&ram_list
.dirty_memory
[i
]);
1561 new_blocks
= g_malloc(sizeof(*new_blocks
) +
1562 sizeof(new_blocks
->blocks
[0]) * new_num_blocks
);
1564 if (old_num_blocks
) {
1565 memcpy(new_blocks
->blocks
, old_blocks
->blocks
,
1566 old_num_blocks
* sizeof(old_blocks
->blocks
[0]));
1569 for (j
= old_num_blocks
; j
< new_num_blocks
; j
++) {
1570 new_blocks
->blocks
[j
] = bitmap_new(DIRTY_MEMORY_BLOCK_SIZE
);
1573 atomic_rcu_set(&ram_list
.dirty_memory
[i
], new_blocks
);
1576 g_free_rcu(old_blocks
, rcu
);
1581 static void ram_block_add(RAMBlock
*new_block
, Error
**errp
)
1584 RAMBlock
*last_block
= NULL
;
1585 ram_addr_t old_ram_size
, new_ram_size
;
1588 old_ram_size
= last_ram_offset() >> TARGET_PAGE_BITS
;
1590 qemu_mutex_lock_ramlist();
1591 new_block
->offset
= find_ram_offset(new_block
->max_length
);
1593 if (!new_block
->host
) {
1594 if (xen_enabled()) {
1595 xen_ram_alloc(new_block
->offset
, new_block
->max_length
,
1596 new_block
->mr
, &err
);
1598 error_propagate(errp
, err
);
1599 qemu_mutex_unlock_ramlist();
1603 new_block
->host
= phys_mem_alloc(new_block
->max_length
,
1604 &new_block
->mr
->align
);
1605 if (!new_block
->host
) {
1606 error_setg_errno(errp
, errno
,
1607 "cannot set up guest memory '%s'",
1608 memory_region_name(new_block
->mr
));
1609 qemu_mutex_unlock_ramlist();
1612 memory_try_enable_merging(new_block
->host
, new_block
->max_length
);
1616 new_ram_size
= MAX(old_ram_size
,
1617 (new_block
->offset
+ new_block
->max_length
) >> TARGET_PAGE_BITS
);
1618 if (new_ram_size
> old_ram_size
) {
1619 migration_bitmap_extend(old_ram_size
, new_ram_size
);
1620 dirty_memory_extend(old_ram_size
, new_ram_size
);
1622 /* Keep the list sorted from biggest to smallest block. Unlike QTAILQ,
1623 * QLIST (which has an RCU-friendly variant) does not have insertion at
1624 * tail, so save the last element in last_block.
1626 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1628 if (block
->max_length
< new_block
->max_length
) {
1633 QLIST_INSERT_BEFORE_RCU(block
, new_block
, next
);
1634 } else if (last_block
) {
1635 QLIST_INSERT_AFTER_RCU(last_block
, new_block
, next
);
1636 } else { /* list is empty */
1637 QLIST_INSERT_HEAD_RCU(&ram_list
.blocks
, new_block
, next
);
1639 ram_list
.mru_block
= NULL
;
1641 /* Write list before version */
1644 qemu_mutex_unlock_ramlist();
1646 cpu_physical_memory_set_dirty_range(new_block
->offset
,
1647 new_block
->used_length
,
1650 if (new_block
->host
) {
1651 qemu_ram_setup_dump(new_block
->host
, new_block
->max_length
);
1652 qemu_madvise(new_block
->host
, new_block
->max_length
, QEMU_MADV_HUGEPAGE
);
1653 qemu_madvise(new_block
->host
, new_block
->max_length
, QEMU_MADV_DONTFORK
);
1654 if (kvm_enabled()) {
1655 kvm_setup_guest_memory(new_block
->host
, new_block
->max_length
);
1661 RAMBlock
*qemu_ram_alloc_from_file(ram_addr_t size
, MemoryRegion
*mr
,
1662 bool share
, const char *mem_path
,
1665 RAMBlock
*new_block
;
1666 Error
*local_err
= NULL
;
1668 if (xen_enabled()) {
1669 error_setg(errp
, "-mem-path not supported with Xen");
1673 if (phys_mem_alloc
!= qemu_anon_ram_alloc
) {
1675 * file_ram_alloc() needs to allocate just like
1676 * phys_mem_alloc, but we haven't bothered to provide
1680 "-mem-path not supported with this accelerator");
1684 size
= HOST_PAGE_ALIGN(size
);
1685 new_block
= g_malloc0(sizeof(*new_block
));
1687 new_block
->used_length
= size
;
1688 new_block
->max_length
= size
;
1689 new_block
->flags
= share
? RAM_SHARED
: 0;
1690 new_block
->host
= file_ram_alloc(new_block
, size
,
1692 if (!new_block
->host
) {
1697 ram_block_add(new_block
, &local_err
);
1700 error_propagate(errp
, local_err
);
1708 RAMBlock
*qemu_ram_alloc_internal(ram_addr_t size
, ram_addr_t max_size
,
1709 void (*resized
)(const char*,
1712 void *host
, bool resizeable
,
1713 MemoryRegion
*mr
, Error
**errp
)
1715 RAMBlock
*new_block
;
1716 Error
*local_err
= NULL
;
1718 size
= HOST_PAGE_ALIGN(size
);
1719 max_size
= HOST_PAGE_ALIGN(max_size
);
1720 new_block
= g_malloc0(sizeof(*new_block
));
1722 new_block
->resized
= resized
;
1723 new_block
->used_length
= size
;
1724 new_block
->max_length
= max_size
;
1725 assert(max_size
>= size
);
1727 new_block
->host
= host
;
1729 new_block
->flags
|= RAM_PREALLOC
;
1732 new_block
->flags
|= RAM_RESIZEABLE
;
1734 ram_block_add(new_block
, &local_err
);
1737 error_propagate(errp
, local_err
);
1743 RAMBlock
*qemu_ram_alloc_from_ptr(ram_addr_t size
, void *host
,
1744 MemoryRegion
*mr
, Error
**errp
)
1746 return qemu_ram_alloc_internal(size
, size
, NULL
, host
, false, mr
, errp
);
1749 RAMBlock
*qemu_ram_alloc(ram_addr_t size
, MemoryRegion
*mr
, Error
**errp
)
1751 return qemu_ram_alloc_internal(size
, size
, NULL
, NULL
, false, mr
, errp
);
1754 RAMBlock
*qemu_ram_alloc_resizeable(ram_addr_t size
, ram_addr_t maxsz
,
1755 void (*resized
)(const char*,
1758 MemoryRegion
*mr
, Error
**errp
)
1760 return qemu_ram_alloc_internal(size
, maxsz
, resized
, NULL
, true, mr
, errp
);
1763 static void reclaim_ramblock(RAMBlock
*block
)
1765 if (block
->flags
& RAM_PREALLOC
) {
1767 } else if (xen_enabled()) {
1768 xen_invalidate_map_cache_entry(block
->host
);
1770 } else if (block
->fd
>= 0) {
1771 qemu_ram_munmap(block
->host
, block
->max_length
);
1775 qemu_anon_ram_free(block
->host
, block
->max_length
);
1780 void qemu_ram_free(RAMBlock
*block
)
1786 qemu_mutex_lock_ramlist();
1787 QLIST_REMOVE_RCU(block
, next
);
1788 ram_list
.mru_block
= NULL
;
1789 /* Write list before version */
1792 call_rcu(block
, reclaim_ramblock
, rcu
);
1793 qemu_mutex_unlock_ramlist();
1797 void qemu_ram_remap(ram_addr_t addr
, ram_addr_t length
)
1804 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1805 offset
= addr
- block
->offset
;
1806 if (offset
< block
->max_length
) {
1807 vaddr
= ramblock_ptr(block
, offset
);
1808 if (block
->flags
& RAM_PREALLOC
) {
1810 } else if (xen_enabled()) {
1814 if (block
->fd
>= 0) {
1815 flags
|= (block
->flags
& RAM_SHARED
?
1816 MAP_SHARED
: MAP_PRIVATE
);
1817 area
= mmap(vaddr
, length
, PROT_READ
| PROT_WRITE
,
1818 flags
, block
->fd
, offset
);
1821 * Remap needs to match alloc. Accelerators that
1822 * set phys_mem_alloc never remap. If they did,
1823 * we'd need a remap hook here.
1825 assert(phys_mem_alloc
== qemu_anon_ram_alloc
);
1827 flags
|= MAP_PRIVATE
| MAP_ANONYMOUS
;
1828 area
= mmap(vaddr
, length
, PROT_READ
| PROT_WRITE
,
1831 if (area
!= vaddr
) {
1832 fprintf(stderr
, "Could not remap addr: "
1833 RAM_ADDR_FMT
"@" RAM_ADDR_FMT
"\n",
1837 memory_try_enable_merging(vaddr
, length
);
1838 qemu_ram_setup_dump(vaddr
, length
);
1843 #endif /* !_WIN32 */
1845 /* Return a host pointer to ram allocated with qemu_ram_alloc.
1846 * This should not be used for general purpose DMA. Use address_space_map
1847 * or address_space_rw instead. For local memory (e.g. video ram) that the
1848 * device owns, use memory_region_get_ram_ptr.
1850 * Called within RCU critical section.
1852 void *qemu_map_ram_ptr(RAMBlock
*ram_block
, ram_addr_t addr
)
1854 RAMBlock
*block
= ram_block
;
1856 if (block
== NULL
) {
1857 block
= qemu_get_ram_block(addr
);
1858 addr
-= block
->offset
;
1861 if (xen_enabled() && block
->host
== NULL
) {
1862 /* We need to check if the requested address is in the RAM
1863 * because we don't want to map the entire memory in QEMU.
1864 * In that case just map until the end of the page.
1866 if (block
->offset
== 0) {
1867 return xen_map_cache(addr
, 0, 0);
1870 block
->host
= xen_map_cache(block
->offset
, block
->max_length
, 1);
1872 return ramblock_ptr(block
, addr
);
1875 /* Return a host pointer to guest's ram. Similar to qemu_map_ram_ptr
1876 * but takes a size argument.
1878 * Called within RCU critical section.
1880 static void *qemu_ram_ptr_length(RAMBlock
*ram_block
, ram_addr_t addr
,
1883 RAMBlock
*block
= ram_block
;
1888 if (block
== NULL
) {
1889 block
= qemu_get_ram_block(addr
);
1890 addr
-= block
->offset
;
1892 *size
= MIN(*size
, block
->max_length
- addr
);
1894 if (xen_enabled() && block
->host
== NULL
) {
1895 /* We need to check if the requested address is in the RAM
1896 * because we don't want to map the entire memory in QEMU.
1897 * In that case just map the requested area.
1899 if (block
->offset
== 0) {
1900 return xen_map_cache(addr
, *size
, 1);
1903 block
->host
= xen_map_cache(block
->offset
, block
->max_length
, 1);
1906 return ramblock_ptr(block
, addr
);
1910 * Translates a host ptr back to a RAMBlock, a ram_addr and an offset
1913 * ptr: Host pointer to look up
1914 * round_offset: If true round the result offset down to a page boundary
1915 * *ram_addr: set to result ram_addr
1916 * *offset: set to result offset within the RAMBlock
1918 * Returns: RAMBlock (or NULL if not found)
1920 * By the time this function returns, the returned pointer is not protected
1921 * by RCU anymore. If the caller is not within an RCU critical section and
1922 * does not hold the iothread lock, it must have other means of protecting the
1923 * pointer, such as a reference to the region that includes the incoming
1926 RAMBlock
*qemu_ram_block_from_host(void *ptr
, bool round_offset
,
1930 uint8_t *host
= ptr
;
1932 if (xen_enabled()) {
1933 ram_addr_t ram_addr
;
1935 ram_addr
= xen_ram_addr_from_mapcache(ptr
);
1936 block
= qemu_get_ram_block(ram_addr
);
1938 *offset
= ram_addr
- block
->offset
;
1945 block
= atomic_rcu_read(&ram_list
.mru_block
);
1946 if (block
&& block
->host
&& host
- block
->host
< block
->max_length
) {
1950 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1951 /* This case append when the block is not mapped. */
1952 if (block
->host
== NULL
) {
1955 if (host
- block
->host
< block
->max_length
) {
1964 *offset
= (host
- block
->host
);
1966 *offset
&= TARGET_PAGE_MASK
;
1973 * Finds the named RAMBlock
1975 * name: The name of RAMBlock to find
1977 * Returns: RAMBlock (or NULL if not found)
1979 RAMBlock
*qemu_ram_block_by_name(const char *name
)
1983 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1984 if (!strcmp(name
, block
->idstr
)) {
1992 /* Some of the softmmu routines need to translate from a host pointer
1993 (typically a TLB entry) back to a ram offset. */
1994 ram_addr_t
qemu_ram_addr_from_host(void *ptr
)
1999 block
= qemu_ram_block_from_host(ptr
, false, &offset
);
2001 return RAM_ADDR_INVALID
;
2004 return block
->offset
+ offset
;
2007 /* Called within RCU critical section. */
2008 static void notdirty_mem_write(void *opaque
, hwaddr ram_addr
,
2009 uint64_t val
, unsigned size
)
2011 if (!cpu_physical_memory_get_dirty_flag(ram_addr
, DIRTY_MEMORY_CODE
)) {
2012 tb_invalidate_phys_page_fast(ram_addr
, size
);
2016 stb_p(qemu_map_ram_ptr(NULL
, ram_addr
), val
);
2019 stw_p(qemu_map_ram_ptr(NULL
, ram_addr
), val
);
2022 stl_p(qemu_map_ram_ptr(NULL
, ram_addr
), val
);
2027 /* Set both VGA and migration bits for simplicity and to remove
2028 * the notdirty callback faster.
2030 cpu_physical_memory_set_dirty_range(ram_addr
, size
,
2031 DIRTY_CLIENTS_NOCODE
);
2032 /* we remove the notdirty callback only if the code has been
2034 if (!cpu_physical_memory_is_clean(ram_addr
)) {
2035 tlb_set_dirty(current_cpu
, current_cpu
->mem_io_vaddr
);
2039 static bool notdirty_mem_accepts(void *opaque
, hwaddr addr
,
2040 unsigned size
, bool is_write
)
2045 static const MemoryRegionOps notdirty_mem_ops
= {
2046 .write
= notdirty_mem_write
,
2047 .valid
.accepts
= notdirty_mem_accepts
,
2048 .endianness
= DEVICE_NATIVE_ENDIAN
,
2051 /* Generate a debug exception if a watchpoint has been hit. */
2052 static void check_watchpoint(int offset
, int len
, MemTxAttrs attrs
, int flags
)
2054 CPUState
*cpu
= current_cpu
;
2055 CPUClass
*cc
= CPU_GET_CLASS(cpu
);
2056 CPUArchState
*env
= cpu
->env_ptr
;
2057 target_ulong pc
, cs_base
;
2062 if (cpu
->watchpoint_hit
) {
2063 /* We re-entered the check after replacing the TB. Now raise
2064 * the debug interrupt so that is will trigger after the
2065 * current instruction. */
2066 cpu_interrupt(cpu
, CPU_INTERRUPT_DEBUG
);
2069 vaddr
= (cpu
->mem_io_vaddr
& TARGET_PAGE_MASK
) + offset
;
2070 QTAILQ_FOREACH(wp
, &cpu
->watchpoints
, entry
) {
2071 if (cpu_watchpoint_address_matches(wp
, vaddr
, len
)
2072 && (wp
->flags
& flags
)) {
2073 if (flags
== BP_MEM_READ
) {
2074 wp
->flags
|= BP_WATCHPOINT_HIT_READ
;
2076 wp
->flags
|= BP_WATCHPOINT_HIT_WRITE
;
2078 wp
->hitaddr
= vaddr
;
2079 wp
->hitattrs
= attrs
;
2080 if (!cpu
->watchpoint_hit
) {
2081 if (wp
->flags
& BP_CPU
&&
2082 !cc
->debug_check_watchpoint(cpu
, wp
)) {
2083 wp
->flags
&= ~BP_WATCHPOINT_HIT
;
2086 cpu
->watchpoint_hit
= wp
;
2087 tb_check_watchpoint(cpu
);
2088 if (wp
->flags
& BP_STOP_BEFORE_ACCESS
) {
2089 cpu
->exception_index
= EXCP_DEBUG
;
2092 cpu_get_tb_cpu_state(env
, &pc
, &cs_base
, &cpu_flags
);
2093 tb_gen_code(cpu
, pc
, cs_base
, cpu_flags
, 1);
2094 cpu_loop_exit_noexc(cpu
);
2098 wp
->flags
&= ~BP_WATCHPOINT_HIT
;
2103 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2104 so these check for a hit then pass through to the normal out-of-line
2106 static MemTxResult
watch_mem_read(void *opaque
, hwaddr addr
, uint64_t *pdata
,
2107 unsigned size
, MemTxAttrs attrs
)
2111 int asidx
= cpu_asidx_from_attrs(current_cpu
, attrs
);
2112 AddressSpace
*as
= current_cpu
->cpu_ases
[asidx
].as
;
2114 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, size
, attrs
, BP_MEM_READ
);
2117 data
= address_space_ldub(as
, addr
, attrs
, &res
);
2120 data
= address_space_lduw(as
, addr
, attrs
, &res
);
2123 data
= address_space_ldl(as
, addr
, attrs
, &res
);
2131 static MemTxResult
watch_mem_write(void *opaque
, hwaddr addr
,
2132 uint64_t val
, unsigned size
,
2136 int asidx
= cpu_asidx_from_attrs(current_cpu
, attrs
);
2137 AddressSpace
*as
= current_cpu
->cpu_ases
[asidx
].as
;
2139 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, size
, attrs
, BP_MEM_WRITE
);
2142 address_space_stb(as
, addr
, val
, attrs
, &res
);
2145 address_space_stw(as
, addr
, val
, attrs
, &res
);
2148 address_space_stl(as
, addr
, val
, attrs
, &res
);
2155 static const MemoryRegionOps watch_mem_ops
= {
2156 .read_with_attrs
= watch_mem_read
,
2157 .write_with_attrs
= watch_mem_write
,
2158 .endianness
= DEVICE_NATIVE_ENDIAN
,
2161 static MemTxResult
subpage_read(void *opaque
, hwaddr addr
, uint64_t *data
,
2162 unsigned len
, MemTxAttrs attrs
)
2164 subpage_t
*subpage
= opaque
;
2168 #if defined(DEBUG_SUBPAGE)
2169 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
"\n", __func__
,
2170 subpage
, len
, addr
);
2172 res
= address_space_read(subpage
->as
, addr
+ subpage
->base
,
2179 *data
= ldub_p(buf
);
2182 *data
= lduw_p(buf
);
2195 static MemTxResult
subpage_write(void *opaque
, hwaddr addr
,
2196 uint64_t value
, unsigned len
, MemTxAttrs attrs
)
2198 subpage_t
*subpage
= opaque
;
2201 #if defined(DEBUG_SUBPAGE)
2202 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
2203 " value %"PRIx64
"\n",
2204 __func__
, subpage
, len
, addr
, value
);
2222 return address_space_write(subpage
->as
, addr
+ subpage
->base
,
2226 static bool subpage_accepts(void *opaque
, hwaddr addr
,
2227 unsigned len
, bool is_write
)
2229 subpage_t
*subpage
= opaque
;
2230 #if defined(DEBUG_SUBPAGE)
2231 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx
"\n",
2232 __func__
, subpage
, is_write
? 'w' : 'r', len
, addr
);
2235 return address_space_access_valid(subpage
->as
, addr
+ subpage
->base
,
2239 static const MemoryRegionOps subpage_ops
= {
2240 .read_with_attrs
= subpage_read
,
2241 .write_with_attrs
= subpage_write
,
2242 .impl
.min_access_size
= 1,
2243 .impl
.max_access_size
= 8,
2244 .valid
.min_access_size
= 1,
2245 .valid
.max_access_size
= 8,
2246 .valid
.accepts
= subpage_accepts
,
2247 .endianness
= DEVICE_NATIVE_ENDIAN
,
2250 static int subpage_register (subpage_t
*mmio
, uint32_t start
, uint32_t end
,
2255 if (start
>= TARGET_PAGE_SIZE
|| end
>= TARGET_PAGE_SIZE
)
2257 idx
= SUBPAGE_IDX(start
);
2258 eidx
= SUBPAGE_IDX(end
);
2259 #if defined(DEBUG_SUBPAGE)
2260 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
2261 __func__
, mmio
, start
, end
, idx
, eidx
, section
);
2263 for (; idx
<= eidx
; idx
++) {
2264 mmio
->sub_section
[idx
] = section
;
2270 static subpage_t
*subpage_init(AddressSpace
*as
, hwaddr base
)
2274 mmio
= g_malloc0(sizeof(subpage_t
));
2278 memory_region_init_io(&mmio
->iomem
, NULL
, &subpage_ops
, mmio
,
2279 NULL
, TARGET_PAGE_SIZE
);
2280 mmio
->iomem
.subpage
= true;
2281 #if defined(DEBUG_SUBPAGE)
2282 printf("%s: %p base " TARGET_FMT_plx
" len %08x\n", __func__
,
2283 mmio
, base
, TARGET_PAGE_SIZE
);
2285 subpage_register(mmio
, 0, TARGET_PAGE_SIZE
-1, PHYS_SECTION_UNASSIGNED
);
2290 static uint16_t dummy_section(PhysPageMap
*map
, AddressSpace
*as
,
2294 MemoryRegionSection section
= {
2295 .address_space
= as
,
2297 .offset_within_address_space
= 0,
2298 .offset_within_region
= 0,
2299 .size
= int128_2_64(),
2302 return phys_section_add(map
, §ion
);
2305 MemoryRegion
*iotlb_to_region(CPUState
*cpu
, hwaddr index
, MemTxAttrs attrs
)
2307 int asidx
= cpu_asidx_from_attrs(cpu
, attrs
);
2308 CPUAddressSpace
*cpuas
= &cpu
->cpu_ases
[asidx
];
2309 AddressSpaceDispatch
*d
= atomic_rcu_read(&cpuas
->memory_dispatch
);
2310 MemoryRegionSection
*sections
= d
->map
.sections
;
2312 return sections
[index
& ~TARGET_PAGE_MASK
].mr
;
2315 static void io_mem_init(void)
2317 memory_region_init_io(&io_mem_rom
, NULL
, &unassigned_mem_ops
, NULL
, NULL
, UINT64_MAX
);
2318 memory_region_init_io(&io_mem_unassigned
, NULL
, &unassigned_mem_ops
, NULL
,
2320 memory_region_init_io(&io_mem_notdirty
, NULL
, ¬dirty_mem_ops
, NULL
,
2322 memory_region_init_io(&io_mem_watch
, NULL
, &watch_mem_ops
, NULL
,
2326 static void mem_begin(MemoryListener
*listener
)
2328 AddressSpace
*as
= container_of(listener
, AddressSpace
, dispatch_listener
);
2329 AddressSpaceDispatch
*d
= g_new0(AddressSpaceDispatch
, 1);
2332 n
= dummy_section(&d
->map
, as
, &io_mem_unassigned
);
2333 assert(n
== PHYS_SECTION_UNASSIGNED
);
2334 n
= dummy_section(&d
->map
, as
, &io_mem_notdirty
);
2335 assert(n
== PHYS_SECTION_NOTDIRTY
);
2336 n
= dummy_section(&d
->map
, as
, &io_mem_rom
);
2337 assert(n
== PHYS_SECTION_ROM
);
2338 n
= dummy_section(&d
->map
, as
, &io_mem_watch
);
2339 assert(n
== PHYS_SECTION_WATCH
);
2341 d
->phys_map
= (PhysPageEntry
) { .ptr
= PHYS_MAP_NODE_NIL
, .skip
= 1 };
2343 as
->next_dispatch
= d
;
2346 static void address_space_dispatch_free(AddressSpaceDispatch
*d
)
2348 phys_sections_free(&d
->map
);
2352 static void mem_commit(MemoryListener
*listener
)
2354 AddressSpace
*as
= container_of(listener
, AddressSpace
, dispatch_listener
);
2355 AddressSpaceDispatch
*cur
= as
->dispatch
;
2356 AddressSpaceDispatch
*next
= as
->next_dispatch
;
2358 phys_page_compact_all(next
, next
->map
.nodes_nb
);
2360 atomic_rcu_set(&as
->dispatch
, next
);
2362 call_rcu(cur
, address_space_dispatch_free
, rcu
);
2366 static void tcg_commit(MemoryListener
*listener
)
2368 CPUAddressSpace
*cpuas
;
2369 AddressSpaceDispatch
*d
;
2371 /* since each CPU stores ram addresses in its TLB cache, we must
2372 reset the modified entries */
2373 cpuas
= container_of(listener
, CPUAddressSpace
, tcg_as_listener
);
2374 cpu_reloading_memory_map();
2375 /* The CPU and TLB are protected by the iothread lock.
2376 * We reload the dispatch pointer now because cpu_reloading_memory_map()
2377 * may have split the RCU critical section.
2379 d
= atomic_rcu_read(&cpuas
->as
->dispatch
);
2380 cpuas
->memory_dispatch
= d
;
2381 tlb_flush(cpuas
->cpu
, 1);
2384 void address_space_init_dispatch(AddressSpace
*as
)
2386 as
->dispatch
= NULL
;
2387 as
->dispatch_listener
= (MemoryListener
) {
2389 .commit
= mem_commit
,
2390 .region_add
= mem_add
,
2391 .region_nop
= mem_add
,
2394 memory_listener_register(&as
->dispatch_listener
, as
);
2397 void address_space_unregister(AddressSpace
*as
)
2399 memory_listener_unregister(&as
->dispatch_listener
);
2402 void address_space_destroy_dispatch(AddressSpace
*as
)
2404 AddressSpaceDispatch
*d
= as
->dispatch
;
2406 atomic_rcu_set(&as
->dispatch
, NULL
);
2408 call_rcu(d
, address_space_dispatch_free
, rcu
);
2412 static void memory_map_init(void)
2414 system_memory
= g_malloc(sizeof(*system_memory
));
2416 memory_region_init(system_memory
, NULL
, "system", UINT64_MAX
);
2417 address_space_init(&address_space_memory
, system_memory
, "memory");
2419 system_io
= g_malloc(sizeof(*system_io
));
2420 memory_region_init_io(system_io
, NULL
, &unassigned_io_ops
, NULL
, "io",
2422 address_space_init(&address_space_io
, system_io
, "I/O");
2425 MemoryRegion
*get_system_memory(void)
2427 return system_memory
;
2430 MemoryRegion
*get_system_io(void)
2435 #endif /* !defined(CONFIG_USER_ONLY) */
2437 /* physical memory access (slow version, mainly for debug) */
2438 #if defined(CONFIG_USER_ONLY)
2439 int cpu_memory_rw_debug(CPUState
*cpu
, target_ulong addr
,
2440 uint8_t *buf
, int len
, int is_write
)
2447 page
= addr
& TARGET_PAGE_MASK
;
2448 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
2451 flags
= page_get_flags(page
);
2452 if (!(flags
& PAGE_VALID
))
2455 if (!(flags
& PAGE_WRITE
))
2457 /* XXX: this code should not depend on lock_user */
2458 if (!(p
= lock_user(VERIFY_WRITE
, addr
, l
, 0)))
2461 unlock_user(p
, addr
, l
);
2463 if (!(flags
& PAGE_READ
))
2465 /* XXX: this code should not depend on lock_user */
2466 if (!(p
= lock_user(VERIFY_READ
, addr
, l
, 1)))
2469 unlock_user(p
, addr
, 0);
2480 static void invalidate_and_set_dirty(MemoryRegion
*mr
, hwaddr addr
,
2483 uint8_t dirty_log_mask
= memory_region_get_dirty_log_mask(mr
);
2484 addr
+= memory_region_get_ram_addr(mr
);
2486 /* No early return if dirty_log_mask is or becomes 0, because
2487 * cpu_physical_memory_set_dirty_range will still call
2488 * xen_modified_memory.
2490 if (dirty_log_mask
) {
2492 cpu_physical_memory_range_includes_clean(addr
, length
, dirty_log_mask
);
2494 if (dirty_log_mask
& (1 << DIRTY_MEMORY_CODE
)) {
2495 tb_invalidate_phys_range(addr
, addr
+ length
);
2496 dirty_log_mask
&= ~(1 << DIRTY_MEMORY_CODE
);
2498 cpu_physical_memory_set_dirty_range(addr
, length
, dirty_log_mask
);
2501 static int memory_access_size(MemoryRegion
*mr
, unsigned l
, hwaddr addr
)
2503 unsigned access_size_max
= mr
->ops
->valid
.max_access_size
;
2505 /* Regions are assumed to support 1-4 byte accesses unless
2506 otherwise specified. */
2507 if (access_size_max
== 0) {
2508 access_size_max
= 4;
2511 /* Bound the maximum access by the alignment of the address. */
2512 if (!mr
->ops
->impl
.unaligned
) {
2513 unsigned align_size_max
= addr
& -addr
;
2514 if (align_size_max
!= 0 && align_size_max
< access_size_max
) {
2515 access_size_max
= align_size_max
;
2519 /* Don't attempt accesses larger than the maximum. */
2520 if (l
> access_size_max
) {
2521 l
= access_size_max
;
2528 static bool prepare_mmio_access(MemoryRegion
*mr
)
2530 bool unlocked
= !qemu_mutex_iothread_locked();
2531 bool release_lock
= false;
2533 if (unlocked
&& mr
->global_locking
) {
2534 qemu_mutex_lock_iothread();
2536 release_lock
= true;
2538 if (mr
->flush_coalesced_mmio
) {
2540 qemu_mutex_lock_iothread();
2542 qemu_flush_coalesced_mmio_buffer();
2544 qemu_mutex_unlock_iothread();
2548 return release_lock
;
2551 /* Called within RCU critical section. */
2552 static MemTxResult
address_space_write_continue(AddressSpace
*as
, hwaddr addr
,
2555 int len
, hwaddr addr1
,
2556 hwaddr l
, MemoryRegion
*mr
)
2560 MemTxResult result
= MEMTX_OK
;
2561 bool release_lock
= false;
2564 if (!memory_access_is_direct(mr
, true)) {
2565 release_lock
|= prepare_mmio_access(mr
);
2566 l
= memory_access_size(mr
, l
, addr1
);
2567 /* XXX: could force current_cpu to NULL to avoid
2571 /* 64 bit write access */
2573 result
|= memory_region_dispatch_write(mr
, addr1
, val
, 8,
2577 /* 32 bit write access */
2579 result
|= memory_region_dispatch_write(mr
, addr1
, val
, 4,
2583 /* 16 bit write access */
2585 result
|= memory_region_dispatch_write(mr
, addr1
, val
, 2,
2589 /* 8 bit write access */
2591 result
|= memory_region_dispatch_write(mr
, addr1
, val
, 1,
2599 ptr
= qemu_map_ram_ptr(mr
->ram_block
, addr1
);
2600 memcpy(ptr
, buf
, l
);
2601 invalidate_and_set_dirty(mr
, addr1
, l
);
2605 qemu_mutex_unlock_iothread();
2606 release_lock
= false;
2618 mr
= address_space_translate(as
, addr
, &addr1
, &l
, true);
2624 MemTxResult
address_space_write(AddressSpace
*as
, hwaddr addr
, MemTxAttrs attrs
,
2625 const uint8_t *buf
, int len
)
2630 MemTxResult result
= MEMTX_OK
;
2635 mr
= address_space_translate(as
, addr
, &addr1
, &l
, true);
2636 result
= address_space_write_continue(as
, addr
, attrs
, buf
, len
,
2644 /* Called within RCU critical section. */
2645 MemTxResult
address_space_read_continue(AddressSpace
*as
, hwaddr addr
,
2646 MemTxAttrs attrs
, uint8_t *buf
,
2647 int len
, hwaddr addr1
, hwaddr l
,
2652 MemTxResult result
= MEMTX_OK
;
2653 bool release_lock
= false;
2656 if (!memory_access_is_direct(mr
, false)) {
2658 release_lock
|= prepare_mmio_access(mr
);
2659 l
= memory_access_size(mr
, l
, addr1
);
2662 /* 64 bit read access */
2663 result
|= memory_region_dispatch_read(mr
, addr1
, &val
, 8,
2668 /* 32 bit read access */
2669 result
|= memory_region_dispatch_read(mr
, addr1
, &val
, 4,
2674 /* 16 bit read access */
2675 result
|= memory_region_dispatch_read(mr
, addr1
, &val
, 2,
2680 /* 8 bit read access */
2681 result
|= memory_region_dispatch_read(mr
, addr1
, &val
, 1,
2690 ptr
= qemu_map_ram_ptr(mr
->ram_block
, addr1
);
2691 memcpy(buf
, ptr
, l
);
2695 qemu_mutex_unlock_iothread();
2696 release_lock
= false;
2708 mr
= address_space_translate(as
, addr
, &addr1
, &l
, false);
2714 MemTxResult
address_space_read_full(AddressSpace
*as
, hwaddr addr
,
2715 MemTxAttrs attrs
, uint8_t *buf
, int len
)
2720 MemTxResult result
= MEMTX_OK
;
2725 mr
= address_space_translate(as
, addr
, &addr1
, &l
, false);
2726 result
= address_space_read_continue(as
, addr
, attrs
, buf
, len
,
2734 MemTxResult
address_space_rw(AddressSpace
*as
, hwaddr addr
, MemTxAttrs attrs
,
2735 uint8_t *buf
, int len
, bool is_write
)
2738 return address_space_write(as
, addr
, attrs
, (uint8_t *)buf
, len
);
2740 return address_space_read(as
, addr
, attrs
, (uint8_t *)buf
, len
);
2744 void cpu_physical_memory_rw(hwaddr addr
, uint8_t *buf
,
2745 int len
, int is_write
)
2747 address_space_rw(&address_space_memory
, addr
, MEMTXATTRS_UNSPECIFIED
,
2748 buf
, len
, is_write
);
2751 enum write_rom_type
{
2756 static inline void cpu_physical_memory_write_rom_internal(AddressSpace
*as
,
2757 hwaddr addr
, const uint8_t *buf
, int len
, enum write_rom_type type
)
2767 mr
= address_space_translate(as
, addr
, &addr1
, &l
, true);
2769 if (!(memory_region_is_ram(mr
) ||
2770 memory_region_is_romd(mr
))) {
2771 l
= memory_access_size(mr
, l
, addr1
);
2774 ptr
= qemu_map_ram_ptr(mr
->ram_block
, addr1
);
2777 memcpy(ptr
, buf
, l
);
2778 invalidate_and_set_dirty(mr
, addr1
, l
);
2781 flush_icache_range((uintptr_t)ptr
, (uintptr_t)ptr
+ l
);
2792 /* used for ROM loading : can write in RAM and ROM */
2793 void cpu_physical_memory_write_rom(AddressSpace
*as
, hwaddr addr
,
2794 const uint8_t *buf
, int len
)
2796 cpu_physical_memory_write_rom_internal(as
, addr
, buf
, len
, WRITE_DATA
);
2799 void cpu_flush_icache_range(hwaddr start
, int len
)
2802 * This function should do the same thing as an icache flush that was
2803 * triggered from within the guest. For TCG we are always cache coherent,
2804 * so there is no need to flush anything. For KVM / Xen we need to flush
2805 * the host's instruction cache at least.
2807 if (tcg_enabled()) {
2811 cpu_physical_memory_write_rom_internal(&address_space_memory
,
2812 start
, NULL
, len
, FLUSH_CACHE
);
2823 static BounceBuffer bounce
;
2825 typedef struct MapClient
{
2827 QLIST_ENTRY(MapClient
) link
;
2830 QemuMutex map_client_list_lock
;
2831 static QLIST_HEAD(map_client_list
, MapClient
) map_client_list
2832 = QLIST_HEAD_INITIALIZER(map_client_list
);
2834 static void cpu_unregister_map_client_do(MapClient
*client
)
2836 QLIST_REMOVE(client
, link
);
2840 static void cpu_notify_map_clients_locked(void)
2844 while (!QLIST_EMPTY(&map_client_list
)) {
2845 client
= QLIST_FIRST(&map_client_list
);
2846 qemu_bh_schedule(client
->bh
);
2847 cpu_unregister_map_client_do(client
);
2851 void cpu_register_map_client(QEMUBH
*bh
)
2853 MapClient
*client
= g_malloc(sizeof(*client
));
2855 qemu_mutex_lock(&map_client_list_lock
);
2857 QLIST_INSERT_HEAD(&map_client_list
, client
, link
);
2858 if (!atomic_read(&bounce
.in_use
)) {
2859 cpu_notify_map_clients_locked();
2861 qemu_mutex_unlock(&map_client_list_lock
);
2864 void cpu_exec_init_all(void)
2866 qemu_mutex_init(&ram_list
.mutex
);
2869 qemu_mutex_init(&map_client_list_lock
);
2872 void cpu_unregister_map_client(QEMUBH
*bh
)
2876 qemu_mutex_lock(&map_client_list_lock
);
2877 QLIST_FOREACH(client
, &map_client_list
, link
) {
2878 if (client
->bh
== bh
) {
2879 cpu_unregister_map_client_do(client
);
2883 qemu_mutex_unlock(&map_client_list_lock
);
2886 static void cpu_notify_map_clients(void)
2888 qemu_mutex_lock(&map_client_list_lock
);
2889 cpu_notify_map_clients_locked();
2890 qemu_mutex_unlock(&map_client_list_lock
);
2893 bool address_space_access_valid(AddressSpace
*as
, hwaddr addr
, int len
, bool is_write
)
2901 mr
= address_space_translate(as
, addr
, &xlat
, &l
, is_write
);
2902 if (!memory_access_is_direct(mr
, is_write
)) {
2903 l
= memory_access_size(mr
, l
, addr
);
2904 if (!memory_region_access_valid(mr
, xlat
, l
, is_write
)) {
2916 /* Map a physical memory region into a host virtual address.
2917 * May map a subset of the requested range, given by and returned in *plen.
2918 * May return NULL if resources needed to perform the mapping are exhausted.
2919 * Use only for reads OR writes - not for read-modify-write operations.
2920 * Use cpu_register_map_client() to know when retrying the map operation is
2921 * likely to succeed.
2923 void *address_space_map(AddressSpace
*as
,
2930 hwaddr l
, xlat
, base
;
2931 MemoryRegion
*mr
, *this_mr
;
2940 mr
= address_space_translate(as
, addr
, &xlat
, &l
, is_write
);
2942 if (!memory_access_is_direct(mr
, is_write
)) {
2943 if (atomic_xchg(&bounce
.in_use
, true)) {
2947 /* Avoid unbounded allocations */
2948 l
= MIN(l
, TARGET_PAGE_SIZE
);
2949 bounce
.buffer
= qemu_memalign(TARGET_PAGE_SIZE
, l
);
2953 memory_region_ref(mr
);
2956 address_space_read(as
, addr
, MEMTXATTRS_UNSPECIFIED
,
2962 return bounce
.buffer
;
2976 this_mr
= address_space_translate(as
, addr
, &xlat
, &l
, is_write
);
2977 if (this_mr
!= mr
|| xlat
!= base
+ done
) {
2982 memory_region_ref(mr
);
2984 ptr
= qemu_ram_ptr_length(mr
->ram_block
, base
, plen
);
2990 /* Unmaps a memory region previously mapped by address_space_map().
2991 * Will also mark the memory as dirty if is_write == 1. access_len gives
2992 * the amount of memory that was actually read or written by the caller.
2994 void address_space_unmap(AddressSpace
*as
, void *buffer
, hwaddr len
,
2995 int is_write
, hwaddr access_len
)
2997 if (buffer
!= bounce
.buffer
) {
3001 mr
= memory_region_from_host(buffer
, &addr1
);
3004 invalidate_and_set_dirty(mr
, addr1
, access_len
);
3006 if (xen_enabled()) {
3007 xen_invalidate_map_cache_entry(buffer
);
3009 memory_region_unref(mr
);
3013 address_space_write(as
, bounce
.addr
, MEMTXATTRS_UNSPECIFIED
,
3014 bounce
.buffer
, access_len
);
3016 qemu_vfree(bounce
.buffer
);
3017 bounce
.buffer
= NULL
;
3018 memory_region_unref(bounce
.mr
);
3019 atomic_mb_set(&bounce
.in_use
, false);
3020 cpu_notify_map_clients();
3023 void *cpu_physical_memory_map(hwaddr addr
,
3027 return address_space_map(&address_space_memory
, addr
, plen
, is_write
);
3030 void cpu_physical_memory_unmap(void *buffer
, hwaddr len
,
3031 int is_write
, hwaddr access_len
)
3033 return address_space_unmap(&address_space_memory
, buffer
, len
, is_write
, access_len
);
3036 /* warning: addr must be aligned */
3037 static inline uint32_t address_space_ldl_internal(AddressSpace
*as
, hwaddr addr
,
3039 MemTxResult
*result
,
3040 enum device_endian endian
)
3048 bool release_lock
= false;
3051 mr
= address_space_translate(as
, addr
, &addr1
, &l
, false);
3052 if (l
< 4 || !memory_access_is_direct(mr
, false)) {
3053 release_lock
|= prepare_mmio_access(mr
);
3056 r
= memory_region_dispatch_read(mr
, addr1
, &val
, 4, attrs
);
3057 #if defined(TARGET_WORDS_BIGENDIAN)
3058 if (endian
== DEVICE_LITTLE_ENDIAN
) {
3062 if (endian
== DEVICE_BIG_ENDIAN
) {
3068 ptr
= qemu_map_ram_ptr(mr
->ram_block
, addr1
);
3070 case DEVICE_LITTLE_ENDIAN
:
3071 val
= ldl_le_p(ptr
);
3073 case DEVICE_BIG_ENDIAN
:
3074 val
= ldl_be_p(ptr
);
3086 qemu_mutex_unlock_iothread();
3092 uint32_t address_space_ldl(AddressSpace
*as
, hwaddr addr
,
3093 MemTxAttrs attrs
, MemTxResult
*result
)
3095 return address_space_ldl_internal(as
, addr
, attrs
, result
,
3096 DEVICE_NATIVE_ENDIAN
);
3099 uint32_t address_space_ldl_le(AddressSpace
*as
, hwaddr addr
,
3100 MemTxAttrs attrs
, MemTxResult
*result
)
3102 return address_space_ldl_internal(as
, addr
, attrs
, result
,
3103 DEVICE_LITTLE_ENDIAN
);
3106 uint32_t address_space_ldl_be(AddressSpace
*as
, hwaddr addr
,
3107 MemTxAttrs attrs
, MemTxResult
*result
)
3109 return address_space_ldl_internal(as
, addr
, attrs
, result
,
3113 uint32_t ldl_phys(AddressSpace
*as
, hwaddr addr
)
3115 return address_space_ldl(as
, addr
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3118 uint32_t ldl_le_phys(AddressSpace
*as
, hwaddr addr
)
3120 return address_space_ldl_le(as
, addr
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3123 uint32_t ldl_be_phys(AddressSpace
*as
, hwaddr addr
)
3125 return address_space_ldl_be(as
, addr
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3128 /* warning: addr must be aligned */
3129 static inline uint64_t address_space_ldq_internal(AddressSpace
*as
, hwaddr addr
,
3131 MemTxResult
*result
,
3132 enum device_endian endian
)
3140 bool release_lock
= false;
3143 mr
= address_space_translate(as
, addr
, &addr1
, &l
,
3145 if (l
< 8 || !memory_access_is_direct(mr
, false)) {
3146 release_lock
|= prepare_mmio_access(mr
);
3149 r
= memory_region_dispatch_read(mr
, addr1
, &val
, 8, attrs
);
3150 #if defined(TARGET_WORDS_BIGENDIAN)
3151 if (endian
== DEVICE_LITTLE_ENDIAN
) {
3155 if (endian
== DEVICE_BIG_ENDIAN
) {
3161 ptr
= qemu_map_ram_ptr(mr
->ram_block
, addr1
);
3163 case DEVICE_LITTLE_ENDIAN
:
3164 val
= ldq_le_p(ptr
);
3166 case DEVICE_BIG_ENDIAN
:
3167 val
= ldq_be_p(ptr
);
3179 qemu_mutex_unlock_iothread();
3185 uint64_t address_space_ldq(AddressSpace
*as
, hwaddr addr
,
3186 MemTxAttrs attrs
, MemTxResult
*result
)
3188 return address_space_ldq_internal(as
, addr
, attrs
, result
,
3189 DEVICE_NATIVE_ENDIAN
);
3192 uint64_t address_space_ldq_le(AddressSpace
*as
, hwaddr addr
,
3193 MemTxAttrs attrs
, MemTxResult
*result
)
3195 return address_space_ldq_internal(as
, addr
, attrs
, result
,
3196 DEVICE_LITTLE_ENDIAN
);
3199 uint64_t address_space_ldq_be(AddressSpace
*as
, hwaddr addr
,
3200 MemTxAttrs attrs
, MemTxResult
*result
)
3202 return address_space_ldq_internal(as
, addr
, attrs
, result
,
3206 uint64_t ldq_phys(AddressSpace
*as
, hwaddr addr
)
3208 return address_space_ldq(as
, addr
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3211 uint64_t ldq_le_phys(AddressSpace
*as
, hwaddr addr
)
3213 return address_space_ldq_le(as
, addr
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3216 uint64_t ldq_be_phys(AddressSpace
*as
, hwaddr addr
)
3218 return address_space_ldq_be(as
, addr
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3222 uint32_t address_space_ldub(AddressSpace
*as
, hwaddr addr
,
3223 MemTxAttrs attrs
, MemTxResult
*result
)
3228 r
= address_space_rw(as
, addr
, attrs
, &val
, 1, 0);
3235 uint32_t ldub_phys(AddressSpace
*as
, hwaddr addr
)
3237 return address_space_ldub(as
, addr
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3240 /* warning: addr must be aligned */
3241 static inline uint32_t address_space_lduw_internal(AddressSpace
*as
,
3244 MemTxResult
*result
,
3245 enum device_endian endian
)
3253 bool release_lock
= false;
3256 mr
= address_space_translate(as
, addr
, &addr1
, &l
,
3258 if (l
< 2 || !memory_access_is_direct(mr
, false)) {
3259 release_lock
|= prepare_mmio_access(mr
);
3262 r
= memory_region_dispatch_read(mr
, addr1
, &val
, 2, attrs
);
3263 #if defined(TARGET_WORDS_BIGENDIAN)
3264 if (endian
== DEVICE_LITTLE_ENDIAN
) {
3268 if (endian
== DEVICE_BIG_ENDIAN
) {
3274 ptr
= qemu_map_ram_ptr(mr
->ram_block
, addr1
);
3276 case DEVICE_LITTLE_ENDIAN
:
3277 val
= lduw_le_p(ptr
);
3279 case DEVICE_BIG_ENDIAN
:
3280 val
= lduw_be_p(ptr
);
3292 qemu_mutex_unlock_iothread();
3298 uint32_t address_space_lduw(AddressSpace
*as
, hwaddr addr
,
3299 MemTxAttrs attrs
, MemTxResult
*result
)
3301 return address_space_lduw_internal(as
, addr
, attrs
, result
,
3302 DEVICE_NATIVE_ENDIAN
);
3305 uint32_t address_space_lduw_le(AddressSpace
*as
, hwaddr addr
,
3306 MemTxAttrs attrs
, MemTxResult
*result
)
3308 return address_space_lduw_internal(as
, addr
, attrs
, result
,
3309 DEVICE_LITTLE_ENDIAN
);
3312 uint32_t address_space_lduw_be(AddressSpace
*as
, hwaddr addr
,
3313 MemTxAttrs attrs
, MemTxResult
*result
)
3315 return address_space_lduw_internal(as
, addr
, attrs
, result
,
3319 uint32_t lduw_phys(AddressSpace
*as
, hwaddr addr
)
3321 return address_space_lduw(as
, addr
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3324 uint32_t lduw_le_phys(AddressSpace
*as
, hwaddr addr
)
3326 return address_space_lduw_le(as
, addr
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3329 uint32_t lduw_be_phys(AddressSpace
*as
, hwaddr addr
)
3331 return address_space_lduw_be(as
, addr
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3334 /* warning: addr must be aligned. The ram page is not masked as dirty
3335 and the code inside is not invalidated. It is useful if the dirty
3336 bits are used to track modified PTEs */
3337 void address_space_stl_notdirty(AddressSpace
*as
, hwaddr addr
, uint32_t val
,
3338 MemTxAttrs attrs
, MemTxResult
*result
)
3345 uint8_t dirty_log_mask
;
3346 bool release_lock
= false;
3349 mr
= address_space_translate(as
, addr
, &addr1
, &l
,
3351 if (l
< 4 || !memory_access_is_direct(mr
, true)) {
3352 release_lock
|= prepare_mmio_access(mr
);
3354 r
= memory_region_dispatch_write(mr
, addr1
, val
, 4, attrs
);
3356 ptr
= qemu_map_ram_ptr(mr
->ram_block
, addr1
);
3359 dirty_log_mask
= memory_region_get_dirty_log_mask(mr
);
3360 dirty_log_mask
&= ~(1 << DIRTY_MEMORY_CODE
);
3361 cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr
) + addr
,
3369 qemu_mutex_unlock_iothread();
3374 void stl_phys_notdirty(AddressSpace
*as
, hwaddr addr
, uint32_t val
)
3376 address_space_stl_notdirty(as
, addr
, val
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3379 /* warning: addr must be aligned */
3380 static inline void address_space_stl_internal(AddressSpace
*as
,
3381 hwaddr addr
, uint32_t val
,
3383 MemTxResult
*result
,
3384 enum device_endian endian
)
3391 bool release_lock
= false;
3394 mr
= address_space_translate(as
, addr
, &addr1
, &l
,
3396 if (l
< 4 || !memory_access_is_direct(mr
, true)) {
3397 release_lock
|= prepare_mmio_access(mr
);
3399 #if defined(TARGET_WORDS_BIGENDIAN)
3400 if (endian
== DEVICE_LITTLE_ENDIAN
) {
3404 if (endian
== DEVICE_BIG_ENDIAN
) {
3408 r
= memory_region_dispatch_write(mr
, addr1
, val
, 4, attrs
);
3411 ptr
= qemu_map_ram_ptr(mr
->ram_block
, addr1
);
3413 case DEVICE_LITTLE_ENDIAN
:
3416 case DEVICE_BIG_ENDIAN
:
3423 invalidate_and_set_dirty(mr
, addr1
, 4);
3430 qemu_mutex_unlock_iothread();
3435 void address_space_stl(AddressSpace
*as
, hwaddr addr
, uint32_t val
,
3436 MemTxAttrs attrs
, MemTxResult
*result
)
3438 address_space_stl_internal(as
, addr
, val
, attrs
, result
,
3439 DEVICE_NATIVE_ENDIAN
);
3442 void address_space_stl_le(AddressSpace
*as
, hwaddr addr
, uint32_t val
,
3443 MemTxAttrs attrs
, MemTxResult
*result
)
3445 address_space_stl_internal(as
, addr
, val
, attrs
, result
,
3446 DEVICE_LITTLE_ENDIAN
);
3449 void address_space_stl_be(AddressSpace
*as
, hwaddr addr
, uint32_t val
,
3450 MemTxAttrs attrs
, MemTxResult
*result
)
3452 address_space_stl_internal(as
, addr
, val
, attrs
, result
,
3456 void stl_phys(AddressSpace
*as
, hwaddr addr
, uint32_t val
)
3458 address_space_stl(as
, addr
, val
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3461 void stl_le_phys(AddressSpace
*as
, hwaddr addr
, uint32_t val
)
3463 address_space_stl_le(as
, addr
, val
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3466 void stl_be_phys(AddressSpace
*as
, hwaddr addr
, uint32_t val
)
3468 address_space_stl_be(as
, addr
, val
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3472 void address_space_stb(AddressSpace
*as
, hwaddr addr
, uint32_t val
,
3473 MemTxAttrs attrs
, MemTxResult
*result
)
3478 r
= address_space_rw(as
, addr
, attrs
, &v
, 1, 1);
3484 void stb_phys(AddressSpace
*as
, hwaddr addr
, uint32_t val
)
3486 address_space_stb(as
, addr
, val
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3489 /* warning: addr must be aligned */
3490 static inline void address_space_stw_internal(AddressSpace
*as
,
3491 hwaddr addr
, uint32_t val
,
3493 MemTxResult
*result
,
3494 enum device_endian endian
)
3501 bool release_lock
= false;
3504 mr
= address_space_translate(as
, addr
, &addr1
, &l
, true);
3505 if (l
< 2 || !memory_access_is_direct(mr
, true)) {
3506 release_lock
|= prepare_mmio_access(mr
);
3508 #if defined(TARGET_WORDS_BIGENDIAN)
3509 if (endian
== DEVICE_LITTLE_ENDIAN
) {
3513 if (endian
== DEVICE_BIG_ENDIAN
) {
3517 r
= memory_region_dispatch_write(mr
, addr1
, val
, 2, attrs
);
3520 ptr
= qemu_map_ram_ptr(mr
->ram_block
, addr1
);
3522 case DEVICE_LITTLE_ENDIAN
:
3525 case DEVICE_BIG_ENDIAN
:
3532 invalidate_and_set_dirty(mr
, addr1
, 2);
3539 qemu_mutex_unlock_iothread();
3544 void address_space_stw(AddressSpace
*as
, hwaddr addr
, uint32_t val
,
3545 MemTxAttrs attrs
, MemTxResult
*result
)
3547 address_space_stw_internal(as
, addr
, val
, attrs
, result
,
3548 DEVICE_NATIVE_ENDIAN
);
3551 void address_space_stw_le(AddressSpace
*as
, hwaddr addr
, uint32_t val
,
3552 MemTxAttrs attrs
, MemTxResult
*result
)
3554 address_space_stw_internal(as
, addr
, val
, attrs
, result
,
3555 DEVICE_LITTLE_ENDIAN
);
3558 void address_space_stw_be(AddressSpace
*as
, hwaddr addr
, uint32_t val
,
3559 MemTxAttrs attrs
, MemTxResult
*result
)
3561 address_space_stw_internal(as
, addr
, val
, attrs
, result
,
3565 void stw_phys(AddressSpace
*as
, hwaddr addr
, uint32_t val
)
3567 address_space_stw(as
, addr
, val
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3570 void stw_le_phys(AddressSpace
*as
, hwaddr addr
, uint32_t val
)
3572 address_space_stw_le(as
, addr
, val
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3575 void stw_be_phys(AddressSpace
*as
, hwaddr addr
, uint32_t val
)
3577 address_space_stw_be(as
, addr
, val
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3581 void address_space_stq(AddressSpace
*as
, hwaddr addr
, uint64_t val
,
3582 MemTxAttrs attrs
, MemTxResult
*result
)
3586 r
= address_space_rw(as
, addr
, attrs
, (void *) &val
, 8, 1);
3592 void address_space_stq_le(AddressSpace
*as
, hwaddr addr
, uint64_t val
,
3593 MemTxAttrs attrs
, MemTxResult
*result
)
3596 val
= cpu_to_le64(val
);
3597 r
= address_space_rw(as
, addr
, attrs
, (void *) &val
, 8, 1);
3602 void address_space_stq_be(AddressSpace
*as
, hwaddr addr
, uint64_t val
,
3603 MemTxAttrs attrs
, MemTxResult
*result
)
3606 val
= cpu_to_be64(val
);
3607 r
= address_space_rw(as
, addr
, attrs
, (void *) &val
, 8, 1);
3613 void stq_phys(AddressSpace
*as
, hwaddr addr
, uint64_t val
)
3615 address_space_stq(as
, addr
, val
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3618 void stq_le_phys(AddressSpace
*as
, hwaddr addr
, uint64_t val
)
3620 address_space_stq_le(as
, addr
, val
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3623 void stq_be_phys(AddressSpace
*as
, hwaddr addr
, uint64_t val
)
3625 address_space_stq_be(as
, addr
, val
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3628 /* virtual memory access for debug (includes writing to ROM) */
3629 int cpu_memory_rw_debug(CPUState
*cpu
, target_ulong addr
,
3630 uint8_t *buf
, int len
, int is_write
)
3640 page
= addr
& TARGET_PAGE_MASK
;
3641 phys_addr
= cpu_get_phys_page_attrs_debug(cpu
, page
, &attrs
);
3642 asidx
= cpu_asidx_from_attrs(cpu
, attrs
);
3643 /* if no physical page mapped, return an error */
3644 if (phys_addr
== -1)
3646 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
3649 phys_addr
+= (addr
& ~TARGET_PAGE_MASK
);
3651 cpu_physical_memory_write_rom(cpu
->cpu_ases
[asidx
].as
,
3654 address_space_rw(cpu
->cpu_ases
[asidx
].as
, phys_addr
,
3655 MEMTXATTRS_UNSPECIFIED
,
3666 * Allows code that needs to deal with migration bitmaps etc to still be built
3667 * target independent.
3669 size_t qemu_target_page_bits(void)
3671 return TARGET_PAGE_BITS
;
3677 * A helper function for the _utterly broken_ virtio device model to find out if
3678 * it's running on a big endian machine. Don't do this at home kids!
3680 bool target_words_bigendian(void);
3681 bool target_words_bigendian(void)
3683 #if defined(TARGET_WORDS_BIGENDIAN)
3690 #ifndef CONFIG_USER_ONLY
3691 bool cpu_physical_memory_is_io(hwaddr phys_addr
)
3698 mr
= address_space_translate(&address_space_memory
,
3699 phys_addr
, &phys_addr
, &l
, false);
3701 res
= !(memory_region_is_ram(mr
) || memory_region_is_romd(mr
));
3706 int qemu_ram_foreach_block(RAMBlockIterFunc func
, void *opaque
)
3712 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
3713 ret
= func(block
->idstr
, block
->host
, block
->offset
,
3714 block
->used_length
, opaque
);