 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/cutils.h"
#include "exec/exec-all.h"
#include "exec/target_page.h"
#include "hw/qdev-core.h"
#include "hw/qdev-properties.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/boards.h"
#include "hw/xen/xen.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#if defined(CONFIG_USER_ONLY)
#else /* !CONFIG_USER_ONLY */
#include "exec/memory.h"
#include "exec/ioport.h"
#include "sysemu/dma.h"
#include "sysemu/numa.h"
#include "sysemu/hw_accel.h"
#include "exec/address-spaces.h"
#include "sysemu/xen-mapcache.h"
#include "trace-root.h"
#ifdef CONFIG_FALLOCATE_PUNCH_HOLE
#include <linux/falloc.h>
#include "qemu/rcu_queue.h"
#include "qemu/main-loop.h"
#include "translate-all.h"
#include "sysemu/replay.h"
#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "migration/vmstate.h"
#include "qemu/range.h"
#include "qemu/mmap-alloc.h"
#include "monitor/monitor.h"

//#define DEBUG_SUBPAGE
#if !defined(CONFIG_USER_ONLY)
/* ram_list is read under rcu_read_lock()/rcu_read_unlock().  Writes
 * are protected by the ramlist lock.
 */
RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC   (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED     (1 << 1)

/* Only a portion of RAM (used_length) is actually used, and migrated.
 * This used_length size can change across reboots.
 */
#define RAM_RESIZEABLE (1 << 2)

#ifdef TARGET_PAGE_BITS_VARY
int target_page_bits;
bool target_page_bits_decided;

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);

/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
__thread CPUState *current_cpu;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */

uintptr_t qemu_host_page_size;
intptr_t qemu_host_page_mask;
uintptr_t qemu_real_host_page_size;
intptr_t qemu_real_host_page_mask;
bool set_preferred_target_page_bits(int bits)
    /* The target page size is the lowest common denominator for all
     * the CPUs in the system, so we can only make it smaller, never
     * larger.  And we can't make it smaller once we've committed to
     * a particular size.
     */
#ifdef TARGET_PAGE_BITS_VARY
    assert(bits >= TARGET_PAGE_BITS_MIN);
    if (target_page_bits == 0 || target_page_bits > bits) {
        if (target_page_bits_decided) {
        target_page_bits = bits;

#if !defined(CONFIG_USER_ONLY)

static void finalize_target_page_bits(void)
#ifdef TARGET_PAGE_BITS_VARY
    if (target_page_bits == 0) {
        target_page_bits = TARGET_PAGE_BITS_MIN;
    target_page_bits_decided = true;
typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables.  */
#define ADDR_SPACE_BITS 64

#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)

typedef PhysPageEntry Node[P_L2_SIZE];

typedef struct PhysPageMap {
    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb_alloc;
    MemoryRegionSection *sections;

struct AddressSpaceDispatch {
    MemoryRegionSection *mru_section;
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    uint16_t sub_section[];

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3
static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;

/**
 * CPUAddressSpace: all the information a CPU needs about an AddressSpace
 * @cpu: the CPU whose AddressSpace this is
 * @as: the AddressSpace itself
 * @memory_dispatch: its dispatch pointer (cached, RCU protected)
 * @tcg_as_listener: listener for tracking changes to the AddressSpace
 */
struct CPUAddressSpace {
    struct AddressSpaceDispatch *memory_dispatch;
    MemoryListener tcg_as_listener;

struct DirtyBitmapSnapshot {
    unsigned long dirty[];

#if !defined(CONFIG_USER_ONLY)
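
/* Make sure the physical page map has room for at least @nodes more nodes. */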
static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
    static unsigned alloc_hint = 16;
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, alloc_hint);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
        alloc_hint = map->nodes_nb_alloc;
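
/* Allocate a fresh map node and initialise every entry in it. */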
static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
    ret = map->nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);
    e.skip = leaf ? 0 : 1;
    e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
    for (i = 0; i < P_L2_SIZE; ++i) {
        memcpy(&p[i], &e, sizeof(e));
static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map, level == 0);
    p = map->nodes[lp->ptr];
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
/* Compact a non leaf page entry. Simply detect that the entry has a single child,
 * and update our entry so we can skip it and go directly to the destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes)
    unsigned valid_ptr = P_L2_SIZE;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {

    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            phys_page_compact(&p[i], nodes);

    /* We can only compress if there's only one child. */
    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip += p[valid_ptr].skip;
void address_space_dispatch_compact(AddressSpaceDispatch *d)
    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes);

static inline bool section_covers_addr(const MemoryRegionSection *section,
    /* Memory topology clips a memory region to [0, 2^64); size.hi > 0 means
     * the section must cover the entire address space.
     */
    return int128_gethi(section->size) ||
           range_covers_byte(section->offset_within_address_space,
                             int128_getlo(section->size), addr);
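
/* Walk the multi-level page map and return the section covering @addr. */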
static MemoryRegionSection *phys_page_find(AddressSpaceDispatch *d, hwaddr addr)
    PhysPageEntry lp = d->phys_map, *p;
    Node *nodes = d->map.nodes;
    MemoryRegionSection *sections = d->map.sections;
    hwaddr index = addr >> TARGET_PAGE_BITS;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];

    if (section_covers_addr(&sections[lp.ptr], addr)) {
        return &sections[lp.ptr];
        return &sections[PHYS_SECTION_UNASSIGNED];
bool memory_region_is_unassigned(MemoryRegion *mr)
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;

/* Called from RCU critical section */
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        bool resolve_subpage)
    MemoryRegionSection *section = atomic_read(&d->mru_section);

    if (section && section != &d->map.sections[PHYS_SECTION_UNASSIGNED] &&
        section_covers_addr(section, addr)) {
        section = phys_page_find(d, addr);
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
        atomic_set(&d->mru_section, section);

/* Called from RCU critical section */
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
    MemoryRegionSection *section;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    /* MMIO registers can be expected to perform full-width accesses based only
     * on their address, without considering adjacent registers that could
     * decode to completely different MemoryRegions.  When such registers
     * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
     * regions overlap wildly.  For this reason we cannot clamp the accesses
     * here.
     *
     * If the length is small (as is the case for address_space_ldl/stl),
     * everything works fine.  If the incoming length is large, however,
     * the caller really has to do the clamping through memory_access_size.
     */
    if (memory_region_is_ram(mr)) {
        diff = int128_sub(section->size, int128_make64(addr));
        *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
/* Called from RCU critical section */
static MemoryRegionSection flatview_do_translate(FlatView *fv,
                                                 AddressSpace **target_as)
    MemoryRegionSection *section;
    IOMMUMemoryRegion *iommu_mr;
    IOMMUMemoryRegionClass *imrc;

        section = address_space_translate_internal(
                flatview_to_dispatch(fv), addr, &addr,

        iommu_mr = memory_region_get_iommu(section->mr);
        imrc = memory_region_get_iommu_class_nocheck(iommu_mr);

        iotlb = imrc->translate(iommu_mr, addr, is_write ?
                                IOMMU_WO : IOMMU_RO);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {

        fv = address_space_to_flatview(iotlb.target_as);
        *target_as = iotlb.target_as;

    return (MemoryRegionSection) { .mr = &io_mem_unassigned };

/* Called from RCU critical section */
IOMMUTLBEntry address_space_get_iotlb_entry(AddressSpace *as, hwaddr addr,
    MemoryRegionSection section;

    /* Try to get maximum page mask during translation. */
    /* This can never be MMIO. */
    section = flatview_do_translate(address_space_to_flatview(as), addr,
                                    &xlat, &plen, is_write, false, &as);

    /* Illegal translation */
    if (section.mr == &io_mem_unassigned) {

    /* Convert memory region offset into address space offset */
    xlat += section.offset_within_address_space -
            section.offset_within_region;

    if (plen == (hwaddr)-1) {
        /*
         * We use default page size here. Logically it only happens
         * for identity mappings.
         */
        plen = TARGET_PAGE_SIZE;

    /* Convert to address mask */
    return (IOMMUTLBEntry) {
        .iova = addr & ~plen,
        .translated_addr = xlat & ~plen,
        /* IOTLBs are for DMAs, and DMA only allows on RAMs. */
    return (IOMMUTLBEntry) {0};
/* Called from RCU critical section */
MemoryRegion *flatview_translate(FlatView *fv, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool is_write)
    MemoryRegionSection section;
    AddressSpace *as = NULL;

    /* This can be MMIO, so setup MMIO bit. */
    section = flatview_do_translate(fv, addr, xlat, plen, is_write, true, &as);

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        *plen = MIN(page, *plen);

/* Called from RCU critical section */
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen)
    MemoryRegionSection *section;
    AddressSpaceDispatch *d = atomic_rcu_read(&cpu->cpu_ases[asidx].memory_dispatch);

    section = address_space_translate_internal(d, addr, xlat, plen, false);

    assert(!memory_region_is_iommu(section->mr));
#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;

static int cpu_common_pre_load(void *opaque)
    CPUState *cpu = opaque;

    cpu->exception_index = -1;

static bool cpu_common_exception_index_needed(void *opaque)
    CPUState *cpu = opaque;

    return tcg_enabled() && cpu->exception_index != -1;

static const VMStateDescription vmstate_cpu_common_exception_index = {
    .name = "cpu_common/exception_index",
    .minimum_version_id = 1,
    .needed = cpu_common_exception_index_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(exception_index, CPUState),
        VMSTATE_END_OF_LIST()

static bool cpu_common_crash_occurred_needed(void *opaque)
    CPUState *cpu = opaque;

    return cpu->crash_occurred;

static const VMStateDescription vmstate_cpu_common_crash_occurred = {
    .name = "cpu_common/crash_occurred",
    .minimum_version_id = 1,
    .needed = cpu_common_crash_occurred_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(crash_occurred, CPUState),
        VMSTATE_END_OF_LIST()

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .minimum_version_id = 1,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    .subsections = (const VMStateDescription*[]) {
        &vmstate_cpu_common_exception_index,
        &vmstate_cpu_common_crash_occurred,
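
/* Return the CPU whose cpu_index equals @index, or NULL if there is none. */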
CPUState *qemu_get_cpu(int index)
        if (cpu->cpu_index == index) {

#if !defined(CONFIG_USER_ONLY)
void cpu_address_space_init(CPUState *cpu, AddressSpace *as, int asidx)
    CPUAddressSpace *newas;

    /* Target code should have set num_ases before calling us */
    assert(asidx < cpu->num_ases);

        /* address space 0 gets the convenience alias */
    /* KVM cannot currently support multiple address spaces. */
    assert(asidx == 0 || !kvm_enabled());

    if (!cpu->cpu_ases) {
        cpu->cpu_ases = g_new0(CPUAddressSpace, cpu->num_ases);

    newas = &cpu->cpu_ases[asidx];
        newas->tcg_as_listener.commit = tcg_commit;
        memory_listener_register(&newas->tcg_as_listener, as);

AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx)
    /* Return the AddressSpace corresponding to the specified index */
    return cpu->cpu_ases[asidx].as;
void cpu_exec_unrealizefn(CPUState *cpu)
    CPUClass *cc = CPU_GET_CLASS(cpu);

    cpu_list_remove(cpu);

    if (cc->vmsd != NULL) {
        vmstate_unregister(NULL, cc->vmsd, cpu);
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_unregister(NULL, &vmstate_cpu_common, cpu);

Property cpu_common_props[] = {
#ifndef CONFIG_USER_ONLY
    /* Create a memory property for softmmu CPU object,
     * so users can wire up its memory. (This can't go in qom/cpu.c
     * because that file is compiled only once for both user-mode
     * and system builds.) The default if no link is set up is to use
     * the system address space.
     */
    DEFINE_PROP_LINK("memory", CPUState, memory, TYPE_MEMORY_REGION,
    DEFINE_PROP_END_OF_LIST(),

void cpu_exec_initfn(CPUState *cpu)
#ifdef TARGET_WORDS_BIGENDIAN
    cpu->bigendian = true;
    cpu->bigendian = false;

#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();
    cpu->memory = system_memory;
    object_ref(OBJECT(cpu->memory));

void cpu_exec_realizefn(CPUState *cpu, Error **errp)
    CPUClass *cc ATTRIBUTE_UNUSED = CPU_GET_CLASS(cpu);

#ifndef CONFIG_USER_ONLY
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu->cpu_index, &vmstate_cpu_common, cpu);
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu->cpu_index, cc->vmsd, cpu);
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
    tb_invalidate_phys_page_range(pc, pc + 1, 0);

static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
    hwaddr phys = cpu_get_phys_page_attrs_debug(cpu, pc, &attrs);
    int asidx = cpu_asidx_from_attrs(cpu, attrs);

    /* Locks grabbed by tb_invalidate_phys_addr */
    tb_invalidate_phys_addr(cpu->cpu_ases[asidx].as,
                            phys | (pc & ~TARGET_PAGE_MASK));

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)

int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,

void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)

/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
    /* forbid ranges which are empty or run off the end of the address space */
    if (len == 0 || (addr + len - 1) < addr) {
        error_report("tried to set invalid watchpoint at %"
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);

    wp = g_malloc(sizeof(*wp));

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);

    tlb_flush_page(cpu, addr);
/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (addr == wp->vaddr && len == wp->len
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(cpu, wp);

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
    QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);

    tlb_flush_page(cpu, watchpoint->vaddr);

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(cpu, wp);

/* Return true if this watchpoint address matches the specified
 * access (ie the address range covered by the watchpoint overlaps
 * partially or completely with the address range covered by the
 * incoming access).
 */
static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
    /* We know the lengths are non-zero, but a little caution is
     * required to avoid errors in the case where the range ends
     * exactly at the top of the address space and so addr + len
     * wraps round to zero.
     */
    vaddr wpend = wp->vaddr + wp->len - 1;
    vaddr addrend = addr + len - 1;

    return !(addr > wpend || wp->vaddr > addrend);
/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
    bp = g_malloc(sizeof(*bp));

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);

    breakpoint_invalidate(cpu, pc);

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(cpu, bp);

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
    QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);

    breakpoint_invalidate(cpu, breakpoint->pc);

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(cpu, bp);

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
            kvm_update_guest_debug(cpu, 0);
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
void QEMU_NORETURN cpu_abort(CPUState *cpu, const char *fmt, ...)
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_separate()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);

#if defined(CONFIG_USER_ONLY)
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);

#if !defined(CONFIG_USER_ONLY)
/* Called from RCU critical section */
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && addr - block->offset < block->max_length) {
    RAMBLOCK_FOREACH(block) {
        if (addr - block->offset < block->max_length) {

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);

    /* It is safe to write mru_block outside the iothread lock.  This
     *                                        xxx removed from list
     *                                        call_rcu(reclaim_ramblock, xxx);
     * atomic_rcu_set is not needed here.  The block was already published
     * when it was placed into the list.  Here we're just making an extra
     * copy of the pointer.
     */
    ram_list.mru_block = block;
static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
        tlb_reset_dirty(cpu, start1, length);

/* Note: start and end must be within the same ram block.  */
bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
    DirtyMemoryBlocks *blocks;
    unsigned long end, page;

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);

    while (page < end) {
        unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long num = MIN(end - page, DIRTY_MEMORY_BLOCK_SIZE - offset);

        dirty |= bitmap_test_and_clear_atomic(blocks->blocks[idx],

    if (dirty && tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
DirtyBitmapSnapshot *cpu_physical_memory_snapshot_and_clear_dirty
     (ram_addr_t start, ram_addr_t length, unsigned client)
    DirtyMemoryBlocks *blocks;
    unsigned long align = 1UL << (TARGET_PAGE_BITS + BITS_PER_LEVEL);
    ram_addr_t first = QEMU_ALIGN_DOWN(start, align);
    ram_addr_t last  = QEMU_ALIGN_UP(start + length, align);
    DirtyBitmapSnapshot *snap;
    unsigned long page, end, dest;

    snap = g_malloc0(sizeof(*snap) +
                     ((last - first) >> (TARGET_PAGE_BITS + 3)));
    snap->start = first;

    page = first >> TARGET_PAGE_BITS;
    end = last >> TARGET_PAGE_BITS;

    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);

    while (page < end) {
        unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long num = MIN(end - page, DIRTY_MEMORY_BLOCK_SIZE - offset);

        assert(QEMU_IS_ALIGNED(offset, (1 << BITS_PER_LEVEL)));
        assert(QEMU_IS_ALIGNED(num, (1 << BITS_PER_LEVEL)));
        offset >>= BITS_PER_LEVEL;

        bitmap_copy_and_clear_atomic(snap->dirty + dest,
                                     blocks->blocks[idx] + offset,
        dest += num >> BITS_PER_LEVEL;

    if (tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);

bool cpu_physical_memory_snapshot_get_dirty(DirtyBitmapSnapshot *snap,
    unsigned long page, end;

    assert(start >= snap->start);
    assert(start + length <= snap->end);

    end = TARGET_PAGE_ALIGN(start + length - snap->start) >> TARGET_PAGE_BITS;
    page = (start - snap->start) >> TARGET_PAGE_BITS;

    while (page < end) {
        if (test_bit(page, snap->dirty)) {
/* Called from RCU critical section */
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       hwaddr paddr, hwaddr xlat,
                                       target_ulong *address)
    if (memory_region_is_ram(section->mr)) {
        iotlb = memory_region_get_ram_addr(section->mr) + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
            iotlb |= PHYS_SECTION_ROM;
        AddressSpaceDispatch *d;

        d = flatview_to_dispatch(section->fv);
        iotlb = section - d->map.sections;

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;

#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
static subpage_t *subpage_init(FlatView *fv, hwaddr base);
static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
                       qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this.  Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
    phys_mem_alloc = alloc;
*map
,
1269 MemoryRegionSection
*section
)
1271 /* The physical section number is ORed with a page-aligned
1272 * pointer to produce the iotlb entries. Thus it should
1273 * never overflow into the page-aligned value.
1275 assert(map
->sections_nb
< TARGET_PAGE_SIZE
);
1277 if (map
->sections_nb
== map
->sections_nb_alloc
) {
1278 map
->sections_nb_alloc
= MAX(map
->sections_nb_alloc
* 2, 16);
1279 map
->sections
= g_renew(MemoryRegionSection
, map
->sections
,
1280 map
->sections_nb_alloc
);
1282 map
->sections
[map
->sections_nb
] = *section
;
1283 memory_region_ref(section
->mr
);
1284 return map
->sections_nb
++;
1287 static void phys_section_destroy(MemoryRegion
*mr
)
1289 bool have_sub_page
= mr
->subpage
;
1291 memory_region_unref(mr
);
1293 if (have_sub_page
) {
1294 subpage_t
*subpage
= container_of(mr
, subpage_t
, iomem
);
1295 object_unref(OBJECT(&subpage
->iomem
));
1300 static void phys_sections_free(PhysPageMap
*map
)
1302 while (map
->sections_nb
> 0) {
1303 MemoryRegionSection
*section
= &map
->sections
[--map
->sections_nb
];
1304 phys_section_destroy(section
->mr
);
1306 g_free(map
->sections
);
static void register_subpage(FlatView *fv, MemoryRegionSection *section)
    AddressSpaceDispatch *d = flatview_to_dispatch(fv);
    hwaddr base = section->offset_within_address_space
    MemoryRegionSection *existing = phys_page_find(d, base);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(fv, base);
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&d->map, &subsection));
        subpage = container_of(existing->mr, subpage_t, iomem);
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end,
                     phys_section_add(&d->map, section));

static void register_multipage(FlatView *fv,
                               MemoryRegionSection *section)
    AddressSpaceDispatch *d = flatview_to_dispatch(fv);
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(&d->map, section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,

    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
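
/* Split @section on target page boundaries and register each piece with the dispatch. */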
void flatview_add_to_dispatch(FlatView *fv, MemoryRegionSection *section)
    MemoryRegionSection now = *section, remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
                        - now.offset_within_address_space;
        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(fv, &now);
        now.size = int128_zero();
    while (int128_ne(remain.size, now.size)) {
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);
        if (int128_lt(remain.size, page_size)) {
            register_subpage(fv, &now);
        } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
            now.size = page_size;
            register_subpage(fv, &now);
            now.size = int128_and(now.size, int128_neg(page_size));
            register_multipage(fv, &now);

void qemu_flush_coalesced_mmio_buffer(void)
        kvm_flush_coalesced_mmio_buffer();

void qemu_mutex_lock_ramlist(void)
    qemu_mutex_lock(&ram_list.mutex);

void qemu_mutex_unlock_ramlist(void)
    qemu_mutex_unlock(&ram_list.mutex);

void ram_block_dump(Monitor *mon)
    monitor_printf(mon, "%24s %8s  %18s %18s %18s\n",
                   "Block Name", "PSize", "Offset", "Used", "Total");
    RAMBLOCK_FOREACH(block) {
        psize = size_to_str(block->page_size);
        monitor_printf(mon, "%24s %8s  0x%016" PRIx64 " 0x%016" PRIx64
                       " 0x%016" PRIx64 "\n", block->idstr, psize,
                       (uint64_t)block->offset,
                       (uint64_t)block->used_length,
                       (uint64_t)block->max_length);
/*
 * FIXME TOCTTOU: this iterates over memory backends' mem-path, which
 * may or may not name the same files / on the same filesystem now as
 * when we actually open and map them.  Iterate over the file
 * descriptors instead, and use qemu_fd_getpagesize().
 */
static int find_max_supported_pagesize(Object *obj, void *opaque)
    long *hpsize_min = opaque;

    if (object_dynamic_cast(obj, TYPE_MEMORY_BACKEND)) {
        mem_path = object_property_get_str(obj, "mem-path", NULL);
            long hpsize = qemu_mempath_getpagesize(mem_path);
            if (hpsize < *hpsize_min) {
                *hpsize_min = hpsize;
            *hpsize_min = getpagesize();

long qemu_getrampagesize(void)
    long hpsize = LONG_MAX;
    long mainrampagesize;
    Object *memdev_root;

        mainrampagesize = qemu_mempath_getpagesize(mem_path);
        mainrampagesize = getpagesize();

    /* it's possible we have memory-backend objects with
     * hugepage-backed RAM. these may get mapped into system
     * address space via -numa parameters or memory hotplug
     * hooks. we want to take these into account, but we
     * also want to make sure these supported hugepage
     * sizes are applicable across the entire range of memory
     * we may boot from, so we take the min across all
     * backends, and assume normal pages in cases where a
     * backend isn't backed by hugepages.
     */
    memdev_root = object_resolve_path("/objects", NULL);
        object_child_foreach(memdev_root, find_max_supported_pagesize, &hpsize);

    if (hpsize == LONG_MAX) {
        /* No additional memory regions found ==> Report main RAM page size */
        return mainrampagesize;

    /* If NUMA is disabled or the NUMA nodes are not backed with a
     * memory-backend, then there is at least one node using "normal" RAM,
     * so if its page size is smaller we have got to report that size instead.
     */
    if (hpsize > mainrampagesize &&
        (nb_numa_nodes == 0 || numa_info[0].node_memdev == NULL)) {
            error_report("Huge page support disabled (n/a for main memory).");
        return mainrampagesize;

long qemu_getrampagesize(void)
    return getpagesize();
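
/* Return the size in bytes of the file that backs @fd. */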
static int64_t get_file_size(int fd)
    int64_t size = lseek(fd, 0, SEEK_END);

static int file_ram_open(const char *path,
                         const char *region_name,
    char *sanitized_name;

        fd = open(path, O_RDWR);
            /* @path names an existing file, use it */
        if (errno == ENOENT) {
            /* @path names a file that doesn't exist, create it */
            fd = open(path, O_RDWR | O_CREAT | O_EXCL, 0644);
        } else if (errno == EISDIR) {
            /* @path names a directory, create a file there */
            /* Make name safe to use with mkstemp by replacing '/' with '_'. */
            sanitized_name = g_strdup(region_name);
            for (c = sanitized_name; *c != '\0'; c++) {

            filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
            g_free(sanitized_name);

            fd = mkstemp(filename);
        if (errno != EEXIST && errno != EINTR) {
            error_setg_errno(errp, errno,
                             "can't open backing store %s for guest RAM",
        /*
         * Try again on EINTR and EEXIST.  The latter happens when
         * something else creates the file between our two open().
         */
static void *file_ram_alloc(RAMBlock *block,
    block->page_size = qemu_fd_getpagesize(fd);
    block->mr->align = block->page_size;
#if defined(__s390x__)
    if (kvm_enabled()) {
        block->mr->align = MAX(block->mr->align, QEMU_VMALLOC_ALIGN);

    if (memory < block->page_size) {
        error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
                   "or larger than page size 0x%zx",
                   memory, block->page_size);

    memory = ROUND_UP(memory, block->page_size);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     *
     * Do not truncate the non-empty backend file to avoid corrupting
     * the existing data in the file. Disabling shrinking is not
     * enough. For example, the current vNVDIMM implementation stores
     * the guest NVDIMM labels at the end of the backend file.  If the
     * backend file is later extended, QEMU will not be able to find
     * those labels.  Therefore, extending the non-empty backend file
     * is disabled as well.
     */
    if (truncate && ftruncate(fd, memory)) {
        perror("ftruncate");

    area = qemu_ram_mmap(fd, memory, block->mr->align,
                         block->flags & RAM_SHARED);
    if (area == MAP_FAILED) {
        error_setg_errno(errp, errno,
                         "unable to map backing store for guest RAM");

        os_mem_prealloc(fd, area, memory, smp_cpus, errp);
        if (errp && *errp) {
            qemu_ram_munmap(area, memory);
/* Called with the ramlist lock held.  */
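/* Find a gap in the ram_addr_t space large enough for @size bytes. */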
static ram_addr_t find_ram_offset(ram_addr_t size)
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    assert(size != 0); /* it would hand out same offset multiple times */

    if (QLIST_EMPTY_RCU(&ram_list.blocks)) {

    RAMBLOCK_FOREACH(block) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->max_length;

        RAMBLOCK_FOREACH(next_block) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
        if (next - end >= size && next - end < mingap) {
            mingap = next - end;

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",

unsigned long last_ram_page(void)
    ram_addr_t last = 0;

    RAMBLOCK_FOREACH(block) {
        last = MAX(last, block->offset + block->max_length);

    return last >> TARGET_PAGE_BITS;

static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
    /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
    if (!machine_dump_guest_core(current_machine)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
            perror("qemu_madvise");
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                    "but dump_guest_core=off specified\n");

const char *qemu_ram_get_idstr(RAMBlock *rb)

bool qemu_ram_is_shared(RAMBlock *rb)
    return rb->flags & RAM_SHARED;

/* Called with iothread lock held.  */
void qemu_ram_set_idstr(RAMBlock *new_block, const char *name, DeviceState *dev)
    assert(!new_block->idstr[0]);

        char *id = qdev_get_dev_path(dev);
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    RAMBLOCK_FOREACH(block) {
        if (block != new_block &&
            !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",

/* Called with iothread lock held.  */
void qemu_ram_unset_idstr(RAMBlock *block)
    /* FIXME: arch_init.c assumes that this is not called throughout
     * migration.  Ignore the problem since hot-unplug during migration
     * does not work anyway.
     */
        memset(block->idstr, 0, sizeof(block->idstr));

size_t qemu_ram_pagesize(RAMBlock *rb)
    return rb->page_size;

/* Returns the largest size of page in use */
size_t qemu_ram_pagesize_largest(void)
    RAMBLOCK_FOREACH(block) {
        largest = MAX(largest, qemu_ram_pagesize(block));

static int memory_try_enable_merging(void *addr, size_t len)
    if (!machine_mem_merge(current_machine)) {
        /* disabled by the user */

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
/* Only legal before guest might have detected the memory size: e.g. on
 * incoming migration, or right after reset.
 *
 * As the memory core doesn't know how memory is accessed, it is up to the
 * resize callback to update device state and/or add assertions to detect
 * misuse, if necessary.
 */
int qemu_ram_resize(RAMBlock *block, ram_addr_t newsize, Error **errp)
    newsize = HOST_PAGE_ALIGN(newsize);

    if (block->used_length == newsize) {

    if (!(block->flags & RAM_RESIZEABLE)) {
        error_setg_errno(errp, EINVAL,
                         "Length mismatch: %s: 0x" RAM_ADDR_FMT
                         " in != 0x" RAM_ADDR_FMT, block->idstr,
                         newsize, block->used_length);

    if (block->max_length < newsize) {
        error_setg_errno(errp, EINVAL,
                         "Length too large: %s: 0x" RAM_ADDR_FMT
                         " > 0x" RAM_ADDR_FMT, block->idstr,
                         newsize, block->max_length);

    cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
    block->used_length = newsize;
    cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
    memory_region_set_size(block->mr, newsize);
    if (block->resized) {
        block->resized(block->idstr, newsize, block->host);
/* Called with ram_list.mutex held */
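/* Extend the dirty memory bitmaps so they cover new_ram_size pages. */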
static void dirty_memory_extend(ram_addr_t old_ram_size,
                                ram_addr_t new_ram_size)
    ram_addr_t old_num_blocks = DIV_ROUND_UP(old_ram_size,
                                             DIRTY_MEMORY_BLOCK_SIZE);
    ram_addr_t new_num_blocks = DIV_ROUND_UP(new_ram_size,
                                             DIRTY_MEMORY_BLOCK_SIZE);

    /* Only need to extend if block count increased */
    if (new_num_blocks <= old_num_blocks) {

    for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
        DirtyMemoryBlocks *old_blocks;
        DirtyMemoryBlocks *new_blocks;

        old_blocks = atomic_rcu_read(&ram_list.dirty_memory[i]);
        new_blocks = g_malloc(sizeof(*new_blocks) +
                              sizeof(new_blocks->blocks[0]) * new_num_blocks);

        if (old_num_blocks) {
            memcpy(new_blocks->blocks, old_blocks->blocks,
                   old_num_blocks * sizeof(old_blocks->blocks[0]));

        for (j = old_num_blocks; j < new_num_blocks; j++) {
            new_blocks->blocks[j] = bitmap_new(DIRTY_MEMORY_BLOCK_SIZE);

        atomic_rcu_set(&ram_list.dirty_memory[i], new_blocks);

            g_free_rcu(old_blocks, rcu);
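
/* Allocate host memory for @new_block if needed and insert it into ram_list. */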
static void ram_block_add(RAMBlock *new_block, Error **errp)
    RAMBlock *last_block = NULL;
    ram_addr_t old_ram_size, new_ram_size;

    old_ram_size = last_ram_page();
    qemu_mutex_lock_ramlist();
    new_block->offset = find_ram_offset(new_block->max_length);

    if (!new_block->host) {
        if (xen_enabled()) {
            xen_ram_alloc(new_block->offset, new_block->max_length,
                          new_block->mr, &err);
                error_propagate(errp, err);
                qemu_mutex_unlock_ramlist();
            new_block->host = phys_mem_alloc(new_block->max_length,
                                             &new_block->mr->align);
            if (!new_block->host) {
                error_setg_errno(errp, errno,
                                 "cannot set up guest memory '%s'",
                                 memory_region_name(new_block->mr));
                qemu_mutex_unlock_ramlist();
            memory_try_enable_merging(new_block->host, new_block->max_length);

    new_ram_size = MAX(old_ram_size,
                       (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS);
    if (new_ram_size > old_ram_size) {
        dirty_memory_extend(old_ram_size, new_ram_size);
    /* Keep the list sorted from biggest to smallest block.  Unlike QTAILQ,
     * QLIST (which has an RCU-friendly variant) does not have insertion at
     * tail, so save the last element in last_block.
     */
    RAMBLOCK_FOREACH(block) {
        if (block->max_length < new_block->max_length) {
        QLIST_INSERT_BEFORE_RCU(block, new_block, next);
    } else if (last_block) {
        QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
    } else { /* list is empty */
        QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
    ram_list.mru_block = NULL;

    /* Write list before version */
    qemu_mutex_unlock_ramlist();

    cpu_physical_memory_set_dirty_range(new_block->offset,
                                        new_block->used_length,
    if (new_block->host) {
        qemu_ram_setup_dump(new_block->host, new_block->max_length);
        qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
        /* MADV_DONTFORK is also needed by KVM in absence of synchronous MMU */
        qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
        ram_block_notify_add(new_block->host, new_block->max_length);
RAMBlock *qemu_ram_alloc_from_fd(ram_addr_t size, MemoryRegion *mr,
    RAMBlock *new_block;
    Error *local_err = NULL;

    if (xen_enabled()) {
        error_setg(errp, "-mem-path not supported with Xen");

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
                   "host lacks kvm mmu notifiers, -mem-path unsupported");

    if (phys_mem_alloc != qemu_anon_ram_alloc) {
        /*
         * file_ram_alloc() needs to allocate just like
         * phys_mem_alloc, but we haven't bothered to provide
         * a hook there.
         */
                   "-mem-path not supported with this accelerator");

    size = HOST_PAGE_ALIGN(size);
    file_size = get_file_size(fd);
    if (file_size > 0 && file_size < size) {
        error_setg(errp, "backing store %s size 0x%" PRIx64
                   " does not match 'size' option 0x" RAM_ADDR_FMT,
                   mem_path, file_size, size);

    new_block = g_malloc0(sizeof(*new_block));
    new_block->used_length = size;
    new_block->max_length = size;
    new_block->flags = share ? RAM_SHARED : 0;
    new_block->host = file_ram_alloc(new_block, size, fd, !file_size, errp);
    if (!new_block->host) {

    ram_block_add(new_block, &local_err);
        error_propagate(errp, local_err);

RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
                                   bool share, const char *mem_path,
    fd = file_ram_open(mem_path, memory_region_name(mr), &created, errp);

    block = qemu_ram_alloc_from_fd(size, mr, share, fd, errp);
RAMBlock *qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
                                  void (*resized)(const char*,
                                  void *host, bool resizeable,
                                  MemoryRegion *mr, Error **errp)
    RAMBlock *new_block;
    Error *local_err = NULL;

    size = HOST_PAGE_ALIGN(size);
    max_size = HOST_PAGE_ALIGN(max_size);
    new_block = g_malloc0(sizeof(*new_block));
    new_block->resized = resized;
    new_block->used_length = size;
    new_block->max_length = max_size;
    assert(max_size >= size);
    new_block->page_size = getpagesize();
    new_block->host = host;
        new_block->flags |= RAM_PREALLOC;
        new_block->flags |= RAM_RESIZEABLE;
    ram_block_add(new_block, &local_err);
        error_propagate(errp, local_err);

RAMBlock *qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                  MemoryRegion *mr, Error **errp)
    return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);

RAMBlock *qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
    return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);

RAMBlock *qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
                                    void (*resized)(const char*,
                                    MemoryRegion *mr, Error **errp)
    return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);

static void reclaim_ramblock(RAMBlock *block)
    if (block->flags & RAM_PREALLOC) {
    } else if (xen_enabled()) {
        xen_invalidate_map_cache_entry(block->host);
    } else if (block->fd >= 0) {
        qemu_ram_munmap(block->host, block->max_length);
        qemu_anon_ram_free(block->host, block->max_length);

void qemu_ram_free(RAMBlock *block)
        ram_block_notify_remove(block->host, block->max_length);

    qemu_mutex_lock_ramlist();
    QLIST_REMOVE_RCU(block, next);
    ram_list.mru_block = NULL;
    /* Write list before version */
    call_rcu(block, reclaim_ramblock, rcu);
    qemu_mutex_unlock_ramlist();
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
    RAMBLOCK_FOREACH(block) {
        offset = addr - block->offset;
        if (offset < block->max_length) {
            vaddr = ramblock_ptr(block, offset);
            if (block->flags & RAM_PREALLOC) {
            } else if (xen_enabled()) {
                if (block->fd >= 0) {
                    flags |= (block->flags & RAM_SHARED ?
                              MAP_SHARED : MAP_PRIVATE);
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, block->fd, offset);
                    /*
                     * Remap needs to match alloc.  Accelerators that
                     * set phys_mem_alloc never remap.  If they did,
                     * we'd need a remap hook here.
                     */
                    assert(phys_mem_alloc == qemu_anon_ram_alloc);

                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                memory_try_enable_merging(vaddr, length);
                qemu_ram_setup_dump(vaddr, length);

#endif /* !_WIN32 */
/* Return a host pointer to ram allocated with qemu_ram_alloc.
 * This should not be used for general purpose DMA.  Use address_space_map
 * or address_space_rw instead. For local memory (e.g. video ram) that the
 * device owns, use memory_region_get_ram_ptr.
 *
 * Called within RCU critical section.
 */
void *qemu_map_ram_ptr(RAMBlock *ram_block, ram_addr_t addr)
    RAMBlock *block = ram_block;

    if (block == NULL) {
        block = qemu_get_ram_block(addr);
        addr -= block->offset;

    if (xen_enabled() && block->host == NULL) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map until the end of the page.
         */
        if (block->offset == 0) {
            return xen_map_cache(addr, 0, 0, false);

        block->host = xen_map_cache(block->offset, block->max_length, 1, false);

    return ramblock_ptr(block, addr);

/* Return a host pointer to guest's ram. Similar to qemu_map_ram_ptr
 * but takes a size argument.
 *
 * Called within RCU critical section.
 */
static void *qemu_ram_ptr_length(RAMBlock *ram_block, ram_addr_t addr,
                                 hwaddr *size, bool lock)
    RAMBlock *block = ram_block;

    if (block == NULL) {
        block = qemu_get_ram_block(addr);
        addr -= block->offset;
    *size = MIN(*size, block->max_length - addr);

    if (xen_enabled() && block->host == NULL) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map the requested area.
         */
        if (block->offset == 0) {
            return xen_map_cache(addr, *size, lock, lock);

        block->host = xen_map_cache(block->offset, block->max_length, 1, lock);

    return ramblock_ptr(block, addr);
/*
 * Translates a host ptr back to a RAMBlock, a ram_addr and an offset
 *
 * ptr: Host pointer to look up
 * round_offset: If true round the result offset down to a page boundary
 * *ram_addr: set to result ram_addr
 * *offset: set to result offset within the RAMBlock
 *
 * Returns: RAMBlock (or NULL if not found)
 *
 * By the time this function returns, the returned pointer is not protected
 * by RCU anymore.  If the caller is not within an RCU critical section and
 * does not hold the iothread lock, it must have other means of protecting the
 * pointer, such as a reference to the region that includes the incoming
 * ram_addr.
 */
RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
    uint8_t *host = ptr;

    if (xen_enabled()) {
        ram_addr_t ram_addr;

        ram_addr = xen_ram_addr_from_mapcache(ptr);
        block = qemu_get_ram_block(ram_addr);
            *offset = ram_addr - block->offset;

    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && block->host && host - block->host < block->max_length) {

    RAMBLOCK_FOREACH(block) {
2281 if (block
->host
== NULL
) {
2284 if (host
- block
->host
< block
->max_length
) {
2293 *offset
= (host
- block
->host
);
2295 *offset
&= TARGET_PAGE_MASK
;
2302 * Finds the named RAMBlock
2304 * name: The name of RAMBlock to find
2306 * Returns: RAMBlock (or NULL if not found)
2308 RAMBlock
*qemu_ram_block_by_name(const char *name
)
2312 RAMBLOCK_FOREACH(block
) {
2313 if (!strcmp(name
, block
->idstr
)) {
2321 /* Some of the softmmu routines need to translate from a host pointer
2322 (typically a TLB entry) back to a ram offset. */
2323 ram_addr_t
qemu_ram_addr_from_host(void *ptr
)
2328 block
= qemu_ram_block_from_host(ptr
, false, &offset
);
2330 return RAM_ADDR_INVALID
;
2333 return block
->offset
+ offset
;
2336 /* Called within RCU critical section. */
2337 static void notdirty_mem_write(void *opaque
, hwaddr ram_addr
,
2338 uint64_t val
, unsigned size
)
2340 bool locked
= false;
2342 assert(tcg_enabled());
2343 if (!cpu_physical_memory_get_dirty_flag(ram_addr
, DIRTY_MEMORY_CODE
)) {
2346 tb_invalidate_phys_page_fast(ram_addr
, size
);
2350 stb_p(qemu_map_ram_ptr(NULL
, ram_addr
), val
);
2353 stw_p(qemu_map_ram_ptr(NULL
, ram_addr
), val
);
2356 stl_p(qemu_map_ram_ptr(NULL
, ram_addr
), val
);
2366 /* Set both VGA and migration bits for simplicity and to remove
2367 * the notdirty callback faster.
2369 cpu_physical_memory_set_dirty_range(ram_addr
, size
,
2370 DIRTY_CLIENTS_NOCODE
);
2371 /* we remove the notdirty callback only if the code has been
2373 if (!cpu_physical_memory_is_clean(ram_addr
)) {
2374 tlb_set_dirty(current_cpu
, current_cpu
->mem_io_vaddr
);
2378 static bool notdirty_mem_accepts(void *opaque
, hwaddr addr
,
2379 unsigned size
, bool is_write
)
2384 static const MemoryRegionOps notdirty_mem_ops
= {
2385 .write
= notdirty_mem_write
,
2386 .valid
.accepts
= notdirty_mem_accepts
,
2387 .endianness
= DEVICE_NATIVE_ENDIAN
,
2390 /* Generate a debug exception if a watchpoint has been hit. */
2391 static void check_watchpoint(int offset
, int len
, MemTxAttrs attrs
, int flags
)
2393 CPUState
*cpu
= current_cpu
;
2394 CPUClass
*cc
= CPU_GET_CLASS(cpu
);
2395 CPUArchState
*env
= cpu
->env_ptr
;
2396 target_ulong pc
, cs_base
;
2401 assert(tcg_enabled());
2402 if (cpu
->watchpoint_hit
) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);

    vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    vaddr = cc->adjust_watchpoint_address(cpu, vaddr, len);
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, len)
            && (wp->flags & flags)) {
            if (flags == BP_MEM_READ) {
                wp->flags |= BP_WATCHPOINT_HIT_READ;
                wp->flags |= BP_WATCHPOINT_HIT_WRITE;
            wp->hitaddr = vaddr;
            wp->hitattrs = attrs;
            if (!cpu->watchpoint_hit) {
                if (wp->flags & BP_CPU &&
                    !cc->debug_check_watchpoint(cpu, wp)) {
                    wp->flags &= ~BP_WATCHPOINT_HIT;
                cpu->watchpoint_hit = wp;

                /* Both tb_lock and iothread_mutex will be reset when
                 * cpu_loop_exit or cpu_loop_exit_noexc longjmp
                 * back into the cpu_exec main loop.
                 */
                tb_check_watchpoint(cpu);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    cpu->exception_index = EXCP_DEBUG;
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
                    cpu_loop_exit_noexc(cpu);
                wp->flags &= ~BP_WATCHPOINT_HIT;
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata,
                                  unsigned size, MemTxAttrs attrs)
{
    MemTxResult res;
    uint64_t data;
    int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
    AddressSpace *as = current_cpu->cpu_ases[asidx].as;

    check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ);
    switch (size) {
    case 1:
        data = address_space_ldub(as, addr, attrs, &res);
        break;
    case 2:
        data = address_space_lduw(as, addr, attrs, &res);
        break;
    case 4:
        data = address_space_ldl(as, addr, attrs, &res);
        break;
    default:
        abort();
    }
    *pdata = data;
    return res;
}

static MemTxResult watch_mem_write(void *opaque, hwaddr addr,
                                   uint64_t val, unsigned size,
                                   MemTxAttrs attrs)
{
    MemTxResult res;
    int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
    AddressSpace *as = current_cpu->cpu_ases[asidx].as;

    check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE);
    switch (size) {
    case 1:
        address_space_stb(as, addr, val, attrs, &res);
        break;
    case 2:
        address_space_stw(as, addr, val, attrs, &res);
        break;
    case 4:
        address_space_stl(as, addr, val, attrs, &res);
        break;
    default:
        abort();
    }
    return res;
}

static const MemoryRegionOps watch_mem_ops = {
    .read_with_attrs = watch_mem_read,
    .write_with_attrs = watch_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
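
/*
 * The routines above only ever see accesses to pages that contain at least
 * one watchpoint; such pages are forced out of the fast TLB path when the
 * watchpoint is registered, for example (illustrative only):
 *
 *     cpu_watchpoint_insert(cpu, addr, 4, BP_MEM_WRITE | BP_GDB, NULL);
 */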
static MemTxResult flatview_write(FlatView *fv, hwaddr addr, MemTxAttrs attrs,
                                  const uint8_t *buf, int len);
static bool flatview_access_valid(FlatView *fv, hwaddr addr, int len,
                                  bool is_write);
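
/*
 * Subpage handling: when a MemoryRegionSection covers less than a full
 * target page, the whole page is dispatched to a subpage_t whose callbacks
 * below re-enter the flat view at byte granularity, so several small
 * regions can share one entry in the radix tree.
 */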
static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
                                unsigned len, MemTxAttrs attrs)
{
    subpage_t *subpage = opaque;
    uint8_t buf[8];
    MemTxResult res;

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
           subpage, len, addr);
#endif
    res = flatview_read(subpage->fv, addr + subpage->base, attrs, buf, len);
    if (res) {
        return res;
    }
    switch (len) {
    case 1:
        *data = ldub_p(buf);
        return MEMTX_OK;
    case 2:
        *data = lduw_p(buf);
        return MEMTX_OK;
    case 4:
        *data = ldl_p(buf);
        return MEMTX_OK;
    case 8:
        *data = ldq_p(buf);
        return MEMTX_OK;
    default:
        abort();
    }
}

static MemTxResult subpage_write(void *opaque, hwaddr addr,
                                 uint64_t value, unsigned len, MemTxAttrs attrs)
{
    subpage_t *subpage = opaque;
    uint8_t buf[8];

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx
           " value %"PRIx64"\n",
           __func__, subpage, len, addr, value);
#endif
    switch (len) {
    case 1:
        stb_p(buf, value);
        break;
    case 2:
        stw_p(buf, value);
        break;
    case 4:
        stl_p(buf, value);
        break;
    case 8:
        stq_p(buf, value);
        break;
    default:
        abort();
    }
    return flatview_write(subpage->fv, addr + subpage->base, attrs, buf, len);
}
static bool subpage_accepts(void *opaque, hwaddr addr,
                            unsigned len, bool is_write)
{
    subpage_t *subpage = opaque;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
           __func__, subpage, is_write ? 'w' : 'r', len, addr);
#endif

    return flatview_access_valid(subpage->fv, addr + subpage->base,
                                 len, is_write);
}

static const MemoryRegionOps subpage_ops = {
    .read_with_attrs = subpage_read,
    .write_with_attrs = subpage_write,
    .impl.min_access_size = 1,
    .impl.max_access_size = 8,
    .valid.min_access_size = 1,
    .valid.max_access_size = 8,
    .valid.accepts = subpage_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
           __func__, mmio, start, end, idx, eidx, section);
#endif
    for (; idx <= eidx; idx++) {
        mmio->sub_section[idx] = section;
    }

    return 0;
}

static subpage_t *subpage_init(FlatView *fv, hwaddr base)
{
    subpage_t *mmio;

    mmio = g_malloc0(sizeof(subpage_t) + TARGET_PAGE_SIZE * sizeof(uint16_t));
    mmio->fv = fv;
    mmio->base = base;
    memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
                          NULL, TARGET_PAGE_SIZE);
    mmio->iomem.subpage = true;
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
           mmio, base, TARGET_PAGE_SIZE);
#endif
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);

    return mmio;
}
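
/*
 * dummy_section() registers a catch-all section for one of the fixed
 * special regions (unassigned, notdirty, ROM, watch).  It spans the whole
 * 64-bit range so the resulting section index is valid for any offset that
 * ends up mapped to it.
 */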
static uint16_t dummy_section(PhysPageMap *map, FlatView *fv, MemoryRegion *mr)
{
    MemoryRegionSection section = {
        .fv = fv,
        .mr = mr,
        .offset_within_address_space = 0,
        .offset_within_region = 0,
        .size = int128_2_64(),
    };

    return phys_section_add(map, &section);
}
MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index, MemTxAttrs attrs)
{
    int asidx = cpu_asidx_from_attrs(cpu, attrs);
    CPUAddressSpace *cpuas = &cpu->cpu_ases[asidx];
    AddressSpaceDispatch *d = atomic_rcu_read(&cpuas->memory_dispatch);
    MemoryRegionSection *sections = d->map.sections;

    return sections[index & ~TARGET_PAGE_MASK].mr;
}
static void io_mem_init(void)
{
    memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
    memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
                          NULL, UINT64_MAX);

    /* io_mem_notdirty calls tb_invalidate_phys_page_fast,
     * which can be called without the iothread mutex.
     */
    memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
                          NULL, UINT64_MAX);
    memory_region_clear_global_locking(&io_mem_notdirty);

    memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
                          NULL, UINT64_MAX);
}
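
/*
 * The first four sections of every dispatch must line up with the
 * PHYS_SECTION_UNASSIGNED/NOTDIRTY/ROM/WATCH constants used by the TLB
 * fast path; the asserts below verify that registration order.
 */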
AddressSpaceDispatch *address_space_dispatch_new(FlatView *fv)
{
    AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
    uint16_t n;

    n = dummy_section(&d->map, fv, &io_mem_unassigned);
    assert(n == PHYS_SECTION_UNASSIGNED);
    n = dummy_section(&d->map, fv, &io_mem_notdirty);
    assert(n == PHYS_SECTION_NOTDIRTY);
    n = dummy_section(&d->map, fv, &io_mem_rom);
    assert(n == PHYS_SECTION_ROM);
    n = dummy_section(&d->map, fv, &io_mem_watch);
    assert(n == PHYS_SECTION_WATCH);

    d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };

    return d;
}

void address_space_dispatch_free(AddressSpaceDispatch *d)
{
    phys_sections_free(&d->map);
    g_free(d);
}
static void tcg_commit(MemoryListener *listener)
{
    CPUAddressSpace *cpuas;
    AddressSpaceDispatch *d;

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener);
    cpu_reloading_memory_map();
    /* The CPU and TLB are protected by the iothread lock.
     * We reload the dispatch pointer now because cpu_reloading_memory_map()
     * may have split the RCU critical section.
     */
    d = address_space_to_dispatch(cpuas->as);
    atomic_rcu_set(&cpuas->memory_dispatch, d);
    tlb_flush(cpuas->cpu);
}
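
/*
 * memory_map_init() creates the root "system" and "io" containers behind
 * the global address_space_memory and address_space_io; board code then
 * maps RAM and device regions into these through the memory API.
 */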
static void memory_map_init(void)
{
    system_memory = g_malloc(sizeof(*system_memory));

    memory_region_init(system_memory, NULL, "system", UINT64_MAX);
    address_space_init(&address_space_memory, system_memory, "memory");

    system_io = g_malloc(sizeof(*system_io));
    memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
                          65536);
    address_space_init(&address_space_io, system_io, "I/O");
}

MemoryRegion *get_system_memory(void)
{
    return system_memory;
}

MemoryRegion *get_system_io(void)
{
    return system_io;
}
#endif /* !defined(CONFIG_USER_ONLY) */

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

#else
static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
                                     hwaddr length)
{
    uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
    addr += memory_region_get_ram_addr(mr);

    /* No early return if dirty_log_mask is or becomes 0, because
     * cpu_physical_memory_set_dirty_range will still call
     * xen_modified_memory.
     */
    if (dirty_log_mask) {
        dirty_log_mask =
            cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
    }
    if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
        assert(tcg_enabled());
        tb_lock();
        tb_invalidate_phys_range(addr, addr + length);
        tb_unlock();
        dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
    }
    cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
}
static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
{
    unsigned access_size_max = mr->ops->valid.max_access_size;

    /* Regions are assumed to support 1-4 byte accesses unless
       otherwise specified.  */
    if (access_size_max == 0) {
        access_size_max = 4;
    }

    /* Bound the maximum access by the alignment of the address.  */
    if (!mr->ops->impl.unaligned) {
        unsigned align_size_max = addr & -addr;
        if (align_size_max != 0 && align_size_max < access_size_max) {
            access_size_max = align_size_max;
        }
    }

    /* Don't attempt accesses larger than the maximum.  */
    if (l > access_size_max) {
        l = access_size_max;
    }
    l = pow2floor(l);

    return l;
}
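
/*
 * Example: a 9-byte access at address 0x1006 into a region that allows at
 * most 4-byte accesses is clamped as follows: the address alignment
 * (0x1006 & -0x1006 == 2) lowers the limit to 2, so the dispatch loops
 * below issue a 2-byte access and iterate for the remainder.
 */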
static bool prepare_mmio_access(MemoryRegion *mr)
{
    bool unlocked = !qemu_mutex_iothread_locked();
    bool release_lock = false;

    if (unlocked && mr->global_locking) {
        qemu_mutex_lock_iothread();
        unlocked = false;
        release_lock = true;
    }
    if (mr->flush_coalesced_mmio) {
        if (unlocked) {
            qemu_mutex_lock_iothread();
        }
        qemu_flush_coalesced_mmio_buffer();
        if (unlocked) {
            qemu_mutex_unlock_iothread();
        }
    }

    return release_lock;
}
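
/*
 * prepare_mmio_access() returns true when it took the iothread lock on
 * behalf of the caller; the dispatch loops below remember this in
 * release_lock and drop the lock again before moving on to the next chunk.
 */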
/* Called within RCU critical section.  */
static MemTxResult flatview_write_continue(FlatView *fv, hwaddr addr,
                                           MemTxAttrs attrs,
                                           const uint8_t *buf,
                                           int len, hwaddr addr1,
                                           hwaddr l, MemoryRegion *mr)
{
    uint8_t *ptr;
    uint64_t val;
    MemTxResult result = MEMTX_OK;
    bool release_lock = false;

    for (;;) {
        if (!memory_access_is_direct(mr, true)) {
            release_lock |= prepare_mmio_access(mr);
            l = memory_access_size(mr, l, addr1);
            /* XXX: could force current_cpu to NULL to avoid
               potential bugs */
            switch (l) {
            case 8:
                /* 64 bit write access */
                val = ldq_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 8,
                                                       attrs);
                break;
            case 4:
                /* 32 bit write access */
                val = (uint32_t)ldl_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 4,
                                                       attrs);
                break;
            case 2:
                /* 16 bit write access */
                val = lduw_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 2,
                                                       attrs);
                break;
            case 1:
                /* 8 bit write access */
                val = ldub_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 1,
                                                       attrs);
                break;
            default:
                abort();
            }
        } else {
            /* RAM case */
            ptr = qemu_ram_ptr_length(mr->ram_block, addr1, &l, false);
            memcpy(ptr, buf, l);
            invalidate_and_set_dirty(mr, addr1, l);
        }

        if (release_lock) {
            qemu_mutex_unlock_iothread();
            release_lock = false;
        }

        len -= l;
        buf += l;
        addr += l;

        if (!len) {
            break;
        }

        l = len;
        mr = flatview_translate(fv, addr, &addr1, &l, true);
    }

    return result;
}
static MemTxResult flatview_write(FlatView *fv, hwaddr addr, MemTxAttrs attrs,
                                  const uint8_t *buf, int len)
{
    hwaddr l;
    hwaddr addr1;
    MemoryRegion *mr;
    MemTxResult result = MEMTX_OK;

    if (len > 0) {
        rcu_read_lock();
        l = len;
        mr = flatview_translate(fv, addr, &addr1, &l, true);
        result = flatview_write_continue(fv, addr, attrs, buf, len,
                                         addr1, l, mr);
        rcu_read_unlock();
    }

    return result;
}

MemTxResult address_space_write(AddressSpace *as, hwaddr addr,
                                MemTxAttrs attrs,
                                const uint8_t *buf, int len)
{
    return flatview_write(address_space_to_flatview(as), addr, attrs, buf, len);
}
/* Called within RCU critical section.  */
MemTxResult flatview_read_continue(FlatView *fv, hwaddr addr,
                                   MemTxAttrs attrs, uint8_t *buf,
                                   int len, hwaddr addr1, hwaddr l,
                                   MemoryRegion *mr)
{
    uint8_t *ptr;
    uint64_t val;
    MemTxResult result = MEMTX_OK;
    bool release_lock = false;

    for (;;) {
        if (!memory_access_is_direct(mr, false)) {
            /* I/O case */
            release_lock |= prepare_mmio_access(mr);
            l = memory_access_size(mr, l, addr1);
            switch (l) {
            case 8:
                /* 64 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 8,
                                                      attrs);
                stq_p(buf, val);
                break;
            case 4:
                /* 32 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 4,
                                                      attrs);
                stl_p(buf, val);
                break;
            case 2:
                /* 16 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 2,
                                                      attrs);
                stw_p(buf, val);
                break;
            case 1:
                /* 8 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 1,
                                                      attrs);
                stb_p(buf, val);
                break;
            default:
                abort();
            }
        } else {
            /* RAM case */
            ptr = qemu_ram_ptr_length(mr->ram_block, addr1, &l, false);
            memcpy(buf, ptr, l);
        }

        if (release_lock) {
            qemu_mutex_unlock_iothread();
            release_lock = false;
        }

        len -= l;
        buf += l;
        addr += l;

        if (!len) {
            break;
        }

        l = len;
        mr = flatview_translate(fv, addr, &addr1, &l, false);
    }

    return result;
}
MemTxResult flatview_read_full(FlatView *fv, hwaddr addr,
                               MemTxAttrs attrs, uint8_t *buf, int len)
{
    hwaddr l;
    hwaddr addr1;
    MemoryRegion *mr;
    MemTxResult result = MEMTX_OK;

    if (len > 0) {
        rcu_read_lock();
        l = len;
        mr = flatview_translate(fv, addr, &addr1, &l, false);
        result = flatview_read_continue(fv, addr, attrs, buf, len,
                                        addr1, l, mr);
        rcu_read_unlock();
    }

    return result;
}

static MemTxResult flatview_rw(FlatView *fv, hwaddr addr, MemTxAttrs attrs,
                               uint8_t *buf, int len, bool is_write)
{
    if (is_write) {
        return flatview_write(fv, addr, attrs, (uint8_t *)buf, len);
    } else {
        return flatview_read(fv, addr, attrs, (uint8_t *)buf, len);
    }
}

MemTxResult address_space_rw(AddressSpace *as, hwaddr addr,
                             MemTxAttrs attrs, uint8_t *buf,
                             int len, bool is_write)
{
    return flatview_rw(address_space_to_flatview(as),
                       addr, attrs, buf, len, is_write);
}
void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
                            int len, int is_write)
{
    address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
                     buf, len, is_write);
}
enum write_rom_type {
    WRITE_DATA,
    FLUSH_CACHE,
};

static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
    hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
{
    hwaddr l;
    uint8_t *ptr;
    hwaddr addr1;
    MemoryRegion *mr;

    rcu_read_lock();
    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);

        if (!(memory_region_is_ram(mr) ||
              memory_region_is_romd(mr))) {
            l = memory_access_size(mr, l, addr1);
        } else {
            /* ROM/RAM case */
            ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
            switch (type) {
            case WRITE_DATA:
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(mr, addr1, l);
                break;
            case FLUSH_CACHE:
                flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
                break;
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
    rcu_read_unlock();
}

/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
                                   const uint8_t *buf, int len)
{
    cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
}
void cpu_flush_icache_range(hwaddr start, int len)
{
    /*
     * This function should do the same thing as an icache flush that was
     * triggered from within the guest. For TCG we are always cache coherent,
     * so there is no need to flush anything. For KVM / Xen we need to flush
     * the host's instruction cache at least.
     */
    if (tcg_enabled()) {
        return;
    }

    cpu_physical_memory_write_rom_internal(&address_space_memory,
                                           start, NULL, len, FLUSH_CACHE);
}
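
/*
 * When address_space_map() cannot hand out a direct pointer into guest RAM
 * (typically because the target is MMIO), it falls back to this single,
 * statically allocated bounce buffer: reads are staged into it up front and
 * writes are flushed out of it in address_space_unmap().
 */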
typedef struct {
    MemoryRegion *mr;
    void *buffer;
    hwaddr addr;
    hwaddr len;
    bool in_use;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    QEMUBH *bh;
    QLIST_ENTRY(MapClient) link;
} MapClient;

QemuMutex map_client_list_lock;
static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);
static void cpu_unregister_map_client_do(MapClient *client)
{
    QLIST_REMOVE(client, link);
    g_free(client);
}

static void cpu_notify_map_clients_locked(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        qemu_bh_schedule(client->bh);
        cpu_unregister_map_client_do(client);
    }
}
void cpu_register_map_client(QEMUBH *bh)
{
    MapClient *client = g_malloc(sizeof(*client));

    qemu_mutex_lock(&map_client_list_lock);
    client->bh = bh;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    if (!atomic_read(&bounce.in_use)) {
        cpu_notify_map_clients_locked();
    }
    qemu_mutex_unlock(&map_client_list_lock);
}
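
/*
 * A typical consumer of the map-client protocol retries a failed
 * address_space_map() from a bottom half once the bounce buffer frees up,
 * roughly along these lines (illustrative only, names are made up):
 *
 *     s->bh = qemu_bh_new(retry_dma_cb, s);
 *     ...
 *     if (!address_space_map(as, addr, &len, is_write)) {
 *         cpu_register_map_client(s->bh);
 *     }
 */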
void cpu_exec_init_all(void)
{
    qemu_mutex_init(&ram_list.mutex);
    /* The data structures we set up here depend on knowing the page size,
     * so no more changes can be made after this point.
     * In an ideal world, nothing we did before we had finished the
     * machine setup would care about the target page size, and we could
     * do this much later, rather than requiring board models to state
     * up front what their requirements are.
     */
    finalize_target_page_bits();
    io_mem_init();
    memory_map_init();
    qemu_mutex_init(&map_client_list_lock);
}
void cpu_unregister_map_client(QEMUBH *bh)
{
    MapClient *client;

    qemu_mutex_lock(&map_client_list_lock);
    QLIST_FOREACH(client, &map_client_list, link) {
        if (client->bh == bh) {
            cpu_unregister_map_client_do(client);
            break;
        }
    }
    qemu_mutex_unlock(&map_client_list_lock);
}

static void cpu_notify_map_clients(void)
{
    qemu_mutex_lock(&map_client_list_lock);
    cpu_notify_map_clients_locked();
    qemu_mutex_unlock(&map_client_list_lock);
}
static bool flatview_access_valid(FlatView *fv, hwaddr addr, int len,
                                  bool is_write)
{
    MemoryRegion *mr;
    hwaddr l, xlat;

    rcu_read_lock();
    while (len > 0) {
        l = len;
        mr = flatview_translate(fv, addr, &xlat, &l, is_write);
        if (!memory_access_is_direct(mr, is_write)) {
            l = memory_access_size(mr, l, addr);
            if (!memory_region_access_valid(mr, xlat, l, is_write)) {
                rcu_read_unlock();
                return false;
            }
        }

        len -= l;
        addr += l;
    }
    rcu_read_unlock();
    return true;
}

bool address_space_access_valid(AddressSpace *as, hwaddr addr,
                                int len, bool is_write)
{
    return flatview_access_valid(address_space_to_flatview(as),
                                 addr, len, is_write);
}
static hwaddr
flatview_extend_translation(FlatView *fv, hwaddr addr,
                            hwaddr target_len,
                            MemoryRegion *mr, hwaddr base, hwaddr len,
                            bool is_write)
{
    hwaddr done = 0;
    hwaddr xlat;
    MemoryRegion *this_mr;

    for (;;) {
        target_len -= len;
        addr += len;
        done += len;
        if (target_len == 0) {
            return done;
        }

        len = target_len;
        this_mr = flatview_translate(fv, addr, &xlat,
                                     &len, is_write);
        if (this_mr != mr || xlat != base + done) {
            return done;
        }
    }
}
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *address_space_map(AddressSpace *as,
                        hwaddr addr,
                        hwaddr *plen,
                        bool is_write)
{
    hwaddr len = *plen;
    hwaddr l, xlat;
    MemoryRegion *mr;
    void *ptr;
    FlatView *fv = address_space_to_flatview(as);

    if (len == 0) {
        return NULL;
    }

    l = len;
    rcu_read_lock();
    mr = flatview_translate(fv, addr, &xlat, &l, is_write);

    if (!memory_access_is_direct(mr, is_write)) {
        if (atomic_xchg(&bounce.in_use, true)) {
            rcu_read_unlock();
            return NULL;
        }
        /* Avoid unbounded allocations */
        l = MIN(l, TARGET_PAGE_SIZE);
        bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
        bounce.addr = addr;
        bounce.len = l;

        memory_region_ref(mr);
        bounce.mr = mr;
        if (!is_write) {
            flatview_read(fv, addr, MEMTXATTRS_UNSPECIFIED,
                          bounce.buffer, l);
        }

        rcu_read_unlock();
        *plen = l;
        return bounce.buffer;
    }


    memory_region_ref(mr);
    *plen = flatview_extend_translation(fv, addr, len, mr, xlat,
                                        l, is_write);
    ptr = qemu_ram_ptr_length(mr->ram_block, xlat, plen, true);
    rcu_read_unlock();

    return ptr;
}
/* Unmaps a memory region previously mapped by address_space_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         int is_write, hwaddr access_len)
{
    if (buffer != bounce.buffer) {
        MemoryRegion *mr;
        ram_addr_t addr1;

        mr = memory_region_from_host(buffer, &addr1);
        assert(mr != NULL);
        if (is_write) {
            invalidate_and_set_dirty(mr, addr1, access_len);
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        memory_region_unref(mr);
        return;
    }
    if (is_write) {
        address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
                            bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    memory_region_unref(bounce.mr);
    atomic_mb_set(&bounce.in_use, false);
    cpu_notify_map_clients();
}
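
/*
 * Typical usage pattern for the two functions above (illustrative):
 *
 *     hwaddr xlen = len;
 *     void *host = address_space_map(as, gpa, &xlen, true);
 *     if (host) {
 *         ... fill at most xlen bytes at host ...
 *         address_space_unmap(as, host, xlen, true, xlen);
 *     }
 *
 * Callers must cope with xlen coming back smaller than requested and with a
 * NULL return when the single bounce buffer is already in use.
 */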
void *cpu_physical_memory_map(hwaddr addr,
                              hwaddr *plen,
                              int is_write)
{
    return address_space_map(&address_space_memory, addr, plen, is_write);
}

void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                               int is_write, hwaddr access_len)
{
    return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
}
#define ARG1_DECL                AddressSpace *as
#define ARG1                     as
#define SUFFIX
#define TRANSLATE(...)           address_space_translate(as, __VA_ARGS__)
#define IS_DIRECT(mr, is_write)  memory_access_is_direct(mr, is_write)
#define MAP_RAM(mr, ofs)         qemu_map_ram_ptr((mr)->ram_block, ofs)
#define INVALIDATE(mr, ofs, len) invalidate_and_set_dirty(mr, ofs, len)
#define RCU_READ_LOCK(...)       rcu_read_lock()
#define RCU_READ_UNLOCK(...)     rcu_read_unlock()
#include "memory_ldst.inc.c"
int64_t address_space_cache_init(MemoryRegionCache *cache,
                                 AddressSpace *as,
                                 hwaddr addr,
                                 hwaddr len,
                                 bool is_write)
{
    cache->len = len;
    cache->as = as;
    cache->xlat = addr;
    return len;
}

void address_space_cache_invalidate(MemoryRegionCache *cache,
                                    hwaddr addr,
                                    hwaddr access_len)
{
}

void address_space_cache_destroy(MemoryRegionCache *cache)
{
    cache->as = NULL;
}

#define ARG1_DECL                MemoryRegionCache *cache
#define ARG1                     cache
#define SUFFIX                   _cached
#define TRANSLATE(addr, ...)     \
    address_space_translate(cache->as, cache->xlat + (addr), __VA_ARGS__)
#define IS_DIRECT(mr, is_write)  true
#define MAP_RAM(mr, ofs)         qemu_map_ram_ptr((mr)->ram_block, ofs)
#define INVALIDATE(mr, ofs, len) invalidate_and_set_dirty(mr, ofs, len)
#define RCU_READ_LOCK()          rcu_read_lock()
#define RCU_READ_UNLOCK()        rcu_read_unlock()
#include "memory_ldst.inc.c"
/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

    cpu_synchronize_state(cpu);
    while (len > 0) {
        int asidx;
        MemTxAttrs attrs;

        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_attrs_debug(cpu, page, &attrs);
        asidx = cpu_asidx_from_attrs(cpu, attrs);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write) {
            cpu_physical_memory_write_rom(cpu->cpu_ases[asidx].as,
                                          phys_addr, buf, l);
        } else {
            address_space_rw(cpu->cpu_ases[asidx].as, phys_addr,
                             MEMTXATTRS_UNSPECIFIED,
                             buf, l, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
/*
 * Allows code that needs to deal with migration bitmaps etc to still be built
 * target independent.
 */
size_t qemu_target_page_size(void)
{
    return TARGET_PAGE_SIZE;
}

int qemu_target_page_bits(void)
{
    return TARGET_PAGE_BITS;
}

int qemu_target_page_bits_min(void)
{
    return TARGET_PAGE_BITS_MIN;
}
#endif
/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool target_words_bigendian(void);
bool target_words_bigendian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}
#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegion *mr;
    hwaddr l = 1;
    bool res;

    rcu_read_lock();
    mr = address_space_translate(&address_space_memory,
                                 phys_addr, &phys_addr, &l, false);

    res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
    rcu_read_unlock();
    return res;
}

int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
{
    RAMBlock *block;
    int ret = 0;

    rcu_read_lock();
    RAMBLOCK_FOREACH(block) {
        ret = func(block->idstr, block->host, block->offset,
                   block->used_length, opaque);
        if (ret) {
            break;
        }
    }
    rcu_read_unlock();
    return ret;
}
/*
 * Unmap pages of memory from start to start+length such that
 * they a) read as 0, b) Trigger whatever fault mechanism
 * the OS provides for postcopy.
 * The pages must be unmapped by the end of the function.
 * Returns: 0 on success, non-0 on failure
 */
int ram_block_discard_range(RAMBlock *rb, uint64_t start, size_t length)
{
    int ret = -1;

    uint8_t *host_startaddr = rb->host + start;

    if ((uintptr_t)host_startaddr & (rb->page_size - 1)) {
        error_report("ram_block_discard_range: Unaligned start address: %p",
                     host_startaddr);
        goto err;
    }

    if ((start + length) <= rb->used_length) {
        uint8_t *host_endaddr = host_startaddr + length;
        if ((uintptr_t)host_endaddr & (rb->page_size - 1)) {
            error_report("ram_block_discard_range: Unaligned end address: %p",
                         host_endaddr);
            goto err;
        }

        errno = ENOTSUP; /* If we are missing MADVISE etc */

        if (rb->page_size == qemu_host_page_size) {
#if defined(CONFIG_MADVISE)
            /* Note: We need the madvise MADV_DONTNEED behaviour of definitely
             * freeing the page.
             */
            ret = madvise(host_startaddr, length, MADV_DONTNEED);
#endif
        } else {
            /* Huge page case  - unfortunately it can't do DONTNEED, but
             * it can do the equivalent by FALLOC_FL_PUNCH_HOLE in the
             * huge page file.
             */
#ifdef CONFIG_FALLOCATE_PUNCH_HOLE
            ret = fallocate(rb->fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
                            start, length);
#endif
        }
        if (ret) {
            ret = -errno;
            error_report("ram_block_discard_range: Failed to discard range "
                         "%s:%" PRIx64 " +%zx (%d)",
                         rb->idstr, start, length, ret);
        }
    } else {
        error_report("ram_block_discard_range: Overrun block '%s' (%" PRIu64
                     "/%zx/" RAM_ADDR_FMT")",
                     rb->idstr, start, length, rb->used_length);
    }

err:
    return ret;
}

#endif
void page_size_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
    qemu_real_host_page_size = getpagesize();
    qemu_real_host_page_mask = -(intptr_t)qemu_real_host_page_size;
    if (qemu_host_page_size == 0) {
        qemu_host_page_size = qemu_real_host_page_size;
    }
    if (qemu_host_page_size < TARGET_PAGE_SIZE) {
        qemu_host_page_size = TARGET_PAGE_SIZE;
    }
    qemu_host_page_mask = -(intptr_t)qemu_host_page_size;
}
#if !defined(CONFIG_USER_ONLY)

static void mtree_print_phys_entries(fprintf_function mon, void *f,
                                     int start, int end, int skip, int ptr)
{
    if (start == end - 1) {
        mon(f, "\t%3d      ", start);
    } else {
        mon(f, "\t%3d..%-3d ", start, end - 1);
    }
    mon(f, " skip=%d ", skip);
    if (ptr == PHYS_MAP_NODE_NIL) {
        mon(f, " ptr=NIL");
    } else if (!skip) {
        mon(f, " ptr=#%d", ptr);
    } else {
        mon(f, " ptr=[%d]", ptr);
    }
    mon(f, "\n");
}
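
/*
 * mtree_print_dispatch() dumps one AddressSpaceDispatch for the monitor's
 * memory-tree commands: first the section table (with the four fixed
 * sections listed by name), then the per-node entries of the radix tree,
 * using mtree_print_phys_entries() to compress runs of identical entries.
 */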
#define MR_SIZE(size) (int128_nz(size) ? (hwaddr)int128_get64( \
                           int128_sub((size), int128_one())) : 0)

void mtree_print_dispatch(fprintf_function mon, void *f,
                          AddressSpaceDispatch *d, MemoryRegion *root)
{
    int i;

    mon(f, "  Dispatch\n");
    mon(f, "    Physical sections\n");

    for (i = 0; i < d->map.sections_nb; ++i) {
        MemoryRegionSection *s = d->map.sections + i;
        const char *names[] = { " [unassigned]", " [not dirty]",
                                " [ROM]", " [watch]" };

        mon(f, "      #%d @" TARGET_FMT_plx ".." TARGET_FMT_plx " %s%s%s%s%s",
            i,
            s->offset_within_address_space,
            s->offset_within_address_space + MR_SIZE(s->mr->size),
            s->mr->name ? s->mr->name : "(noname)",
            i < ARRAY_SIZE(names) ? names[i] : "",
            s->mr == root ? " [ROOT]" : "",
            s == d->mru_section ? " [MRU]" : "",
            s->mr->is_iommu ? " [iommu]" : "");

        if (s->mr->alias) {
            mon(f, " alias=%s", s->mr->alias->name ?
                    s->mr->alias->name : "noname");
        }
        mon(f, "\n");
    }

    mon(f, "    Nodes (%d bits per level, %d levels) ptr=[%d] skip=%d\n",
               P_L2_BITS, P_L2_LEVELS, d->phys_map.ptr, d->phys_map.skip);
    for (i = 0; i < d->map.nodes_nb; ++i) {
        int j, jprev;
        PhysPageEntry prev;
        Node *n = d->map.nodes + i;

        mon(f, "      [%d]\n", i);

        for (j = 0, jprev = 0, prev = *n[0]; j < ARRAY_SIZE(*n); ++j) {
            PhysPageEntry *pe = *n + j;

            if (pe->ptr == prev.ptr && pe->skip == prev.skip) {
                continue;
            }

            mtree_print_phys_entries(mon, f, jprev, j, prev.skip, prev.ptr);

            jprev = j;
            prev = *pe;
        }

        if (jprev != ARRAY_SIZE(*n)) {
            mtree_print_phys_entries(mon, f, jprev, j, prev.skip, prev.ptr);
        }
    }
}