2 * RAM allocation and memory access
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
20 #include "qemu/osdep.h"
21 #include "qemu-common.h"
22 #include "qapi/error.h"
24 #include "qemu/cutils.h"
26 #include "exec/exec-all.h"
27 #include "exec/target_page.h"
28 #include "hw/qdev-core.h"
29 #include "hw/qdev-properties.h"
30 #include "hw/boards.h"
31 #include "hw/xen/xen.h"
32 #include "sysemu/kvm.h"
33 #include "sysemu/sysemu.h"
34 #include "sysemu/tcg.h"
35 #include "sysemu/qtest.h"
36 #include "qemu/timer.h"
37 #include "qemu/config-file.h"
38 #include "qemu/error-report.h"
39 #include "qemu/qemu-print.h"
40 #include "exec/memory.h"
41 #include "exec/ioport.h"
42 #include "sysemu/dma.h"
43 #include "sysemu/hostmem.h"
44 #include "sysemu/hw_accel.h"
45 #include "exec/address-spaces.h"
46 #include "sysemu/xen-mapcache.h"
47 #include "trace/trace-root.h"
49 #ifdef CONFIG_FALLOCATE_PUNCH_HOLE
50 #include <linux/falloc.h>
53 #include "qemu/rcu_queue.h"
54 #include "qemu/main-loop.h"
55 #include "exec/translate-all.h"
56 #include "sysemu/replay.h"
58 #include "exec/memory-internal.h"
59 #include "exec/ram_addr.h"
62 #include "qemu/pmem.h"
64 #include "migration/vmstate.h"
66 #include "qemu/range.h"
68 #include "qemu/mmap-alloc.h"
71 #include "monitor/monitor.h"
73 #ifdef CONFIG_LIBDAXCTL
74 #include <daxctl/libdaxctl.h>
77 //#define DEBUG_SUBPAGE
79 /* ram_list is read under rcu_read_lock()/rcu_read_unlock(). Writes
80 * are protected by the ramlist lock.
82 RAMList ram_list
= { .blocks
= QLIST_HEAD_INITIALIZER(ram_list
.blocks
) };
84 static MemoryRegion
*system_memory
;
85 static MemoryRegion
*system_io
;
87 AddressSpace address_space_io
;
88 AddressSpace address_space_memory
;
90 static MemoryRegion io_mem_unassigned
;
92 typedef struct PhysPageEntry PhysPageEntry
;
94 struct PhysPageEntry
{
95 /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */
97 /* index into phys_sections (!skip) or phys_map_nodes (skip) */
101 #define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)
103 /* Size of the L2 (and L3, etc) page tables. */
104 #define ADDR_SPACE_BITS 64
107 #define P_L2_SIZE (1 << P_L2_BITS)
109 #define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
111 typedef PhysPageEntry Node
[P_L2_SIZE
];
113 typedef struct PhysPageMap
{
116 unsigned sections_nb
;
117 unsigned sections_nb_alloc
;
119 unsigned nodes_nb_alloc
;
121 MemoryRegionSection
*sections
;
124 struct AddressSpaceDispatch
{
125 MemoryRegionSection
*mru_section
;
126 /* This is a multi-level map on the physical address space.
127 * The bottom level has pointers to MemoryRegionSections.
129 PhysPageEntry phys_map
;
133 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
134 typedef struct subpage_t
{
138 uint16_t sub_section
[];
141 #define PHYS_SECTION_UNASSIGNED 0
143 static void io_mem_init(void);
144 static void memory_map_init(void);
145 static void tcg_log_global_after_sync(MemoryListener
*listener
);
146 static void tcg_commit(MemoryListener
*listener
);
149 * CPUAddressSpace: all the information a CPU needs about an AddressSpace
150 * @cpu: the CPU whose AddressSpace this is
151 * @as: the AddressSpace itself
152 * @memory_dispatch: its dispatch pointer (cached, RCU protected)
153 * @tcg_as_listener: listener for tracking changes to the AddressSpace
155 struct CPUAddressSpace
{
158 struct AddressSpaceDispatch
*memory_dispatch
;
159 MemoryListener tcg_as_listener
;
162 struct DirtyBitmapSnapshot
{
165 unsigned long dirty
[];
168 static void phys_map_node_reserve(PhysPageMap
*map
, unsigned nodes
)
170 static unsigned alloc_hint
= 16;
171 if (map
->nodes_nb
+ nodes
> map
->nodes_nb_alloc
) {
172 map
->nodes_nb_alloc
= MAX(alloc_hint
, map
->nodes_nb
+ nodes
);
173 map
->nodes
= g_renew(Node
, map
->nodes
, map
->nodes_nb_alloc
);
174 alloc_hint
= map
->nodes_nb_alloc
;
178 static uint32_t phys_map_node_alloc(PhysPageMap
*map
, bool leaf
)
185 ret
= map
->nodes_nb
++;
187 assert(ret
!= PHYS_MAP_NODE_NIL
);
188 assert(ret
!= map
->nodes_nb_alloc
);
190 e
.skip
= leaf
? 0 : 1;
191 e
.ptr
= leaf
? PHYS_SECTION_UNASSIGNED
: PHYS_MAP_NODE_NIL
;
192 for (i
= 0; i
< P_L2_SIZE
; ++i
) {
193 memcpy(&p
[i
], &e
, sizeof(e
));
198 static void phys_page_set_level(PhysPageMap
*map
, PhysPageEntry
*lp
,
199 hwaddr
*index
, uint64_t *nb
, uint16_t leaf
,
203 hwaddr step
= (hwaddr
)1 << (level
* P_L2_BITS
);
205 if (lp
->skip
&& lp
->ptr
== PHYS_MAP_NODE_NIL
) {
206 lp
->ptr
= phys_map_node_alloc(map
, level
== 0);
208 p
= map
->nodes
[lp
->ptr
];
209 lp
= &p
[(*index
>> (level
* P_L2_BITS
)) & (P_L2_SIZE
- 1)];
211 while (*nb
&& lp
< &p
[P_L2_SIZE
]) {
212 if ((*index
& (step
- 1)) == 0 && *nb
>= step
) {
218 phys_page_set_level(map
, lp
, index
, nb
, leaf
, level
- 1);
224 static void phys_page_set(AddressSpaceDispatch
*d
,
225 hwaddr index
, uint64_t nb
,
228 /* Wildly overreserve - it doesn't matter much. */
229 phys_map_node_reserve(&d
->map
, 3 * P_L2_LEVELS
);
231 phys_page_set_level(&d
->map
, &d
->phys_map
, &index
, &nb
, leaf
, P_L2_LEVELS
- 1);
234 /* Compact a non leaf page entry. Simply detect that the entry has a single child,
235 * and update our entry so we can skip it and go directly to the destination.
237 static void phys_page_compact(PhysPageEntry
*lp
, Node
*nodes
)
239 unsigned valid_ptr
= P_L2_SIZE
;
244 if (lp
->ptr
== PHYS_MAP_NODE_NIL
) {
249 for (i
= 0; i
< P_L2_SIZE
; i
++) {
250 if (p
[i
].ptr
== PHYS_MAP_NODE_NIL
) {
257 phys_page_compact(&p
[i
], nodes
);
261 /* We can only compress if there's only one child. */
266 assert(valid_ptr
< P_L2_SIZE
);
268 /* Don't compress if it won't fit in the # of bits we have. */
269 if (P_L2_LEVELS
>= (1 << 6) &&
270 lp
->skip
+ p
[valid_ptr
].skip
>= (1 << 6)) {
274 lp
->ptr
= p
[valid_ptr
].ptr
;
275 if (!p
[valid_ptr
].skip
) {
276 /* If our only child is a leaf, make this a leaf. */
277 /* By design, we should have made this node a leaf to begin with so we
278 * should never reach here.
279 * But since it's so simple to handle this, let's do it just in case we
284 lp
->skip
+= p
[valid_ptr
].skip
;
288 void address_space_dispatch_compact(AddressSpaceDispatch
*d
)
290 if (d
->phys_map
.skip
) {
291 phys_page_compact(&d
->phys_map
, d
->map
.nodes
);
295 static inline bool section_covers_addr(const MemoryRegionSection
*section
,
298 /* Memory topology clips a memory region to [0, 2^64); size.hi > 0 means
299 * the section must cover the entire address space.
301 return int128_gethi(section
->size
) ||
302 range_covers_byte(section
->offset_within_address_space
,
303 int128_getlo(section
->size
), addr
);
306 static MemoryRegionSection
*phys_page_find(AddressSpaceDispatch
*d
, hwaddr addr
)
308 PhysPageEntry lp
= d
->phys_map
, *p
;
309 Node
*nodes
= d
->map
.nodes
;
310 MemoryRegionSection
*sections
= d
->map
.sections
;
311 hwaddr index
= addr
>> TARGET_PAGE_BITS
;
314 for (i
= P_L2_LEVELS
; lp
.skip
&& (i
-= lp
.skip
) >= 0;) {
315 if (lp
.ptr
== PHYS_MAP_NODE_NIL
) {
316 return §ions
[PHYS_SECTION_UNASSIGNED
];
319 lp
= p
[(index
>> (i
* P_L2_BITS
)) & (P_L2_SIZE
- 1)];
322 if (section_covers_addr(§ions
[lp
.ptr
], addr
)) {
323 return §ions
[lp
.ptr
];
325 return §ions
[PHYS_SECTION_UNASSIGNED
];
329 /* Called from RCU critical section */
330 static MemoryRegionSection
*address_space_lookup_region(AddressSpaceDispatch
*d
,
332 bool resolve_subpage
)
334 MemoryRegionSection
*section
= qatomic_read(&d
->mru_section
);
337 if (!section
|| section
== &d
->map
.sections
[PHYS_SECTION_UNASSIGNED
] ||
338 !section_covers_addr(section
, addr
)) {
339 section
= phys_page_find(d
, addr
);
340 qatomic_set(&d
->mru_section
, section
);
342 if (resolve_subpage
&& section
->mr
->subpage
) {
343 subpage
= container_of(section
->mr
, subpage_t
, iomem
);
344 section
= &d
->map
.sections
[subpage
->sub_section
[SUBPAGE_IDX(addr
)]];
349 /* Called from RCU critical section */
350 static MemoryRegionSection
*
351 address_space_translate_internal(AddressSpaceDispatch
*d
, hwaddr addr
, hwaddr
*xlat
,
352 hwaddr
*plen
, bool resolve_subpage
)
354 MemoryRegionSection
*section
;
358 section
= address_space_lookup_region(d
, addr
, resolve_subpage
);
359 /* Compute offset within MemoryRegionSection */
360 addr
-= section
->offset_within_address_space
;
362 /* Compute offset within MemoryRegion */
363 *xlat
= addr
+ section
->offset_within_region
;
367 /* MMIO registers can be expected to perform full-width accesses based only
368 * on their address, without considering adjacent registers that could
369 * decode to completely different MemoryRegions. When such registers
370 * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
371 * regions overlap wildly. For this reason we cannot clamp the accesses
374 * If the length is small (as is the case for address_space_ldl/stl),
375 * everything works fine. If the incoming length is large, however,
376 * the caller really has to do the clamping through memory_access_size.
378 if (memory_region_is_ram(mr
)) {
379 diff
= int128_sub(section
->size
, int128_make64(addr
));
380 *plen
= int128_get64(int128_min(diff
, int128_make64(*plen
)));
386 * address_space_translate_iommu - translate an address through an IOMMU
387 * memory region and then through the target address space.
389 * @iommu_mr: the IOMMU memory region that we start the translation from
390 * @addr: the address to be translated through the MMU
391 * @xlat: the translated address offset within the destination memory region.
392 * It cannot be %NULL.
393 * @plen_out: valid read/write length of the translated address. It
395 * @page_mask_out: page mask for the translated address. This
396 * should only be meaningful for IOMMU translated
397 * addresses, since there may be huge pages that this bit
398 * would tell. It can be %NULL if we don't care about it.
399 * @is_write: whether the translation operation is for write
400 * @is_mmio: whether this can be MMIO, set true if it can
401 * @target_as: the address space targeted by the IOMMU
402 * @attrs: transaction attributes
404 * This function is called from RCU critical section. It is the common
405 * part of flatview_do_translate and address_space_translate_cached.
407 static MemoryRegionSection
address_space_translate_iommu(IOMMUMemoryRegion
*iommu_mr
,
410 hwaddr
*page_mask_out
,
413 AddressSpace
**target_as
,
416 MemoryRegionSection
*section
;
417 hwaddr page_mask
= (hwaddr
)-1;
421 IOMMUMemoryRegionClass
*imrc
= memory_region_get_iommu_class_nocheck(iommu_mr
);
425 if (imrc
->attrs_to_index
) {
426 iommu_idx
= imrc
->attrs_to_index(iommu_mr
, attrs
);
429 iotlb
= imrc
->translate(iommu_mr
, addr
, is_write
?
430 IOMMU_WO
: IOMMU_RO
, iommu_idx
);
432 if (!(iotlb
.perm
& (1 << is_write
))) {
436 addr
= ((iotlb
.translated_addr
& ~iotlb
.addr_mask
)
437 | (addr
& iotlb
.addr_mask
));
438 page_mask
&= iotlb
.addr_mask
;
439 *plen_out
= MIN(*plen_out
, (addr
| iotlb
.addr_mask
) - addr
+ 1);
440 *target_as
= iotlb
.target_as
;
442 section
= address_space_translate_internal(
443 address_space_to_dispatch(iotlb
.target_as
), addr
, xlat
,
446 iommu_mr
= memory_region_get_iommu(section
->mr
);
447 } while (unlikely(iommu_mr
));
450 *page_mask_out
= page_mask
;
455 return (MemoryRegionSection
) { .mr
= &io_mem_unassigned
};
459 * flatview_do_translate - translate an address in FlatView
461 * @fv: the flat view that we want to translate on
462 * @addr: the address to be translated in above address space
463 * @xlat: the translated address offset within memory region. It
465 * @plen_out: valid read/write length of the translated address. It
466 * can be @NULL when we don't care about it.
467 * @page_mask_out: page mask for the translated address. This
468 * should only be meaningful for IOMMU translated
469 * addresses, since there may be huge pages that this bit
470 * would tell. It can be @NULL if we don't care about it.
471 * @is_write: whether the translation operation is for write
472 * @is_mmio: whether this can be MMIO, set true if it can
473 * @target_as: the address space targeted by the IOMMU
474 * @attrs: memory transaction attributes
476 * This function is called from RCU critical section
478 static MemoryRegionSection
flatview_do_translate(FlatView
*fv
,
482 hwaddr
*page_mask_out
,
485 AddressSpace
**target_as
,
488 MemoryRegionSection
*section
;
489 IOMMUMemoryRegion
*iommu_mr
;
490 hwaddr plen
= (hwaddr
)(-1);
496 section
= address_space_translate_internal(
497 flatview_to_dispatch(fv
), addr
, xlat
,
500 iommu_mr
= memory_region_get_iommu(section
->mr
);
501 if (unlikely(iommu_mr
)) {
502 return address_space_translate_iommu(iommu_mr
, xlat
,
503 plen_out
, page_mask_out
,
508 /* Not behind an IOMMU, use default page size. */
509 *page_mask_out
= ~TARGET_PAGE_MASK
;
515 /* Called from RCU critical section */
516 IOMMUTLBEntry
address_space_get_iotlb_entry(AddressSpace
*as
, hwaddr addr
,
517 bool is_write
, MemTxAttrs attrs
)
519 MemoryRegionSection section
;
520 hwaddr xlat
, page_mask
;
523 * This can never be MMIO, and we don't really care about plen,
526 section
= flatview_do_translate(address_space_to_flatview(as
), addr
, &xlat
,
527 NULL
, &page_mask
, is_write
, false, &as
,
530 /* Illegal translation */
531 if (section
.mr
== &io_mem_unassigned
) {
535 /* Convert memory region offset into address space offset */
536 xlat
+= section
.offset_within_address_space
-
537 section
.offset_within_region
;
539 return (IOMMUTLBEntry
) {
541 .iova
= addr
& ~page_mask
,
542 .translated_addr
= xlat
& ~page_mask
,
543 .addr_mask
= page_mask
,
544 /* IOTLBs are for DMAs, and DMA only allows on RAMs. */
549 return (IOMMUTLBEntry
) {0};
552 /* Called from RCU critical section */
553 MemoryRegion
*flatview_translate(FlatView
*fv
, hwaddr addr
, hwaddr
*xlat
,
554 hwaddr
*plen
, bool is_write
,
558 MemoryRegionSection section
;
559 AddressSpace
*as
= NULL
;
561 /* This can be MMIO, so setup MMIO bit. */
562 section
= flatview_do_translate(fv
, addr
, xlat
, plen
, NULL
,
563 is_write
, true, &as
, attrs
);
566 if (xen_enabled() && memory_access_is_direct(mr
, is_write
)) {
567 hwaddr page
= ((addr
& TARGET_PAGE_MASK
) + TARGET_PAGE_SIZE
) - addr
;
568 *plen
= MIN(page
, *plen
);
574 typedef struct TCGIOMMUNotifier
{
582 static void tcg_iommu_unmap_notify(IOMMUNotifier
*n
, IOMMUTLBEntry
*iotlb
)
584 TCGIOMMUNotifier
*notifier
= container_of(n
, TCGIOMMUNotifier
, n
);
586 if (!notifier
->active
) {
589 tlb_flush(notifier
->cpu
);
590 notifier
->active
= false;
591 /* We leave the notifier struct on the list to avoid reallocating it later.
592 * Generally the number of IOMMUs a CPU deals with will be small.
593 * In any case we can't unregister the iommu notifier from a notify
598 static void tcg_register_iommu_notifier(CPUState
*cpu
,
599 IOMMUMemoryRegion
*iommu_mr
,
602 /* Make sure this CPU has an IOMMU notifier registered for this
603 * IOMMU/IOMMU index combination, so that we can flush its TLB
604 * when the IOMMU tells us the mappings we've cached have changed.
606 MemoryRegion
*mr
= MEMORY_REGION(iommu_mr
);
607 TCGIOMMUNotifier
*notifier
;
610 for (i
= 0; i
< cpu
->iommu_notifiers
->len
; i
++) {
611 notifier
= g_array_index(cpu
->iommu_notifiers
, TCGIOMMUNotifier
*, i
);
612 if (notifier
->mr
== mr
&& notifier
->iommu_idx
== iommu_idx
) {
616 if (i
== cpu
->iommu_notifiers
->len
) {
617 /* Not found, add a new entry at the end of the array */
618 cpu
->iommu_notifiers
= g_array_set_size(cpu
->iommu_notifiers
, i
+ 1);
619 notifier
= g_new0(TCGIOMMUNotifier
, 1);
620 g_array_index(cpu
->iommu_notifiers
, TCGIOMMUNotifier
*, i
) = notifier
;
623 notifier
->iommu_idx
= iommu_idx
;
625 /* Rather than trying to register interest in the specific part
626 * of the iommu's address space that we've accessed and then
627 * expand it later as subsequent accesses touch more of it, we
628 * just register interest in the whole thing, on the assumption
629 * that iommu reconfiguration will be rare.
631 iommu_notifier_init(¬ifier
->n
,
632 tcg_iommu_unmap_notify
,
633 IOMMU_NOTIFIER_UNMAP
,
637 memory_region_register_iommu_notifier(notifier
->mr
, ¬ifier
->n
,
641 if (!notifier
->active
) {
642 notifier
->active
= true;
646 void tcg_iommu_free_notifier_list(CPUState
*cpu
)
648 /* Destroy the CPU's notifier list */
650 TCGIOMMUNotifier
*notifier
;
652 for (i
= 0; i
< cpu
->iommu_notifiers
->len
; i
++) {
653 notifier
= g_array_index(cpu
->iommu_notifiers
, TCGIOMMUNotifier
*, i
);
654 memory_region_unregister_iommu_notifier(notifier
->mr
, ¬ifier
->n
);
657 g_array_free(cpu
->iommu_notifiers
, true);
660 void tcg_iommu_init_notifier_list(CPUState
*cpu
)
662 cpu
->iommu_notifiers
= g_array_new(false, true, sizeof(TCGIOMMUNotifier
*));
665 /* Called from RCU critical section */
666 MemoryRegionSection
*
667 address_space_translate_for_iotlb(CPUState
*cpu
, int asidx
, hwaddr addr
,
668 hwaddr
*xlat
, hwaddr
*plen
,
669 MemTxAttrs attrs
, int *prot
)
671 MemoryRegionSection
*section
;
672 IOMMUMemoryRegion
*iommu_mr
;
673 IOMMUMemoryRegionClass
*imrc
;
676 AddressSpaceDispatch
*d
=
677 qatomic_rcu_read(&cpu
->cpu_ases
[asidx
].memory_dispatch
);
680 section
= address_space_translate_internal(d
, addr
, &addr
, plen
, false);
682 iommu_mr
= memory_region_get_iommu(section
->mr
);
687 imrc
= memory_region_get_iommu_class_nocheck(iommu_mr
);
689 iommu_idx
= imrc
->attrs_to_index(iommu_mr
, attrs
);
690 tcg_register_iommu_notifier(cpu
, iommu_mr
, iommu_idx
);
691 /* We need all the permissions, so pass IOMMU_NONE so the IOMMU
692 * doesn't short-cut its translation table walk.
694 iotlb
= imrc
->translate(iommu_mr
, addr
, IOMMU_NONE
, iommu_idx
);
695 addr
= ((iotlb
.translated_addr
& ~iotlb
.addr_mask
)
696 | (addr
& iotlb
.addr_mask
));
697 /* Update the caller's prot bits to remove permissions the IOMMU
698 * is giving us a failure response for. If we get down to no
699 * permissions left at all we can give up now.
701 if (!(iotlb
.perm
& IOMMU_RO
)) {
702 *prot
&= ~(PAGE_READ
| PAGE_EXEC
);
704 if (!(iotlb
.perm
& IOMMU_WO
)) {
705 *prot
&= ~PAGE_WRITE
;
712 d
= flatview_to_dispatch(address_space_to_flatview(iotlb
.target_as
));
715 assert(!memory_region_is_iommu(section
->mr
));
720 return &d
->map
.sections
[PHYS_SECTION_UNASSIGNED
];
723 void cpu_address_space_init(CPUState
*cpu
, int asidx
,
724 const char *prefix
, MemoryRegion
*mr
)
726 CPUAddressSpace
*newas
;
727 AddressSpace
*as
= g_new0(AddressSpace
, 1);
731 as_name
= g_strdup_printf("%s-%d", prefix
, cpu
->cpu_index
);
732 address_space_init(as
, mr
, as_name
);
735 /* Target code should have set num_ases before calling us */
736 assert(asidx
< cpu
->num_ases
);
739 /* address space 0 gets the convenience alias */
743 /* KVM cannot currently support multiple address spaces. */
744 assert(asidx
== 0 || !kvm_enabled());
746 if (!cpu
->cpu_ases
) {
747 cpu
->cpu_ases
= g_new0(CPUAddressSpace
, cpu
->num_ases
);
750 newas
= &cpu
->cpu_ases
[asidx
];
754 newas
->tcg_as_listener
.log_global_after_sync
= tcg_log_global_after_sync
;
755 newas
->tcg_as_listener
.commit
= tcg_commit
;
756 memory_listener_register(&newas
->tcg_as_listener
, as
);
760 AddressSpace
*cpu_get_address_space(CPUState
*cpu
, int asidx
)
762 /* Return the AddressSpace corresponding to the specified index */
763 return cpu
->cpu_ases
[asidx
].as
;
766 /* Add a watchpoint. */
767 int cpu_watchpoint_insert(CPUState
*cpu
, vaddr addr
, vaddr len
,
768 int flags
, CPUWatchpoint
**watchpoint
)
773 /* forbid ranges which are empty or run off the end of the address space */
774 if (len
== 0 || (addr
+ len
- 1) < addr
) {
775 error_report("tried to set invalid watchpoint at %"
776 VADDR_PRIx
", len=%" VADDR_PRIu
, addr
, len
);
779 wp
= g_malloc(sizeof(*wp
));
785 /* keep all GDB-injected watchpoints in front */
786 if (flags
& BP_GDB
) {
787 QTAILQ_INSERT_HEAD(&cpu
->watchpoints
, wp
, entry
);
789 QTAILQ_INSERT_TAIL(&cpu
->watchpoints
, wp
, entry
);
792 in_page
= -(addr
| TARGET_PAGE_MASK
);
793 if (len
<= in_page
) {
794 tlb_flush_page(cpu
, addr
);
804 /* Remove a specific watchpoint. */
805 int cpu_watchpoint_remove(CPUState
*cpu
, vaddr addr
, vaddr len
,
810 QTAILQ_FOREACH(wp
, &cpu
->watchpoints
, entry
) {
811 if (addr
== wp
->vaddr
&& len
== wp
->len
812 && flags
== (wp
->flags
& ~BP_WATCHPOINT_HIT
)) {
813 cpu_watchpoint_remove_by_ref(cpu
, wp
);
820 /* Remove a specific watchpoint by reference. */
821 void cpu_watchpoint_remove_by_ref(CPUState
*cpu
, CPUWatchpoint
*watchpoint
)
823 QTAILQ_REMOVE(&cpu
->watchpoints
, watchpoint
, entry
);
825 tlb_flush_page(cpu
, watchpoint
->vaddr
);
830 /* Remove all matching watchpoints. */
831 void cpu_watchpoint_remove_all(CPUState
*cpu
, int mask
)
833 CPUWatchpoint
*wp
, *next
;
835 QTAILQ_FOREACH_SAFE(wp
, &cpu
->watchpoints
, entry
, next
) {
836 if (wp
->flags
& mask
) {
837 cpu_watchpoint_remove_by_ref(cpu
, wp
);
842 /* Return true if this watchpoint address matches the specified
843 * access (ie the address range covered by the watchpoint overlaps
844 * partially or completely with the address range covered by the
847 static inline bool watchpoint_address_matches(CPUWatchpoint
*wp
,
848 vaddr addr
, vaddr len
)
850 /* We know the lengths are non-zero, but a little caution is
851 * required to avoid errors in the case where the range ends
852 * exactly at the top of the address space and so addr + len
853 * wraps round to zero.
855 vaddr wpend
= wp
->vaddr
+ wp
->len
- 1;
856 vaddr addrend
= addr
+ len
- 1;
858 return !(addr
> wpend
|| wp
->vaddr
> addrend
);
861 /* Return flags for watchpoints that match addr + prot. */
862 int cpu_watchpoint_address_matches(CPUState
*cpu
, vaddr addr
, vaddr len
)
867 QTAILQ_FOREACH(wp
, &cpu
->watchpoints
, entry
) {
868 if (watchpoint_address_matches(wp
, addr
, len
)) {
875 /* Called from RCU critical section */
876 static RAMBlock
*qemu_get_ram_block(ram_addr_t addr
)
880 block
= qatomic_rcu_read(&ram_list
.mru_block
);
881 if (block
&& addr
- block
->offset
< block
->max_length
) {
884 RAMBLOCK_FOREACH(block
) {
885 if (addr
- block
->offset
< block
->max_length
) {
890 fprintf(stderr
, "Bad ram offset %" PRIx64
"\n", (uint64_t)addr
);
894 /* It is safe to write mru_block outside the iothread lock. This
899 * xxx removed from list
903 * call_rcu(reclaim_ramblock, xxx);
906 * qatomic_rcu_set is not needed here. The block was already published
907 * when it was placed into the list. Here we're just making an extra
908 * copy of the pointer.
910 ram_list
.mru_block
= block
;
914 static void tlb_reset_dirty_range_all(ram_addr_t start
, ram_addr_t length
)
921 assert(tcg_enabled());
922 end
= TARGET_PAGE_ALIGN(start
+ length
);
923 start
&= TARGET_PAGE_MASK
;
925 RCU_READ_LOCK_GUARD();
926 block
= qemu_get_ram_block(start
);
927 assert(block
== qemu_get_ram_block(end
- 1));
928 start1
= (uintptr_t)ramblock_ptr(block
, start
- block
->offset
);
930 tlb_reset_dirty(cpu
, start1
, length
);
934 /* Note: start and end must be within the same ram block. */
935 bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start
,
939 DirtyMemoryBlocks
*blocks
;
940 unsigned long end
, page
, start_page
;
943 uint64_t mr_offset
, mr_size
;
949 end
= TARGET_PAGE_ALIGN(start
+ length
) >> TARGET_PAGE_BITS
;
950 start_page
= start
>> TARGET_PAGE_BITS
;
953 WITH_RCU_READ_LOCK_GUARD() {
954 blocks
= qatomic_rcu_read(&ram_list
.dirty_memory
[client
]);
955 ramblock
= qemu_get_ram_block(start
);
956 /* Range sanity check on the ramblock */
957 assert(start
>= ramblock
->offset
&&
958 start
+ length
<= ramblock
->offset
+ ramblock
->used_length
);
961 unsigned long idx
= page
/ DIRTY_MEMORY_BLOCK_SIZE
;
962 unsigned long offset
= page
% DIRTY_MEMORY_BLOCK_SIZE
;
963 unsigned long num
= MIN(end
- page
,
964 DIRTY_MEMORY_BLOCK_SIZE
- offset
);
966 dirty
|= bitmap_test_and_clear_atomic(blocks
->blocks
[idx
],
971 mr_offset
= (ram_addr_t
)(start_page
<< TARGET_PAGE_BITS
) - ramblock
->offset
;
972 mr_size
= (end
- start_page
) << TARGET_PAGE_BITS
;
973 memory_region_clear_dirty_bitmap(ramblock
->mr
, mr_offset
, mr_size
);
976 if (dirty
&& tcg_enabled()) {
977 tlb_reset_dirty_range_all(start
, length
);
983 DirtyBitmapSnapshot
*cpu_physical_memory_snapshot_and_clear_dirty
984 (MemoryRegion
*mr
, hwaddr offset
, hwaddr length
, unsigned client
)
986 DirtyMemoryBlocks
*blocks
;
987 ram_addr_t start
= memory_region_get_ram_addr(mr
) + offset
;
988 unsigned long align
= 1UL << (TARGET_PAGE_BITS
+ BITS_PER_LEVEL
);
989 ram_addr_t first
= QEMU_ALIGN_DOWN(start
, align
);
990 ram_addr_t last
= QEMU_ALIGN_UP(start
+ length
, align
);
991 DirtyBitmapSnapshot
*snap
;
992 unsigned long page
, end
, dest
;
994 snap
= g_malloc0(sizeof(*snap
) +
995 ((last
- first
) >> (TARGET_PAGE_BITS
+ 3)));
999 page
= first
>> TARGET_PAGE_BITS
;
1000 end
= last
>> TARGET_PAGE_BITS
;
1003 WITH_RCU_READ_LOCK_GUARD() {
1004 blocks
= qatomic_rcu_read(&ram_list
.dirty_memory
[client
]);
1006 while (page
< end
) {
1007 unsigned long idx
= page
/ DIRTY_MEMORY_BLOCK_SIZE
;
1008 unsigned long offset
= page
% DIRTY_MEMORY_BLOCK_SIZE
;
1009 unsigned long num
= MIN(end
- page
,
1010 DIRTY_MEMORY_BLOCK_SIZE
- offset
);
1012 assert(QEMU_IS_ALIGNED(offset
, (1 << BITS_PER_LEVEL
)));
1013 assert(QEMU_IS_ALIGNED(num
, (1 << BITS_PER_LEVEL
)));
1014 offset
>>= BITS_PER_LEVEL
;
1016 bitmap_copy_and_clear_atomic(snap
->dirty
+ dest
,
1017 blocks
->blocks
[idx
] + offset
,
1020 dest
+= num
>> BITS_PER_LEVEL
;
1024 if (tcg_enabled()) {
1025 tlb_reset_dirty_range_all(start
, length
);
1028 memory_region_clear_dirty_bitmap(mr
, offset
, length
);
1033 bool cpu_physical_memory_snapshot_get_dirty(DirtyBitmapSnapshot
*snap
,
1037 unsigned long page
, end
;
1039 assert(start
>= snap
->start
);
1040 assert(start
+ length
<= snap
->end
);
1042 end
= TARGET_PAGE_ALIGN(start
+ length
- snap
->start
) >> TARGET_PAGE_BITS
;
1043 page
= (start
- snap
->start
) >> TARGET_PAGE_BITS
;
1045 while (page
< end
) {
1046 if (test_bit(page
, snap
->dirty
)) {
1054 /* Called from RCU critical section */
1055 hwaddr
memory_region_section_get_iotlb(CPUState
*cpu
,
1056 MemoryRegionSection
*section
)
1058 AddressSpaceDispatch
*d
= flatview_to_dispatch(section
->fv
);
1059 return section
- d
->map
.sections
;
1062 static int subpage_register(subpage_t
*mmio
, uint32_t start
, uint32_t end
,
1064 static subpage_t
*subpage_init(FlatView
*fv
, hwaddr base
);
1066 static void *(*phys_mem_alloc
)(size_t size
, uint64_t *align
, bool shared
) =
1067 qemu_anon_ram_alloc
;
1070 * Set a custom physical guest memory alloator.
1071 * Accelerators with unusual needs may need this. Hopefully, we can
1072 * get rid of it eventually.
1074 void phys_mem_set_alloc(void *(*alloc
)(size_t, uint64_t *align
, bool shared
))
1076 phys_mem_alloc
= alloc
;
1079 static uint16_t phys_section_add(PhysPageMap
*map
,
1080 MemoryRegionSection
*section
)
1082 /* The physical section number is ORed with a page-aligned
1083 * pointer to produce the iotlb entries. Thus it should
1084 * never overflow into the page-aligned value.
1086 assert(map
->sections_nb
< TARGET_PAGE_SIZE
);
1088 if (map
->sections_nb
== map
->sections_nb_alloc
) {
1089 map
->sections_nb_alloc
= MAX(map
->sections_nb_alloc
* 2, 16);
1090 map
->sections
= g_renew(MemoryRegionSection
, map
->sections
,
1091 map
->sections_nb_alloc
);
1093 map
->sections
[map
->sections_nb
] = *section
;
1094 memory_region_ref(section
->mr
);
1095 return map
->sections_nb
++;
1098 static void phys_section_destroy(MemoryRegion
*mr
)
1100 bool have_sub_page
= mr
->subpage
;
1102 memory_region_unref(mr
);
1104 if (have_sub_page
) {
1105 subpage_t
*subpage
= container_of(mr
, subpage_t
, iomem
);
1106 object_unref(OBJECT(&subpage
->iomem
));
1111 static void phys_sections_free(PhysPageMap
*map
)
1113 while (map
->sections_nb
> 0) {
1114 MemoryRegionSection
*section
= &map
->sections
[--map
->sections_nb
];
1115 phys_section_destroy(section
->mr
);
1117 g_free(map
->sections
);
1121 static void register_subpage(FlatView
*fv
, MemoryRegionSection
*section
)
1123 AddressSpaceDispatch
*d
= flatview_to_dispatch(fv
);
1125 hwaddr base
= section
->offset_within_address_space
1127 MemoryRegionSection
*existing
= phys_page_find(d
, base
);
1128 MemoryRegionSection subsection
= {
1129 .offset_within_address_space
= base
,
1130 .size
= int128_make64(TARGET_PAGE_SIZE
),
1134 assert(existing
->mr
->subpage
|| existing
->mr
== &io_mem_unassigned
);
1136 if (!(existing
->mr
->subpage
)) {
1137 subpage
= subpage_init(fv
, base
);
1139 subsection
.mr
= &subpage
->iomem
;
1140 phys_page_set(d
, base
>> TARGET_PAGE_BITS
, 1,
1141 phys_section_add(&d
->map
, &subsection
));
1143 subpage
= container_of(existing
->mr
, subpage_t
, iomem
);
1145 start
= section
->offset_within_address_space
& ~TARGET_PAGE_MASK
;
1146 end
= start
+ int128_get64(section
->size
) - 1;
1147 subpage_register(subpage
, start
, end
,
1148 phys_section_add(&d
->map
, section
));
1152 static void register_multipage(FlatView
*fv
,
1153 MemoryRegionSection
*section
)
1155 AddressSpaceDispatch
*d
= flatview_to_dispatch(fv
);
1156 hwaddr start_addr
= section
->offset_within_address_space
;
1157 uint16_t section_index
= phys_section_add(&d
->map
, section
);
1158 uint64_t num_pages
= int128_get64(int128_rshift(section
->size
,
1162 phys_page_set(d
, start_addr
>> TARGET_PAGE_BITS
, num_pages
, section_index
);
1166 * The range in *section* may look like this:
1170 * where s stands for subpage and P for page.
1172 void flatview_add_to_dispatch(FlatView
*fv
, MemoryRegionSection
*section
)
1174 MemoryRegionSection remain
= *section
;
1175 Int128 page_size
= int128_make64(TARGET_PAGE_SIZE
);
1177 /* register first subpage */
1178 if (remain
.offset_within_address_space
& ~TARGET_PAGE_MASK
) {
1179 uint64_t left
= TARGET_PAGE_ALIGN(remain
.offset_within_address_space
)
1180 - remain
.offset_within_address_space
;
1182 MemoryRegionSection now
= remain
;
1183 now
.size
= int128_min(int128_make64(left
), now
.size
);
1184 register_subpage(fv
, &now
);
1185 if (int128_eq(remain
.size
, now
.size
)) {
1188 remain
.size
= int128_sub(remain
.size
, now
.size
);
1189 remain
.offset_within_address_space
+= int128_get64(now
.size
);
1190 remain
.offset_within_region
+= int128_get64(now
.size
);
1193 /* register whole pages */
1194 if (int128_ge(remain
.size
, page_size
)) {
1195 MemoryRegionSection now
= remain
;
1196 now
.size
= int128_and(now
.size
, int128_neg(page_size
));
1197 register_multipage(fv
, &now
);
1198 if (int128_eq(remain
.size
, now
.size
)) {
1201 remain
.size
= int128_sub(remain
.size
, now
.size
);
1202 remain
.offset_within_address_space
+= int128_get64(now
.size
);
1203 remain
.offset_within_region
+= int128_get64(now
.size
);
1206 /* register last subpage */
1207 register_subpage(fv
, &remain
);
1210 void qemu_flush_coalesced_mmio_buffer(void)
1213 kvm_flush_coalesced_mmio_buffer();
1216 void qemu_mutex_lock_ramlist(void)
1218 qemu_mutex_lock(&ram_list
.mutex
);
1221 void qemu_mutex_unlock_ramlist(void)
1223 qemu_mutex_unlock(&ram_list
.mutex
);
1226 void ram_block_dump(Monitor
*mon
)
1231 RCU_READ_LOCK_GUARD();
1232 monitor_printf(mon
, "%24s %8s %18s %18s %18s\n",
1233 "Block Name", "PSize", "Offset", "Used", "Total");
1234 RAMBLOCK_FOREACH(block
) {
1235 psize
= size_to_str(block
->page_size
);
1236 monitor_printf(mon
, "%24s %8s 0x%016" PRIx64
" 0x%016" PRIx64
1237 " 0x%016" PRIx64
"\n", block
->idstr
, psize
,
1238 (uint64_t)block
->offset
,
1239 (uint64_t)block
->used_length
,
1240 (uint64_t)block
->max_length
);
1247 * FIXME TOCTTOU: this iterates over memory backends' mem-path, which
1248 * may or may not name the same files / on the same filesystem now as
1249 * when we actually open and map them. Iterate over the file
1250 * descriptors instead, and use qemu_fd_getpagesize().
1252 static int find_min_backend_pagesize(Object
*obj
, void *opaque
)
1254 long *hpsize_min
= opaque
;
1256 if (object_dynamic_cast(obj
, TYPE_MEMORY_BACKEND
)) {
1257 HostMemoryBackend
*backend
= MEMORY_BACKEND(obj
);
1258 long hpsize
= host_memory_backend_pagesize(backend
);
1260 if (host_memory_backend_is_mapped(backend
) && (hpsize
< *hpsize_min
)) {
1261 *hpsize_min
= hpsize
;
1268 static int find_max_backend_pagesize(Object
*obj
, void *opaque
)
1270 long *hpsize_max
= opaque
;
1272 if (object_dynamic_cast(obj
, TYPE_MEMORY_BACKEND
)) {
1273 HostMemoryBackend
*backend
= MEMORY_BACKEND(obj
);
1274 long hpsize
= host_memory_backend_pagesize(backend
);
1276 if (host_memory_backend_is_mapped(backend
) && (hpsize
> *hpsize_max
)) {
1277 *hpsize_max
= hpsize
;
1285 * TODO: We assume right now that all mapped host memory backends are
1286 * used as RAM, however some might be used for different purposes.
1288 long qemu_minrampagesize(void)
1290 long hpsize
= LONG_MAX
;
1291 Object
*memdev_root
= object_resolve_path("/objects", NULL
);
1293 object_child_foreach(memdev_root
, find_min_backend_pagesize
, &hpsize
);
1297 long qemu_maxrampagesize(void)
1300 Object
*memdev_root
= object_resolve_path("/objects", NULL
);
1302 object_child_foreach(memdev_root
, find_max_backend_pagesize
, &pagesize
);
1306 long qemu_minrampagesize(void)
1308 return qemu_real_host_page_size
;
1310 long qemu_maxrampagesize(void)
1312 return qemu_real_host_page_size
;
1317 static int64_t get_file_size(int fd
)
1320 #if defined(__linux__)
1323 if (fstat(fd
, &st
) < 0) {
1327 /* Special handling for devdax character devices */
1328 if (S_ISCHR(st
.st_mode
)) {
1329 g_autofree
char *subsystem_path
= NULL
;
1330 g_autofree
char *subsystem
= NULL
;
1332 subsystem_path
= g_strdup_printf("/sys/dev/char/%d:%d/subsystem",
1333 major(st
.st_rdev
), minor(st
.st_rdev
));
1334 subsystem
= g_file_read_link(subsystem_path
, NULL
);
1336 if (subsystem
&& g_str_has_suffix(subsystem
, "/dax")) {
1337 g_autofree
char *size_path
= NULL
;
1338 g_autofree
char *size_str
= NULL
;
1340 size_path
= g_strdup_printf("/sys/dev/char/%d:%d/size",
1341 major(st
.st_rdev
), minor(st
.st_rdev
));
1343 if (g_file_get_contents(size_path
, &size_str
, NULL
, NULL
)) {
1344 return g_ascii_strtoll(size_str
, NULL
, 0);
1348 #endif /* defined(__linux__) */
1350 /* st.st_size may be zero for special files yet lseek(2) works */
1351 size
= lseek(fd
, 0, SEEK_END
);
1358 static int64_t get_file_align(int fd
)
1361 #if defined(__linux__) && defined(CONFIG_LIBDAXCTL)
1364 if (fstat(fd
, &st
) < 0) {
1368 /* Special handling for devdax character devices */
1369 if (S_ISCHR(st
.st_mode
)) {
1370 g_autofree
char *path
= NULL
;
1371 g_autofree
char *rpath
= NULL
;
1372 struct daxctl_ctx
*ctx
;
1373 struct daxctl_region
*region
;
1376 path
= g_strdup_printf("/sys/dev/char/%d:%d",
1377 major(st
.st_rdev
), minor(st
.st_rdev
));
1378 rpath
= realpath(path
, NULL
);
1380 rc
= daxctl_new(&ctx
);
1385 daxctl_region_foreach(ctx
, region
) {
1386 if (strstr(rpath
, daxctl_region_get_path(region
))) {
1387 align
= daxctl_region_get_align(region
);
1393 #endif /* defined(__linux__) && defined(CONFIG_LIBDAXCTL) */
1398 static int file_ram_open(const char *path
,
1399 const char *region_name
,
1404 char *sanitized_name
;
1410 fd
= open(path
, O_RDWR
);
1412 /* @path names an existing file, use it */
1415 if (errno
== ENOENT
) {
1416 /* @path names a file that doesn't exist, create it */
1417 fd
= open(path
, O_RDWR
| O_CREAT
| O_EXCL
, 0644);
1422 } else if (errno
== EISDIR
) {
1423 /* @path names a directory, create a file there */
1424 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
1425 sanitized_name
= g_strdup(region_name
);
1426 for (c
= sanitized_name
; *c
!= '\0'; c
++) {
1432 filename
= g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path
,
1434 g_free(sanitized_name
);
1436 fd
= mkstemp(filename
);
1444 if (errno
!= EEXIST
&& errno
!= EINTR
) {
1445 error_setg_errno(errp
, errno
,
1446 "can't open backing store %s for guest RAM",
1451 * Try again on EINTR and EEXIST. The latter happens when
1452 * something else creates the file between our two open().
1459 static void *file_ram_alloc(RAMBlock
*block
,
1467 block
->page_size
= qemu_fd_getpagesize(fd
);
1468 if (block
->mr
->align
% block
->page_size
) {
1469 error_setg(errp
, "alignment 0x%" PRIx64
1470 " must be multiples of page size 0x%zx",
1471 block
->mr
->align
, block
->page_size
);
1473 } else if (block
->mr
->align
&& !is_power_of_2(block
->mr
->align
)) {
1474 error_setg(errp
, "alignment 0x%" PRIx64
1475 " must be a power of two", block
->mr
->align
);
1478 block
->mr
->align
= MAX(block
->page_size
, block
->mr
->align
);
1479 #if defined(__s390x__)
1480 if (kvm_enabled()) {
1481 block
->mr
->align
= MAX(block
->mr
->align
, QEMU_VMALLOC_ALIGN
);
1485 if (memory
< block
->page_size
) {
1486 error_setg(errp
, "memory size 0x" RAM_ADDR_FMT
" must be equal to "
1487 "or larger than page size 0x%zx",
1488 memory
, block
->page_size
);
1492 memory
= ROUND_UP(memory
, block
->page_size
);
1495 * ftruncate is not supported by hugetlbfs in older
1496 * hosts, so don't bother bailing out on errors.
1497 * If anything goes wrong with it under other filesystems,
1500 * Do not truncate the non-empty backend file to avoid corrupting
1501 * the existing data in the file. Disabling shrinking is not
1502 * enough. For example, the current vNVDIMM implementation stores
1503 * the guest NVDIMM labels at the end of the backend file. If the
1504 * backend file is later extended, QEMU will not be able to find
1505 * those labels. Therefore, extending the non-empty backend file
1506 * is disabled as well.
1508 if (truncate
&& ftruncate(fd
, memory
)) {
1509 perror("ftruncate");
1512 area
= qemu_ram_mmap(fd
, memory
, block
->mr
->align
,
1513 block
->flags
& RAM_SHARED
, block
->flags
& RAM_PMEM
);
1514 if (area
== MAP_FAILED
) {
1515 error_setg_errno(errp
, errno
,
1516 "unable to map backing store for guest RAM");
1525 /* Allocate space within the ram_addr_t space that governs the
1527 * Called with the ramlist lock held.
1529 static ram_addr_t
find_ram_offset(ram_addr_t size
)
1531 RAMBlock
*block
, *next_block
;
1532 ram_addr_t offset
= RAM_ADDR_MAX
, mingap
= RAM_ADDR_MAX
;
1534 assert(size
!= 0); /* it would hand out same offset multiple times */
1536 if (QLIST_EMPTY_RCU(&ram_list
.blocks
)) {
1540 RAMBLOCK_FOREACH(block
) {
1541 ram_addr_t candidate
, next
= RAM_ADDR_MAX
;
1543 /* Align blocks to start on a 'long' in the bitmap
1544 * which makes the bitmap sync'ing take the fast path.
1546 candidate
= block
->offset
+ block
->max_length
;
1547 candidate
= ROUND_UP(candidate
, BITS_PER_LONG
<< TARGET_PAGE_BITS
);
1549 /* Search for the closest following block
1552 RAMBLOCK_FOREACH(next_block
) {
1553 if (next_block
->offset
>= candidate
) {
1554 next
= MIN(next
, next_block
->offset
);
1558 /* If it fits remember our place and remember the size
1559 * of gap, but keep going so that we might find a smaller
1560 * gap to fill so avoiding fragmentation.
1562 if (next
- candidate
>= size
&& next
- candidate
< mingap
) {
1564 mingap
= next
- candidate
;
1567 trace_find_ram_offset_loop(size
, candidate
, offset
, next
, mingap
);
1570 if (offset
== RAM_ADDR_MAX
) {
1571 fprintf(stderr
, "Failed to find gap of requested size: %" PRIu64
"\n",
1576 trace_find_ram_offset(size
, offset
);
1581 static unsigned long last_ram_page(void)
1584 ram_addr_t last
= 0;
1586 RCU_READ_LOCK_GUARD();
1587 RAMBLOCK_FOREACH(block
) {
1588 last
= MAX(last
, block
->offset
+ block
->max_length
);
1590 return last
>> TARGET_PAGE_BITS
;
1593 static void qemu_ram_setup_dump(void *addr
, ram_addr_t size
)
1597 /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
1598 if (!machine_dump_guest_core(current_machine
)) {
1599 ret
= qemu_madvise(addr
, size
, QEMU_MADV_DONTDUMP
);
1601 perror("qemu_madvise");
1602 fprintf(stderr
, "madvise doesn't support MADV_DONTDUMP, "
1603 "but dump_guest_core=off specified\n");
1608 const char *qemu_ram_get_idstr(RAMBlock
*rb
)
1613 void *qemu_ram_get_host_addr(RAMBlock
*rb
)
1618 ram_addr_t
qemu_ram_get_offset(RAMBlock
*rb
)
1623 ram_addr_t
qemu_ram_get_used_length(RAMBlock
*rb
)
1625 return rb
->used_length
;
1628 bool qemu_ram_is_shared(RAMBlock
*rb
)
1630 return rb
->flags
& RAM_SHARED
;
1633 /* Note: Only set at the start of postcopy */
1634 bool qemu_ram_is_uf_zeroable(RAMBlock
*rb
)
1636 return rb
->flags
& RAM_UF_ZEROPAGE
;
1639 void qemu_ram_set_uf_zeroable(RAMBlock
*rb
)
1641 rb
->flags
|= RAM_UF_ZEROPAGE
;
1644 bool qemu_ram_is_migratable(RAMBlock
*rb
)
1646 return rb
->flags
& RAM_MIGRATABLE
;
1649 void qemu_ram_set_migratable(RAMBlock
*rb
)
1651 rb
->flags
|= RAM_MIGRATABLE
;
1654 void qemu_ram_unset_migratable(RAMBlock
*rb
)
1656 rb
->flags
&= ~RAM_MIGRATABLE
;
1659 /* Called with iothread lock held. */
1660 void qemu_ram_set_idstr(RAMBlock
*new_block
, const char *name
, DeviceState
*dev
)
1665 assert(!new_block
->idstr
[0]);
1668 char *id
= qdev_get_dev_path(dev
);
1670 snprintf(new_block
->idstr
, sizeof(new_block
->idstr
), "%s/", id
);
1674 pstrcat(new_block
->idstr
, sizeof(new_block
->idstr
), name
);
1676 RCU_READ_LOCK_GUARD();
1677 RAMBLOCK_FOREACH(block
) {
1678 if (block
!= new_block
&&
1679 !strcmp(block
->idstr
, new_block
->idstr
)) {
1680 fprintf(stderr
, "RAMBlock \"%s\" already registered, abort!\n",
1687 /* Called with iothread lock held. */
1688 void qemu_ram_unset_idstr(RAMBlock
*block
)
1690 /* FIXME: arch_init.c assumes that this is not called throughout
1691 * migration. Ignore the problem since hot-unplug during migration
1692 * does not work anyway.
1695 memset(block
->idstr
, 0, sizeof(block
->idstr
));
1699 size_t qemu_ram_pagesize(RAMBlock
*rb
)
1701 return rb
->page_size
;
1704 /* Returns the largest size of page in use */
1705 size_t qemu_ram_pagesize_largest(void)
1710 RAMBLOCK_FOREACH(block
) {
1711 largest
= MAX(largest
, qemu_ram_pagesize(block
));
1717 static int memory_try_enable_merging(void *addr
, size_t len
)
1719 if (!machine_mem_merge(current_machine
)) {
1720 /* disabled by the user */
1724 return qemu_madvise(addr
, len
, QEMU_MADV_MERGEABLE
);
1727 /* Only legal before guest might have detected the memory size: e.g. on
1728 * incoming migration, or right after reset.
1730 * As memory core doesn't know how is memory accessed, it is up to
1731 * resize callback to update device state and/or add assertions to detect
1732 * misuse, if necessary.
1734 int qemu_ram_resize(RAMBlock
*block
, ram_addr_t newsize
, Error
**errp
)
1736 const ram_addr_t unaligned_size
= newsize
;
1740 newsize
= HOST_PAGE_ALIGN(newsize
);
1742 if (block
->used_length
== newsize
) {
1744 * We don't have to resize the ram block (which only knows aligned
1745 * sizes), however, we have to notify if the unaligned size changed.
1747 if (unaligned_size
!= memory_region_size(block
->mr
)) {
1748 memory_region_set_size(block
->mr
, unaligned_size
);
1749 if (block
->resized
) {
1750 block
->resized(block
->idstr
, unaligned_size
, block
->host
);
1756 if (!(block
->flags
& RAM_RESIZEABLE
)) {
1757 error_setg_errno(errp
, EINVAL
,
1758 "Size mismatch: %s: 0x" RAM_ADDR_FMT
1759 " != 0x" RAM_ADDR_FMT
, block
->idstr
,
1760 newsize
, block
->used_length
);
1764 if (block
->max_length
< newsize
) {
1765 error_setg_errno(errp
, EINVAL
,
1766 "Size too large: %s: 0x" RAM_ADDR_FMT
1767 " > 0x" RAM_ADDR_FMT
, block
->idstr
,
1768 newsize
, block
->max_length
);
1772 cpu_physical_memory_clear_dirty_range(block
->offset
, block
->used_length
);
1773 block
->used_length
= newsize
;
1774 cpu_physical_memory_set_dirty_range(block
->offset
, block
->used_length
,
1776 memory_region_set_size(block
->mr
, unaligned_size
);
1777 if (block
->resized
) {
1778 block
->resized(block
->idstr
, unaligned_size
, block
->host
);
1784 * Trigger sync on the given ram block for range [start, start + length]
1785 * with the backing store if one is available.
1787 * @Note: this is supposed to be a synchronous op.
1789 void qemu_ram_msync(RAMBlock
*block
, ram_addr_t start
, ram_addr_t length
)
1791 /* The requested range should fit in within the block range */
1792 g_assert((start
+ length
) <= block
->used_length
);
1794 #ifdef CONFIG_LIBPMEM
1795 /* The lack of support for pmem should not block the sync */
1796 if (ramblock_is_pmem(block
)) {
1797 void *addr
= ramblock_ptr(block
, start
);
1798 pmem_persist(addr
, length
);
1802 if (block
->fd
>= 0) {
1804 * Case there is no support for PMEM or the memory has not been
1805 * specified as persistent (or is not one) - use the msync.
1806 * Less optimal but still achieves the same goal
1808 void *addr
= ramblock_ptr(block
, start
);
1809 if (qemu_msync(addr
, length
, block
->fd
)) {
1810 warn_report("%s: failed to sync memory range: start: "
1811 RAM_ADDR_FMT
" length: " RAM_ADDR_FMT
,
1812 __func__
, start
, length
);
1817 /* Called with ram_list.mutex held */
1818 static void dirty_memory_extend(ram_addr_t old_ram_size
,
1819 ram_addr_t new_ram_size
)
1821 ram_addr_t old_num_blocks
= DIV_ROUND_UP(old_ram_size
,
1822 DIRTY_MEMORY_BLOCK_SIZE
);
1823 ram_addr_t new_num_blocks
= DIV_ROUND_UP(new_ram_size
,
1824 DIRTY_MEMORY_BLOCK_SIZE
);
1827 /* Only need to extend if block count increased */
1828 if (new_num_blocks
<= old_num_blocks
) {
1832 for (i
= 0; i
< DIRTY_MEMORY_NUM
; i
++) {
1833 DirtyMemoryBlocks
*old_blocks
;
1834 DirtyMemoryBlocks
*new_blocks
;
1837 old_blocks
= qatomic_rcu_read(&ram_list
.dirty_memory
[i
]);
1838 new_blocks
= g_malloc(sizeof(*new_blocks
) +
1839 sizeof(new_blocks
->blocks
[0]) * new_num_blocks
);
1841 if (old_num_blocks
) {
1842 memcpy(new_blocks
->blocks
, old_blocks
->blocks
,
1843 old_num_blocks
* sizeof(old_blocks
->blocks
[0]));
1846 for (j
= old_num_blocks
; j
< new_num_blocks
; j
++) {
1847 new_blocks
->blocks
[j
] = bitmap_new(DIRTY_MEMORY_BLOCK_SIZE
);
1850 qatomic_rcu_set(&ram_list
.dirty_memory
[i
], new_blocks
);
1853 g_free_rcu(old_blocks
, rcu
);
1858 static void ram_block_add(RAMBlock
*new_block
, Error
**errp
, bool shared
)
1861 RAMBlock
*last_block
= NULL
;
1862 ram_addr_t old_ram_size
, new_ram_size
;
1865 old_ram_size
= last_ram_page();
1867 qemu_mutex_lock_ramlist();
1868 new_block
->offset
= find_ram_offset(new_block
->max_length
);
1870 if (!new_block
->host
) {
1871 if (xen_enabled()) {
1872 xen_ram_alloc(new_block
->offset
, new_block
->max_length
,
1873 new_block
->mr
, &err
);
1875 error_propagate(errp
, err
);
1876 qemu_mutex_unlock_ramlist();
1880 new_block
->host
= phys_mem_alloc(new_block
->max_length
,
1881 &new_block
->mr
->align
, shared
);
1882 if (!new_block
->host
) {
1883 error_setg_errno(errp
, errno
,
1884 "cannot set up guest memory '%s'",
1885 memory_region_name(new_block
->mr
));
1886 qemu_mutex_unlock_ramlist();
1889 memory_try_enable_merging(new_block
->host
, new_block
->max_length
);
1893 new_ram_size
= MAX(old_ram_size
,
1894 (new_block
->offset
+ new_block
->max_length
) >> TARGET_PAGE_BITS
);
1895 if (new_ram_size
> old_ram_size
) {
1896 dirty_memory_extend(old_ram_size
, new_ram_size
);
1898 /* Keep the list sorted from biggest to smallest block. Unlike QTAILQ,
1899 * QLIST (which has an RCU-friendly variant) does not have insertion at
1900 * tail, so save the last element in last_block.
1902 RAMBLOCK_FOREACH(block
) {
1904 if (block
->max_length
< new_block
->max_length
) {
1909 QLIST_INSERT_BEFORE_RCU(block
, new_block
, next
);
1910 } else if (last_block
) {
1911 QLIST_INSERT_AFTER_RCU(last_block
, new_block
, next
);
1912 } else { /* list is empty */
1913 QLIST_INSERT_HEAD_RCU(&ram_list
.blocks
, new_block
, next
);
1915 ram_list
.mru_block
= NULL
;
1917 /* Write list before version */
1920 qemu_mutex_unlock_ramlist();
1922 cpu_physical_memory_set_dirty_range(new_block
->offset
,
1923 new_block
->used_length
,
1926 if (new_block
->host
) {
1927 qemu_ram_setup_dump(new_block
->host
, new_block
->max_length
);
1928 qemu_madvise(new_block
->host
, new_block
->max_length
, QEMU_MADV_HUGEPAGE
);
1930 * MADV_DONTFORK is also needed by KVM in absence of synchronous MMU
1931 * Configure it unless the machine is a qtest server, in which case
1932 * KVM is not used and it may be forked (eg for fuzzing purposes).
1934 if (!qtest_enabled()) {
1935 qemu_madvise(new_block
->host
, new_block
->max_length
,
1936 QEMU_MADV_DONTFORK
);
1938 ram_block_notify_add(new_block
->host
, new_block
->max_length
);
1943 RAMBlock
*qemu_ram_alloc_from_fd(ram_addr_t size
, MemoryRegion
*mr
,
1944 uint32_t ram_flags
, int fd
,
1947 RAMBlock
*new_block
;
1948 Error
*local_err
= NULL
;
1949 int64_t file_size
, file_align
;
1951 /* Just support these ram flags by now. */
1952 assert((ram_flags
& ~(RAM_SHARED
| RAM_PMEM
)) == 0);
1954 if (xen_enabled()) {
1955 error_setg(errp
, "-mem-path not supported with Xen");
1959 if (kvm_enabled() && !kvm_has_sync_mmu()) {
1961 "host lacks kvm mmu notifiers, -mem-path unsupported");
1965 if (phys_mem_alloc
!= qemu_anon_ram_alloc
) {
1967 * file_ram_alloc() needs to allocate just like
1968 * phys_mem_alloc, but we haven't bothered to provide
1972 "-mem-path not supported with this accelerator");
1976 size
= HOST_PAGE_ALIGN(size
);
1977 file_size
= get_file_size(fd
);
1978 if (file_size
> 0 && file_size
< size
) {
1979 error_setg(errp
, "backing store size 0x%" PRIx64
1980 " does not match 'size' option 0x" RAM_ADDR_FMT
,
1985 file_align
= get_file_align(fd
);
1986 if (file_align
> 0 && mr
&& file_align
> mr
->align
) {
1987 error_setg(errp
, "backing store align 0x%" PRIx64
1988 " is larger than 'align' option 0x%" PRIx64
,
1989 file_align
, mr
->align
);
1993 new_block
= g_malloc0(sizeof(*new_block
));
1995 new_block
->used_length
= size
;
1996 new_block
->max_length
= size
;
1997 new_block
->flags
= ram_flags
;
1998 new_block
->host
= file_ram_alloc(new_block
, size
, fd
, !file_size
, errp
);
1999 if (!new_block
->host
) {
2004 ram_block_add(new_block
, &local_err
, ram_flags
& RAM_SHARED
);
2007 error_propagate(errp
, local_err
);
2015 RAMBlock
*qemu_ram_alloc_from_file(ram_addr_t size
, MemoryRegion
*mr
,
2016 uint32_t ram_flags
, const char *mem_path
,
2023 fd
= file_ram_open(mem_path
, memory_region_name(mr
), &created
, errp
);
2028 block
= qemu_ram_alloc_from_fd(size
, mr
, ram_flags
, fd
, errp
);
2042 RAMBlock
*qemu_ram_alloc_internal(ram_addr_t size
, ram_addr_t max_size
,
2043 void (*resized
)(const char*,
2046 void *host
, bool resizeable
, bool share
,
2047 MemoryRegion
*mr
, Error
**errp
)
2049 RAMBlock
*new_block
;
2050 Error
*local_err
= NULL
;
2052 size
= HOST_PAGE_ALIGN(size
);
2053 max_size
= HOST_PAGE_ALIGN(max_size
);
2054 new_block
= g_malloc0(sizeof(*new_block
));
2056 new_block
->resized
= resized
;
2057 new_block
->used_length
= size
;
2058 new_block
->max_length
= max_size
;
2059 assert(max_size
>= size
);
2061 new_block
->page_size
= qemu_real_host_page_size
;
2062 new_block
->host
= host
;
2064 new_block
->flags
|= RAM_PREALLOC
;
2067 new_block
->flags
|= RAM_RESIZEABLE
;
2069 ram_block_add(new_block
, &local_err
, share
);
2072 error_propagate(errp
, local_err
);
2078 RAMBlock
*qemu_ram_alloc_from_ptr(ram_addr_t size
, void *host
,
2079 MemoryRegion
*mr
, Error
**errp
)
2081 return qemu_ram_alloc_internal(size
, size
, NULL
, host
, false,
2085 RAMBlock
*qemu_ram_alloc(ram_addr_t size
, bool share
,
2086 MemoryRegion
*mr
, Error
**errp
)
2088 return qemu_ram_alloc_internal(size
, size
, NULL
, NULL
, false,
2092 RAMBlock
*qemu_ram_alloc_resizeable(ram_addr_t size
, ram_addr_t maxsz
,
2093 void (*resized
)(const char*,
2096 MemoryRegion
*mr
, Error
**errp
)
2098 return qemu_ram_alloc_internal(size
, maxsz
, resized
, NULL
, true,
2102 static void reclaim_ramblock(RAMBlock
*block
)
2104 if (block
->flags
& RAM_PREALLOC
) {
2106 } else if (xen_enabled()) {
2107 xen_invalidate_map_cache_entry(block
->host
);
2109 } else if (block
->fd
>= 0) {
2110 qemu_ram_munmap(block
->fd
, block
->host
, block
->max_length
);
2114 qemu_anon_ram_free(block
->host
, block
->max_length
);
2119 void qemu_ram_free(RAMBlock
*block
)
2126 ram_block_notify_remove(block
->host
, block
->max_length
);
2129 qemu_mutex_lock_ramlist();
2130 QLIST_REMOVE_RCU(block
, next
);
2131 ram_list
.mru_block
= NULL
;
2132 /* Write list before version */
2135 call_rcu(block
, reclaim_ramblock
, rcu
);
2136 qemu_mutex_unlock_ramlist();
2140 void qemu_ram_remap(ram_addr_t addr
, ram_addr_t length
)
2147 RAMBLOCK_FOREACH(block
) {
2148 offset
= addr
- block
->offset
;
2149 if (offset
< block
->max_length
) {
2150 vaddr
= ramblock_ptr(block
, offset
);
2151 if (block
->flags
& RAM_PREALLOC
) {
2153 } else if (xen_enabled()) {
2157 if (block
->fd
>= 0) {
2158 flags
|= (block
->flags
& RAM_SHARED
?
2159 MAP_SHARED
: MAP_PRIVATE
);
2160 area
= mmap(vaddr
, length
, PROT_READ
| PROT_WRITE
,
2161 flags
, block
->fd
, offset
);
2164 * Remap needs to match alloc. Accelerators that
2165 * set phys_mem_alloc never remap. If they did,
2166 * we'd need a remap hook here.
2168 assert(phys_mem_alloc
== qemu_anon_ram_alloc
);
2170 flags
|= MAP_PRIVATE
| MAP_ANONYMOUS
;
2171 area
= mmap(vaddr
, length
, PROT_READ
| PROT_WRITE
,
2174 if (area
!= vaddr
) {
2175 error_report("Could not remap addr: "
2176 RAM_ADDR_FMT
"@" RAM_ADDR_FMT
"",
2180 memory_try_enable_merging(vaddr
, length
);
2181 qemu_ram_setup_dump(vaddr
, length
);
2186 #endif /* !_WIN32 */
2188 /* Return a host pointer to ram allocated with qemu_ram_alloc.
2189 * This should not be used for general purpose DMA. Use address_space_map
2190 * or address_space_rw instead. For local memory (e.g. video ram) that the
2191 * device owns, use memory_region_get_ram_ptr.
2193 * Called within RCU critical section.
2195 void *qemu_map_ram_ptr(RAMBlock
*ram_block
, ram_addr_t addr
)
2197 RAMBlock
*block
= ram_block
;
2199 if (block
== NULL
) {
2200 block
= qemu_get_ram_block(addr
);
2201 addr
-= block
->offset
;
2204 if (xen_enabled() && block
->host
== NULL
) {
2205 /* We need to check if the requested address is in the RAM
2206 * because we don't want to map the entire memory in QEMU.
2207 * In that case just map until the end of the page.
2209 if (block
->offset
== 0) {
2210 return xen_map_cache(addr
, 0, 0, false);
2213 block
->host
= xen_map_cache(block
->offset
, block
->max_length
, 1, false);
2215 return ramblock_ptr(block
, addr
);
2218 /* Return a host pointer to guest's ram. Similar to qemu_map_ram_ptr
2219 * but takes a size argument.
2221 * Called within RCU critical section.
2223 static void *qemu_ram_ptr_length(RAMBlock
*ram_block
, ram_addr_t addr
,
2224 hwaddr
*size
, bool lock
)
2226 RAMBlock
*block
= ram_block
;
2231 if (block
== NULL
) {
2232 block
= qemu_get_ram_block(addr
);
2233 addr
-= block
->offset
;
2235 *size
= MIN(*size
, block
->max_length
- addr
);
2237 if (xen_enabled() && block
->host
== NULL
) {
2238 /* We need to check if the requested address is in the RAM
2239 * because we don't want to map the entire memory in QEMU.
2240 * In that case just map the requested area.
2242 if (block
->offset
== 0) {
2243 return xen_map_cache(addr
, *size
, lock
, lock
);
2246 block
->host
= xen_map_cache(block
->offset
, block
->max_length
, 1, lock
);
2249 return ramblock_ptr(block
, addr
);
2252 /* Return the offset of a hostpointer within a ramblock */
2253 ram_addr_t
qemu_ram_block_host_offset(RAMBlock
*rb
, void *host
)
2255 ram_addr_t res
= (uint8_t *)host
- (uint8_t *)rb
->host
;
2256 assert((uintptr_t)host
>= (uintptr_t)rb
->host
);
2257 assert(res
< rb
->max_length
);

/*
 * Translates a host ptr back to a RAMBlock, a ram_addr and an offset
 * in that RAMBlock.
 *
 * ptr: Host pointer to look up
 * round_offset: If true round the result offset down to a page boundary
 * *ram_addr: set to result ram_addr
 * *offset: set to result offset within the RAMBlock
 *
 * Returns: RAMBlock (or NULL if not found)
 *
 * By the time this function returns, the returned pointer is not protected
 * by RCU anymore.  If the caller is not within an RCU critical section and
 * does not hold the iothread lock, it must have other means of protecting the
 * pointer, such as a reference to the region that includes the incoming
 * ram_addr_t.
 */
RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
                                   ram_addr_t *offset)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_enabled()) {
        ram_addr_t ram_addr;
        RCU_READ_LOCK_GUARD();
        ram_addr = xen_ram_addr_from_mapcache(ptr);
        block = qemu_get_ram_block(ram_addr);
        if (block) {
            *offset = ram_addr - block->offset;
        }
        return block;
    }

    RCU_READ_LOCK_GUARD();
    block = qatomic_rcu_read(&ram_list.mru_block);
    if (block && block->host && host - block->host < block->max_length) {
        goto found;
    }

    RAMBLOCK_FOREACH(block) {
        /* This case appears when the block is not mapped. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->max_length) {
            goto found;
        }
    }

    return NULL;

found:
    *offset = (host - block->host);
    if (round_offset) {
        *offset &= TARGET_PAGE_MASK;
    }
    return block;
}

/*
 * Finds the named RAMBlock
 *
 * name: The name of RAMBlock to find
 *
 * Returns: RAMBlock (or NULL if not found)
 */
RAMBlock *qemu_ram_block_by_name(const char *name)
{
    RAMBlock *block;

    RAMBLOCK_FOREACH(block) {
        if (!strcmp(name, block->idstr)) {
            return block;
        }
    }

    return NULL;
}

/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
ram_addr_t qemu_ram_addr_from_host(void *ptr)
{
    RAMBlock *block;
    ram_addr_t offset;

    block = qemu_ram_block_from_host(ptr, false, &offset);
    if (!block) {
        return RAM_ADDR_INVALID;
    }

    return block->offset + offset;
}

/* Generate a debug exception if a watchpoint has been hit.  */
void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len,
                          MemTxAttrs attrs, int flags, uintptr_t ra)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUWatchpoint *wp;

    assert(tcg_enabled());
    if (cpu->watchpoint_hit) {
        /*
         * We re-entered the check after replacing the TB.
         * Now raise the debug interrupt so that it will
         * trigger after the current instruction.
         */
        qemu_mutex_lock_iothread();
        cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
        qemu_mutex_unlock_iothread();
        return;
    }

    addr = cc->adjust_watchpoint_address(cpu, addr, len);
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (watchpoint_address_matches(wp, addr, len)
            && (wp->flags & flags)) {
            if (replay_running_debug()) {
                /*
                 * Don't process the watchpoints when we are
                 * in a reverse debugging operation.
                 */
                replay_breakpoint();
                return;
            }
            if (flags == BP_MEM_READ) {
                wp->flags |= BP_WATCHPOINT_HIT_READ;
            } else {
                wp->flags |= BP_WATCHPOINT_HIT_WRITE;
            }
            wp->hitaddr = MAX(addr, wp->vaddr);
            wp->hitattrs = attrs;
            if (!cpu->watchpoint_hit) {
                if (wp->flags & BP_CPU &&
                    !cc->debug_check_watchpoint(cpu, wp)) {
                    wp->flags &= ~BP_WATCHPOINT_HIT;
                    continue;
                }
                cpu->watchpoint_hit = wp;

                mmap_lock();
                tb_check_watchpoint(cpu, ra);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    cpu->exception_index = EXCP_DEBUG;
                    mmap_unlock();
                    cpu_loop_exit_restore(cpu, ra);
                } else {
                    /* Force execution of one insn next time.  */
                    cpu->cflags_next_tb = 1 | curr_cflags();
                    mmap_unlock();
                    if (ra) {
                        cpu_restore_state(cpu, ra, true);
                    }
                    cpu_loop_exit_noexc(cpu);
                }
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}
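
/*
 * Note: setting cpu->cflags_next_tb to (1 | curr_cflags()) above requests
 * a translation block limited to a single instruction, so after that one
 * access completes, execution returns to the watchpoint check immediately.
 */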

static MemTxResult flatview_read(FlatView *fv, hwaddr addr,
                                 MemTxAttrs attrs, void *buf, hwaddr len);
static MemTxResult flatview_write(FlatView *fv, hwaddr addr, MemTxAttrs attrs,
                                  const void *buf, hwaddr len);
static bool flatview_access_valid(FlatView *fv, hwaddr addr, hwaddr len,
                                  bool is_write, MemTxAttrs attrs);

static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
                                unsigned len, MemTxAttrs attrs)
{
    subpage_t *subpage = opaque;
    uint8_t buf[8];
    MemTxResult res;

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
           subpage, len, addr);
#endif
    res = flatview_read(subpage->fv, addr + subpage->base, attrs, buf, len);
    if (res) {
        return res;
    }
    *data = ldn_p(buf, len);
    return MEMTX_OK;
}

static MemTxResult subpage_write(void *opaque, hwaddr addr,
                                 uint64_t value, unsigned len, MemTxAttrs attrs)
{
    subpage_t *subpage = opaque;
    uint8_t buf[8];

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx
           " value %"PRIx64"\n",
           __func__, subpage, len, addr, value);
#endif
    stn_p(buf, len, value);
    return flatview_write(subpage->fv, addr + subpage->base, attrs, buf, len);
}

static bool subpage_accepts(void *opaque, hwaddr addr,
                            unsigned len, bool is_write,
                            MemTxAttrs attrs)
{
    subpage_t *subpage = opaque;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
           __func__, subpage, is_write ? 'w' : 'r', len, addr);
#endif

    return flatview_access_valid(subpage->fv, addr + subpage->base,
                                 len, is_write, attrs);
}

static const MemoryRegionOps subpage_ops = {
    .read_with_attrs = subpage_read,
    .write_with_attrs = subpage_write,
    .impl.min_access_size = 1,
    .impl.max_access_size = 8,
    .valid.min_access_size = 1,
    .valid.max_access_size = 8,
    .valid.accepts = subpage_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
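
/*
 * A subpage stands in for one TARGET_PAGE_SIZE page whose contents belong
 * to more than one MemoryRegionSection.  The handlers in subpage_ops simply
 * re-enter the flatview accessors at subpage->base + addr, where dispatch
 * then resolves the fine-grained section for each individual access.
 */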

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE) {
        return -1;
    }
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
           __func__, mmio, start, end, idx, eidx, section);
#endif
    for (; idx <= eidx; idx++) {
        mmio->sub_section[idx] = section;
    }

    return 0;
}
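
/*
 * sub_section[] holds one uint16_t section index per byte of the page
 * (SUBPAGE_IDX masks off everything above the page offset), so once
 * registered, every offset within the page resolves directly to the
 * MemoryRegionSection that covers it.
 */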

static subpage_t *subpage_init(FlatView *fv, hwaddr base)
{
    subpage_t *mmio;

    /* mmio->sub_section is set to PHYS_SECTION_UNASSIGNED with g_malloc0 */
    mmio = g_malloc0(sizeof(subpage_t) + TARGET_PAGE_SIZE * sizeof(uint16_t));
    mmio->fv = fv;
    mmio->base = base;
    memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
                          NULL, TARGET_PAGE_SIZE);
    mmio->iomem.subpage = true;
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
           mmio, base, TARGET_PAGE_SIZE);
#endif

    return mmio;
}

static uint16_t dummy_section(PhysPageMap *map, FlatView *fv, MemoryRegion *mr)
{
    assert(fv);
    MemoryRegionSection section = {
        .fv = fv,
        .mr = mr,
        .offset_within_address_space = 0,
        .offset_within_region = 0,
        .size = int128_2_64(),
    };

    return phys_section_add(map, &section);
}

MemoryRegionSection *iotlb_to_section(CPUState *cpu,
                                      hwaddr index, MemTxAttrs attrs)
{
    int asidx = cpu_asidx_from_attrs(cpu, attrs);
    CPUAddressSpace *cpuas = &cpu->cpu_ases[asidx];
    AddressSpaceDispatch *d = qatomic_rcu_read(&cpuas->memory_dispatch);
    MemoryRegionSection *sections = d->map.sections;

    return &sections[index & ~TARGET_PAGE_MASK];
}
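
/*
 * TCG TLB entries keep the section index of an I/O page in the low
 * TARGET_PAGE_BITS of the iotlb value, which is why the index is masked
 * with ~TARGET_PAGE_MASK before indexing d->map.sections here.
 */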

static void io_mem_init(void)
{
    memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
                          NULL, UINT64_MAX);
}

AddressSpaceDispatch *address_space_dispatch_new(FlatView *fv)
{
    AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
    uint16_t n;

    n = dummy_section(&d->map, fv, &io_mem_unassigned);
    assert(n == PHYS_SECTION_UNASSIGNED);

    d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };

    return d;
}

void address_space_dispatch_free(AddressSpaceDispatch *d)
{
    phys_sections_free(&d->map);
    g_free(d);
}

static void do_nothing(CPUState *cpu, run_on_cpu_data d)
{
}

static void tcg_log_global_after_sync(MemoryListener *listener)
{
    CPUAddressSpace *cpuas;

    /* Wait for the CPU to end the current TB.  This avoids the following
     * incorrect race:
     *
     *      vCPU                         migration
     *      ----------------------       -------------------------
     *      TLB check -> slow path
     *        notdirty_mem_write
     *          write to RAM
     *          mark dirty
     *                                   clear dirty flag
     *      TLB check -> fast path
     *                                   read memory
     *        write to RAM
     *
     * by pushing the migration thread's memory read after the vCPU thread has
     * written the memory.
     */
    if (replay_mode == REPLAY_MODE_NONE) {
        /*
         * VGA can make calls to this function while updating the screen.
         * In record/replay mode this causes a deadlock, because
         * run_on_cpu waits for rr mutex. Therefore no races are possible
         * in this case and no need for making run_on_cpu when
         * record/replay is not enabled.
         */
        cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener);
        run_on_cpu(cpuas->cpu, do_nothing, RUN_ON_CPU_NULL);
    }
}

static void tcg_commit(MemoryListener *listener)
{
    CPUAddressSpace *cpuas;
    AddressSpaceDispatch *d;

    assert(tcg_enabled());
    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener);
    cpu_reloading_memory_map();
    /* The CPU and TLB are protected by the iothread lock.
     * We reload the dispatch pointer now because cpu_reloading_memory_map()
     * may have split the RCU critical section.
     */
    d = address_space_to_dispatch(cpuas->as);
    qatomic_rcu_set(&cpuas->memory_dispatch, d);
    tlb_flush(cpuas->cpu);
}

static void memory_map_init(void)
{
    system_memory = g_malloc(sizeof(*system_memory));

    memory_region_init(system_memory, NULL, "system", UINT64_MAX);
    address_space_init(&address_space_memory, system_memory, "memory");

    system_io = g_malloc(sizeof(*system_io));
    memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
                          65536);
    address_space_init(&address_space_io, system_io, "I/O");
}

MemoryRegion *get_system_memory(void)
{
    return system_memory;
}

MemoryRegion *get_system_io(void)
{
    return system_io;
}

static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
                                     hwaddr length)
{
    uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
    addr += memory_region_get_ram_addr(mr);

    /* No early return if dirty_log_mask is or becomes 0, because
     * cpu_physical_memory_set_dirty_range will still call
     * xen_modified_memory.
     */
    if (dirty_log_mask) {
        dirty_log_mask =
            cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
    }
    if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
        assert(tcg_enabled());
        tb_invalidate_phys_range(addr, addr + length);
        dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
    }
    cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
}

void memory_region_flush_rom_device(MemoryRegion *mr, hwaddr addr, hwaddr size)
{
    /*
     * In principle this function would work on other memory region types too,
     * but the ROM device use case is the only one where this operation is
     * necessary.  Other memory regions should use the
     * address_space_read/write() APIs.
     */
    assert(memory_region_is_romd(mr));

    invalidate_and_set_dirty(mr, addr, size);
}

static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
{
    unsigned access_size_max = mr->ops->valid.max_access_size;

    /* Regions are assumed to support 1-4 byte accesses unless
       otherwise specified.  */
    if (access_size_max == 0) {
        access_size_max = 4;
    }

    /* Bound the maximum access by the alignment of the address.  */
    if (!mr->ops->impl.unaligned) {
        unsigned align_size_max = addr & -addr;
        if (align_size_max != 0 && align_size_max < access_size_max) {
            access_size_max = align_size_max;
        }
    }

    /* Don't attempt accesses larger than the maximum.  */
    if (l > access_size_max) {
        l = access_size_max;
    }
    l = pow2floor(l);

    return l;
}
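
/*
 * Worked example: for addr == 0x3006, addr & -addr isolates the lowest
 * set bit and yields 2, so an 8-byte access to a region that does not
 * declare impl.unaligned is clamped to 2 bytes here; the enclosing
 * access loop then continues with the remainder of the buffer.
 */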

static bool prepare_mmio_access(MemoryRegion *mr)
{
    bool release_lock = false;

    if (!qemu_mutex_iothread_locked()) {
        qemu_mutex_lock_iothread();
        release_lock = true;
    }
    if (mr->flush_coalesced_mmio) {
        qemu_flush_coalesced_mmio_buffer();
    }

    return release_lock;
}

/* Called within RCU critical section.  */
static MemTxResult flatview_write_continue(FlatView *fv, hwaddr addr,
                                           MemTxAttrs attrs,
                                           const void *ptr,
                                           hwaddr len, hwaddr addr1,
                                           hwaddr l, MemoryRegion *mr)
{
    uint8_t *ram_ptr;
    uint64_t val;
    MemTxResult result = MEMTX_OK;
    bool release_lock = false;
    const uint8_t *buf = ptr;

    for (;;) {
        if (!memory_access_is_direct(mr, true)) {
            release_lock |= prepare_mmio_access(mr);
            l = memory_access_size(mr, l, addr1);
            /* XXX: could force current_cpu to NULL to avoid
               potential bugs */
            val = ldn_he_p(buf, l);
            result |= memory_region_dispatch_write(mr, addr1, val,
                                                   size_memop(l), attrs);
        } else {
            /* RAM case */
            ram_ptr = qemu_ram_ptr_length(mr->ram_block, addr1, &l, false);
            memcpy(ram_ptr, buf, l);
            invalidate_and_set_dirty(mr, addr1, l);
        }

        if (release_lock) {
            qemu_mutex_unlock_iothread();
            release_lock = false;
        }

        len -= l;
        buf += l;
        addr += l;

        if (!len) {
            break;
        }

        l = len;
        mr = flatview_translate(fv, addr, &addr1, &l, true, attrs);
    }

    return result;
}

/* Called from RCU critical section.  */
static MemTxResult flatview_write(FlatView *fv, hwaddr addr, MemTxAttrs attrs,
                                  const void *buf, hwaddr len)
{
    hwaddr l;
    hwaddr addr1;
    MemoryRegion *mr;
    MemTxResult result = MEMTX_OK;

    l = len;
    mr = flatview_translate(fv, addr, &addr1, &l, true, attrs);
    result = flatview_write_continue(fv, addr, attrs, buf, len,
                                     addr1, l, mr);

    return result;
}

/* Called within RCU critical section.  */
MemTxResult flatview_read_continue(FlatView *fv, hwaddr addr,
                                   MemTxAttrs attrs, void *ptr,
                                   hwaddr len, hwaddr addr1, hwaddr l,
                                   MemoryRegion *mr)
{
    uint8_t *ram_ptr;
    uint64_t val;
    MemTxResult result = MEMTX_OK;
    bool release_lock = false;
    uint8_t *buf = ptr;

    for (;;) {
        if (!memory_access_is_direct(mr, false)) {
            /* I/O case */
            release_lock |= prepare_mmio_access(mr);
            l = memory_access_size(mr, l, addr1);
            result |= memory_region_dispatch_read(mr, addr1, &val,
                                                  size_memop(l), attrs);
            stn_he_p(buf, l, val);
        } else {
            /* RAM case */
            fuzz_dma_read_cb(addr, len, mr, false);
            ram_ptr = qemu_ram_ptr_length(mr->ram_block, addr1, &l, false);
            memcpy(buf, ram_ptr, l);
        }

        if (release_lock) {
            qemu_mutex_unlock_iothread();
            release_lock = false;
        }

        len -= l;
        buf += l;
        addr += l;

        if (!len) {
            break;
        }

        l = len;
        mr = flatview_translate(fv, addr, &addr1, &l, false, attrs);
    }

    return result;
}

/* Called from RCU critical section.  */
static MemTxResult flatview_read(FlatView *fv, hwaddr addr,
                                 MemTxAttrs attrs, void *buf, hwaddr len)
{
    hwaddr l;
    hwaddr addr1;
    MemoryRegion *mr;

    l = len;
    mr = flatview_translate(fv, addr, &addr1, &l, false, attrs);
    return flatview_read_continue(fv, addr, attrs, buf, len,
                                  addr1, l, mr);
}

MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
                                    MemTxAttrs attrs, void *buf, hwaddr len)
{
    MemTxResult result = MEMTX_OK;
    FlatView *fv;

    if (len > 0) {
        RCU_READ_LOCK_GUARD();
        fv = address_space_to_flatview(as);
        result = flatview_read(fv, addr, attrs, buf, len);
    }

    return result;
}

MemTxResult address_space_write(AddressSpace *as, hwaddr addr,
                                MemTxAttrs attrs,
                                const void *buf, hwaddr len)
{
    MemTxResult result = MEMTX_OK;
    FlatView *fv;

    if (len > 0) {
        RCU_READ_LOCK_GUARD();
        fv = address_space_to_flatview(as);
        result = flatview_write(fv, addr, attrs, buf, len);
    }

    return result;
}

MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
                             void *buf, hwaddr len, bool is_write)
{
    if (is_write) {
        return address_space_write(as, addr, attrs, buf, len);
    } else {
        return address_space_read_full(as, addr, attrs, buf, len);
    }
}
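
/*
 * Illustrative use only (the address and value below are made up): a
 * device model performing a 4-byte bus-master write could do
 *
 *     uint32_t val = cpu_to_le32(0x12345678);
 *     address_space_rw(&address_space_memory, 0x1000,
 *                      MEMTXATTRS_UNSPECIFIED, &val, sizeof(val), true);
 *
 * although address_space_write()/address_space_read() are preferred when
 * the direction is already known at the call site.
 */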

void cpu_physical_memory_rw(hwaddr addr, void *buf,
                            hwaddr len, bool is_write)
{
    address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
                     buf, len, is_write);
}

enum write_rom_type {
    WRITE_DATA,
    FLUSH_CACHE,
};

static inline MemTxResult address_space_write_rom_internal(AddressSpace *as,
                                                           hwaddr addr,
                                                           MemTxAttrs attrs,
                                                           const void *ptr,
                                                           hwaddr len,
                                                           enum write_rom_type type)
{
    hwaddr l;
    uint8_t *ram_ptr;
    hwaddr addr1;
    MemoryRegion *mr;
    const uint8_t *buf = ptr;

    RCU_READ_LOCK_GUARD();
    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true, attrs);

        if (!(memory_region_is_ram(mr) ||
              memory_region_is_romd(mr))) {
            l = memory_access_size(mr, l, addr1);
        } else {
            /* ROM/RAM case */
            ram_ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
            switch (type) {
            case WRITE_DATA:
                memcpy(ram_ptr, buf, l);
                invalidate_and_set_dirty(mr, addr1, l);
                break;
            case FLUSH_CACHE:
                flush_icache_range((uintptr_t)ram_ptr, (uintptr_t)ram_ptr + l);
                break;
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return MEMTX_OK;
}

/* used for ROM loading : can write in RAM and ROM */
MemTxResult address_space_write_rom(AddressSpace *as, hwaddr addr,
                                    MemTxAttrs attrs,
                                    const void *buf, hwaddr len)
{
    return address_space_write_rom_internal(as, addr, attrs,
                                            buf, len, WRITE_DATA);
}

void cpu_flush_icache_range(hwaddr start, hwaddr len)
{
    /*
     * This function should do the same thing as an icache flush that was
     * triggered from within the guest. For TCG we are always cache coherent,
     * so there is no need to flush anything. For KVM / Xen we need to flush
     * the host's instruction cache at least.
     */
    if (tcg_enabled()) {
        return;
    }

    address_space_write_rom_internal(&address_space_memory,
                                     start, MEMTXATTRS_UNSPECIFIED,
                                     NULL, len, FLUSH_CACHE);
}

static BounceBuffer bounce;

typedef struct MapClient {
    QEMUBH *bh;
    QLIST_ENTRY(MapClient) link;
} MapClient;

QemuMutex map_client_list_lock;
static QLIST_HEAD(, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

static void cpu_unregister_map_client_do(MapClient *client)
{
    QLIST_REMOVE(client, link);
    g_free(client);
}

static void cpu_notify_map_clients_locked(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        qemu_bh_schedule(client->bh);
        cpu_unregister_map_client_do(client);
    }
}

void cpu_register_map_client(QEMUBH *bh)
{
    MapClient *client = g_malloc(sizeof(*client));

    qemu_mutex_lock(&map_client_list_lock);
    client->bh = bh;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    if (!qatomic_read(&bounce.in_use)) {
        cpu_notify_map_clients_locked();
    }
    qemu_mutex_unlock(&map_client_list_lock);
}

void cpu_exec_init_all(void)
{
    qemu_mutex_init(&ram_list.mutex);
    /* The data structures we set up here depend on knowing the page size,
     * so no more changes can be made after this point.
     * In an ideal world, nothing we did before we had finished the
     * machine setup would care about the target page size, and we could
     * do this much later, rather than requiring board models to state
     * up front what their requirements are.
     */
    finalize_target_page_bits();
    io_mem_init();
    memory_map_init();
    qemu_mutex_init(&map_client_list_lock);
}

void cpu_unregister_map_client(QEMUBH *bh)
{
    MapClient *client;

    qemu_mutex_lock(&map_client_list_lock);
    QLIST_FOREACH(client, &map_client_list, link) {
        if (client->bh == bh) {
            cpu_unregister_map_client_do(client);
            break;
        }
    }
    qemu_mutex_unlock(&map_client_list_lock);
}

static void cpu_notify_map_clients(void)
{
    qemu_mutex_lock(&map_client_list_lock);
    cpu_notify_map_clients_locked();
    qemu_mutex_unlock(&map_client_list_lock);
}

static bool flatview_access_valid(FlatView *fv, hwaddr addr, hwaddr len,
                                  bool is_write, MemTxAttrs attrs)
{
    MemoryRegion *mr;
    hwaddr l, xlat;

    while (len > 0) {
        l = len;
        mr = flatview_translate(fv, addr, &xlat, &l, is_write, attrs);
        if (!memory_access_is_direct(mr, is_write)) {
            l = memory_access_size(mr, l, addr);
            if (!memory_region_access_valid(mr, xlat, l, is_write, attrs)) {
                return false;
            }
        }

        len -= l;
        addr += l;
    }
    return true;
}

bool address_space_access_valid(AddressSpace *as, hwaddr addr,
                                hwaddr len, bool is_write,
                                MemTxAttrs attrs)
{
    FlatView *fv;
    bool result;

    RCU_READ_LOCK_GUARD();
    fv = address_space_to_flatview(as);
    result = flatview_access_valid(fv, addr, len, is_write, attrs);
    return result;
}

static hwaddr
flatview_extend_translation(FlatView *fv, hwaddr addr,
                            hwaddr target_len,
                            MemoryRegion *mr, hwaddr base, hwaddr len,
                            bool is_write, MemTxAttrs attrs)
{
    hwaddr done = 0;
    hwaddr xlat;
    MemoryRegion *this_mr;

    for (;;) {
        target_len -= len;
        addr += len;
        done += len;
        if (target_len == 0) {
            return done;
        }

        len = target_len;
        this_mr = flatview_translate(fv, addr, &xlat,
                                     &len, is_write, attrs);
        if (this_mr != mr || xlat != base + done) {
            return done;
        }
    }
}
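
/*
 * The loop above re-translates the range piece by piece and stops as soon
 * as the translation leaves the original MemoryRegion or is no longer
 * contiguous with it, so the value returned is the length of the longest
 * directly mappable prefix of the requested range.
 */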

/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *address_space_map(AddressSpace *as,
                        hwaddr addr,
                        hwaddr *plen,
                        bool is_write,
                        MemTxAttrs attrs)
{
    hwaddr len = *plen;
    hwaddr l, xlat;
    MemoryRegion *mr;
    void *ptr;
    FlatView *fv;

    if (len == 0) {
        return NULL;
    }

    l = len;
    RCU_READ_LOCK_GUARD();
    fv = address_space_to_flatview(as);
    mr = flatview_translate(fv, addr, &xlat, &l, is_write, attrs);

    if (!memory_access_is_direct(mr, is_write)) {
        if (qatomic_xchg(&bounce.in_use, true)) {
            *plen = 0;
            return NULL;
        }
        /* Avoid unbounded allocations */
        l = MIN(l, TARGET_PAGE_SIZE);
        bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
        bounce.addr = addr;
        bounce.len = l;

        memory_region_ref(mr);
        bounce.mr = mr;
        if (!is_write) {
            flatview_read(fv, addr, MEMTXATTRS_UNSPECIFIED,
                          bounce.buffer, l);
        }

        *plen = l;
        return bounce.buffer;
    }

    memory_region_ref(mr);
    *plen = flatview_extend_translation(fv, addr, len, mr, xlat,
                                        l, is_write, attrs);
    fuzz_dma_read_cb(addr, *plen, mr, is_write);
    ptr = qemu_ram_ptr_length(mr->ram_block, xlat, plen, true);

    return ptr;
}
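
/*
 * Every successful address_space_map() must be paired with an
 * address_space_unmap(); for a bounce-buffered mapping the unmap is also
 * what writes the data back to the target (for writes) and wakes up any
 * waiting map clients.
 */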

/* Unmaps a memory region previously mapped by address_space_map().
 * Will also mark the memory as dirty if is_write is true.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         bool is_write, hwaddr access_len)
{
    if (buffer != bounce.buffer) {
        MemoryRegion *mr;
        ram_addr_t addr1;

        mr = memory_region_from_host(buffer, &addr1);
        assert(mr != NULL);
        if (is_write) {
            invalidate_and_set_dirty(mr, addr1, access_len);
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        memory_region_unref(mr);
        return;
    }
    if (is_write) {
        address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
                            bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    memory_region_unref(bounce.mr);
    qatomic_mb_set(&bounce.in_use, false);
    cpu_notify_map_clients();
}

void *cpu_physical_memory_map(hwaddr addr,
                              hwaddr *plen,
                              bool is_write)
{
    return address_space_map(&address_space_memory, addr, plen, is_write,
                             MEMTXATTRS_UNSPECIFIED);
}

void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                               bool is_write, hwaddr access_len)
{
    return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
}

#define ARG1_DECL                AddressSpace *as
#define ARG1                     as
#define SUFFIX
#define TRANSLATE(...)           address_space_translate(as, __VA_ARGS__)
#define RCU_READ_LOCK(...)       rcu_read_lock()
#define RCU_READ_UNLOCK(...)     rcu_read_unlock()
#include "memory_ldst.c.inc"
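
/*
 * The include above stamps out the out-of-line address_space_ldub/stb,
 * lduw/stw, ldl/stl and ldq/stq accessors for plain address spaces,
 * parameterized by the ARG1, SUFFIX, TRANSLATE and RCU_READ_* macros
 * defined immediately before it.
 */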

int64_t address_space_cache_init(MemoryRegionCache *cache,
                                 AddressSpace *as,
                                 hwaddr addr,
                                 hwaddr len,
                                 bool is_write)
{
    AddressSpaceDispatch *d;
    hwaddr l;
    MemoryRegion *mr;
    Int128 diff;

    assert(len > 0);

    l = len;
    cache->fv = address_space_get_flatview(as);
    d = flatview_to_dispatch(cache->fv);
    cache->mrs = *address_space_translate_internal(d, addr, &cache->xlat, &l, true);

    /*
     * cache->xlat is now relative to cache->mrs.mr, not to the section itself.
     * Take that into account to compute how many bytes are there between
     * cache->xlat and the end of the section.
     */
    diff = int128_sub(cache->mrs.size,
                      int128_make64(cache->xlat - cache->mrs.offset_within_region));
    l = int128_get64(int128_min(diff, int128_make64(l)));

    mr = cache->mrs.mr;
    memory_region_ref(mr);
    if (memory_access_is_direct(mr, is_write)) {
        /* We don't care about the memory attributes here as we're only
         * doing this if we found actual RAM, which behaves the same
         * regardless of attributes; so UNSPECIFIED is fine.
         */
        l = flatview_extend_translation(cache->fv, addr, len, mr,
                                        cache->xlat, l, is_write,
                                        MEMTXATTRS_UNSPECIFIED);
        cache->ptr = qemu_ram_ptr_length(mr->ram_block, cache->xlat, &l, true);
    } else {
        cache->ptr = NULL;
    }

    cache->len = l;
    cache->is_write = is_write;
    return l;
}

void address_space_cache_invalidate(MemoryRegionCache *cache,
                                    hwaddr addr,
                                    hwaddr access_len)
{
    assert(cache->is_write);
    if (likely(cache->ptr)) {
        invalidate_and_set_dirty(cache->mrs.mr, addr + cache->xlat, access_len);
    }
}

void address_space_cache_destroy(MemoryRegionCache *cache)
{
    if (!cache->mrs.mr) {
        return;
    }

    if (xen_enabled()) {
        xen_invalidate_map_cache_entry(cache->ptr);
    }
    memory_region_unref(cache->mrs.mr);
    flatview_unref(cache->fv);
    cache->mrs.mr = NULL;
    cache->fv = NULL;
}

/* Called from RCU critical section.  This function has the same
 * semantics as address_space_translate, but it only works on a
 * predefined range of a MemoryRegion that was mapped with
 * address_space_cache_init.
 */
static inline MemoryRegion *address_space_translate_cached(
    MemoryRegionCache *cache, hwaddr addr, hwaddr *xlat,
    hwaddr *plen, bool is_write, MemTxAttrs attrs)
{
    MemoryRegionSection section;
    MemoryRegion *mr;
    IOMMUMemoryRegion *iommu_mr;
    AddressSpace *target_as;

    assert(!cache->ptr);
    *xlat = addr + cache->xlat;

    mr = cache->mrs.mr;
    iommu_mr = memory_region_get_iommu(mr);
    if (!iommu_mr) {
        /* MMIO region.  */
        return mr;
    }

    section = address_space_translate_iommu(iommu_mr, xlat, plen,
                                            NULL, is_write, true,
                                            &target_as, attrs);
    return section.mr;
}

/* Called from RCU critical section. address_space_read_cached uses this
 * out of line function when the target is an MMIO or IOMMU region.
 */
MemTxResult
address_space_read_cached_slow(MemoryRegionCache *cache, hwaddr addr,
                               void *buf, hwaddr len)
{
    hwaddr addr1, l;
    MemoryRegion *mr;

    l = len;
    mr = address_space_translate_cached(cache, addr, &addr1, &l, false,
                                        MEMTXATTRS_UNSPECIFIED);
    return flatview_read_continue(cache->fv,
                                  addr, MEMTXATTRS_UNSPECIFIED, buf, len,
                                  addr1, l, mr);
}

/* Called from RCU critical section. address_space_write_cached uses this
 * out of line function when the target is an MMIO or IOMMU region.
 */
MemTxResult
address_space_write_cached_slow(MemoryRegionCache *cache, hwaddr addr,
                                const void *buf, hwaddr len)
{
    hwaddr addr1, l;
    MemoryRegion *mr;

    l = len;
    mr = address_space_translate_cached(cache, addr, &addr1, &l, true,
                                        MEMTXATTRS_UNSPECIFIED);
    return flatview_write_continue(cache->fv,
                                   addr, MEMTXATTRS_UNSPECIFIED, buf, len,
                                   addr1, l, mr);
}

#define ARG1_DECL                MemoryRegionCache *cache
#define ARG1                     cache
#define SUFFIX                   _cached_slow
#define TRANSLATE(...)           address_space_translate_cached(cache, __VA_ARGS__)
#define RCU_READ_LOCK()          ((void)0)
#define RCU_READ_UNLOCK()        ((void)0)
#include "memory_ldst.c.inc"

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        void *ptr, target_ulong len, bool is_write)
{
    hwaddr phys_addr;
    target_ulong l, page;
    uint8_t *buf = ptr;

    cpu_synchronize_state(cpu);
    while (len > 0) {
        int asidx;
        MemTxAttrs attrs;
        MemTxResult res;

        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_attrs_debug(cpu, page, &attrs);
        asidx = cpu_asidx_from_attrs(cpu, attrs);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1) {
            return -1;
        }
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len) {
            l = len;
        }
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write) {
            res = address_space_write_rom(cpu->cpu_ases[asidx].as, phys_addr,
                                          attrs, buf, l);
        } else {
            res = address_space_read(cpu->cpu_ases[asidx].as, phys_addr,
                                     attrs, buf, l);
        }
        if (res != MEMTX_OK) {
            return -1;
        }
        len -= l;
        buf += l;
        addr += l;
    }

    return 0;
}
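
/*
 * Debugger stores go through address_space_write_rom() above, so a
 * debugger such as the gdbstub can plant breakpoint instructions even in
 * memory the guest sees as read-only.
 */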

/*
 * Allows code that needs to deal with migration bitmaps etc to still be built
 * target independent.
 */
size_t qemu_target_page_size(void)
{
    return TARGET_PAGE_SIZE;
}

int qemu_target_page_bits(void)
{
    return TARGET_PAGE_BITS;
}

int qemu_target_page_bits_min(void)
{
    return TARGET_PAGE_BITS_MIN;
}

bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegion *mr;
    hwaddr l = 1;
    bool res;

    RCU_READ_LOCK_GUARD();
    mr = address_space_translate(&address_space_memory,
                                 phys_addr, &phys_addr, &l, false,
                                 MEMTXATTRS_UNSPECIFIED);

    res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
    return res;
}

int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
{
    RAMBlock *block;
    int ret = 0;

    RCU_READ_LOCK_GUARD();
    RAMBLOCK_FOREACH(block) {
        ret = func(block, opaque);
        if (ret) {
            break;
        }
    }
    return ret;
}

/*
 * Unmap pages of memory from start to start+length such that
 * they a) read as 0, b) Trigger whatever fault mechanism
 * the OS provides for postcopy.
 * The pages must be unmapped by the end of the function.
 * Returns: 0 on success, non-0 on failure
 */
int ram_block_discard_range(RAMBlock *rb, uint64_t start, size_t length)
{
    int ret = -1;

    uint8_t *host_startaddr = rb->host + start;

    if (!QEMU_PTR_IS_ALIGNED(host_startaddr, rb->page_size)) {
        error_report("ram_block_discard_range: Unaligned start address: %p",
                     host_startaddr);
        goto err;
    }

    if ((start + length) <= rb->used_length) {
        bool need_madvise, need_fallocate;
        if (!QEMU_IS_ALIGNED(length, rb->page_size)) {
            error_report("ram_block_discard_range: Unaligned length: %zx",
                         length);
            goto err;
        }

        errno = ENOTSUP; /* If we are missing MADVISE etc */

        /* The logic here is messy;
         *    madvise DONTNEED fails for hugepages
         *    fallocate works on hugepages and shmem
         */
        need_madvise = (rb->page_size == qemu_host_page_size);
        need_fallocate = rb->fd != -1;
        if (need_fallocate) {
            /* For a file, this causes the area of the file to be zero'd
             * if read, and for hugetlbfs also causes it to be unmapped
             * so a userfault will trigger.
             */
#ifdef CONFIG_FALLOCATE_PUNCH_HOLE
            ret = fallocate(rb->fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
                            start, length);
            if (ret) {
                ret = -errno;
                error_report("ram_block_discard_range: Failed to fallocate "
                             "%s:%" PRIx64 " +%zx (%d)",
                             rb->idstr, start, length, ret);
                goto err;
            }
#else
            ret = -ENOSYS;
            error_report("ram_block_discard_range: fallocate not available/file "
                         "%s:%" PRIx64 " +%zx (%d)",
                         rb->idstr, start, length, ret);
            goto err;
#endif
        }
        if (need_madvise) {
            /* For normal RAM this causes it to be unmapped,
             * for shared memory it causes the local mapping to disappear
             * and to fall back on the file contents (which we just
             * fallocate'd away).
             */
#if defined(CONFIG_MADVISE)
            ret = madvise(host_startaddr, length, MADV_DONTNEED);
            if (ret) {
                ret = -errno;
                error_report("ram_block_discard_range: Failed to discard range "
                             "%s:%" PRIx64 " +%zx (%d)",
                             rb->idstr, start, length, ret);
                goto err;
            }
#else
            ret = -ENOSYS;
            error_report("ram_block_discard_range: MADVISE not available "
                         "%s:%" PRIx64 " +%zx (%d)",
                         rb->idstr, start, length, ret);
            goto err;
#endif
        }
        trace_ram_block_discard_range(rb->idstr, host_startaddr, length,
                                      need_madvise, need_fallocate, ret);
    } else {
        error_report("ram_block_discard_range: Overrun block '%s' (%" PRIu64
                     "/%zx/" RAM_ADDR_FMT")",
                     rb->idstr, start, length, rb->used_length);
    }

err:
    return ret;
}

bool ramblock_is_pmem(RAMBlock *rb)
{
    return rb->flags & RAM_PMEM;
}

static void mtree_print_phys_entries(int start, int end, int skip, int ptr)
{
    if (start == end - 1) {
        qemu_printf("\t%3d      ", start);
    } else {
        qemu_printf("\t%3d..%-3d ", start, end - 1);
    }
    qemu_printf(" skip=%d ", skip);
    if (ptr == PHYS_MAP_NODE_NIL) {
        qemu_printf(" ptr=NIL");
    } else if (!skip) {
        qemu_printf(" ptr=#%d", ptr);
    } else {
        qemu_printf(" ptr=[%d]", ptr);
    }
    qemu_printf("\n");
}

#define MR_SIZE(size) (int128_nz(size) ? (hwaddr)int128_get64( \
                           int128_sub((size), int128_one())) : 0)

void mtree_print_dispatch(AddressSpaceDispatch *d, MemoryRegion *root)
{
    int i;

    qemu_printf("  Dispatch\n");
    qemu_printf("    Physical sections\n");

    for (i = 0; i < d->map.sections_nb; ++i) {
        MemoryRegionSection *s = d->map.sections + i;
        const char *names[] = { " [unassigned]", " [not dirty]",
                                " [ROM]", " [watch]" };

        qemu_printf("      #%d @" TARGET_FMT_plx ".." TARGET_FMT_plx
                    " %s%s%s%s%s",
            i,
            s->offset_within_address_space,
            s->offset_within_address_space + MR_SIZE(s->mr->size),
            s->mr->name ? s->mr->name : "(noname)",
            i < ARRAY_SIZE(names) ? names[i] : "",
            s->mr == root ? " [ROOT]" : "",
            s == d->mru_section ? " [MRU]" : "",
            s->mr->is_iommu ? " [iommu]" : "");

        if (s->mr->alias) {
            qemu_printf(" alias=%s", s->mr->alias->name ?
                    s->mr->alias->name : "noname");
        }
        qemu_printf("\n");
    }

    qemu_printf("    Nodes (%d bits per level, %d levels) ptr=[%d] skip=%d\n",
               P_L2_BITS, P_L2_LEVELS, d->phys_map.ptr, d->phys_map.skip);
    for (i = 0; i < d->map.nodes_nb; ++i) {
        int j, jprev;
        PhysPageEntry prev;
        Node *n = d->map.nodes + i;

        qemu_printf("      [%d]\n", i);

        for (j = 0, jprev = 0, prev = *n[0]; j < ARRAY_SIZE(*n); ++j) {
            PhysPageEntry *pe = *n + j;

            if (pe->ptr == prev.ptr && pe->skip == prev.skip) {
                continue;
            }

            mtree_print_phys_entries(jprev, j, prev.skip, prev.ptr);

            jprev = j;
            prev = *pe;
        }

        if (jprev != ARRAY_SIZE(*n)) {
            mtree_print_phys_entries(jprev, j, prev.skip, prev.ptr);
        }
    }
}

/*
 * If positive, discarding RAM is disabled. If negative, discarding RAM is
 * required to work and cannot be disabled.
 */
static int ram_block_discard_disabled;

int ram_block_discard_disable(bool state)
{
    int old;

    if (!state) {
        qatomic_dec(&ram_block_discard_disabled);
        return 0;
    }

    do {
        old = qatomic_read(&ram_block_discard_disabled);
        if (old < 0) {
            return -EBUSY;
        }
    } while (qatomic_cmpxchg(&ram_block_discard_disabled,
                             old, old + 1) != old);
    return 0;
}

int ram_block_discard_require(bool state)
{
    int old;

    if (!state) {
        qatomic_inc(&ram_block_discard_disabled);
        return 0;
    }

    do {
        old = qatomic_read(&ram_block_discard_disabled);
        if (old > 0) {
            return -EBUSY;
        }
    } while (qatomic_cmpxchg(&ram_block_discard_disabled,
                             old, old - 1) != old);
    return 0;
}

bool ram_block_discard_is_disabled(void)
{
    return qatomic_read(&ram_block_discard_disabled) > 0;
}

bool ram_block_discard_is_required(void)
{
    return qatomic_read(&ram_block_discard_disabled) < 0;
}
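
/*
 * The disable/require counter is deliberately one signed value: users that
 * cannot tolerate discards (e.g. long-term page pinning) drive it positive,
 * users that depend on discarding drive it negative, and the cmpxchg loops
 * above ensure the two uses exclude each other atomically.
 */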