/*
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/cutils.h"
#include "exec/exec-all.h"
#include "hw/qdev-core.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/boards.h"
#include "hw/xen/xen.h"
#endif
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#if defined(CONFIG_USER_ONLY)
#else /* !CONFIG_USER_ONLY */
#include "exec/memory.h"
#include "exec/ioport.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#include "sysemu/xen-mapcache.h"
#endif
#include "exec/cpu-all.h"
#include "qemu/rcu_queue.h"
#include "qemu/main-loop.h"
#include "translate-all.h"
#include "sysemu/replay.h"
#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "migration/vmstate.h"
#include "qemu/range.h"
#include "qemu/mmap-alloc.h"
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* ram_list is read under rcu_read_lock()/rcu_read_unlock().  Writes
 * are protected by the ramlist lock.
 */
RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;
/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC   (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED     (1 << 1)

/* Only a portion of RAM (used_length) is actually used, and migrated.
 * This used_length size can change across reboots.
 */
#define RAM_RESIZEABLE (1 << 2)

#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
__thread CPUState *current_cpu;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables. */
#define ADDR_SPACE_BITS 64

#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)

typedef PhysPageEntry Node[P_L2_SIZE];
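/*
 * Illustrative sketch (not from the original file): the flat page index
 * (addr >> TARGET_PAGE_BITS) is consumed P_L2_BITS at a time, from the most
 * significant chunk down, which is exactly how phys_page_find() walks the
 * radix tree below.
 */
#if 0
static uint32_t example_l2_chunk(hwaddr addr, int level)
{
    hwaddr index = addr >> TARGET_PAGE_BITS;

    /* the entry to follow within one Node at the given level */
    return (index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1);
}
#endif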
typedef struct PhysPageMap {
    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb_alloc;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    MemoryRegionSection *mru_section;
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
};
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;
/**
 * CPUAddressSpace: all the information a CPU needs about an AddressSpace
 * @cpu: the CPU whose AddressSpace this is
 * @as: the AddressSpace itself
 * @memory_dispatch: its dispatch pointer (cached, RCU protected)
 * @tcg_as_listener: listener for tracking changes to the AddressSpace
 */
struct CPUAddressSpace {
    CPUState *cpu;
    AddressSpace *as;
    struct AddressSpaceDispatch *memory_dispatch;
    MemoryListener tcg_as_listener;
};

#endif
#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    static unsigned alloc_hint = 16;

    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, alloc_hint);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
        alloc_hint = map->nodes_nb_alloc;
    }
}
static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
{
    ret = map->nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);

    e.skip = leaf ? 0 : 1;
    e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
    for (i = 0; i < P_L2_SIZE; ++i) {
        memcpy(&p[i], &e, sizeof(e));
    }
    return ret;
}
static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map, level == 0);
    }
    p = map->nodes[lp->ptr];
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            /* ... set a leaf entry covering a whole step ... */
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
    }
}
static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}
/* Compact a non-leaf page entry. Simply detect that the entry has a single child,
 * and update our entry so we can skip it and go directly to the destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
{
    unsigned valid_ptr = P_L2_SIZE;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }
        /* ... remember the single valid child in valid_ptr ... */
        phys_page_compact(&p[i], nodes, compacted);
    }

    /* We can only compress if there's only one child. */
    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule in the future.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}
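/*
 * Illustrative sketch (not from the original file): after compaction the skip
 * fields chain up, so a lookup can jump several radix levels in one hop.
 */
#if 0
    PhysPageEntry parent = { .skip = 1, .ptr = 5 };   /* points at node 5, its only child */
    PhysPageEntry child  = { .skip = 2, .ptr = 9 };   /* node 5's single entry */

    parent.ptr  = child.ptr;     /* now jumps straight to node 9 */
    parent.skip += child.skip;   /* 1 + 2 == 3 levels in a single step */
#endif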
static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    DECLARE_BITMAP(compacted, nodes_nb);

    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes, compacted);
    }
}
static inline bool section_covers_addr(const MemoryRegionSection *section,
                                       hwaddr addr)
{
    /* Memory topology clips a memory region to [0, 2^64); size.hi > 0 means
     * the section must cover the entire address space.
     */
    return section->size.hi ||
           range_covers_byte(section->offset_within_address_space,
                             section->size.lo, addr);
}
static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    hwaddr index = addr >> TARGET_PAGE_BITS;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (section_covers_addr(&sections[lp.ptr], addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}
bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}
/* Called from RCU critical section */
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section = atomic_read(&d->mru_section);
    subpage_t *subpage;

    if (section && section != &d->map.sections[PHYS_SECTION_UNASSIGNED] &&
        section_covers_addr(section, addr)) {
        /* the most-recently-used section still covers this address */
    } else {
        section = phys_page_find(d->phys_map, addr, d->map.nodes,
                                 d->map.sections);
    }
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    atomic_set(&d->mru_section, section);
/* Called from RCU critical section */
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    MemoryRegion *mr;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    mr = section->mr;

    /* MMIO registers can be expected to perform full-width accesses based only
     * on their address, without considering adjacent registers that could
     * decode to completely different MemoryRegions.  When such registers
     * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
     * regions overlap wildly.  For this reason we cannot clamp the accesses
     * here.
     *
     * If the length is small (as is the case for address_space_ldl/stl),
     * everything works fine.  If the incoming length is large, however,
     * the caller really has to do the clamping through memory_access_size.
     */
    if (memory_region_is_ram(mr)) {
        diff = int128_sub(section->size, int128_make64(addr));
        *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    }
    return section;
}
/* Called from RCU critical section */
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;

    for (;;) {
        AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
        section = address_space_translate_internal(d, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        *plen = MIN(page, *plen);
    }

    *xlat = addr;
    return mr;
}
/* Called from RCU critical section */
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen)
{
    MemoryRegionSection *section;
    AddressSpaceDispatch *d = cpu->cpu_ases[asidx].memory_dispatch;

    section = address_space_translate_internal(d, addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;

    return 0;
}
static int cpu_common_pre_load(void *opaque)
{
    CPUState *cpu = opaque;

    cpu->exception_index = -1;

    return 0;
}

static bool cpu_common_exception_index_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return tcg_enabled() && cpu->exception_index != -1;
}
static const VMStateDescription vmstate_cpu_common_exception_index = {
    .name = "cpu_common/exception_index",
    .minimum_version_id = 1,
    .needed = cpu_common_exception_index_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(exception_index, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

static bool cpu_common_crash_occurred_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return cpu->crash_occurred;
}

static const VMStateDescription vmstate_cpu_common_crash_occurred = {
    .name = "cpu_common/crash_occurred",
    .minimum_version_id = 1,
    .needed = cpu_common_crash_occurred_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(crash_occurred, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .minimum_version_id = 1,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_cpu_common_exception_index,
        &vmstate_cpu_common_crash_occurred,
        NULL
    }
};
CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }
    return NULL;
}
#if !defined(CONFIG_USER_ONLY)
void cpu_address_space_init(CPUState *cpu, AddressSpace *as, int asidx)
{
    CPUAddressSpace *newas;

    /* Target code should have set num_ases before calling us */
    assert(asidx < cpu->num_ases);

    if (asidx == 0) {
        /* address space 0 gets the convenience alias */
        cpu->as = as;
    }

    /* KVM cannot currently support multiple address spaces. */
    assert(asidx == 0 || !kvm_enabled());

    if (!cpu->cpu_ases) {
        cpu->cpu_ases = g_new0(CPUAddressSpace, cpu->num_ases);
    }

    newas = &cpu->cpu_ases[asidx];
    newas->cpu = cpu;
    newas->as = as;
    if (tcg_enabled()) {
        newas->tcg_as_listener.commit = tcg_commit;
        memory_listener_register(&newas->tcg_as_listener, as);
    }
}

AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx)
{
    /* Return the AddressSpace corresponding to the specified index */
    return cpu->cpu_ases[asidx].as;
}
#endif
#ifndef CONFIG_USER_ONLY
static DECLARE_BITMAP(cpu_index_map, MAX_CPUMASK_BITS);

static int cpu_get_free_index(Error **errp)
{
    int cpu = find_first_zero_bit(cpu_index_map, MAX_CPUMASK_BITS);

    if (cpu >= MAX_CPUMASK_BITS) {
        error_setg(errp, "Trying to use more CPUs than max of %d",
                   MAX_CPUMASK_BITS);
        return -1;
    }

    bitmap_set(cpu_index_map, cpu, 1);
    return cpu;
}

static void cpu_release_index(CPUState *cpu)
{
    bitmap_clear(cpu_index_map, cpu->cpu_index, 1);
}
#else

static int cpu_get_free_index(Error **errp)
{
    CPUState *some_cpu;
    int cpu_index = 0;

    CPU_FOREACH(some_cpu) {
        cpu_index++;
    }
    return cpu_index;
}

static void cpu_release_index(CPUState *cpu)
{
}
#endif
void cpu_exec_exit(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    if (cpu->cpu_index == -1) {
        /* cpu_index was never allocated by this @cpu or was already freed. */
        return;
    }

    QTAILQ_REMOVE(&cpus, cpu, node);
    cpu_release_index(cpu);

    if (cc->vmsd != NULL) {
        vmstate_unregister(NULL, cc->vmsd, cpu);
    }
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_unregister(NULL, &vmstate_cpu_common, cpu);
    }
}
void cpu_exec_init(CPUState *cpu, Error **errp)
{
    CPUClass *cc ATTRIBUTE_UNUSED = CPU_GET_CLASS(cpu);
    Error *local_err = NULL;

#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();

    /* This is a softmmu CPU object, so create a property for it
     * so users can wire up its memory. (This can't go in qom/cpu.c
     * because that file is compiled only once for both user-mode
     * and system builds.) The default if no link is set up is to use
     * the system address space.
     */
    object_property_add_link(OBJECT(cpu), "memory", TYPE_MEMORY_REGION,
                             (Object **)&cpu->memory,
                             qdev_prop_allow_set_link_before_realize,
                             OBJ_PROP_LINK_UNREF_ON_RELEASE,
                             &error_abort);
    cpu->memory = system_memory;
    object_ref(OBJECT(cpu->memory));
#endif

    cpu->cpu_index = cpu_get_free_index(&local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);

#ifndef CONFIG_USER_ONLY
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu->cpu_index, &vmstate_cpu_common, cpu);
    }
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu->cpu_index, cc->vmsd, cpu);
    }
#endif
}
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    MemTxAttrs attrs;
    hwaddr phys = cpu_get_phys_page_attrs_debug(cpu, pc, &attrs);
    int asidx = cpu_asidx_from_attrs(cpu, attrs);

    tb_invalidate_phys_addr(cpu->cpu_ases[asidx].as,
                            phys | (pc & ~TARGET_PAGE_MASK));
}
#endif
#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
}

int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    return -ENOSYS;
}

void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
}

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    CPUWatchpoint *wp;

    /* forbid ranges which are empty or run off the end of the address space */
    if (len == 0 || (addr + len - 1) < addr) {
        error_report("tried to set invalid watchpoint at %"
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
    }

    tlb_flush_page(cpu, addr);

    return 0;
}
/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (addr == wp->vaddr && len == wp->len
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);

    tlb_flush_page(cpu, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
        }
    }
}
/* Return true if this watchpoint address matches the specified
 * access (ie the address range covered by the watchpoint overlaps
 * partially or completely with the address range covered by the
 * access).
 */
static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
                                                  vaddr addr,
                                                  vaddr len)
{
    /* We know the lengths are non-zero, but a little caution is
     * required to avoid errors in the case where the range ends
     * exactly at the top of the address space and so addr + len
     * wraps round to zero.
     */
    vaddr wpend = wp->vaddr + wp->len - 1;
    vaddr addrend = addr + len - 1;

    return !(addr > wpend || wp->vaddr > addrend);
}
/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
{
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
    }

    breakpoint_invalidate(cpu, pc);

    return 0;
}
870 int cpu_breakpoint_remove(CPUState
*cpu
, vaddr pc
, int flags
)
874 QTAILQ_FOREACH(bp
, &cpu
->breakpoints
, entry
) {
875 if (bp
->pc
== pc
&& bp
->flags
== flags
) {
876 cpu_breakpoint_remove_by_ref(cpu
, bp
);
883 /* Remove a specific breakpoint by reference. */
884 void cpu_breakpoint_remove_by_ref(CPUState
*cpu
, CPUBreakpoint
*breakpoint
)
886 QTAILQ_REMOVE(&cpu
->breakpoints
, breakpoint
, entry
);
888 breakpoint_invalidate(cpu
, breakpoint
->pc
);
893 /* Remove all matching breakpoints. */
894 void cpu_breakpoint_remove_all(CPUState
*cpu
, int mask
)
896 CPUBreakpoint
*bp
, *next
;
898 QTAILQ_FOREACH_SAFE(bp
, &cpu
->breakpoints
, entry
, next
) {
899 if (bp
->flags
& mask
) {
900 cpu_breakpoint_remove_by_ref(cpu
, bp
);
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(cpu);
        }
    }
}
void cpu_abort(CPUState *cpu, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_separate()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;

        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}
#if !defined(CONFIG_USER_ONLY)
/* Called from RCU critical section */
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && addr - block->offset < block->max_length) {
        return block;
    }
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->max_length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    /* It is safe to write mru_block outside the iothread lock.  Even if the
     * block is concurrently removed from the list and reclaimed via
     * call_rcu(reclaim_ramblock, xxx), readers inside an RCU critical section
     * still see a valid block.
     *
     * atomic_rcu_set is not needed here.  The block was already published
     * when it was placed into the list.  Here we're just making an extra
     * copy of the pointer.
     */
    ram_list.mru_block = block;
    return block;
}
static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
    CPU_FOREACH(cpu) {
        tlb_reset_dirty(cpu, start1, length);
    }
}
/* Note: start and end must be within the same ram block.  */
bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
                                              ram_addr_t length,
                                              unsigned client)
{
    DirtyMemoryBlocks *blocks;
    unsigned long end, page;
    bool dirty = false;

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);

    while (page < end) {
        unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long num = MIN(end - page, DIRTY_MEMORY_BLOCK_SIZE - offset);

        dirty |= bitmap_test_and_clear_atomic(blocks->blocks[idx],
                                              offset, num);
        page += num;
    }

    if (dirty && tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
    }

    return dirty;
}
/* Called from RCU critical section */
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        iotlb = memory_region_get_ram_addr(section->mr) + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        AddressSpaceDispatch *d;

        d = atomic_rcu_read(&section->address_space->dispatch);
        iotlb = section - d->map.sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* defined(CONFIG_USER_ONLY) */
#if !defined(CONFIG_USER_ONLY)

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
                               qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this.  Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
{
    phys_mem_alloc = alloc;
}
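/*
 * Illustrative sketch (not from the original file): an accelerator with
 * special allocation requirements could install its own allocator early in
 * its setup code.  "my_accel_ram_alloc" and "my_accel_init" are hypothetical
 * names used only for illustration.
 */
#if 0
static void *my_accel_ram_alloc(size_t size, uint64_t *align)
{
    /* return size bytes of suitably aligned host memory, or NULL */
    return qemu_anon_ram_alloc(size, align);
}

static void my_accel_init(void)
{
    phys_mem_set_alloc(my_accel_ram_alloc);
}
#endif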
static uint16_t phys_section_add(PhysPageMap *map,
                                 MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(map->sections_nb < TARGET_PAGE_SIZE);

    if (map->sections_nb == map->sections_nb_alloc) {
        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
        map->sections = g_renew(MemoryRegionSection, map->sections,
                                map->sections_nb_alloc);
    }
    map->sections[map->sections_nb] = *section;
    memory_region_ref(section->mr);
    return map->sections_nb++;
}

static void phys_section_destroy(MemoryRegion *mr)
{
    bool have_sub_page = mr->subpage;

    memory_region_unref(mr);

    if (have_sub_page) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        object_unref(OBJECT(&subpage->iomem));
        g_free(subpage);
    }
}

static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->sections);
}
static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
                                                   d->map.nodes, d->map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.address_space = d->as;
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&d->map, &subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end,
                     phys_section_add(&d->map, section));
}
static void register_multipage(AddressSpaceDispatch *d,
                               MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(&d->map, section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,
                                                    TARGET_PAGE_BITS));

    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
}
static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = as->next_dispatch;
    MemoryRegionSection now = *section, remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
                        - now.offset_within_address_space;

        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(d, &now);
    } else {
        now.size = int128_zero();
    }
    while (int128_ne(remain.size, now.size)) {
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);
        now = remain;
        if (int128_lt(remain.size, page_size)) {
            register_subpage(d, &now);
        } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
            now.size = page_size;
            register_subpage(d, &now);
        } else {
            now.size = int128_and(now.size, int128_neg(page_size));
            register_multipage(d, &now);
        }
    }
}
void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled()) {
        kvm_flush_coalesced_mmio_buffer();
    }
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}
static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path,
                            Error **errp)
{
    bool unlink_on_error = false;
    char *filename;
    char *sanitized_name;
    char *c;
    void *area;
    int fd;
    int64_t page_size;

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        error_setg(errp,
                   "host lacks kvm mmu notifiers, -mem-path unsupported");
        return NULL;
    }

    for (;;) {
        fd = open(path, O_RDWR);
        if (fd >= 0) {
            /* @path names an existing file, use it */
            break;
        }
        if (errno == ENOENT) {
            /* @path names a file that doesn't exist, create it */
            fd = open(path, O_RDWR | O_CREAT | O_EXCL, 0644);
            if (fd >= 0) {
                unlink_on_error = true;
                break;
            }
        } else if (errno == EISDIR) {
            /* @path names a directory, create a file there */
            /* Make name safe to use with mkstemp by replacing '/' with '_'. */
            sanitized_name = g_strdup(memory_region_name(block->mr));
            for (c = sanitized_name; *c != '\0'; c++) {
                if (*c == '/') {
                    *c = '_';
                }
            }
            filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
                                       sanitized_name);
            g_free(sanitized_name);

            fd = mkstemp(filename);
            g_free(filename);
            if (fd >= 0) {
                unlink_on_error = true;
                break;
            }
        }
        if (errno != EEXIST && errno != EINTR) {
            error_setg_errno(errp, errno,
                             "can't open backing store %s for guest RAM",
                             path);
            goto error;
        }
        /*
         * Try again on EINTR and EEXIST.  The latter happens when
         * something else creates the file between our two open().
         */
    }

    page_size = qemu_fd_getpagesize(fd);
    block->mr->align = MAX(page_size, QEMU_VMALLOC_ALIGN);

    if (memory < page_size) {
        error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
                   "or larger than page size 0x%" PRIx64,
                   memory, page_size);
        goto error;
    }

    memory = ROUND_UP(memory, page_size);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory)) {
        perror("ftruncate");
    }

    area = qemu_ram_mmap(fd, memory, block->mr->align,
                         block->flags & RAM_SHARED);
    if (area == MAP_FAILED) {
        error_setg_errno(errp, errno,
                         "unable to map backing store for guest RAM");
        goto error;
    }

    os_mem_prealloc(fd, area, memory);

    block->fd = fd;
    return area;

error:
    if (unlink_on_error) {
        unlink(path);
    }
    return NULL;
}
/* Called with the ramlist lock held.  */
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    assert(size != 0); /* it would hand out same offset multiple times */

    if (QLIST_EMPTY_RCU(&ram_list.blocks)) {
        return 0;
    }

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->max_length;

        QLIST_FOREACH_RCU(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}

ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        last = MAX(last, block->offset + block->max_length);
    }
    return last;
}

static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
{
    int ret;

    /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
    if (!machine_dump_guest_core(current_machine)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                            "but dump_guest_core=off specified\n");
        }
    }
}
const char *qemu_ram_get_idstr(RAMBlock *rb)
{
    return rb->idstr;
}

/* Called with iothread lock held.  */
void qemu_ram_set_idstr(RAMBlock *new_block, const char *name, DeviceState *dev)
{
    RAMBlock *block;

    assert(!new_block->idstr[0]);

    if (dev) {
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (block != new_block &&
            !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
}

/* Called with iothread lock held.  */
void qemu_ram_unset_idstr(RAMBlock *block)
{
    /* FIXME: arch_init.c assumes that this is not called throughout
     * migration.  Ignore the problem since hot-unplug during migration
     * does not work anyway.
     */
    memset(block->idstr, 0, sizeof(block->idstr));
}

static int memory_try_enable_merging(void *addr, size_t len)
{
    if (!machine_mem_merge(current_machine)) {
        /* disabled by the user */
        return 0;
    }

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
}
/* Only legal before guest might have detected the memory size: e.g. on
 * incoming migration, or right after reset.
 *
 * As the memory core doesn't know how the memory is accessed, it is up to the
 * resize callback to update device state and/or add assertions to detect
 * misuse, if necessary.
 */
int qemu_ram_resize(RAMBlock *block, ram_addr_t newsize, Error **errp)
{
    newsize = HOST_PAGE_ALIGN(newsize);

    if (block->used_length == newsize) {
        return 0;
    }

    if (!(block->flags & RAM_RESIZEABLE)) {
        error_setg_errno(errp, EINVAL,
                         "Length mismatch: %s: 0x" RAM_ADDR_FMT
                         " in != 0x" RAM_ADDR_FMT, block->idstr,
                         newsize, block->used_length);
        return -EINVAL;
    }

    if (block->max_length < newsize) {
        error_setg_errno(errp, EINVAL,
                         "Length too large: %s: 0x" RAM_ADDR_FMT
                         " > 0x" RAM_ADDR_FMT, block->idstr,
                         newsize, block->max_length);
        return -EINVAL;
    }

    cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
    block->used_length = newsize;
    cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
                                        DIRTY_CLIENTS_ALL);
    memory_region_set_size(block->mr, newsize);
    if (block->resized) {
        block->resized(block->idstr, newsize, block->host);
    }
    return 0;
}
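/*
 * Illustrative sketch (not from the original file): a device owning a
 * resizeable block would typically grow it on incoming migration.  The block
 * pointer and the new size here are hypothetical.
 */
#if 0
    Error *err = NULL;

    if (qemu_ram_resize(my_block, 512 * 1024 * 1024, &err) < 0) {
        error_report_err(err);
    }
#endif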
/* Called with ram_list.mutex held */
static void dirty_memory_extend(ram_addr_t old_ram_size,
                                ram_addr_t new_ram_size)
{
    ram_addr_t old_num_blocks = DIV_ROUND_UP(old_ram_size,
                                             DIRTY_MEMORY_BLOCK_SIZE);
    ram_addr_t new_num_blocks = DIV_ROUND_UP(new_ram_size,
                                             DIRTY_MEMORY_BLOCK_SIZE);
    int i;

    /* Only need to extend if block count increased */
    if (new_num_blocks <= old_num_blocks) {
        return;
    }

    for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
        DirtyMemoryBlocks *old_blocks;
        DirtyMemoryBlocks *new_blocks;
        int j;

        old_blocks = atomic_rcu_read(&ram_list.dirty_memory[i]);
        new_blocks = g_malloc(sizeof(*new_blocks) +
                              sizeof(new_blocks->blocks[0]) * new_num_blocks);

        if (old_num_blocks) {
            memcpy(new_blocks->blocks, old_blocks->blocks,
                   old_num_blocks * sizeof(old_blocks->blocks[0]));
        }

        for (j = old_num_blocks; j < new_num_blocks; j++) {
            new_blocks->blocks[j] = bitmap_new(DIRTY_MEMORY_BLOCK_SIZE);
        }

        atomic_rcu_set(&ram_list.dirty_memory[i], new_blocks);

        if (old_blocks) {
            g_free_rcu(old_blocks, rcu);
        }
    }
}
*new_block
, Error
**errp
)
1574 RAMBlock
*last_block
= NULL
;
1575 ram_addr_t old_ram_size
, new_ram_size
;
1578 old_ram_size
= last_ram_offset() >> TARGET_PAGE_BITS
;
1580 qemu_mutex_lock_ramlist();
1581 new_block
->offset
= find_ram_offset(new_block
->max_length
);
1583 if (!new_block
->host
) {
1584 if (xen_enabled()) {
1585 xen_ram_alloc(new_block
->offset
, new_block
->max_length
,
1586 new_block
->mr
, &err
);
1588 error_propagate(errp
, err
);
1589 qemu_mutex_unlock_ramlist();
1593 new_block
->host
= phys_mem_alloc(new_block
->max_length
,
1594 &new_block
->mr
->align
);
1595 if (!new_block
->host
) {
1596 error_setg_errno(errp
, errno
,
1597 "cannot set up guest memory '%s'",
1598 memory_region_name(new_block
->mr
));
1599 qemu_mutex_unlock_ramlist();
1602 memory_try_enable_merging(new_block
->host
, new_block
->max_length
);
1606 new_ram_size
= MAX(old_ram_size
,
1607 (new_block
->offset
+ new_block
->max_length
) >> TARGET_PAGE_BITS
);
1608 if (new_ram_size
> old_ram_size
) {
1609 migration_bitmap_extend(old_ram_size
, new_ram_size
);
1610 dirty_memory_extend(old_ram_size
, new_ram_size
);
1612 /* Keep the list sorted from biggest to smallest block. Unlike QTAILQ,
1613 * QLIST (which has an RCU-friendly variant) does not have insertion at
1614 * tail, so save the last element in last_block.
1616 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1618 if (block
->max_length
< new_block
->max_length
) {
1623 QLIST_INSERT_BEFORE_RCU(block
, new_block
, next
);
1624 } else if (last_block
) {
1625 QLIST_INSERT_AFTER_RCU(last_block
, new_block
, next
);
1626 } else { /* list is empty */
1627 QLIST_INSERT_HEAD_RCU(&ram_list
.blocks
, new_block
, next
);
1629 ram_list
.mru_block
= NULL
;
1631 /* Write list before version */
1634 qemu_mutex_unlock_ramlist();
1636 cpu_physical_memory_set_dirty_range(new_block
->offset
,
1637 new_block
->used_length
,
1640 if (new_block
->host
) {
1641 qemu_ram_setup_dump(new_block
->host
, new_block
->max_length
);
1642 qemu_madvise(new_block
->host
, new_block
->max_length
, QEMU_MADV_HUGEPAGE
);
1643 qemu_madvise(new_block
->host
, new_block
->max_length
, QEMU_MADV_DONTFORK
);
1644 if (kvm_enabled()) {
1645 kvm_setup_guest_memory(new_block
->host
, new_block
->max_length
);
1651 RAMBlock
*qemu_ram_alloc_from_file(ram_addr_t size
, MemoryRegion
*mr
,
1652 bool share
, const char *mem_path
,
1655 RAMBlock
*new_block
;
1656 Error
*local_err
= NULL
;
1658 if (xen_enabled()) {
1659 error_setg(errp
, "-mem-path not supported with Xen");
1663 if (phys_mem_alloc
!= qemu_anon_ram_alloc
) {
1665 * file_ram_alloc() needs to allocate just like
1666 * phys_mem_alloc, but we haven't bothered to provide
1670 "-mem-path not supported with this accelerator");
1674 size
= HOST_PAGE_ALIGN(size
);
1675 new_block
= g_malloc0(sizeof(*new_block
));
1677 new_block
->used_length
= size
;
1678 new_block
->max_length
= size
;
1679 new_block
->flags
= share
? RAM_SHARED
: 0;
1680 new_block
->host
= file_ram_alloc(new_block
, size
,
1682 if (!new_block
->host
) {
1687 ram_block_add(new_block
, &local_err
);
1690 error_propagate(errp
, local_err
);
1698 RAMBlock
*qemu_ram_alloc_internal(ram_addr_t size
, ram_addr_t max_size
,
1699 void (*resized
)(const char*,
1702 void *host
, bool resizeable
,
1703 MemoryRegion
*mr
, Error
**errp
)
1705 RAMBlock
*new_block
;
1706 Error
*local_err
= NULL
;
1708 size
= HOST_PAGE_ALIGN(size
);
1709 max_size
= HOST_PAGE_ALIGN(max_size
);
1710 new_block
= g_malloc0(sizeof(*new_block
));
1712 new_block
->resized
= resized
;
1713 new_block
->used_length
= size
;
1714 new_block
->max_length
= max_size
;
1715 assert(max_size
>= size
);
1717 new_block
->host
= host
;
1719 new_block
->flags
|= RAM_PREALLOC
;
1722 new_block
->flags
|= RAM_RESIZEABLE
;
1724 ram_block_add(new_block
, &local_err
);
1727 error_propagate(errp
, local_err
);
1733 RAMBlock
*qemu_ram_alloc_from_ptr(ram_addr_t size
, void *host
,
1734 MemoryRegion
*mr
, Error
**errp
)
1736 return qemu_ram_alloc_internal(size
, size
, NULL
, host
, false, mr
, errp
);
1739 RAMBlock
*qemu_ram_alloc(ram_addr_t size
, MemoryRegion
*mr
, Error
**errp
)
1741 return qemu_ram_alloc_internal(size
, size
, NULL
, NULL
, false, mr
, errp
);
1744 RAMBlock
*qemu_ram_alloc_resizeable(ram_addr_t size
, ram_addr_t maxsz
,
1745 void (*resized
)(const char*,
1748 MemoryRegion
*mr
, Error
**errp
)
1750 return qemu_ram_alloc_internal(size
, maxsz
, resized
, NULL
, true, mr
, errp
);
static void reclaim_ramblock(RAMBlock *block)
{
    if (block->flags & RAM_PREALLOC) {
        ;
    } else if (xen_enabled()) {
        xen_invalidate_map_cache_entry(block->host);
    } else if (block->fd >= 0) {
        qemu_ram_munmap(block->host, block->max_length);
    } else {
        qemu_anon_ram_free(block->host, block->max_length);
    }
    g_free(block);
}

void qemu_ram_free(RAMBlock *block)
{
    if (!block) {
        return;
    }

    qemu_mutex_lock_ramlist();
    QLIST_REMOVE_RCU(block, next);
    ram_list.mru_block = NULL;
    /* Write list before version */
    smp_wmb();
    ram_list.version++;
    call_rcu(block, reclaim_ramblock, rcu);
    qemu_mutex_unlock_ramlist();
}
#ifndef _WIN32
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->max_length) {
            vaddr = ramblock_ptr(block, offset);
            if (block->flags & RAM_PREALLOC) {
                ;
            } else if (xen_enabled()) {
                abort();
            } else {
                flags = MAP_FIXED;
                if (block->fd >= 0) {
                    flags |= (block->flags & RAM_SHARED ?
                              MAP_SHARED : MAP_PRIVATE);
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, block->fd, offset);
                } else {
                    /*
                     * Remap needs to match alloc.  Accelerators that
                     * set phys_mem_alloc never remap.  If they did,
                     * we'd need a remap hook here.
                     */
                    assert(phys_mem_alloc == qemu_anon_ram_alloc);

                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                memory_try_enable_merging(vaddr, length);
                qemu_ram_setup_dump(vaddr, length);
            }
        }
    }
}
#endif /* !_WIN32 */
/* Return a host pointer to ram allocated with qemu_ram_alloc.
 * This should not be used for general purpose DMA.  Use address_space_map
 * or address_space_rw instead. For local memory (e.g. video ram) that the
 * device owns, use memory_region_get_ram_ptr.
 *
 * Called within RCU critical section.
 */
void *qemu_map_ram_ptr(RAMBlock *ram_block, ram_addr_t addr)
{
    RAMBlock *block = ram_block;

    if (block == NULL) {
        block = qemu_get_ram_block(addr);
        addr -= block->offset;
    }

    if (xen_enabled() && block->host == NULL) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map until the end of the page.
         */
        if (block->offset == 0) {
            return xen_map_cache(addr, 0, 0);
        }

        block->host = xen_map_cache(block->offset, block->max_length, 1);
    }
    return ramblock_ptr(block, addr);
}
/* Return a host pointer to guest's ram. Similar to qemu_map_ram_ptr
 * but takes a size argument.
 *
 * Called within RCU critical section.
 */
static void *qemu_ram_ptr_length(RAMBlock *ram_block, ram_addr_t addr,
                                 hwaddr *size)
{
    RAMBlock *block = ram_block;

    if (block == NULL) {
        block = qemu_get_ram_block(addr);
        addr -= block->offset;
    }
    *size = MIN(*size, block->max_length - addr);

    if (xen_enabled() && block->host == NULL) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map the requested area.
         */
        if (block->offset == 0) {
            return xen_map_cache(addr, *size, 1);
        }

        block->host = xen_map_cache(block->offset, block->max_length, 1);
    }

    return ramblock_ptr(block, addr);
}
/*
 * Translates a host ptr back to a RAMBlock, a ram_addr and an offset
 * in that RAMBlock.
 *
 * ptr: Host pointer to look up
 * round_offset: If true round the result offset down to a page boundary
 * *offset: set to result offset within the RAMBlock
 *
 * Returns: RAMBlock (or NULL if not found)
 *
 * By the time this function returns, the returned pointer is not protected
 * by RCU anymore.  If the caller is not within an RCU critical section and
 * does not hold the iothread lock, it must have other means of protecting the
 * pointer, such as a reference to the region that includes the incoming
 * ram_addr.
 */
RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
                                   ram_addr_t *offset)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_enabled()) {
        ram_addr_t ram_addr;

        ram_addr = xen_ram_addr_from_mapcache(ptr);
        block = qemu_get_ram_block(ram_addr);
        if (block) {
            *offset = ram_addr - block->offset;
        }
        return block;
    }

    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && block->host && host - block->host < block->max_length) {
        goto found;
    }

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        /* This case happens when the block is not mapped. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->max_length) {
            goto found;
        }
    }

    return NULL;

found:
    *offset = (host - block->host);
    if (round_offset) {
        *offset &= TARGET_PAGE_MASK;
    }
    return block;
}
/*
 * Finds the named RAMBlock
 *
 * name: The name of RAMBlock to find
 *
 * Returns: RAMBlock (or NULL if not found)
 */
RAMBlock *qemu_ram_block_by_name(const char *name)
{
    RAMBlock *block;

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (!strcmp(name, block->idstr)) {
            return block;
        }
    }

    return NULL;
}

/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
ram_addr_t qemu_ram_addr_from_host(void *ptr)
{
    RAMBlock *block;
    ram_addr_t offset;

    block = qemu_ram_block_from_host(ptr, false, &offset);
    if (!block) {
        return RAM_ADDR_INVALID;
    }

    return block->offset + offset;
}
/* Called within RCU critical section.  */
static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
                               uint64_t val, unsigned size)
{
    if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
        tb_invalidate_phys_page_fast(ram_addr, size);
    }
    switch (size) {
    case 1:
        stb_p(qemu_map_ram_ptr(NULL, ram_addr), val);
        break;
    case 2:
        stw_p(qemu_map_ram_ptr(NULL, ram_addr), val);
        break;
    case 4:
        stl_p(qemu_map_ram_ptr(NULL, ram_addr), val);
        break;
    default:
        abort();
    }
    /* Set both VGA and migration bits for simplicity and to remove
     * the notdirty callback faster.
     */
    cpu_physical_memory_set_dirty_range(ram_addr, size,
                                        DIRTY_CLIENTS_NOCODE);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (!cpu_physical_memory_is_clean(ram_addr)) {
        tlb_set_dirty(current_cpu, current_cpu->mem_io_vaddr);
    }
}

static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
                                 unsigned size, bool is_write)
{
    return is_write;
}

static const MemoryRegionOps notdirty_mem_ops = {
    .write = notdirty_mem_write,
    .valid.accepts = notdirty_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
{
    CPUState *cpu = current_cpu;
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUArchState *env = cpu->env_ptr;
    target_ulong pc, cs_base;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    uint32_t cpu_flags;

    if (cpu->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, len)
            && (wp->flags & flags)) {
            if (flags == BP_MEM_READ) {
                wp->flags |= BP_WATCHPOINT_HIT_READ;
            } else {
                wp->flags |= BP_WATCHPOINT_HIT_WRITE;
            }
            wp->hitaddr = vaddr;
            wp->hitattrs = attrs;
            if (!cpu->watchpoint_hit) {
                if (wp->flags & BP_CPU &&
                    !cc->debug_check_watchpoint(cpu, wp)) {
                    wp->flags &= ~BP_WATCHPOINT_HIT;
                    continue;
                }
                cpu->watchpoint_hit = wp;
                tb_check_watchpoint(cpu);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    cpu->exception_index = EXCP_DEBUG;
                    cpu_loop_exit(cpu);
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
                    cpu_loop_exit_noexc(cpu);
                }
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata,
                                  unsigned size, MemTxAttrs attrs)
{
    MemTxResult res;
    uint64_t data;
    int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
    AddressSpace *as = current_cpu->cpu_ases[asidx].as;

    check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ);
    switch (size) {
    case 1:
        data = address_space_ldub(as, addr, attrs, &res);
        break;
    case 2:
        data = address_space_lduw(as, addr, attrs, &res);
        break;
    case 4:
        data = address_space_ldl(as, addr, attrs, &res);
        break;
    default:
        abort();
    }
    *pdata = data;
    return res;
}

static MemTxResult watch_mem_write(void *opaque, hwaddr addr,
                                   uint64_t val, unsigned size,
                                   MemTxAttrs attrs)
{
    MemTxResult res;
    int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
    AddressSpace *as = current_cpu->cpu_ases[asidx].as;

    check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE);
    switch (size) {
    case 1:
        address_space_stb(as, addr, val, attrs, &res);
        break;
    case 2:
        address_space_stw(as, addr, val, attrs, &res);
        break;
    case 4:
        address_space_stl(as, addr, val, attrs, &res);
        break;
    default:
        abort();
    }
    return res;
}

static const MemoryRegionOps watch_mem_ops = {
    .read_with_attrs = watch_mem_read,
    .write_with_attrs = watch_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
                                unsigned len, MemTxAttrs attrs)
{
    subpage_t *subpage = opaque;
    uint8_t buf[8];
    MemTxResult res;

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
           subpage, len, addr);
#endif
    res = address_space_read(subpage->as, addr + subpage->base,
                             attrs, buf, len);
    if (res) {
        return res;
    }
    switch (len) {
    case 1:
        *data = ldub_p(buf);
        return MEMTX_OK;
    case 2:
        *data = lduw_p(buf);
        return MEMTX_OK;
    case 4:
        *data = ldl_p(buf);
        return MEMTX_OK;
    case 8:
        *data = ldq_p(buf);
        return MEMTX_OK;
    default:
        abort();
    }
}

static MemTxResult subpage_write(void *opaque, hwaddr addr,
                                 uint64_t value, unsigned len, MemTxAttrs attrs)
{
    subpage_t *subpage = opaque;
    uint8_t buf[8];

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx
           " value %"PRIx64"\n",
           __func__, subpage, len, addr, value);
#endif
    /* (the value is first stored into buf at the requested access width) */
    return address_space_write(subpage->as, addr + subpage->base,
                               attrs, buf, len);
}

static bool subpage_accepts(void *opaque, hwaddr addr,
                            unsigned len, bool is_write)
{
    subpage_t *subpage = opaque;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
           __func__, subpage, is_write ? 'w' : 'r', len, addr);
#endif

    return address_space_access_valid(subpage->as, addr + subpage->base,
                                      len, is_write);
}

static const MemoryRegionOps subpage_ops = {
    .read_with_attrs = subpage_read,
    .write_with_attrs = subpage_write,
    .impl.min_access_size = 1,
    .impl.max_access_size = 8,
    .valid.min_access_size = 1,
    .valid.max_access_size = 8,
    .valid.accepts = subpage_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE) {
        return -1;
    }
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
           __func__, mmio, start, end, idx, eidx, section);
#endif
    for (; idx <= eidx; idx++) {
        mmio->sub_section[idx] = section;
    }

    return 0;
}

static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
{
    subpage_t *mmio;

    mmio = g_malloc0(sizeof(subpage_t));

    mmio->as = as;
    mmio->base = base;
    memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
                          NULL, TARGET_PAGE_SIZE);
    mmio->iomem.subpage = true;
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
           mmio, base, TARGET_PAGE_SIZE);
#endif
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);

    return mmio;
}

static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
                              MemoryRegion *mr)
{
    MemoryRegionSection section = {
        .address_space = as,
        .mr = mr,
        .offset_within_address_space = 0,
        .offset_within_region = 0,
        .size = int128_2_64(),
    };

    return phys_section_add(map, &section);
}

MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index, MemTxAttrs attrs)
{
    int asidx = cpu_asidx_from_attrs(cpu, attrs);
    CPUAddressSpace *cpuas = &cpu->cpu_ases[asidx];
    AddressSpaceDispatch *d = atomic_rcu_read(&cpuas->memory_dispatch);
    MemoryRegionSection *sections = d->map.sections;

    return sections[index & ~TARGET_PAGE_MASK].mr;
}
static void io_mem_init(void)
{
    memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
    memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
                          NULL, UINT64_MAX);
    memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
                          NULL, UINT64_MAX);
    memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
                          NULL, UINT64_MAX);
}
static void mem_begin(MemoryListener *listener)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
    uint16_t n;

    n = dummy_section(&d->map, as, &io_mem_unassigned);
    assert(n == PHYS_SECTION_UNASSIGNED);
    n = dummy_section(&d->map, as, &io_mem_notdirty);
    assert(n == PHYS_SECTION_NOTDIRTY);
    n = dummy_section(&d->map, as, &io_mem_rom);
    assert(n == PHYS_SECTION_ROM);
    n = dummy_section(&d->map, as, &io_mem_watch);
    assert(n == PHYS_SECTION_WATCH);

    d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };

    as->next_dispatch = d;
}

static void address_space_dispatch_free(AddressSpaceDispatch *d)
{
    phys_sections_free(&d->map);
    g_free(d);
}

static void mem_commit(MemoryListener *listener)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *cur = as->dispatch;
    AddressSpaceDispatch *next = as->next_dispatch;

    phys_page_compact_all(next, next->map.nodes_nb);

    atomic_rcu_set(&as->dispatch, next);
    if (cur) {
        call_rcu(cur, address_space_dispatch_free, rcu);
    }
}

static void tcg_commit(MemoryListener *listener)
{
    CPUAddressSpace *cpuas;
    AddressSpaceDispatch *d;

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener);
    cpu_reloading_memory_map();
    /* The CPU and TLB are protected by the iothread lock.
     * We reload the dispatch pointer now because cpu_reloading_memory_map()
     * may have split the RCU critical section.
     */
    d = atomic_rcu_read(&cpuas->as->dispatch);
    cpuas->memory_dispatch = d;
    tlb_flush(cpuas->cpu, 1);
}
void address_space_init_dispatch(AddressSpace *as)
{
    as->dispatch = NULL;
    as->dispatch_listener = (MemoryListener) {
        .begin = mem_begin,
        .commit = mem_commit,
        .region_add = mem_add,
        .region_nop = mem_add,
    };
    memory_listener_register(&as->dispatch_listener, as);
}

void address_space_unregister(AddressSpace *as)
{
    memory_listener_unregister(&as->dispatch_listener);
}

void address_space_destroy_dispatch(AddressSpace *as)
{
    AddressSpaceDispatch *d = as->dispatch;

    atomic_rcu_set(&as->dispatch, NULL);
    if (d) {
        call_rcu(d, address_space_dispatch_free, rcu);
    }
}

static void memory_map_init(void)
{
    system_memory = g_malloc(sizeof(*system_memory));

    memory_region_init(system_memory, NULL, "system", UINT64_MAX);
    address_space_init(&address_space_memory, system_memory, "memory");

    system_io = g_malloc(sizeof(*system_io));
    memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
                          65536);
    address_space_init(&address_space_io, system_io, "I/O");
}

MemoryRegion *get_system_memory(void)
{
    return system_memory;
}

MemoryRegion *get_system_io(void)
{
    return system_io;
}
2427 /* physical memory access (slow version, mainly for debug) */
2428 #if defined(CONFIG_USER_ONLY)
2429 int cpu_memory_rw_debug(CPUState
*cpu
, target_ulong addr
,
2430 uint8_t *buf
, int len
, int is_write
)
2437 page
= addr
& TARGET_PAGE_MASK
;
2438 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
2441 flags
= page_get_flags(page
);
2442 if (!(flags
& PAGE_VALID
))
2445 if (!(flags
& PAGE_WRITE
))
2447 /* XXX: this code should not depend on lock_user */
2448 if (!(p
= lock_user(VERIFY_WRITE
, addr
, l
, 0)))
2451 unlock_user(p
, addr
, l
);
2453 if (!(flags
& PAGE_READ
))
2455 /* XXX: this code should not depend on lock_user */
2456 if (!(p
= lock_user(VERIFY_READ
, addr
, l
, 1)))
2459 unlock_user(p
, addr
, 0);
static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
                                     hwaddr length)
{
    uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
    addr += memory_region_get_ram_addr(mr);

    /* No early return if dirty_log_mask is or becomes 0, because
     * cpu_physical_memory_set_dirty_range will still call
     * xen_modified_memory.
     */
    if (dirty_log_mask) {
        dirty_log_mask =
            cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
    }
    if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
        tb_invalidate_phys_range(addr, addr + length);
        dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
    }
    cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
}

static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
{
    unsigned access_size_max = mr->ops->valid.max_access_size;

    /* Regions are assumed to support 1-4 byte accesses unless
       otherwise specified.  */
    if (access_size_max == 0) {
        access_size_max = 4;
    }

    /* Bound the maximum access by the alignment of the address.  */
    if (!mr->ops->impl.unaligned) {
        unsigned align_size_max = addr & -addr;
        if (align_size_max != 0 && align_size_max < access_size_max) {
            access_size_max = align_size_max;
        }
    }

    /* Don't attempt accesses larger than the maximum.  */
    if (l > access_size_max) {
        l = access_size_max;
    }
    l = pow2floor(l);

    return l;
}

static bool prepare_mmio_access(MemoryRegion *mr)
{
    bool unlocked = !qemu_mutex_iothread_locked();
    bool release_lock = false;

    if (unlocked && mr->global_locking) {
        qemu_mutex_lock_iothread();
        unlocked = false;
        release_lock = true;
    }
    if (mr->flush_coalesced_mmio) {
        if (unlocked) {
            qemu_mutex_lock_iothread();
        }
        qemu_flush_coalesced_mmio_buffer();
        if (unlocked) {
            qemu_mutex_unlock_iothread();
        }
    }

    return release_lock;
}
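/*
 * Illustrative caller pattern (a sketch mirroring the dispatch helpers
 * below): MMIO accesses are bracketed as
 *
 *     release_lock |= prepare_mmio_access(mr);
 *     ... memory_region_dispatch_read()/memory_region_dispatch_write() ...
 *     if (release_lock) {
 *         qemu_mutex_unlock_iothread();
 *         release_lock = false;
 *     }
 */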
/* Called within RCU critical section.  */
static MemTxResult address_space_write_continue(AddressSpace *as, hwaddr addr,
                                                MemTxAttrs attrs,
                                                const uint8_t *buf,
                                                int len, hwaddr addr1,
                                                hwaddr l, MemoryRegion *mr)
{
    uint8_t *ptr;
    uint64_t val;
    MemTxResult result = MEMTX_OK;
    bool release_lock = false;

    for (;;) {
        if (!memory_access_is_direct(mr, true)) {
            release_lock |= prepare_mmio_access(mr);
            l = memory_access_size(mr, l, addr1);
            /* XXX: could force current_cpu to NULL to avoid
               potential bugs */
            switch (l) {
            case 8:
                /* 64 bit write access */
                val = ldq_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 8,
                                                       attrs);
                break;
            case 4:
                /* 32 bit write access */
                val = ldl_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 4,
                                                       attrs);
                break;
            case 2:
                /* 16 bit write access */
                val = lduw_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 2,
                                                       attrs);
                break;
            case 1:
                /* 8 bit write access */
                val = ldub_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 1,
                                                       attrs);
                break;
            default:
                abort();
            }
        } else {
            /* RAM case */
            ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
            memcpy(ptr, buf, l);
            invalidate_and_set_dirty(mr, addr1, l);
        }

        if (release_lock) {
            qemu_mutex_unlock_iothread();
            release_lock = false;
        }

        len -= l;
        buf += l;
        addr += l;

        if (!len) {
            break;
        }

        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);
    }

    return result;
}
MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
                                const uint8_t *buf, int len)
{
    hwaddr l;
    hwaddr addr1;
    MemoryRegion *mr;
    MemTxResult result = MEMTX_OK;

    if (len > 0) {
        rcu_read_lock();
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);
        result = address_space_write_continue(as, addr, attrs, buf, len,
                                              addr1, l, mr);
        rcu_read_unlock();
    }

    return result;
}
/* Called within RCU critical section.  */
MemTxResult address_space_read_continue(AddressSpace *as, hwaddr addr,
                                        MemTxAttrs attrs, uint8_t *buf,
                                        int len, hwaddr addr1, hwaddr l,
                                        MemoryRegion *mr)
{
    uint8_t *ptr;
    uint64_t val;
    MemTxResult result = MEMTX_OK;
    bool release_lock = false;

    for (;;) {
        if (!memory_access_is_direct(mr, false)) {
            /* I/O case */
            release_lock |= prepare_mmio_access(mr);
            l = memory_access_size(mr, l, addr1);
            switch (l) {
            case 8:
                /* 64 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 8,
                                                      attrs);
                stq_p(buf, val);
                break;
            case 4:
                /* 32 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 4,
                                                      attrs);
                stl_p(buf, val);
                break;
            case 2:
                /* 16 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 2,
                                                      attrs);
                stw_p(buf, val);
                break;
            case 1:
                /* 8 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 1,
                                                      attrs);
                stb_p(buf, val);
                break;
            default:
                abort();
            }
        } else {
            /* RAM case */
            ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
            memcpy(buf, ptr, l);
        }

        if (release_lock) {
            qemu_mutex_unlock_iothread();
            release_lock = false;
        }

        len -= l;
        buf += l;
        addr += l;

        if (!len) {
            break;
        }

        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, false);
    }

    return result;
}
MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
                                    MemTxAttrs attrs, uint8_t *buf, int len)
{
    hwaddr l;
    hwaddr addr1;
    MemoryRegion *mr;
    MemTxResult result = MEMTX_OK;

    if (len > 0) {
        rcu_read_lock();
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, false);
        result = address_space_read_continue(as, addr, attrs, buf, len,
                                             addr1, l, mr);
        rcu_read_unlock();
    }

    return result;
}

MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
                             uint8_t *buf, int len, bool is_write)
{
    if (is_write) {
        return address_space_write(as, addr, attrs, (uint8_t *)buf, len);
    } else {
        return address_space_read(as, addr, attrs, (uint8_t *)buf, len);
    }
}

void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
                            int len, int is_write)
{
    address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
                     buf, len, is_write);
}
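/*
 * Illustrative usage sketch (not part of the original file): a device model
 * copying a 16-byte descriptor from guest memory at a hypothetical
 * guest-physical address "desc_gpa":
 *
 *     uint8_t desc[16];
 *     cpu_physical_memory_rw(desc_gpa, desc, sizeof(desc), 0);
 */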
enum write_rom_type {
    WRITE_DATA,
    FLUSH_CACHE,
};

static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
    hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
{
    hwaddr l;
    uint8_t *ptr;
    hwaddr addr1;
    MemoryRegion *mr;

    rcu_read_lock();
    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);

        if (!(memory_region_is_ram(mr) ||
              memory_region_is_romd(mr))) {
            l = memory_access_size(mr, l, addr1);
        } else {
            /* ROM/RAM case */
            ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
            switch (type) {
            case WRITE_DATA:
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(mr, addr1, l);
                break;
            case FLUSH_CACHE:
                flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
                break;
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
    rcu_read_unlock();
}

/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
                                   const uint8_t *buf, int len)
{
    cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
}
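/*
 * Illustrative usage sketch (not part of the original file): firmware
 * loaders use this to place a blob into ROM-backed regions that ordinary
 * writes would ignore, with hypothetical "rom_base", "blob" and "blob_size":
 *
 *     cpu_physical_memory_write_rom(&address_space_memory, rom_base,
 *                                   blob, blob_size);
 */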
void cpu_flush_icache_range(hwaddr start, int len)
{
    /*
     * This function should do the same thing as an icache flush that was
     * triggered from within the guest. For TCG we are always cache coherent,
     * so there is no need to flush anything. For KVM / Xen we need to flush
     * the host's instruction cache at least.
     */
    if (tcg_enabled()) {
        return;
    }

    cpu_physical_memory_write_rom_internal(&address_space_memory,
                                           start, NULL, len, FLUSH_CACHE);
}
static BounceBuffer bounce;

typedef struct MapClient {
    QEMUBH *bh;
    QLIST_ENTRY(MapClient) link;
} MapClient;

QemuMutex map_client_list_lock;
static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

static void cpu_unregister_map_client_do(MapClient *client)
{
    QLIST_REMOVE(client, link);
    g_free(client);
}

static void cpu_notify_map_clients_locked(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        qemu_bh_schedule(client->bh);
        cpu_unregister_map_client_do(client);
    }
}

void cpu_register_map_client(QEMUBH *bh)
{
    MapClient *client = g_malloc(sizeof(*client));

    qemu_mutex_lock(&map_client_list_lock);
    client->bh = bh;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    if (!atomic_read(&bounce.in_use)) {
        cpu_notify_map_clients_locked();
    }
    qemu_mutex_unlock(&map_client_list_lock);
}

void cpu_exec_init_all(void)
{
    qemu_mutex_init(&ram_list.mutex);
    io_mem_init();
    memory_map_init();
    qemu_mutex_init(&map_client_list_lock);
}

void cpu_unregister_map_client(QEMUBH *bh)
{
    MapClient *client;

    qemu_mutex_lock(&map_client_list_lock);
    QLIST_FOREACH(client, &map_client_list, link) {
        if (client->bh == bh) {
            cpu_unregister_map_client_do(client);
            break;
        }
    }
    qemu_mutex_unlock(&map_client_list_lock);
}

static void cpu_notify_map_clients(void)
{
    qemu_mutex_lock(&map_client_list_lock);
    cpu_notify_map_clients_locked();
    qemu_mutex_unlock(&map_client_list_lock);
}
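/*
 * Illustrative usage sketch (not part of the original file): a caller whose
 * address_space_map() returned NULL can arrange a retry via a hypothetical
 * bottom half "retry_bh":
 *
 *     cpu_register_map_client(retry_bh);
 *
 * The bottom half is scheduled once the bounce buffer is released again.
 */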
bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
{
    MemoryRegion *mr;
    hwaddr l, xlat;

    rcu_read_lock();
    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (!memory_access_is_direct(mr, is_write)) {
            l = memory_access_size(mr, l, addr);
            if (!memory_region_access_valid(mr, xlat, l, is_write)) {
                rcu_read_unlock();
                return false;
            }
        }

        len -= l;
        addr += l;
    }
    rcu_read_unlock();
    return true;
}
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *address_space_map(AddressSpace *as,
                        hwaddr addr,
                        hwaddr *plen,
                        bool is_write)
{
    hwaddr len = *plen;
    hwaddr done = 0;
    hwaddr l, xlat, base;
    MemoryRegion *mr, *this_mr;
    void *ptr;

    if (len == 0) {
        return NULL;
    }

    l = len;
    rcu_read_lock();
    mr = address_space_translate(as, addr, &xlat, &l, is_write);

    if (!memory_access_is_direct(mr, is_write)) {
        if (atomic_xchg(&bounce.in_use, true)) {
            rcu_read_unlock();
            return NULL;
        }
        /* Avoid unbounded allocations */
        l = MIN(l, TARGET_PAGE_SIZE);
        bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
        bounce.addr = addr;
        bounce.len = l;

        memory_region_ref(mr);
        bounce.mr = mr;
        if (!is_write) {
            address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
                               bounce.buffer, l);
        }

        rcu_read_unlock();
        *plen = l;
        return bounce.buffer;
    }

    base = xlat;

    for (;;) {
        len -= l;
        addr += l;
        done += l;
        if (len == 0) {
            break;
        }

        l = len;
        this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (this_mr != mr || xlat != base + done) {
            break;
        }
    }

    memory_region_ref(mr);
    *plen = done;
    ptr = qemu_ram_ptr_length(mr->ram_block, base, plen);
    rcu_read_unlock();

    return ptr;
}
/* Unmaps a memory region previously mapped by address_space_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         int is_write, hwaddr access_len)
{
    if (buffer != bounce.buffer) {
        MemoryRegion *mr;
        ram_addr_t addr1;

        mr = memory_region_from_host(buffer, &addr1);
        assert(mr != NULL);
        if (is_write) {
            invalidate_and_set_dirty(mr, addr1, access_len);
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        memory_region_unref(mr);
        return;
    }
    if (is_write) {
        address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
                            bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    memory_region_unref(bounce.mr);
    atomic_mb_set(&bounce.in_use, false);
    cpu_notify_map_clients();
}

void *cpu_physical_memory_map(hwaddr addr,
                              hwaddr *plen,
                              int is_write)
{
    return address_space_map(&address_space_memory, addr, plen, is_write);
}

void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                               int is_write, hwaddr access_len)
{
    return address_space_unmap(&address_space_memory, buffer, len,
                               is_write, access_len);
}
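/*
 * Illustrative usage sketch (not part of the original file): zero-copy
 * access to a guest buffer at a hypothetical address "gpa" of length "len":
 *
 *     hwaddr plen = len;
 *     void *host = cpu_physical_memory_map(gpa, &plen, 1);
 *     if (host) {
 *         memset(host, 0, plen);
 *         cpu_physical_memory_unmap(host, plen, 1, plen);
 *     }
 *
 * A NULL return means the bounce buffer is busy; cpu_register_map_client()
 * can be used to learn when a retry is likely to succeed.
 */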
/* warning: addr must be aligned */
static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
                                                  MemTxAttrs attrs,
                                                  MemTxResult *result,
                                                  enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, false);
    if (l < 4 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}
uint32_t address_space_ldl(AddressSpace *as, hwaddr addr,
                           MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_NATIVE_ENDIAN);
}

uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_LITTLE_ENDIAN);
}

uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_BIG_ENDIAN);
}

uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}
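/*
 * Illustrative usage sketch (not part of the original file): reading a
 * 32-bit little-endian register at a hypothetical guest-physical address
 * "reg_gpa":
 *
 *     uint32_t v = ldl_le_phys(&address_space_memory, reg_gpa);
 */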
/* warning: addr must be aligned */
static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
                                                  MemTxAttrs attrs,
                                                  MemTxResult *result,
                                                  enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 8;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 false);
    if (l < 8 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap64(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap64(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}
uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
                           MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_NATIVE_ENDIAN);
}

uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_LITTLE_ENDIAN);
}

uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_BIG_ENDIAN);
}

uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}
uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
                            MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t val;
    MemTxResult r;

    r = address_space_rw(as, addr, attrs, &val, 1, 0);
    if (result) {
        *result = r;
    }
    return val;
}

uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}
/* warning: addr must be aligned */
static inline uint32_t address_space_lduw_internal(AddressSpace *as,
                                                   hwaddr addr,
                                                   MemTxAttrs attrs,
                                                   MemTxResult *result,
                                                   enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 false);
    if (l < 2 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}
uint32_t address_space_lduw(AddressSpace *as, hwaddr addr,
                            MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_NATIVE_ENDIAN);
}

uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
                               MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_LITTLE_ENDIAN);
}

uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
                               MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_BIG_ENDIAN);
}

uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}
/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
                                MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    uint8_t dirty_log_mask;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

        r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
    } else {
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        stl_p(ptr, val);

        dirty_log_mask = memory_region_get_dirty_log_mask(mr);
        dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
        cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
                                            4, dirty_log_mask);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}

void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_notdirty(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
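/*
 * Illustrative usage sketch (not part of the original file): target code
 * tracking page-table updates can set a hypothetical "accessed_bit" in a PTE
 * without flagging the page for code invalidation:
 *
 *     stl_phys_notdirty(cs->as, pte_addr, pte | accessed_bit);
 *
 * where "cs", "pte_addr" and "pte" are hypothetical as well.
 */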
/* warning: addr must be aligned */
static inline void address_space_stl_internal(AddressSpace *as,
                                              hwaddr addr, uint32_t val,
                                              MemTxAttrs attrs,
                                              MemTxResult *result,
                                              enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(mr, addr1, 4);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}
void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stl_internal(as, addr, val, attrs, result,
                               DEVICE_NATIVE_ENDIAN);
}

void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stl_internal(as, addr, val, attrs, result,
                               DEVICE_LITTLE_ENDIAN);
}

void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stl_internal(as, addr, val, attrs, result,
                               DEVICE_BIG_ENDIAN);
}

void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t v = val;
    MemTxResult r;

    r = address_space_rw(as, addr, attrs, &v, 1, 1);
    if (result) {
        *result = r;
    }
}

void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
/* warning: addr must be aligned */
static inline void address_space_stw_internal(AddressSpace *as,
                                              hwaddr addr, uint32_t val,
                                              MemTxAttrs attrs,
                                              MemTxResult *result,
                                              enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, true);
    if (l < 2 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(mr, addr1, 2);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}
void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_NATIVE_ENDIAN);
}

void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_LITTLE_ENDIAN);
}

void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_BIG_ENDIAN);
}

void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;

    val = tswap64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;

    val = cpu_to_le64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;

    val = cpu_to_be64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
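/*
 * Illustrative usage sketch (not part of the original file): writing a
 * 64-bit big-endian descriptor word at a hypothetical guest-physical
 * address "desc_gpa":
 *
 *     stq_be_phys(&address_space_memory, desc_gpa, value);
 */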
/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

    while (len > 0) {
        int asidx;
        MemTxAttrs attrs;

        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_attrs_debug(cpu, page, &attrs);
        asidx = cpu_asidx_from_attrs(cpu, attrs);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write) {
            cpu_physical_memory_write_rom(cpu->cpu_ases[asidx].as,
                                          phys_addr, buf, l);
        } else {
            address_space_rw(cpu->cpu_ases[asidx].as, phys_addr,
                             MEMTXATTRS_UNSPECIFIED,
                             buf, l, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
/*
 * Allows code that needs to deal with migration bitmaps etc to still be built
 * target independent.
 */
size_t qemu_target_page_bits(void)
{
    return TARGET_PAGE_BITS;
}

/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool target_words_bigendian(void);
bool target_words_bigendian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}
#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegion *mr;
    hwaddr l = 1;
    bool res;

    rcu_read_lock();
    mr = address_space_translate(&address_space_memory,
                                 phys_addr, &phys_addr, &l, false);

    res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
    rcu_read_unlock();
    return res;
}
int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
{
    RAMBlock *block;
    int ret = 0;

    rcu_read_lock();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        ret = func(block->idstr, block->host, block->offset,
                   block->used_length, opaque);