 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu-common.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/boards.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "exec/cpu-all.h"
#include "qemu/rcu_queue.h"
#include "qemu/main-loop.h"
#include "translate-all.h"
#include "sysemu/replay.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"

#include "qemu/range.h"
#include "qemu/mmap-alloc.h"

//#define DEBUG_SUBPAGE
#if !defined(CONFIG_USER_ONLY)
/* ram_list is read under rcu_read_lock()/rcu_read_unlock().  Writes
 * are protected by the ramlist lock.
 */
RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC   (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED     (1 << 1)

/* Only a portion of RAM (used_length) is actually used, and migrated.
 * This used_length size can change across reboots.
 */
#define RAM_RESIZEABLE (1 << 2)

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside cpu_exec() */
__thread CPUState *current_cpu;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */

#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables.  */
#define ADDR_SPACE_BITS 64

#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)

typedef PhysPageEntry Node[P_L2_SIZE];
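
/* Illustration (a worked example assuming the elided P_L2_BITS is 9 and
 * TARGET_PAGE_BITS is 12): each level of the map consumes P_L2_BITS of the
 * guest physical address, so P_L2_LEVELS = ((64 - 12 - 1) / 9) + 1 = 6
 * levels are enough to cover a full 64-bit address space with 4 KiB pages.
 */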
typedef struct PhysPageMap {
    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb_alloc;
    MemoryRegionSection *sections;

struct AddressSpaceDispatch {
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    uint16_t sub_section[TARGET_PAGE_SIZE];

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;

/**
 * CPUAddressSpace: all the information a CPU needs about an AddressSpace
 * @cpu: the CPU whose AddressSpace this is
 * @as: the AddressSpace itself
 * @memory_dispatch: its dispatch pointer (cached, RCU protected)
 * @tcg_as_listener: listener for tracking changes to the AddressSpace
 */
struct CPUAddressSpace {
    struct AddressSpaceDispatch *memory_dispatch;
    MemoryListener tcg_as_listener;
#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);

static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
    ret = map->nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);
    e.skip = leaf ? 0 : 1;
    e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
    for (i = 0; i < P_L2_SIZE; ++i) {
        memcpy(&p[i], &e, sizeof(e));
static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map, level == 0);
    p = map->nodes[lp->ptr];
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
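
/* A caller registers a contiguous range by page index and page count; for
 * example register_multipage() further below effectively does
 *
 *     phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
 *
 * and phys_page_set_level() then populates the intermediate nodes on demand.
 */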
/* Compact a non-leaf page entry. Simply detect that the entry has a single child,
 * and update our entry so we can skip it and go directly to the destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
    unsigned valid_ptr = P_L2_SIZE;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
        phys_page_compact(&p[i], nodes, compacted);

    /* We can only compress if there's only one child. */
    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
        lp->skip += p[valid_ptr].skip;
static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
    DECLARE_BITMAP(compacted, nodes_nb);

    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes, compacted);

static inline bool section_covers_addr(const MemoryRegionSection *section,
    /* Memory topology clips a memory region to [0, 2^64); size.hi > 0 means
     * the section must cover the entire address space.
     */
    return section->size.hi ||
           range_covers_byte(section->offset_within_address_space,
                             section->size.lo, addr);

static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
    hwaddr index = addr >> TARGET_PAGE_BITS;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];

    if (section_covers_addr(&sections[lp.ptr], addr)) {
        return &sections[lp.ptr];
    return &sections[PHYS_SECTION_UNASSIGNED];
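
/* Typical lookup path (see address_space_lookup_region() below): the walk
 * starts from the root entry and the per-dispatch node/section arrays, e.g.
 *
 *     section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
 *
 * and falls back to the PHYS_SECTION_UNASSIGNED dummy section on a miss.
 */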
bool memory_region_is_unassigned(MemoryRegion *mr)
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;

/* Called from RCU critical section */
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        bool resolve_subpage)
    MemoryRegionSection *section;

    section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];

/* Called from RCU critical section */
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
    MemoryRegionSection *section;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    /* MMIO registers can be expected to perform full-width accesses based only
     * on their address, without considering adjacent registers that could
     * decode to completely different MemoryRegions.  When such registers
     * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
     * regions overlap wildly.  For this reason we cannot clamp the accesses
     *
     * If the length is small (as is the case for address_space_ldl/stl),
     * everything works fine.  If the incoming length is large, however,
     * the caller really has to do the clamping through memory_access_size.
     */
    if (memory_region_is_ram(mr)) {
        diff = int128_sub(section->size, int128_make64(addr));
        *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
/* Called from RCU critical section */
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
    MemoryRegionSection *section;

    AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
    section = address_space_translate_internal(d, addr, &addr, plen, true);

        if (!mr->iommu_ops) {
        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
        as = iotlb.target_as;

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        *plen = MIN(page, *plen);
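
/* Sketch of the expected calling pattern (callers must hold the RCU read
 * lock, as noted above; the variable names here are illustrative only):
 *
 *     rcu_read_lock();
 *     mr = address_space_translate(as, addr, &xlat, &len, is_write);
 *     ... access at most "len" bytes of "mr" starting at offset "xlat" ...
 *     rcu_read_unlock();
 */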
/* Called from RCU critical section */
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen)
    MemoryRegionSection *section;
    AddressSpaceDispatch *d = cpu->cpu_ases[asidx].memory_dispatch;

    section = address_space_translate_internal(d, addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;

static int cpu_common_pre_load(void *opaque)
    CPUState *cpu = opaque;

    cpu->exception_index = -1;

static bool cpu_common_exception_index_needed(void *opaque)
    CPUState *cpu = opaque;

    return tcg_enabled() && cpu->exception_index != -1;
static const VMStateDescription vmstate_cpu_common_exception_index = {
    .name = "cpu_common/exception_index",
    .minimum_version_id = 1,
    .needed = cpu_common_exception_index_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(exception_index, CPUState),
        VMSTATE_END_OF_LIST()

static bool cpu_common_crash_occurred_needed(void *opaque)
    CPUState *cpu = opaque;

    return cpu->crash_occurred;

static const VMStateDescription vmstate_cpu_common_crash_occurred = {
    .name = "cpu_common/crash_occurred",
    .minimum_version_id = 1,
    .needed = cpu_common_crash_occurred_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(crash_occurred, CPUState),
        VMSTATE_END_OF_LIST()

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .minimum_version_id = 1,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    .subsections = (const VMStateDescription*[]) {
        &vmstate_cpu_common_exception_index,
        &vmstate_cpu_common_crash_occurred,

CPUState *qemu_get_cpu(int index)
        if (cpu->cpu_index == index) {
#if !defined(CONFIG_USER_ONLY)
void cpu_address_space_init(CPUState *cpu, AddressSpace *as, int asidx)
    CPUAddressSpace *newas;

    /* Target code should have set num_ases before calling us */
    assert(asidx < cpu->num_ases);

    /* address space 0 gets the convenience alias */
    /* KVM cannot currently support multiple address spaces. */
    assert(asidx == 0 || !kvm_enabled());

    if (!cpu->cpu_ases) {
        cpu->cpu_ases = g_new0(CPUAddressSpace, cpu->num_ases);

    newas = &cpu->cpu_ases[asidx];
        newas->tcg_as_listener.commit = tcg_commit;
        memory_listener_register(&newas->tcg_as_listener, as);

AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx)
    /* Return the AddressSpace corresponding to the specified index */
    return cpu->cpu_ases[asidx].as;

#ifndef CONFIG_USER_ONLY
static DECLARE_BITMAP(cpu_index_map, MAX_CPUMASK_BITS);

static int cpu_get_free_index(Error **errp)
    int cpu = find_first_zero_bit(cpu_index_map, MAX_CPUMASK_BITS);

    if (cpu >= MAX_CPUMASK_BITS) {
        error_setg(errp, "Trying to use more CPUs than max of %d",
    bitmap_set(cpu_index_map, cpu, 1);

void cpu_exec_exit(CPUState *cpu)
    if (cpu->cpu_index == -1) {
        /* cpu_index was never allocated by this @cpu or was already freed. */
    bitmap_clear(cpu_index_map, cpu->cpu_index, 1);

static int cpu_get_free_index(Error **errp)
    CPU_FOREACH(some_cpu) {

void cpu_exec_exit(CPUState *cpu)
void cpu_exec_init(CPUState *cpu, Error **errp)
    CPUClass *cc = CPU_GET_CLASS(cpu);
    Error *local_err = NULL;

#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();

    /* This is a softmmu CPU object, so create a property for it
     * so users can wire up its memory. (This can't go in qom/cpu.c
     * because that file is compiled only once for both user-mode
     * and system builds.) The default if no link is set up is to use
     * the system address space.
     */
    object_property_add_link(OBJECT(cpu), "memory", TYPE_MEMORY_REGION,
                             (Object **)&cpu->memory,
                             qdev_prop_allow_set_link_before_realize,
                             OBJ_PROP_LINK_UNREF_ON_RELEASE,
    cpu->memory = system_memory;
    object_ref(OBJECT(cpu->memory));

#if defined(CONFIG_USER_ONLY)
    cpu_index = cpu->cpu_index = cpu_get_free_index(&local_err);
        error_propagate(errp, local_err);
#if defined(CONFIG_USER_ONLY)
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
    tb_invalidate_phys_page_range(pc, pc + 1, 0);

static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
    hwaddr phys = cpu_get_phys_page_attrs_debug(cpu, pc, &attrs);
    int asidx = cpu_asidx_from_attrs(cpu, attrs);

        tb_invalidate_phys_addr(cpu->cpu_ases[asidx].as,
                                phys | (pc & ~TARGET_PAGE_MASK));

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)

int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,

void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)

/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
    /* forbid ranges which are empty or run off the end of the address space */
    if (len == 0 || (addr + len - 1) < addr) {
        error_report("tried to set invalid watchpoint at %"
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
    wp = g_malloc(sizeof(*wp));

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);

    tlb_flush_page(cpu, addr);
/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (addr == wp->vaddr && len == wp->len
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(cpu, wp);

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
    QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);

    tlb_flush_page(cpu, watchpoint->vaddr);

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(cpu, wp);

/* Return true if this watchpoint address matches the specified
 * access (ie the address range covered by the watchpoint overlaps
 * partially or completely with the address range covered by the
static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
    /* We know the lengths are non-zero, but a little caution is
     * required to avoid errors in the case where the range ends
     * exactly at the top of the address space and so addr + len
     * wraps round to zero.
     */
    vaddr wpend = wp->vaddr + wp->len - 1;
    vaddr addrend = addr + len - 1;

    return !(addr > wpend || wp->vaddr > addrend);
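
/* Worked example of the wrap-around guard above: for a 4-byte watchpoint at
 * the very top of the address space, wp->vaddr + wp->len would wrap to 0,
 * but wpend = wp->vaddr + wp->len - 1 stays at the maximum address, so the
 * overlap test still behaves correctly.
 */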
/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
    bp = g_malloc(sizeof(*bp));

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);

    breakpoint_invalidate(cpu, pc);

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(cpu, bp);

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
    QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);

    breakpoint_invalidate(cpu, breakpoint->pc);

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(cpu, bp);

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
            kvm_update_guest_debug(cpu, 0);
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
void cpu_abort(CPUState *cpu, const char *fmt, ...)
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_separate()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
#if defined(CONFIG_USER_ONLY)
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);

#if !defined(CONFIG_USER_ONLY)
/* Called from RCU critical section */
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && addr - block->offset < block->max_length) {
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->max_length) {

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);

    /* It is safe to write mru_block outside the iothread lock.  This
     *     xxx removed from list
     *     call_rcu(reclaim_ramblock, xxx);
     *
     * atomic_rcu_set is not needed here.  The block was already published
     * when it was placed into the list.  Here we're just making an extra
     * copy of the pointer.
     */
    ram_list.mru_block = block;
static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
        tlb_reset_dirty(cpu, start1, length);

/* Note: start and end must be within the same ram block.  */
bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
    DirtyMemoryBlocks *blocks;
    unsigned long end, page;

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);

    while (page < end) {
        unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long num = MIN(end - page, DIRTY_MEMORY_BLOCK_SIZE - offset);

        dirty |= bitmap_test_and_clear_atomic(blocks->blocks[idx],

    if (dirty && tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
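
/* The dirty bitmap is split into DIRTY_MEMORY_BLOCK_SIZE-bit chunks, so for
 * a given page number:
 *
 *     idx    = page / DIRTY_MEMORY_BLOCK_SIZE;   // which bitmap chunk
 *     offset = page % DIRTY_MEMORY_BLOCK_SIZE;   // bit within that chunk
 *
 * which is exactly the arithmetic used in the loop above.
 */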
/* Called from RCU critical section */
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       hwaddr paddr, hwaddr xlat,
                                       target_ulong *address)
    if (memory_region_is_ram(section->mr)) {
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
            iotlb |= PHYS_SECTION_ROM;
        AddressSpaceDispatch *d;

        d = atomic_rcu_read(&section->address_space->dispatch);
        iotlb = section - d->map.sections;

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;

#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
    qemu_anon_ram_alloc;
/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this.  Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
    phys_mem_alloc = alloc;

static uint16_t phys_section_add(PhysPageMap *map,
                                 MemoryRegionSection *section)
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(map->sections_nb < TARGET_PAGE_SIZE);

    if (map->sections_nb == map->sections_nb_alloc) {
        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
        map->sections = g_renew(MemoryRegionSection, map->sections,
                                map->sections_nb_alloc);
    map->sections[map->sections_nb] = *section;
    memory_region_ref(section->mr);
    return map->sections_nb++;
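
/* The TARGET_PAGE_SIZE bound above exists because, as seen in
 * memory_region_section_get_iotlb() earlier, an iotlb entry is formed
 * roughly as (ram_addr & TARGET_PAGE_MASK) | section_number, so the section
 * number must fit entirely within the low, page-offset bits.
 */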
static void phys_section_destroy(MemoryRegion *mr)
    bool have_sub_page = mr->subpage;

    memory_region_unref(mr);

    if (have_sub_page) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        object_unref(OBJECT(&subpage->iomem));

static void phys_sections_free(PhysPageMap *map)
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    g_free(map->sections);

static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
    hwaddr base = section->offset_within_address_space
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
                                                   d->map.nodes, d->map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.address_space = d->as;
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&d->map, &subsection));
        subpage = container_of(existing->mr, subpage_t, iomem);
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end,
                     phys_section_add(&d->map, section));
static void register_multipage(AddressSpaceDispatch *d,
                               MemoryRegionSection *section)
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(&d->map, section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,

    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);

static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = as->next_dispatch;
    MemoryRegionSection now = *section, remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
                        - now.offset_within_address_space;

        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(d, &now);
        now.size = int128_zero();
    while (int128_ne(remain.size, now.size)) {
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);

        if (int128_lt(remain.size, page_size)) {
            register_subpage(d, &now);
        } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
            now.size = page_size;
            register_subpage(d, &now);
            now.size = int128_and(now.size, int128_neg(page_size));
            register_multipage(d, &now);
void qemu_flush_coalesced_mmio_buffer(void)
        kvm_flush_coalesced_mmio_buffer();

void qemu_mutex_lock_ramlist(void)
    qemu_mutex_lock(&ram_list.mutex);

void qemu_mutex_unlock_ramlist(void)
    qemu_mutex_unlock(&ram_list.mutex);

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC       0x958458f6

static long gethugepagesize(const char *path, Error **errp)
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

        error_setg_errno(errp, errno, "failed to get page size of file %s",

static void *file_ram_alloc(RAMBlock *block,
    char *sanitized_name;
    Error *local_err = NULL;

    hpagesize = gethugepagesize(path, &local_err);
        error_propagate(errp, local_err);
    block->mr->align = hpagesize;

    if (memory < hpagesize) {
        error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
                   "or larger than huge page size 0x%" PRIx64,
    if (kvm_enabled() && !kvm_has_sync_mmu()) {
                   "host lacks kvm mmu notifiers, -mem-path unsupported");

    if (!stat(path, &st) && S_ISDIR(st.st_mode)) {
        /* Make name safe to use with mkstemp by replacing '/' with '_'. */
        sanitized_name = g_strdup(memory_region_name(block->mr));
        for (c = sanitized_name; *c != '\0'; c++) {
        filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
        g_free(sanitized_name);
        fd = mkstemp(filename);
        fd = open(path, O_RDWR | O_CREAT, 0644);
        error_setg_errno(errp, errno,
                         "unable to create backing store for hugepages");
    memory = ROUND_UP(memory, hpagesize);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
    if (ftruncate(fd, memory)) {
        perror("ftruncate");

    area = qemu_ram_mmap(fd, memory, hpagesize, block->flags & RAM_SHARED);
    if (area == MAP_FAILED) {
        error_setg_errno(errp, errno,
                         "unable to map backing store for hugepages");
        os_mem_prealloc(fd, area, memory);
/* Called with the ramlist lock held.  */
static ram_addr_t find_ram_offset(ram_addr_t size)
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    assert(size != 0); /* it would hand out same offset multiple times */

    if (QLIST_EMPTY_RCU(&ram_list.blocks)) {
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->max_length;

        QLIST_FOREACH_RCU(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
        if (next - end >= size && next - end < mingap) {
            mingap = next - end;

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",

ram_addr_t last_ram_offset(void)
    ram_addr_t last = 0;

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        last = MAX(last, block->offset + block->max_length);
static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
    /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
    if (!machine_dump_guest_core(current_machine)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
            perror("qemu_madvise");
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                    "but dump_guest_core=off specified\n");

/* Called within an RCU critical section, or while the ramlist lock
static RAMBlock *find_ram_block(ram_addr_t addr)
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (block->offset == addr) {

const char *qemu_ram_get_idstr(RAMBlock *rb)

/* Called with iothread lock held.  */
void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
    RAMBlock *new_block, *block;

    new_block = find_ram_block(addr);
    assert(!new_block->idstr[0]);
        char *id = qdev_get_dev_path(dev);
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",

/* Called with iothread lock held.  */
void qemu_ram_unset_idstr(ram_addr_t addr)
    /* FIXME: arch_init.c assumes that this is not called throughout
     * migration.  Ignore the problem since hot-unplug during migration
     * does not work anyway.
     */
    block = find_ram_block(addr);
        memset(block->idstr, 0, sizeof(block->idstr));

static int memory_try_enable_merging(void *addr, size_t len)
    if (!machine_mem_merge(current_machine)) {
        /* disabled by the user */
    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
/* Only legal before guest might have detected the memory size: e.g. on
 * incoming migration, or right after reset.
 *
 * As the memory core doesn't know how the memory is accessed, it is up to the
 * resize callback to update device state and/or add assertions to detect
 * misuse, if necessary.
 */
int qemu_ram_resize(ram_addr_t base, ram_addr_t newsize, Error **errp)
    RAMBlock *block = find_ram_block(base);

    newsize = HOST_PAGE_ALIGN(newsize);

    if (block->used_length == newsize) {
    if (!(block->flags & RAM_RESIZEABLE)) {
        error_setg_errno(errp, EINVAL,
                         "Length mismatch: %s: 0x" RAM_ADDR_FMT
                         " in != 0x" RAM_ADDR_FMT, block->idstr,
                         newsize, block->used_length);
    if (block->max_length < newsize) {
        error_setg_errno(errp, EINVAL,
                         "Length too large: %s: 0x" RAM_ADDR_FMT
                         " > 0x" RAM_ADDR_FMT, block->idstr,
                         newsize, block->max_length);
    cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
    block->used_length = newsize;
    cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
    memory_region_set_size(block->mr, newsize);
    if (block->resized) {
        block->resized(block->idstr, newsize, block->host);
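
/* Illustrative use only (names hypothetical): resizing a RAM_RESIZEABLE
 * block that was created with qemu_ram_alloc_resizeable():
 *
 *     Error *err = NULL;
 *     qemu_ram_resize(block_base_addr, new_size_in_bytes, &err);
 *
 * Blocks without RAM_RESIZEABLE, or requests beyond max_length, fail with
 * EINVAL as shown above.
 */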
/* Called with ram_list.mutex held */
static void dirty_memory_extend(ram_addr_t old_ram_size,
                                ram_addr_t new_ram_size)
    ram_addr_t old_num_blocks = DIV_ROUND_UP(old_ram_size,
                                             DIRTY_MEMORY_BLOCK_SIZE);
    ram_addr_t new_num_blocks = DIV_ROUND_UP(new_ram_size,
                                             DIRTY_MEMORY_BLOCK_SIZE);

    /* Only need to extend if block count increased */
    if (new_num_blocks <= old_num_blocks) {
    for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
        DirtyMemoryBlocks *old_blocks;
        DirtyMemoryBlocks *new_blocks;

        old_blocks = atomic_rcu_read(&ram_list.dirty_memory[i]);
        new_blocks = g_malloc(sizeof(*new_blocks) +
                              sizeof(new_blocks->blocks[0]) * new_num_blocks);

        if (old_num_blocks) {
            memcpy(new_blocks->blocks, old_blocks->blocks,
                   old_num_blocks * sizeof(old_blocks->blocks[0]));
        for (j = old_num_blocks; j < new_num_blocks; j++) {
            new_blocks->blocks[j] = bitmap_new(DIRTY_MEMORY_BLOCK_SIZE);
        atomic_rcu_set(&ram_list.dirty_memory[i], new_blocks);
            g_free_rcu(old_blocks, rcu);
static void ram_block_add(RAMBlock *new_block, Error **errp)
    RAMBlock *last_block = NULL;
    ram_addr_t old_ram_size, new_ram_size;

    old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;

    qemu_mutex_lock_ramlist();
    new_block->offset = find_ram_offset(new_block->max_length);

    if (!new_block->host) {
        if (xen_enabled()) {
            xen_ram_alloc(new_block->offset, new_block->max_length,
                          new_block->mr, &err);
                error_propagate(errp, err);
                qemu_mutex_unlock_ramlist();
            new_block->host = phys_mem_alloc(new_block->max_length,
                                             &new_block->mr->align);
            if (!new_block->host) {
                error_setg_errno(errp, errno,
                                 "cannot set up guest memory '%s'",
                                 memory_region_name(new_block->mr));
                qemu_mutex_unlock_ramlist();
            memory_try_enable_merging(new_block->host, new_block->max_length);

    new_ram_size = MAX(old_ram_size,
                       (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS);
    if (new_ram_size > old_ram_size) {
        migration_bitmap_extend(old_ram_size, new_ram_size);
        dirty_memory_extend(old_ram_size, new_ram_size);
    /* Keep the list sorted from biggest to smallest block.  Unlike QTAILQ,
     * QLIST (which has an RCU-friendly variant) does not have insertion at
     * tail, so save the last element in last_block.
     */
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (block->max_length < new_block->max_length) {
        QLIST_INSERT_BEFORE_RCU(block, new_block, next);
    } else if (last_block) {
        QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
    } else { /* list is empty */
        QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
    ram_list.mru_block = NULL;

    /* Write list before version */
    qemu_mutex_unlock_ramlist();

    cpu_physical_memory_set_dirty_range(new_block->offset,
                                        new_block->used_length,

    if (new_block->host) {
        qemu_ram_setup_dump(new_block->host, new_block->max_length);
        qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
        qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
        if (kvm_enabled()) {
            kvm_setup_guest_memory(new_block->host, new_block->max_length);
RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
                                   bool share, const char *mem_path,
    RAMBlock *new_block;
    Error *local_err = NULL;

    if (xen_enabled()) {
        error_setg(errp, "-mem-path not supported with Xen");
    if (phys_mem_alloc != qemu_anon_ram_alloc) {
        /*
         * file_ram_alloc() needs to allocate just like
         * phys_mem_alloc, but we haven't bothered to provide
                   "-mem-path not supported with this accelerator");
    size = HOST_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));
    new_block->used_length = size;
    new_block->max_length = size;
    new_block->flags = share ? RAM_SHARED : 0;
    new_block->host = file_ram_alloc(new_block, size,
    if (!new_block->host) {
    ram_block_add(new_block, &local_err);
        error_propagate(errp, local_err);

RAMBlock *qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
                                  void (*resized)(const char*,
                                  void *host, bool resizeable,
                                  MemoryRegion *mr, Error **errp)
    RAMBlock *new_block;
    Error *local_err = NULL;

    size = HOST_PAGE_ALIGN(size);
    max_size = HOST_PAGE_ALIGN(max_size);
    new_block = g_malloc0(sizeof(*new_block));
    new_block->resized = resized;
    new_block->used_length = size;
    new_block->max_length = max_size;
    assert(max_size >= size);
    new_block->host = host;
        new_block->flags |= RAM_PREALLOC;
        new_block->flags |= RAM_RESIZEABLE;
    ram_block_add(new_block, &local_err);
        error_propagate(errp, local_err);
RAMBlock *qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                  MemoryRegion *mr, Error **errp)
    return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);

RAMBlock *qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
    return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);

RAMBlock *qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
                                    void (*resized)(const char*,
                                    MemoryRegion *mr, Error **errp)
    return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
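
/* All three wrappers above funnel into qemu_ram_alloc_internal(): a non-NULL
 * "host" marks the block RAM_PREALLOC (caller-provided memory), while the
 * "resizeable" flag selects RAM_RESIZEABLE with max_size as the upper bound.
 */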
static void reclaim_ramblock(RAMBlock *block)
    if (block->flags & RAM_PREALLOC) {
    } else if (xen_enabled()) {
        xen_invalidate_map_cache_entry(block->host);
    } else if (block->fd >= 0) {
        qemu_ram_munmap(block->host, block->max_length);
        qemu_anon_ram_free(block->host, block->max_length);

void qemu_ram_free(RAMBlock *block)
    qemu_mutex_lock_ramlist();
    QLIST_REMOVE_RCU(block, next);
    ram_list.mru_block = NULL;
    /* Write list before version */
    call_rcu(block, reclaim_ramblock, rcu);
    qemu_mutex_unlock_ramlist();

void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->max_length) {
            vaddr = ramblock_ptr(block, offset);
            if (block->flags & RAM_PREALLOC) {
            } else if (xen_enabled()) {
                if (block->fd >= 0) {
                    flags |= (block->flags & RAM_SHARED ?
                              MAP_SHARED : MAP_PRIVATE);
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, block->fd, offset);
                    /*
                     * Remap needs to match alloc.  Accelerators that
                     * set phys_mem_alloc never remap.  If they did,
                     * we'd need a remap hook here.
                     */
                    assert(phys_mem_alloc == qemu_anon_ram_alloc);

                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                memory_try_enable_merging(vaddr, length);
                qemu_ram_setup_dump(vaddr, length);
#endif /* !_WIN32 */
int qemu_get_ram_fd(ram_addr_t addr)
    block = qemu_get_ram_block(addr);

void qemu_set_ram_fd(ram_addr_t addr, int fd)
    block = qemu_get_ram_block(addr);

void *qemu_get_ram_block_host_ptr(ram_addr_t addr)
    block = qemu_get_ram_block(addr);
    ptr = ramblock_ptr(block, 0);

/* Return a host pointer to ram allocated with qemu_ram_alloc.
 * This should not be used for general purpose DMA.  Use address_space_map
 * or address_space_rw instead. For local memory (e.g. video ram) that the
 * device owns, use memory_region_get_ram_ptr.
 *
 * Called within RCU critical section.
 */
void *qemu_get_ram_ptr(RAMBlock *ram_block, ram_addr_t addr)
    RAMBlock *block = ram_block;

    if (block == NULL) {
        block = qemu_get_ram_block(addr);
    if (xen_enabled() && block->host == NULL) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map until the end of the page.
         */
        if (block->offset == 0) {
            return xen_map_cache(addr, 0, 0);
        block->host = xen_map_cache(block->offset, block->max_length, 1);
    return ramblock_ptr(block, addr - block->offset);

/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
 * but takes a size argument.
 *
 * Called within RCU critical section.
 */
static void *qemu_ram_ptr_length(RAMBlock *ram_block, ram_addr_t addr,
    RAMBlock *block = ram_block;
    ram_addr_t offset_inside_block;

    if (block == NULL) {
        block = qemu_get_ram_block(addr);
    offset_inside_block = addr - block->offset;
    *size = MIN(*size, block->max_length - offset_inside_block);

    if (xen_enabled() && block->host == NULL) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map the requested area.
         */
        if (block->offset == 0) {
            return xen_map_cache(addr, *size, 1);
        block->host = xen_map_cache(block->offset, block->max_length, 1);
    return ramblock_ptr(block, offset_inside_block);
/*
 * Translates a host ptr back to a RAMBlock, a ram_addr and an offset
 *
 * ptr: Host pointer to look up
 * round_offset: If true round the result offset down to a page boundary
 * *ram_addr: set to result ram_addr
 * *offset: set to result offset within the RAMBlock
 *
 * Returns: RAMBlock (or NULL if not found)
 *
 * By the time this function returns, the returned pointer is not protected
 * by RCU anymore.  If the caller is not within an RCU critical section and
 * does not hold the iothread lock, it must have other means of protecting the
 * pointer, such as a reference to the region that includes the incoming
 */
RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
                                   ram_addr_t *ram_addr,
    uint8_t *host = ptr;

    if (xen_enabled()) {
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
        block = qemu_get_ram_block(*ram_addr);
        *offset = (host - block->host);

    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && block->host && host - block->host < block->max_length) {
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        /* This case appears when the block is not mapped. */
        if (block->host == NULL) {
        if (host - block->host < block->max_length) {

    *offset = (host - block->host);
        *offset &= TARGET_PAGE_MASK;
    *ram_addr = block->offset + *offset;

/*
 * Finds the named RAMBlock
 *
 * name: The name of RAMBlock to find
 *
 * Returns: RAMBlock (or NULL if not found)
 */
RAMBlock *qemu_ram_block_by_name(const char *name)
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (!strcmp(name, block->idstr)) {

/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
    ram_addr_t offset; /* Not used */

    block = qemu_ram_block_from_host(ptr, false, ram_addr, &offset);

/* Called within RCU critical section.  */
static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
                               uint64_t val, unsigned size)
    if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
        tb_invalidate_phys_page_fast(ram_addr, size);
        stb_p(qemu_get_ram_ptr(NULL, ram_addr), val);
        stw_p(qemu_get_ram_ptr(NULL, ram_addr), val);
        stl_p(qemu_get_ram_ptr(NULL, ram_addr), val);
    /* Set both VGA and migration bits for simplicity and to remove
     * the notdirty callback faster.
     */
    cpu_physical_memory_set_dirty_range(ram_addr, size,
                                        DIRTY_CLIENTS_NOCODE);
    /* we remove the notdirty callback only if the code has been
    if (!cpu_physical_memory_is_clean(ram_addr)) {
        tlb_set_dirty(current_cpu, current_cpu->mem_io_vaddr);

static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
                                 unsigned size, bool is_write)

static const MemoryRegionOps notdirty_mem_ops = {
    .write = notdirty_mem_write,
    .valid.accepts = notdirty_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
    CPUState *cpu = current_cpu;
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUArchState *env = cpu->env_ptr;
    target_ulong pc, cs_base;

    if (cpu->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
    vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, len)
            && (wp->flags & flags)) {
            if (flags == BP_MEM_READ) {
                wp->flags |= BP_WATCHPOINT_HIT_READ;
                wp->flags |= BP_WATCHPOINT_HIT_WRITE;
            wp->hitaddr = vaddr;
            wp->hitattrs = attrs;
            if (!cpu->watchpoint_hit) {
                if (wp->flags & BP_CPU &&
                    !cc->debug_check_watchpoint(cpu, wp)) {
                    wp->flags &= ~BP_WATCHPOINT_HIT;
                cpu->watchpoint_hit = wp;
                tb_check_watchpoint(cpu);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    cpu->exception_index = EXCP_DEBUG;
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
                    cpu_resume_from_signal(cpu, NULL);
            wp->flags &= ~BP_WATCHPOINT_HIT;

/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata,
                                  unsigned size, MemTxAttrs attrs)
    int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
    AddressSpace *as = current_cpu->cpu_ases[asidx].as;

    check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ);
        data = address_space_ldub(as, addr, attrs, &res);
        data = address_space_lduw(as, addr, attrs, &res);
        data = address_space_ldl(as, addr, attrs, &res);
static MemTxResult watch_mem_write(void *opaque, hwaddr addr,
                                   uint64_t val, unsigned size,
    int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
    AddressSpace *as = current_cpu->cpu_ases[asidx].as;

    check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE);
        address_space_stb(as, addr, val, attrs, &res);
        address_space_stw(as, addr, val, attrs, &res);
        address_space_stl(as, addr, val, attrs, &res);

static const MemoryRegionOps watch_mem_ops = {
    .read_with_attrs = watch_mem_read,
    .write_with_attrs = watch_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,

static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
                                unsigned len, MemTxAttrs attrs)
    subpage_t *subpage = opaque;

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
           subpage, len, addr);
    res = address_space_read(subpage->as, addr + subpage->base,
        *data = ldub_p(buf);
        *data = lduw_p(buf);

static MemTxResult subpage_write(void *opaque, hwaddr addr,
                                 uint64_t value, unsigned len, MemTxAttrs attrs)
    subpage_t *subpage = opaque;

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx
           " value %"PRIx64"\n",
           __func__, subpage, len, addr, value);
    return address_space_write(subpage->as, addr + subpage->base,
static bool subpage_accepts(void *opaque, hwaddr addr,
                            unsigned len, bool is_write)
    subpage_t *subpage = opaque;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
           __func__, subpage, is_write ? 'w' : 'r', len, addr);
    return address_space_access_valid(subpage->as, addr + subpage->base,

static const MemoryRegionOps subpage_ops = {
    .read_with_attrs = subpage_read,
    .write_with_attrs = subpage_write,
    .impl.min_access_size = 1,
    .impl.max_access_size = 8,
    .valid.min_access_size = 1,
    .valid.max_access_size = 8,
    .valid.accepts = subpage_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
           __func__, mmio, start, end, idx, eidx, section);
    for (; idx <= eidx; idx++) {
        mmio->sub_section[idx] = section;
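
/* A subpage fans a single target page out to multiple sections: the low bits
 * of the address select an entry, e.g.
 *
 *     section = mmio->sub_section[SUBPAGE_IDX(addr)];
 *
 * subpage_init() below pre-fills every slot with PHYS_SECTION_UNASSIGNED.
 */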
static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
    mmio = g_malloc0(sizeof(subpage_t));

    memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
                          NULL, TARGET_PAGE_SIZE);
    mmio->iomem.subpage = true;
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
           mmio, base, TARGET_PAGE_SIZE);
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);

static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
    MemoryRegionSection section = {
        .address_space = as,
        .offset_within_address_space = 0,
        .offset_within_region = 0,
        .size = int128_2_64(),

    return phys_section_add(map, &section);

MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index, MemTxAttrs attrs)
    int asidx = cpu_asidx_from_attrs(cpu, attrs);
    CPUAddressSpace *cpuas = &cpu->cpu_ases[asidx];
    AddressSpaceDispatch *d = atomic_rcu_read(&cpuas->memory_dispatch);
    MemoryRegionSection *sections = d->map.sections;

    return sections[index & ~TARGET_PAGE_MASK].mr;

static void io_mem_init(void)
    memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
    memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
    memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
    memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
static void mem_begin(MemoryListener *listener)
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);

    n = dummy_section(&d->map, as, &io_mem_unassigned);
    assert(n == PHYS_SECTION_UNASSIGNED);
    n = dummy_section(&d->map, as, &io_mem_notdirty);
    assert(n == PHYS_SECTION_NOTDIRTY);
    n = dummy_section(&d->map, as, &io_mem_rom);
    assert(n == PHYS_SECTION_ROM);
    n = dummy_section(&d->map, as, &io_mem_watch);
    assert(n == PHYS_SECTION_WATCH);

    d->phys_map  = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };

    as->next_dispatch = d;

static void address_space_dispatch_free(AddressSpaceDispatch *d)
    phys_sections_free(&d->map);

static void mem_commit(MemoryListener *listener)
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *cur = as->dispatch;
    AddressSpaceDispatch *next = as->next_dispatch;

    phys_page_compact_all(next, next->map.nodes_nb);

    atomic_rcu_set(&as->dispatch, next);
        call_rcu(cur, address_space_dispatch_free, rcu);

static void tcg_commit(MemoryListener *listener)
    CPUAddressSpace *cpuas;
    AddressSpaceDispatch *d;

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener);
    cpu_reloading_memory_map();
    /* The CPU and TLB are protected by the iothread lock.
     * We reload the dispatch pointer now because cpu_reloading_memory_map()
     * may have split the RCU critical section.
     */
    d = atomic_rcu_read(&cpuas->as->dispatch);
    cpuas->memory_dispatch = d;
    tlb_flush(cpuas->cpu, 1);
void address_space_init_dispatch(AddressSpace *as)
    as->dispatch = NULL;
    as->dispatch_listener = (MemoryListener) {
        .commit = mem_commit,
        .region_add = mem_add,
        .region_nop = mem_add,
    memory_listener_register(&as->dispatch_listener, as);

void address_space_unregister(AddressSpace *as)
    memory_listener_unregister(&as->dispatch_listener);

void address_space_destroy_dispatch(AddressSpace *as)
    AddressSpaceDispatch *d = as->dispatch;

    atomic_rcu_set(&as->dispatch, NULL);
        call_rcu(d, address_space_dispatch_free, rcu);

static void memory_map_init(void)
    system_memory = g_malloc(sizeof(*system_memory));

    memory_region_init(system_memory, NULL, "system", UINT64_MAX);
    address_space_init(&address_space_memory, system_memory, "memory");

    system_io = g_malloc(sizeof(*system_io));
    memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
    address_space_init(&address_space_io, system_io, "I/O");

MemoryRegion *get_system_memory(void)
    return system_memory;

MemoryRegion *get_system_io(void)
2452 /* physical memory access (slow version, mainly for debug) */
2453 #if defined(CONFIG_USER_ONLY)
2454 int cpu_memory_rw_debug(CPUState
*cpu
, target_ulong addr
,
2455 uint8_t *buf
, int len
, int is_write
)
2462 page
= addr
& TARGET_PAGE_MASK
;
2463 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
2466 flags
= page_get_flags(page
);
2467 if (!(flags
& PAGE_VALID
))
2470 if (!(flags
& PAGE_WRITE
))
2472 /* XXX: this code should not depend on lock_user */
2473 if (!(p
= lock_user(VERIFY_WRITE
, addr
, l
, 0)))
2476 unlock_user(p
, addr
, l
);
2478 if (!(flags
& PAGE_READ
))
2480 /* XXX: this code should not depend on lock_user */
2481 if (!(p
= lock_user(VERIFY_READ
, addr
, l
, 1)))
2484 unlock_user(p
, addr
, 0);
2495 static void invalidate_and_set_dirty(MemoryRegion
*mr
, hwaddr addr
,
2498 uint8_t dirty_log_mask
= memory_region_get_dirty_log_mask(mr
);
2499 /* No early return if dirty_log_mask is or becomes 0, because
2500 * cpu_physical_memory_set_dirty_range will still call
2501 * xen_modified_memory.
2503 if (dirty_log_mask
) {
2505 cpu_physical_memory_range_includes_clean(addr
, length
, dirty_log_mask
);
2507 if (dirty_log_mask
& (1 << DIRTY_MEMORY_CODE
)) {
2508 tb_invalidate_phys_range(addr
, addr
+ length
);
2509 dirty_log_mask
&= ~(1 << DIRTY_MEMORY_CODE
);
2511 cpu_physical_memory_set_dirty_range(addr
, length
, dirty_log_mask
);
2514 static int memory_access_size(MemoryRegion
*mr
, unsigned l
, hwaddr addr
)
2516 unsigned access_size_max
= mr
->ops
->valid
.max_access_size
;
2518 /* Regions are assumed to support 1-4 byte accesses unless
2519 otherwise specified. */
2520 if (access_size_max
== 0) {
2521 access_size_max
= 4;
2524 /* Bound the maximum access by the alignment of the address. */
2525 if (!mr
->ops
->impl
.unaligned
) {
2526 unsigned align_size_max
= addr
& -addr
;
2527 if (align_size_max
!= 0 && align_size_max
< access_size_max
) {
2528 access_size_max
= align_size_max
;
2532 /* Don't attempt accesses larger than the maximum. */
2533 if (l
> access_size_max
) {
2534 l
= access_size_max
;
2541 static bool prepare_mmio_access(MemoryRegion
*mr
)
2543 bool unlocked
= !qemu_mutex_iothread_locked();
2544 bool release_lock
= false;
2546 if (unlocked
&& mr
->global_locking
) {
2547 qemu_mutex_lock_iothread();
2549 release_lock
= true;
2551 if (mr
->flush_coalesced_mmio
) {
2553 qemu_mutex_lock_iothread();
2555 qemu_flush_coalesced_mmio_buffer();
2557 qemu_mutex_unlock_iothread();
2561 return release_lock
;
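
/* Illustrative sketch, not part of the original file (guarded out of the
 * build): the calling convention the dispatch paths below follow around
 * prepare_mmio_access().  "do_one_mmio_read" is a made-up name; the 4-byte
 * size is an example assumption.
 */
#if 0
static MemTxResult do_one_mmio_read(MemoryRegion *mr, hwaddr addr1,
                                    uint64_t *val, MemTxAttrs attrs)
{
    bool release_lock = prepare_mmio_access(mr);   /* may take the BQL */
    MemTxResult r = memory_region_dispatch_read(mr, addr1, val, 4, attrs);

    if (release_lock) {
        qemu_mutex_unlock_iothread();              /* drop it only if we took it */
    }
    return r;
}
#endif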
/* Called within RCU critical section.  */
static MemTxResult address_space_write_continue(AddressSpace *as, hwaddr addr,
                                                MemTxAttrs attrs,
                                                const uint8_t *buf,
                                                int len, hwaddr addr1,
                                                hwaddr l, MemoryRegion *mr)
{
    uint8_t *ptr;
    uint64_t val;
    MemTxResult result = MEMTX_OK;
    bool release_lock = false;

    for (;;) {
        if (!memory_access_is_direct(mr, true)) {
            release_lock |= prepare_mmio_access(mr);
            l = memory_access_size(mr, l, addr1);
            /* XXX: could force current_cpu to NULL to avoid
               potential bugs */
            switch (l) {
            case 8:
                /* 64 bit write access */
                val = ldq_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 8,
                                                       attrs);
                break;
            case 4:
                /* 32 bit write access */
                val = ldl_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 4,
                                                       attrs);
                break;
            case 2:
                /* 16 bit write access */
                val = lduw_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 2,
                                                       attrs);
                break;
            case 1:
                /* 8 bit write access */
                val = ldub_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 1,
                                                       attrs);
                break;
            default:
                abort();
            }
        } else {
            addr1 += memory_region_get_ram_addr(mr);
            /* RAM case */
            ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
            memcpy(ptr, buf, l);
            invalidate_and_set_dirty(mr, addr1, l);
        }

        if (release_lock) {
            qemu_mutex_unlock_iothread();
            release_lock = false;
        }

        len -= l;
        buf += l;
        addr += l;

        if (!len) {
            break;
        }

        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);
    }

    return result;
}

MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
                                const uint8_t *buf, int len)
{
    hwaddr l;
    hwaddr addr1;
    MemoryRegion *mr;
    MemTxResult result = MEMTX_OK;

    if (len > 0) {
        rcu_read_lock();
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);
        result = address_space_write_continue(as, addr, attrs, buf, len,
                                              addr1, l, mr);
        rcu_read_unlock();
    }

    return result;
}
/* Called within RCU critical section.  */
MemTxResult address_space_read_continue(AddressSpace *as, hwaddr addr,
                                        MemTxAttrs attrs, uint8_t *buf,
                                        int len, hwaddr addr1, hwaddr l,
                                        MemoryRegion *mr)
{
    uint8_t *ptr;
    uint64_t val;
    MemTxResult result = MEMTX_OK;
    bool release_lock = false;

    for (;;) {
        if (!memory_access_is_direct(mr, false)) {
            /* I/O case */
            release_lock |= prepare_mmio_access(mr);
            l = memory_access_size(mr, l, addr1);
            switch (l) {
            case 8:
                /* 64 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 8,
                                                      attrs);
                stq_p(buf, val);
                break;
            case 4:
                /* 32 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 4,
                                                      attrs);
                stl_p(buf, val);
                break;
            case 2:
                /* 16 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 2,
                                                      attrs);
                stw_p(buf, val);
                break;
            case 1:
                /* 8 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 1,
                                                      attrs);
                stb_p(buf, val);
                break;
            default:
                abort();
            }
        } else {
            /* RAM case */
            ptr = qemu_get_ram_ptr(mr->ram_block,
                                   memory_region_get_ram_addr(mr) + addr1);
            memcpy(buf, ptr, l);
        }

        if (release_lock) {
            qemu_mutex_unlock_iothread();
            release_lock = false;
        }

        len -= l;
        buf += l;
        addr += l;

        if (!len) {
            break;
        }

        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, false);
    }

    return result;
}

MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
                                    MemTxAttrs attrs, uint8_t *buf, int len)
{
    hwaddr l;
    hwaddr addr1;
    MemoryRegion *mr;
    MemTxResult result = MEMTX_OK;

    if (len > 0) {
        rcu_read_lock();
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, false);
        result = address_space_read_continue(as, addr, attrs, buf, len,
                                             addr1, l, mr);
        rcu_read_unlock();
    }

    return result;
}

MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
                             uint8_t *buf, int len, bool is_write)
{
    if (is_write) {
        return address_space_write(as, addr, attrs, (uint8_t *)buf, len);
    } else {
        return address_space_read(as, addr, attrs, (uint8_t *)buf, len);
    }
}
void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
                            int len, int is_write)
{
    address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
                     buf, len, is_write);
}
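
/* Illustrative sketch, not part of the original file (guarded out of the
 * build): copying a few bytes out of and back into guest physical memory
 * through the legacy helper.  The address 0x1000 is an example assumption.
 */
#if 0
static void example_peek_poke(void)
{
    uint8_t buf[4];

    cpu_physical_memory_rw(0x1000, buf, sizeof(buf), 0);   /* read  */
    buf[0] ^= 0xff;
    cpu_physical_memory_rw(0x1000, buf, sizeof(buf), 1);   /* write */
}
#endif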
enum write_rom_type {
    WRITE_DATA,
    FLUSH_CACHE,
};

static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
    hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
{
    hwaddr l;
    uint8_t *ptr;
    hwaddr addr1;
    MemoryRegion *mr;

    rcu_read_lock();
    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);

        if (!(memory_region_is_ram(mr) ||
              memory_region_is_romd(mr))) {
            l = memory_access_size(mr, l, addr1);
        } else {
            addr1 += memory_region_get_ram_addr(mr);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
            switch (type) {
            case WRITE_DATA:
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(mr, addr1, l);
                break;
            case FLUSH_CACHE:
                flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
                break;
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
    rcu_read_unlock();
}

/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
                                   const uint8_t *buf, int len)
{
    cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
}

void cpu_flush_icache_range(hwaddr start, int len)
{
    /*
     * This function should do the same thing as an icache flush that was
     * triggered from within the guest. For TCG we are always cache coherent,
     * so there is no need to flush anything. For KVM / Xen we need to flush
     * the host's instruction cache at least.
     */
    if (tcg_enabled()) {
        return;
    }

    cpu_physical_memory_write_rom_internal(&address_space_memory,
                                           start, NULL, len, FLUSH_CACHE);
}
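
/* Illustrative sketch, not part of the original file (guarded out of the
 * build): how firmware-loading code might copy an image into guest memory
 * even when the destination is a ROM region.  The base address 0xfffc0000
 * and the blob parameters are example assumptions.
 */
#if 0
static void example_load_firmware(const uint8_t *blob, int blob_len)
{
    cpu_physical_memory_write_rom(&address_space_memory, 0xfffc0000,
                                  blob, blob_len);
}
#endif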
typedef struct {
    MemoryRegion *mr;
    void *buffer;
    hwaddr addr;
    hwaddr len;
    bool in_use;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    QEMUBH *bh;
    QLIST_ENTRY(MapClient) link;
} MapClient;

QemuMutex map_client_list_lock;
static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

static void cpu_unregister_map_client_do(MapClient *client)
{
    QLIST_REMOVE(client, link);
    g_free(client);
}

static void cpu_notify_map_clients_locked(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        qemu_bh_schedule(client->bh);
        cpu_unregister_map_client_do(client);
    }
}

void cpu_register_map_client(QEMUBH *bh)
{
    MapClient *client = g_malloc(sizeof(*client));

    qemu_mutex_lock(&map_client_list_lock);
    client->bh = bh;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    if (!atomic_read(&bounce.in_use)) {
        cpu_notify_map_clients_locked();
    }
    qemu_mutex_unlock(&map_client_list_lock);
}

void cpu_exec_init_all(void)
{
    qemu_mutex_init(&ram_list.mutex);
    io_mem_init();
    memory_map_init();
    qemu_mutex_init(&map_client_list_lock);
}

void cpu_unregister_map_client(QEMUBH *bh)
{
    MapClient *client;

    qemu_mutex_lock(&map_client_list_lock);
    QLIST_FOREACH(client, &map_client_list, link) {
        if (client->bh == bh) {
            cpu_unregister_map_client_do(client);
            break;
        }
    }
    qemu_mutex_unlock(&map_client_list_lock);
}

static void cpu_notify_map_clients(void)
{
    qemu_mutex_lock(&map_client_list_lock);
    cpu_notify_map_clients_locked();
    qemu_mutex_unlock(&map_client_list_lock);
}
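
/* Illustrative sketch, not part of the original file (guarded out of the
 * build): a device whose address_space_map() attempt failed (for example
 * because the single bounce buffer was busy) can register a bottom half to
 * be scheduled when a mapping slot frees up, then retry from the callback.
 * "retry_dma" and "dev" are made-up names.
 */
#if 0
static void retry_dma(void *opaque)
{
    /* re-issue the address_space_map() attempt here */
}

static void example_wait_for_map_slot(void *dev)
{
    QEMUBH *bh = qemu_bh_new(retry_dma, dev);

    cpu_register_map_client(bh);
}
#endif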
bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
{
    MemoryRegion *mr;
    hwaddr l, xlat;

    rcu_read_lock();
    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (!memory_access_is_direct(mr, is_write)) {
            l = memory_access_size(mr, l, addr);
            if (!memory_region_access_valid(mr, xlat, l, is_write)) {
                return false;
            }
        }

        len -= l;
        addr += l;
    }
    rcu_read_unlock();
    return true;
}
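
/* Illustrative sketch, not part of the original file (guarded out of the
 * build): probing whether a DMA window will accept a write before committing
 * to the transfer.  The wrapper name and parameters are example assumptions.
 */
#if 0
static bool example_can_dma(AddressSpace *as, hwaddr base, int size)
{
    return address_space_access_valid(as, base, size, true /* is_write */);
}
#endif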
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *address_space_map(AddressSpace *as,
                        hwaddr addr,
                        hwaddr *plen,
                        bool is_write)
{
    hwaddr len = *plen;
    hwaddr done = 0;
    hwaddr l, xlat, base;
    MemoryRegion *mr, *this_mr;
    ram_addr_t raddr;
    void *ptr;

    if (len == 0) {
        return NULL;
    }

    l = len;
    rcu_read_lock();
    mr = address_space_translate(as, addr, &xlat, &l, is_write);

    if (!memory_access_is_direct(mr, is_write)) {
        if (atomic_xchg(&bounce.in_use, true)) {
            rcu_read_unlock();
            return NULL;
        }
        /* Avoid unbounded allocations */
        l = MIN(l, TARGET_PAGE_SIZE);
        bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
        bounce.addr = addr;
        bounce.len = l;

        memory_region_ref(mr);
        bounce.mr = mr;
        if (!is_write) {
            address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
                               bounce.buffer, l);
        }

        rcu_read_unlock();
        *plen = l;
        return bounce.buffer;
    }

    base = xlat;
    raddr = memory_region_get_ram_addr(mr);

    for (;;) {
        len -= l;
        addr += l;
        done += l;
        if (len == 0) {
            break;
        }

        l = len;
        this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (this_mr != mr || xlat != base + done) {
            break;
        }
    }

    memory_region_ref(mr);
    *plen = done;
    ptr = qemu_ram_ptr_length(mr->ram_block, raddr + base, plen);
    rcu_read_unlock();

    return ptr;
}

/* Unmaps a memory region previously mapped by address_space_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         int is_write, hwaddr access_len)
{
    if (buffer != bounce.buffer) {
        MemoryRegion *mr;
        ram_addr_t addr1;

        mr = qemu_ram_addr_from_host(buffer, &addr1);
        assert(mr != NULL);
        if (is_write) {
            invalidate_and_set_dirty(mr, addr1, access_len);
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        memory_region_unref(mr);
        return;
    }
    if (is_write) {
        address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
                            bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    memory_region_unref(bounce.mr);
    atomic_mb_set(&bounce.in_use, false);
    cpu_notify_map_clients();
}
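
/* Illustrative sketch, not part of the original file (guarded out of the
 * build): the map -> use -> unmap pattern described in the comments above.
 * Fewer than "len" bytes may come back; a real caller must loop or fall back
 * to address_space_rw().  The helper name is a made-up example.
 */
#if 0
static void example_zero_guest_buffer(AddressSpace *as, hwaddr addr, hwaddr len)
{
    hwaddr mapped = len;
    void *host = address_space_map(as, addr, &mapped, true /* is_write */);

    if (!host) {
        return;                 /* resources exhausted, retry later */
    }
    memset(host, 0, mapped);    /* direct host access to guest RAM */
    address_space_unmap(as, host, mapped, true, mapped);
}
#endif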
void *cpu_physical_memory_map(hwaddr addr,
                              hwaddr *plen,
                              int is_write)
{
    return address_space_map(&address_space_memory, addr, plen, is_write);
}

void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                               int is_write, hwaddr access_len)
{
    return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
}
/* warning: addr must be aligned */
static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
                                                  MemTxAttrs attrs,
                                                  MemTxResult *result,
                                                  enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, false);
    if (l < 4 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(mr->ram_block,
                               (memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}

uint32_t address_space_ldl(AddressSpace *as, hwaddr addr,
                           MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_NATIVE_ENDIAN);
}

uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_LITTLE_ENDIAN);
}

uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_BIG_ENDIAN);
}

uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}
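
/* Illustrative sketch, not part of the original file (guarded out of the
 * build): reading a 32-bit little-endian field from guest memory, once with
 * the fire-and-forget helper and once with an explicit transaction result.
 * "desc_addr" is a made-up name.
 */
#if 0
static void example_read_desc(AddressSpace *as, hwaddr desc_addr)
{
    uint32_t flags = ldl_le_phys(as, desc_addr);          /* no error report */
    MemTxResult res;
    uint32_t checked = address_space_ldl_le(as, desc_addr,
                                            MEMTXATTRS_UNSPECIFIED, &res);

    (void)flags;
    (void)checked;
    if (res != MEMTX_OK) {
        /* handle the failed read */
    }
}
#endif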
/* warning: addr must be aligned */
static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
                                                  MemTxAttrs attrs,
                                                  MemTxResult *result,
                                                  enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 8;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 false);
    if (l < 8 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap64(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap64(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(mr->ram_block,
                               (memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}

uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
                           MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_NATIVE_ENDIAN);
}

uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_LITTLE_ENDIAN);
}

uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_BIG_ENDIAN);
}

uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}
uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
                            MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t val;
    MemTxResult r;

    r = address_space_rw(as, addr, attrs, &val, 1, 0);
    if (result) {
        *result = r;
    }
    return val;
}

uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}
/* warning: addr must be aligned */
static inline uint32_t address_space_lduw_internal(AddressSpace *as,
                                                   hwaddr addr,
                                                   MemTxAttrs attrs,
                                                   MemTxResult *result,
                                                   enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 false);
    if (l < 2 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(mr->ram_block,
                               (memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}

uint32_t address_space_lduw(AddressSpace *as, hwaddr addr,
                            MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_NATIVE_ENDIAN);
}

uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
                               MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_LITTLE_ENDIAN);
}

uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
                               MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_BIG_ENDIAN);
}

uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}
/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
                                MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    uint8_t dirty_log_mask;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

        r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
    } else {
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
        stl_p(ptr, val);

        dirty_log_mask = memory_region_get_dirty_log_mask(mr);
        dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
        cpu_physical_memory_set_dirty_range(addr1, 4, dirty_log_mask);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}

void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_notdirty(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
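
/* Illustrative sketch, not part of the original file (guarded out of the
 * build): a target MMU helper setting the "accessed" bit of a guest page
 * table entry.  Because the store must not mark the page dirty or invalidate
 * translated code, the _notdirty variant is used.  PTE_ACCESSED and
 * "pte_addr" are made-up example names and values.
 */
#if 0
#define PTE_ACCESSED 0x20

static void example_mark_pte_accessed(AddressSpace *as, hwaddr pte_addr)
{
    uint32_t pte = ldl_phys(as, pte_addr);

    stl_phys_notdirty(as, pte_addr, pte | PTE_ACCESSED);
}
#endif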
/* warning: addr must be aligned */
static inline void address_space_stl_internal(AddressSpace *as,
                                              hwaddr addr, uint32_t val,
                                              MemTxAttrs attrs,
                                              MemTxResult *result,
                                              enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(mr, addr1, 4);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}

void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stl_internal(as, addr, val, attrs, result,
                               DEVICE_NATIVE_ENDIAN);
}

void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stl_internal(as, addr, val, attrs, result,
                               DEVICE_LITTLE_ENDIAN);
}

void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stl_internal(as, addr, val, attrs, result,
                               DEVICE_BIG_ENDIAN);
}

void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t v = val;
    MemTxResult r;

    r = address_space_rw(as, addr, attrs, &v, 1, 1);
    if (result) {
        *result = r;
    }
}

void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
/* warning: addr must be aligned */
static inline void address_space_stw_internal(AddressSpace *as,
                                              hwaddr addr, uint32_t val,
                                              MemTxAttrs attrs,
                                              MemTxResult *result,
                                              enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, true);
    if (l < 2 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(mr, addr1, 2);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}

void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_NATIVE_ENDIAN);
}

void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_LITTLE_ENDIAN);
}

void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_BIG_ENDIAN);
}

void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;
    val = tswap64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;
    val = cpu_to_le64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;
    val = cpu_to_be64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

    while (len > 0) {
        int asidx;
        MemTxAttrs attrs;

        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_attrs_debug(cpu, page, &attrs);
        asidx = cpu_asidx_from_attrs(cpu, attrs);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write) {
            cpu_physical_memory_write_rom(cpu->cpu_ases[asidx].as,
                                          phys_addr, buf, l);
        } else {
            address_space_rw(cpu->cpu_ases[asidx].as, phys_addr,
                             MEMTXATTRS_UNSPECIFIED,
                             buf, l, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
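
/* Illustrative sketch, not part of the original file (guarded out of the
 * build): how a debugger front end such as the gdbstub might read
 * guest-virtual memory of the first vCPU through the debug accessor above.
 * The wrapper name is a made-up example.
 */
#if 0
static int example_debug_read(target_ulong vaddr, uint8_t *out, int size)
{
    return cpu_memory_rw_debug(first_cpu, vaddr, out, size, 0 /* read */);
}
#endif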
/*
 * Allows code that needs to deal with migration bitmaps etc to still be built
 * target independent.
 */
size_t qemu_target_page_bits(void)
{
    return TARGET_PAGE_BITS;
}

#endif

/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool target_words_bigendian(void);
bool target_words_bigendian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}
#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegion *mr;
    hwaddr l = 1;
    bool res;

    rcu_read_lock();
    mr = address_space_translate(&address_space_memory,
                                 phys_addr, &phys_addr, &l, false);

    res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
    rcu_read_unlock();
    return res;
}
int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
{
    RAMBlock *block;
    int ret = 0;

    rcu_read_lock();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        ret = func(block->idstr, block->host, block->offset,
                   block->used_length, opaque);
        if (ret) {
            break;
        }
    }
    rcu_read_unlock();
    return ret;
}