 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/cutils.h"
#include "exec/exec-all.h"
#include "hw/qdev-core.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/boards.h"
#include "hw/xen/xen.h"
#endif
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#if defined(CONFIG_USER_ONLY)
#include "qemu.h"
#else /* !CONFIG_USER_ONLY */
#include "exec/memory.h"
#include "exec/ioport.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#include "sysemu/xen-mapcache.h"
#endif
#include "exec/cpu-all.h"
#include "qemu/rcu_queue.h"
#include "qemu/main-loop.h"
#include "translate-all.h"
#include "sysemu/replay.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "qemu/range.h"
#include "qemu/mmap-alloc.h"

//#define DEBUG_SUBPAGE
#if !defined(CONFIG_USER_ONLY)
/* ram_list is read under rcu_read_lock()/rcu_read_unlock().  Writes
 * are protected by the ramlist lock.
 */
RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC   (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED     (1 << 1)

/* Only a portion of RAM (used_length) is actually used, and migrated.
 * This used_length size can change across reboots.
 */
#define RAM_RESIZEABLE (1 << 2)

#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
__thread CPUState *current_cpu;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;
#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables.  */
#define ADDR_SPACE_BITS 64

#define P_L2_BITS 9
#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)

typedef PhysPageEntry Node[P_L2_SIZE];
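
/* Note on sizing (illustrative): each interior node is an array of P_L2_SIZE
 * entries, so every level of the map consumes P_L2_BITS of the page frame
 * number, and P_L2_LEVELS is just enough levels to cover ADDR_SPACE_BITS
 * minus TARGET_PAGE_BITS.  Assuming 4 KiB target pages and the 9-bit
 * per-level split above, that works out to (64 - 12 - 1) / 9 + 1 = 6 levels.
 */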
typedef struct PhysPageMap {
    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    struct rcu_head rcu;

    MemoryRegionSection *mru_section;
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;
/**
 * CPUAddressSpace: all the information a CPU needs about an AddressSpace
 * @cpu: the CPU whose AddressSpace this is
 * @as: the AddressSpace itself
 * @memory_dispatch: its dispatch pointer (cached, RCU protected)
 * @tcg_as_listener: listener for tracking changes to the AddressSpace
 */
struct CPUAddressSpace {
    CPUState *cpu;
    AddressSpace *as;
    struct AddressSpaceDispatch *memory_dispatch;
    MemoryListener tcg_as_listener;
};
#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
    }
}
static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
{
    unsigned i;
    uint32_t ret;
    PhysPageEntry e;
    PhysPageEntry *p;

    ret = map->nodes_nb++;
    p = map->nodes[ret];
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);

    e.skip = leaf ? 0 : 1;
    e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
    for (i = 0; i < P_L2_SIZE; ++i) {
        memcpy(&p[i], &e, sizeof(e));
    }
    return ret;
}
static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map, level == 0);
    }
    p = map->nodes[lp->ptr];
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}
/* Compact a non leaf page entry. Simply detect that the entry has a single child,
 * and update our entry so we can skip it and go directly to the destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes, compacted);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}

static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    DECLARE_BITMAP(compacted, nodes_nb);

    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes, compacted);
    }
}
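
/* Illustration: after compaction, a chain of interior nodes that each have a
 * single valid child collapses into one entry whose skip field records how
 * many levels a lookup may jump over, so sparse address maps are walked in
 * fewer than P_L2_LEVELS steps.
 */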
static inline bool section_covers_addr(const MemoryRegionSection *section,
                                       hwaddr addr)
{
    /* Memory topology clips a memory region to [0, 2^64); size.hi > 0 means
     * the section must cover the entire address space.
     */
    return section->size.hi ||
           range_covers_byte(section->offset_within_address_space,
                             section->size.lo, addr);
}

static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (section_covers_addr(&sections[lp.ptr], addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}
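
/* Note on the walk above: the page frame number (addr >> TARGET_PAGE_BITS) is
 * consumed P_L2_BITS at a time starting from its most significant chunk, and
 * lp.skip lets the loop jump over levels collapsed by phys_page_compact().
 * A miss at any level falls back to the PHYS_SECTION_UNASSIGNED dummy section
 * rather than returning NULL, so callers never have to check for it.
 */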
bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}
/* Called from RCU critical section */
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section = atomic_read(&d->mru_section);
    subpage_t *subpage;
    bool update;

    if (section && section != &d->map.sections[PHYS_SECTION_UNASSIGNED] &&
        section_covers_addr(section, addr)) {
        update = false;
    } else {
        section = phys_page_find(d->phys_map, addr, d->map.nodes,
                                 d->map.sections);
        update = true;
    }
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    if (update) {
        atomic_set(&d->mru_section, section);
    }
    return section;
}
/* Called from RCU critical section */
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    MemoryRegion *mr;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    mr = section->mr;

    /* MMIO registers can be expected to perform full-width accesses based only
     * on their address, without considering adjacent registers that could
     * decode to completely different MemoryRegions.  When such registers
     * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
     * regions overlap wildly.  For this reason we cannot clamp the accesses
     * here.
     *
     * If the length is small (as is the case for address_space_ldl/stl),
     * everything works fine.  If the incoming length is large, however,
     * the caller really has to do the clamping through memory_access_size.
     */
    if (memory_region_is_ram(mr)) {
        diff = int128_sub(section->size, int128_make64(addr));
        *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    }
    return section;
}
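
/* Worked example (illustrative): for a 0x1000-byte RAM section, a request of
 * *plen = 0x2000 starting 0x400 bytes into the section is clamped to 0xc00
 * here; the same request against an MMIO section is left untouched and must
 * be split by the caller through memory_access_size().
 */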
/* Called from RCU critical section */
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;

    for (;;) {
        AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
        section = address_space_translate_internal(d, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        *plen = MIN(page, *plen);
    }

    *xlat = addr;
    return mr;
}
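
/* Usage sketch (illustrative only, mirroring how this file uses the call):
 * callers hold the RCU read lock around the translate + access pair, e.g.
 *
 *     rcu_read_lock();
 *     l = len;
 *     mr = address_space_translate(as, addr, &xlat, &l, false);
 *     if (memory_access_is_direct(mr, false)) {
 *         memcpy(buf, qemu_get_ram_ptr(mr->ram_block,
 *                     memory_region_get_ram_addr(mr) + xlat), l);
 *     }
 *     rcu_read_unlock();
 */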
/* Called from RCU critical section */
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen)
{
    MemoryRegionSection *section;
    AddressSpaceDispatch *d = cpu->cpu_ases[asidx].memory_dispatch;

    section = address_space_translate_internal(d, addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif
#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu, 1);

    return 0;
}

static int cpu_common_pre_load(void *opaque)
{
    CPUState *cpu = opaque;

    cpu->exception_index = -1;

    return 0;
}

static bool cpu_common_exception_index_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return tcg_enabled() && cpu->exception_index != -1;
}

static const VMStateDescription vmstate_cpu_common_exception_index = {
    .name = "cpu_common/exception_index",
    .minimum_version_id = 1,
    .needed = cpu_common_exception_index_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(exception_index, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

static bool cpu_common_crash_occurred_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return cpu->crash_occurred;
}

static const VMStateDescription vmstate_cpu_common_crash_occurred = {
    .name = "cpu_common/crash_occurred",
    .minimum_version_id = 1,
    .needed = cpu_common_crash_occurred_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(crash_occurred, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .minimum_version_id = 1,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription *[]) {
        &vmstate_cpu_common_exception_index,
        &vmstate_cpu_common_crash_occurred,
        NULL
    }
};

#endif
CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}
#if !defined(CONFIG_USER_ONLY)
void cpu_address_space_init(CPUState *cpu, AddressSpace *as, int asidx)
{
    CPUAddressSpace *newas;

    /* Target code should have set num_ases before calling us */
    assert(asidx < cpu->num_ases);

    if (asidx == 0) {
        /* address space 0 gets the convenience alias */
        cpu->as = as;
    }

    /* KVM cannot currently support multiple address spaces. */
    assert(asidx == 0 || !kvm_enabled());

    if (!cpu->cpu_ases) {
        cpu->cpu_ases = g_new0(CPUAddressSpace, cpu->num_ases);
    }

    newas = &cpu->cpu_ases[asidx];
    newas->cpu = cpu;
    newas->as = as;
    if (tcg_enabled()) {
        newas->tcg_as_listener.commit = tcg_commit;
        memory_listener_register(&newas->tcg_as_listener, as);
    }
}

AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx)
{
    /* Return the AddressSpace corresponding to the specified index */
    return cpu->cpu_ases[asidx].as;
}
#endif
#ifndef CONFIG_USER_ONLY
static DECLARE_BITMAP(cpu_index_map, MAX_CPUMASK_BITS);

static int cpu_get_free_index(Error **errp)
{
    int cpu = find_first_zero_bit(cpu_index_map, MAX_CPUMASK_BITS);

    if (cpu >= MAX_CPUMASK_BITS) {
        error_setg(errp, "Trying to use more CPUs than max of %d",
                   MAX_CPUMASK_BITS);
        return -1;
    }

    bitmap_set(cpu_index_map, cpu, 1);
    return cpu;
}

static void cpu_release_index(CPUState *cpu)
{
    bitmap_clear(cpu_index_map, cpu->cpu_index, 1);
}
#else

static int cpu_get_free_index(Error **errp)
{
    CPUState *some_cpu;
    int cpu_index = 0;

    CPU_FOREACH(some_cpu) {
        cpu_index++;
    }
    return cpu_index;
}

static void cpu_release_index(CPUState *cpu)
{
    return;
}
#endif
void cpu_exec_exit(CPUState *cpu)
{
#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif

    if (cpu->cpu_index == -1) {
        /* cpu_index was never allocated by this @cpu or was already freed. */
#if defined(CONFIG_USER_ONLY)
        cpu_list_unlock();
#endif
        return;
    }

    QTAILQ_REMOVE(&cpus, cpu, node);
    cpu_release_index(cpu);
    cpu->cpu_index = -1;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
}
void cpu_exec_init(CPUState *cpu, Error **errp)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    Error *local_err = NULL;

#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();

    /* This is a softmmu CPU object, so create a property for it
     * so users can wire up its memory. (This can't go in qom/cpu.c
     * because that file is compiled only once for both user-mode
     * and system builds.) The default if no link is set up is to use
     * the system address space.
     */
    object_property_add_link(OBJECT(cpu), "memory", TYPE_MEMORY_REGION,
                             (Object **)&cpu->memory,
                             qdev_prop_allow_set_link_before_realize,
                             OBJ_PROP_LINK_UNREF_ON_RELEASE,
                             &error_abort);
    cpu->memory = system_memory;
    object_ref(OBJECT(cpu->memory));
#endif

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    cpu->cpu_index = cpu_get_free_index(&local_err);
    if (local_err) {
        error_propagate(errp, local_err);
#if defined(CONFIG_USER_ONLY)
        cpu_list_unlock();
#endif
        return;
    }
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
    (void) cc;
    cpu_list_unlock();
#else
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu->cpu_index, &vmstate_cpu_common, cpu);
    }
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu->cpu_index, cc->vmsd, cpu);
    }
#endif
}
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    MemTxAttrs attrs;
    hwaddr phys = cpu_get_phys_page_attrs_debug(cpu, pc, &attrs);
    int asidx = cpu_asidx_from_attrs(cpu, attrs);

    if (phys != -1) {
        tb_invalidate_phys_addr(cpu->cpu_ases[asidx].as,
                                phys | (pc & ~TARGET_PAGE_MASK));
    }
}
#endif
#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
}

int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    return -ENOSYS;
}

void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
}

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    CPUWatchpoint *wp;

    /* forbid ranges which are empty or run off the end of the address space */
    if (len == 0 || (addr + len - 1) < addr) {
        error_report("tried to set invalid watchpoint at %"
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len = len;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
    }

    tlb_flush_page(cpu, addr);

    if (watchpoint) {
        *watchpoint = wp;
    }
    return 0;
}
/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (addr == wp->vaddr && len == wp->len
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);

    tlb_flush_page(cpu, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
        }
    }
}
/* Return true if this watchpoint address matches the specified
 * access (ie the address range covered by the watchpoint overlaps
 * partially or completely with the address range covered by the
 * access).
 */
static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
                                                  vaddr addr,
                                                  vaddr len)
{
    /* We know the lengths are non-zero, but a little caution is
     * required to avoid errors in the case where the range ends
     * exactly at the top of the address space and so addr + len
     * wraps round to zero.
     */
    vaddr wpend = wp->vaddr + wp->len - 1;
    vaddr addrend = addr + len - 1;

    return !(addr > wpend || wp->vaddr > addrend);
}
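
/* Example: on a 64-bit target, a watchpoint at 0xfffffffffffffffc with len 4
 * has wpend == UINT64_MAX; computing the inclusive end as start + len - 1
 * instead of start + len is what keeps the overlap test correct when the
 * range ends exactly at the top of the address space.
 */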
/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
{
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
    }

    breakpoint_invalidate(cpu, pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
}
/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
{
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
{
    QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);

    breakpoint_invalidate(cpu, breakpoint->pc);

    g_free(breakpoint);
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
{
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
        }
    }
}
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(cpu);
        }
    }
}
void cpu_abort(CPUState *cpu, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_separate()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}
#if !defined(CONFIG_USER_ONLY)
/* Called from RCU critical section */
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && addr - block->offset < block->max_length) {
        return block;
    }
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->max_length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    /* It is safe to write mru_block outside the iothread lock.  This
     * is what happens:
     *
     *     mru_block = xxx
     *     rcu_read_lock()
     *                                        xxx removed from list
     *                                        mru_block = NULL;
     *                                        call_rcu(reclaim_ramblock, xxx);
     *                  rcu_read_unlock()
     *
     * atomic_rcu_set is not needed here.  The block was already published
     * when it was placed into the list.  Here we're just making an extra
     * copy of the pointer.
     */
    ram_list.mru_block = block;
    return block;
}
static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
    CPUState *cpu;
    ram_addr_t start1;
    RAMBlock *block;
    ram_addr_t end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    rcu_read_lock();
    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
    CPU_FOREACH(cpu) {
        tlb_reset_dirty(cpu, start1, length);
    }
    rcu_read_unlock();
}
/* Note: start and end must be within the same ram block.  */
bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
                                              ram_addr_t length,
                                              unsigned client)
{
    DirtyMemoryBlocks *blocks;
    unsigned long end, page;
    bool dirty = false;

    if (length == 0) {
        return false;
    }

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    rcu_read_lock();

    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);

    while (page < end) {
        unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long num = MIN(end - page, DIRTY_MEMORY_BLOCK_SIZE - offset);

        dirty |= bitmap_test_and_clear_atomic(blocks->blocks[idx],
                                              offset, num);
        page += num;
    }

    rcu_read_unlock();

    if (dirty && tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
    }

    return dirty;
}
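
/* Note: the per-client dirty bitmap is split into DIRTY_MEMORY_BLOCK_SIZE-page
 * chunks, so page N lives in blocks->blocks[N / DIRTY_MEMORY_BLOCK_SIZE] at
 * bit N % DIRTY_MEMORY_BLOCK_SIZE.  The loop above clears at most one chunk
 * per iteration so each bitmap_test_and_clear_atomic() call stays inside a
 * single allocation.
 */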
/* Called from RCU critical section */
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = memory_region_get_ram_addr(section->mr) + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        AddressSpaceDispatch *d;

        d = atomic_rcu_read(&section->address_space->dispatch);
        iotlb = section - d->map.sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* defined(CONFIG_USER_ONLY) */
#if !defined(CONFIG_USER_ONLY)

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
                               qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this.  Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
{
    phys_mem_alloc = alloc;
}
static uint16_t phys_section_add(PhysPageMap *map,
                                 MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(map->sections_nb < TARGET_PAGE_SIZE);

    if (map->sections_nb == map->sections_nb_alloc) {
        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
        map->sections = g_renew(MemoryRegionSection, map->sections,
                                map->sections_nb_alloc);
    }
    map->sections[map->sections_nb] = *section;
    memory_region_ref(section->mr);
    return map->sections_nb++;
}
static void phys_section_destroy(MemoryRegion *mr)
{
    bool have_sub_page = mr->subpage;

    memory_region_unref(mr);

    if (have_sub_page) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        object_unref(OBJECT(&subpage->iomem));
        g_free(subpage);
    }
}

static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->nodes);
    g_free(map->sections);
}
static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
                                                   d->map.nodes, d->map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.address_space = d->as;
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&d->map, &subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end,
                     phys_section_add(&d->map, section));
}

static void register_multipage(AddressSpaceDispatch *d,
                               MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(&d->map, section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,
                                                    TARGET_PAGE_BITS));

    assert(num_pages);
    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
}
static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = as->next_dispatch;
    MemoryRegionSection now = *section, remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
                        - now.offset_within_address_space;

        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(d, &now);
    } else {
        now.size = int128_zero();
    }
    while (int128_ne(remain.size, now.size)) {
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);
        now = remain;
        if (int128_lt(remain.size, page_size)) {
            register_subpage(d, &now);
        } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
            now.size = page_size;
            register_subpage(d, &now);
        } else {
            now.size = int128_and(now.size, int128_neg(page_size));
            register_multipage(d, &now);
        }
    }
}
void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled()) {
        kvm_flush_coalesced_mmio_buffer();
    }
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}
static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path,
                            Error **errp)
{
    bool unlink_on_error = false;
    char *filename;
    char *sanitized_name;
    char *c;
    void *area;
    int fd = -1;
    int64_t page_size;

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        error_setg(errp,
                   "host lacks kvm mmu notifiers, -mem-path unsupported");
        return NULL;
    }

    for (;;) {
        fd = open(path, O_RDWR);
        if (fd >= 0) {
            /* @path names an existing file, use it */
            break;
        }
        if (errno == ENOENT) {
            /* @path names a file that doesn't exist, create it */
            fd = open(path, O_RDWR | O_CREAT | O_EXCL, 0644);
            if (fd >= 0) {
                unlink_on_error = true;
                break;
            }
        } else if (errno == EISDIR) {
            /* @path names a directory, create a file there */
            /* Make name safe to use with mkstemp by replacing '/' with '_'. */
            sanitized_name = g_strdup(memory_region_name(block->mr));
            for (c = sanitized_name; *c != '\0'; c++) {
                if (*c == '/') {
                    *c = '_';
                }
            }

            filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
                                       sanitized_name);
            g_free(sanitized_name);

            fd = mkstemp(filename);
            if (fd >= 0) {
                unlink(filename);
                g_free(filename);
                break;
            }
            g_free(filename);
        }
        if (errno != EEXIST && errno != EINTR) {
            error_setg_errno(errp, errno,
                             "can't open backing store %s for guest RAM",
                             path);
            goto error;
        }
        /*
         * Try again on EINTR and EEXIST.  The latter happens when
         * something else creates the file between our two open().
         */
    }

    page_size = qemu_fd_getpagesize(fd);
    block->mr->align = MAX(page_size, QEMU_VMALLOC_ALIGN);

    if (memory < page_size) {
        error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
                   "or larger than page size 0x%" PRIx64,
                   memory, page_size);
        goto error;
    }

    memory = ROUND_UP(memory, page_size);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory)) {
        perror("ftruncate");
    }

    area = qemu_ram_mmap(fd, memory, block->mr->align,
                         block->flags & RAM_SHARED);
    if (area == MAP_FAILED) {
        error_setg_errno(errp, errno,
                         "unable to map backing store for guest RAM");
        goto error;
    }

    if (mem_prealloc) {
        os_mem_prealloc(fd, area, memory);
    }

    block->fd = fd;
    return area;

error:
    if (unlink_on_error) {
        unlink(path);
    }
    if (fd != -1) {
        close(fd);
    }
    return NULL;
}
1365 static ram_addr_t
find_ram_offset(ram_addr_t size
)
1367 RAMBlock
*block
, *next_block
;
1368 ram_addr_t offset
= RAM_ADDR_MAX
, mingap
= RAM_ADDR_MAX
;
1370 assert(size
!= 0); /* it would hand out same offset multiple times */
1372 if (QLIST_EMPTY_RCU(&ram_list
.blocks
)) {
1376 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1377 ram_addr_t end
, next
= RAM_ADDR_MAX
;
1379 end
= block
->offset
+ block
->max_length
;
1381 QLIST_FOREACH_RCU(next_block
, &ram_list
.blocks
, next
) {
1382 if (next_block
->offset
>= end
) {
1383 next
= MIN(next
, next_block
->offset
);
1386 if (next
- end
>= size
&& next
- end
< mingap
) {
1388 mingap
= next
- end
;
1392 if (offset
== RAM_ADDR_MAX
) {
1393 fprintf(stderr
, "Failed to find gap of requested size: %" PRIu64
"\n",
1401 ram_addr_t
last_ram_offset(void)
1404 ram_addr_t last
= 0;
1407 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1408 last
= MAX(last
, block
->offset
+ block
->max_length
);
1414 static void qemu_ram_setup_dump(void *addr
, ram_addr_t size
)
1418 /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
1419 if (!machine_dump_guest_core(current_machine
)) {
1420 ret
= qemu_madvise(addr
, size
, QEMU_MADV_DONTDUMP
);
1422 perror("qemu_madvise");
1423 fprintf(stderr
, "madvise doesn't support MADV_DONTDUMP, "
1424 "but dump_guest_core=off specified\n");
1429 const char *qemu_ram_get_idstr(RAMBlock
*rb
)
/* Called with iothread lock held.  */
void qemu_ram_set_idstr(RAMBlock *new_block, const char *name, DeviceState *dev)
{
    RAMBlock *block;

    assert(!new_block->idstr[0]);

    if (dev) {
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    rcu_read_lock();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (block != new_block &&
            !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
    rcu_read_unlock();
}

/* Called with iothread lock held.  */
void qemu_ram_unset_idstr(RAMBlock *block)
{
    /* FIXME: arch_init.c assumes that this is not called throughout
     * migration.  Ignore the problem since hot-unplug during migration
     * does not work anyway.
     */
    if (block) {
        memset(block->idstr, 0, sizeof(block->idstr));
    }
}
static int memory_try_enable_merging(void *addr, size_t len)
{
    if (!machine_mem_merge(current_machine)) {
        /* disabled by the user */
        return 0;
    }

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
}
/* Only legal before guest might have detected the memory size: e.g. on
 * incoming migration, or right after reset.
 *
 * As memory core doesn't know how is memory accessed, it is up to
 * resize callback to update device state and/or add assertions to detect
 * misuse, if necessary.
 */
int qemu_ram_resize(RAMBlock *block, ram_addr_t newsize, Error **errp)
{
    newsize = HOST_PAGE_ALIGN(newsize);

    if (block->used_length == newsize) {
        return 0;
    }

    if (!(block->flags & RAM_RESIZEABLE)) {
        error_setg_errno(errp, EINVAL,
                         "Length mismatch: %s: 0x" RAM_ADDR_FMT
                         " in != 0x" RAM_ADDR_FMT, block->idstr,
                         newsize, block->used_length);
        return -EINVAL;
    }

    if (block->max_length < newsize) {
        error_setg_errno(errp, EINVAL,
                         "Length too large: %s: 0x" RAM_ADDR_FMT
                         " > 0x" RAM_ADDR_FMT, block->idstr,
                         newsize, block->max_length);
        return -EINVAL;
    }

    cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
    block->used_length = newsize;
    cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
                                        DIRTY_CLIENTS_ALL);
    memory_region_set_size(block->mr, newsize);
    if (block->resized) {
        block->resized(block->idstr, newsize, block->host);
    }
    return 0;
}
/* Called with ram_list.mutex held */
static void dirty_memory_extend(ram_addr_t old_ram_size,
                                ram_addr_t new_ram_size)
{
    ram_addr_t old_num_blocks = DIV_ROUND_UP(old_ram_size,
                                             DIRTY_MEMORY_BLOCK_SIZE);
    ram_addr_t new_num_blocks = DIV_ROUND_UP(new_ram_size,
                                             DIRTY_MEMORY_BLOCK_SIZE);
    int i;

    /* Only need to extend if block count increased */
    if (new_num_blocks <= old_num_blocks) {
        return;
    }

    for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
        DirtyMemoryBlocks *old_blocks;
        DirtyMemoryBlocks *new_blocks;
        int j;

        old_blocks = atomic_rcu_read(&ram_list.dirty_memory[i]);
        new_blocks = g_malloc(sizeof(*new_blocks) +
                              sizeof(new_blocks->blocks[0]) * new_num_blocks);

        if (old_num_blocks) {
            memcpy(new_blocks->blocks, old_blocks->blocks,
                   old_num_blocks * sizeof(old_blocks->blocks[0]));
        }

        for (j = old_num_blocks; j < new_num_blocks; j++) {
            new_blocks->blocks[j] = bitmap_new(DIRTY_MEMORY_BLOCK_SIZE);
        }

        atomic_rcu_set(&ram_list.dirty_memory[i], new_blocks);

        if (old_blocks) {
            g_free_rcu(old_blocks, rcu);
        }
    }
}
static void ram_block_add(RAMBlock *new_block, Error **errp)
{
    RAMBlock *block;
    RAMBlock *last_block = NULL;
    ram_addr_t old_ram_size, new_ram_size;
    Error *err = NULL;

    old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;

    qemu_mutex_lock_ramlist();
    new_block->offset = find_ram_offset(new_block->max_length);

    if (!new_block->host) {
        if (xen_enabled()) {
            xen_ram_alloc(new_block->offset, new_block->max_length,
                          new_block->mr, &err);
            if (err) {
                error_propagate(errp, err);
                qemu_mutex_unlock_ramlist();
                return;
            }
        } else {
            new_block->host = phys_mem_alloc(new_block->max_length,
                                             &new_block->mr->align);
            if (!new_block->host) {
                error_setg_errno(errp, errno,
                                 "cannot set up guest memory '%s'",
                                 memory_region_name(new_block->mr));
                qemu_mutex_unlock_ramlist();
                return;
            }
            memory_try_enable_merging(new_block->host, new_block->max_length);
        }
    }

    new_ram_size = MAX(old_ram_size,
              (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS);
    if (new_ram_size > old_ram_size) {
        migration_bitmap_extend(old_ram_size, new_ram_size);
        dirty_memory_extend(old_ram_size, new_ram_size);
    }
    /* Keep the list sorted from biggest to smallest block.  Unlike QTAILQ,
     * QLIST (which has an RCU-friendly variant) does not have insertion at
     * tail, so save the last element in last_block.
     */
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        last_block = block;
        if (block->max_length < new_block->max_length) {
            break;
        }
    }
    if (block) {
        QLIST_INSERT_BEFORE_RCU(block, new_block, next);
    } else if (last_block) {
        QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
    } else { /* list is empty */
        QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
    }
    ram_list.mru_block = NULL;

    /* Write list before version */
    smp_wmb();
    ram_list.version++;
    qemu_mutex_unlock_ramlist();

    cpu_physical_memory_set_dirty_range(new_block->offset,
                                        new_block->used_length,
                                        DIRTY_CLIENTS_ALL);

    if (new_block->host) {
        qemu_ram_setup_dump(new_block->host, new_block->max_length);
        qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
        qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
        if (kvm_enabled()) {
            kvm_setup_guest_memory(new_block->host, new_block->max_length);
        }
    }
}
RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
                                   bool share, const char *mem_path,
                                   Error **errp)
{
    RAMBlock *new_block;
    Error *local_err = NULL;

    if (xen_enabled()) {
        error_setg(errp, "-mem-path not supported with Xen");
        return NULL;
    }

    if (phys_mem_alloc != qemu_anon_ram_alloc) {
        /*
         * file_ram_alloc() needs to allocate just like
         * phys_mem_alloc, but we haven't bothered to provide
         * a hook there.
         */
        error_setg(errp,
                   "-mem-path not supported with this accelerator");
        return NULL;
    }

    size = HOST_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));
    new_block->mr = mr;
    new_block->used_length = size;
    new_block->max_length = size;
    new_block->flags = share ? RAM_SHARED : 0;
    new_block->host = file_ram_alloc(new_block, size,
                                     mem_path, errp);
    if (!new_block->host) {
        g_free(new_block);
        return NULL;
    }

    ram_block_add(new_block, &local_err);
    if (local_err) {
        g_free(new_block);
        error_propagate(errp, local_err);
        return NULL;
    }
    return new_block;
}
static
RAMBlock *qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
                                  void (*resized)(const char*,
                                                  uint64_t length,
                                                  void *host),
                                  void *host, bool resizeable,
                                  MemoryRegion *mr, Error **errp)
{
    RAMBlock *new_block;
    Error *local_err = NULL;

    size = HOST_PAGE_ALIGN(size);
    max_size = HOST_PAGE_ALIGN(max_size);
    new_block = g_malloc0(sizeof(*new_block));
    new_block->mr = mr;
    new_block->resized = resized;
    new_block->used_length = size;
    new_block->max_length = max_size;
    assert(max_size >= size);
    new_block->fd = -1;
    new_block->host = host;
    if (host) {
        new_block->flags |= RAM_PREALLOC;
    }
    if (resizeable) {
        new_block->flags |= RAM_RESIZEABLE;
    }
    ram_block_add(new_block, &local_err);
    if (local_err) {
        g_free(new_block);
        error_propagate(errp, local_err);
        return NULL;
    }
    return new_block;
}

RAMBlock *qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                  MemoryRegion *mr, Error **errp)
{
    return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
}

RAMBlock *qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
{
    return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
}

RAMBlock *qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
                                    void (*resized)(const char*,
                                                    uint64_t length,
                                                    void *host),
                                    MemoryRegion *mr, Error **errp)
{
    return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
}
static void reclaim_ramblock(RAMBlock *block)
{
    if (block->flags & RAM_PREALLOC) {
        ;
    } else if (xen_enabled()) {
        xen_invalidate_map_cache_entry(block->host);
    } else if (block->fd >= 0) {
        qemu_ram_munmap(block->host, block->max_length);
        close(block->fd);
    } else {
        qemu_anon_ram_free(block->host, block->max_length);
    }
    g_free(block);
}

void qemu_ram_free(RAMBlock *block)
{
    if (!block) {
        return;
    }

    qemu_mutex_lock_ramlist();
    QLIST_REMOVE_RCU(block, next);
    ram_list.mru_block = NULL;
    /* Write list before version */
    smp_wmb();
    ram_list.version++;
    call_rcu(block, reclaim_ramblock, rcu);
    qemu_mutex_unlock_ramlist();
}
#ifndef _WIN32
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->max_length) {
            vaddr = ramblock_ptr(block, offset);
            if (block->flags & RAM_PREALLOC) {
                ;
            } else if (xen_enabled()) {
                abort();
            } else {
                flags = MAP_FIXED;
                if (block->fd >= 0) {
                    flags |= (block->flags & RAM_SHARED ?
                              MAP_SHARED : MAP_PRIVATE);
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, block->fd, offset);
                } else {
                    /*
                     * Remap needs to match alloc.  Accelerators that
                     * set phys_mem_alloc never remap.  If they did,
                     * we'd need a remap hook here.
                     */
                    assert(phys_mem_alloc == qemu_anon_ram_alloc);

                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                memory_try_enable_merging(vaddr, length);
                qemu_ram_setup_dump(vaddr, length);
            }
        }
    }
}
#endif /* !_WIN32 */
int qemu_get_ram_fd(ram_addr_t addr)
{
    RAMBlock *block;
    int fd;

    rcu_read_lock();
    block = qemu_get_ram_block(addr);
    fd = block->fd;
    rcu_read_unlock();
    return fd;
}

void qemu_set_ram_fd(ram_addr_t addr, int fd)
{
    RAMBlock *block;

    rcu_read_lock();
    block = qemu_get_ram_block(addr);
    block->fd = fd;
    rcu_read_unlock();
}

void *qemu_get_ram_block_host_ptr(ram_addr_t addr)
{
    RAMBlock *block;
    void *ptr;

    rcu_read_lock();
    block = qemu_get_ram_block(addr);
    ptr = ramblock_ptr(block, 0);
    rcu_read_unlock();
    return ptr;
}
/* Return a host pointer to ram allocated with qemu_ram_alloc.
 * This should not be used for general purpose DMA.  Use address_space_map
 * or address_space_rw instead. For local memory (e.g. video ram) that the
 * device owns, use memory_region_get_ram_ptr.
 *
 * Called within RCU critical section.
 */
void *qemu_get_ram_ptr(RAMBlock *ram_block, ram_addr_t addr)
{
    RAMBlock *block = ram_block;

    if (block == NULL) {
        block = qemu_get_ram_block(addr);
    }

    if (xen_enabled() && block->host == NULL) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map until the end of the page.
         */
        if (block->offset == 0) {
            return xen_map_cache(addr, 0, 0);
        }

        block->host = xen_map_cache(block->offset, block->max_length, 1);
    }
    return ramblock_ptr(block, addr - block->offset);
}
/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
 * but takes a size argument.
 *
 * Called within RCU critical section.
 */
static void *qemu_ram_ptr_length(RAMBlock *ram_block, ram_addr_t addr,
                                 hwaddr *size)
{
    RAMBlock *block = ram_block;
    ram_addr_t offset_inside_block;

    if (*size == 0) {
        return NULL;
    }

    if (block == NULL) {
        block = qemu_get_ram_block(addr);
    }
    offset_inside_block = addr - block->offset;
    *size = MIN(*size, block->max_length - offset_inside_block);

    if (xen_enabled() && block->host == NULL) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map the requested area.
         */
        if (block->offset == 0) {
            return xen_map_cache(addr, *size, 1);
        }

        block->host = xen_map_cache(block->offset, block->max_length, 1);
    }

    return ramblock_ptr(block, offset_inside_block);
}
/*
 * Translates a host ptr back to a RAMBlock, a ram_addr and an offset
 * in that RAMBlock.
 *
 * ptr: Host pointer to look up
 * round_offset: If true round the result offset down to a page boundary
 * *ram_addr: set to result ram_addr
 * *offset: set to result offset within the RAMBlock
 *
 * Returns: RAMBlock (or NULL if not found)
 *
 * By the time this function returns, the returned pointer is not protected
 * by RCU anymore.  If the caller is not within an RCU critical section and
 * does not hold the iothread lock, it must have other means of protecting the
 * pointer, such as a reference to the region that includes the incoming
 * ram_addr_t.
 */
RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
                                   ram_addr_t *ram_addr,
                                   ram_addr_t *offset)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_enabled()) {
        rcu_read_lock();
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
        block = qemu_get_ram_block(*ram_addr);
        if (block) {
            *offset = (host - block->host);
        }
        rcu_read_unlock();
        return block;
    }

    rcu_read_lock();
    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && block->host && host - block->host < block->max_length) {
        goto found;
    }

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        /* This case happens when the block is not mapped. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->max_length) {
            goto found;
        }
    }

    rcu_read_unlock();
    return NULL;

found:
    *offset = (host - block->host);
    if (round_offset) {
        *offset &= TARGET_PAGE_MASK;
    }
    *ram_addr = block->offset + *offset;
    rcu_read_unlock();
    return block;
}
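
/* Usage sketch (illustrative only): a caller that just needs the ram_addr_t
 * for a host pointer it already owns can do
 *
 *     ram_addr_t ram_addr, offset;
 *     RAMBlock *rb = qemu_ram_block_from_host(ptr, false, &ram_addr, &offset);
 *     if (rb) {
 *         ... use ram_addr, keeping in mind the note above: the block is not
 *         RCU-protected once this function has returned ...
 *     }
 */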
/*
 * Finds the named RAMBlock
 *
 * name: The name of RAMBlock to find
 *
 * Returns: RAMBlock (or NULL if not found)
 */
RAMBlock *qemu_ram_block_by_name(const char *name)
{
    RAMBlock *block;

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (!strcmp(name, block->idstr)) {
            return block;
        }
    }

    return NULL;
}

/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    ram_addr_t offset; /* Not used */

    block = qemu_ram_block_from_host(ptr, false, ram_addr, &offset);

    if (!block) {
        return NULL;
    }

    return block->mr;
}
/* Called within RCU critical section.  */
static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
                               uint64_t val, unsigned size)
{
    if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
        tb_invalidate_phys_page_fast(ram_addr, size);
    }
    switch (size) {
    case 1:
        stb_p(qemu_get_ram_ptr(NULL, ram_addr), val);
        break;
    case 2:
        stw_p(qemu_get_ram_ptr(NULL, ram_addr), val);
        break;
    case 4:
        stl_p(qemu_get_ram_ptr(NULL, ram_addr), val);
        break;
    default:
        abort();
    }
    /* Set both VGA and migration bits for simplicity and to remove
     * the notdirty callback faster.
     */
    cpu_physical_memory_set_dirty_range(ram_addr, size,
                                        DIRTY_CLIENTS_NOCODE);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (!cpu_physical_memory_is_clean(ram_addr)) {
        tlb_set_dirty(current_cpu, current_cpu->mem_io_vaddr);
    }
}

static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
                                 unsigned size, bool is_write)
{
    return is_write;
}

static const MemoryRegionOps notdirty_mem_ops = {
    .write = notdirty_mem_write,
    .valid.accepts = notdirty_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
{
    CPUState *cpu = current_cpu;
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUArchState *env = cpu->env_ptr;
    target_ulong pc, cs_base;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (cpu->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that is will trigger after the
         * current instruction. */
        cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, len)
            && (wp->flags & flags)) {
            if (flags == BP_MEM_READ) {
                wp->flags |= BP_WATCHPOINT_HIT_READ;
            } else {
                wp->flags |= BP_WATCHPOINT_HIT_WRITE;
            }
            wp->hitaddr = vaddr;
            wp->hitattrs = attrs;
            if (!cpu->watchpoint_hit) {
                if (wp->flags & BP_CPU &&
                    !cc->debug_check_watchpoint(cpu, wp)) {
                    wp->flags &= ~BP_WATCHPOINT_HIT;
                    continue;
                }
                cpu->watchpoint_hit = wp;
                tb_check_watchpoint(cpu);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    cpu->exception_index = EXCP_DEBUG;
                    cpu_loop_exit(cpu);
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
                    cpu_resume_from_signal(cpu, NULL);
                }
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata,
                                  unsigned size, MemTxAttrs attrs)
{
    MemTxResult res;
    uint64_t data;
    int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
    AddressSpace *as = current_cpu->cpu_ases[asidx].as;

    check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ);
    switch (size) {
    case 1:
        data = address_space_ldub(as, addr, attrs, &res);
        break;
    case 2:
        data = address_space_lduw(as, addr, attrs, &res);
        break;
    case 4:
        data = address_space_ldl(as, addr, attrs, &res);
        break;
    default:
        abort();
    }
    *pdata = data;
    return res;
}

static MemTxResult watch_mem_write(void *opaque, hwaddr addr,
                                   uint64_t val, unsigned size,
                                   MemTxAttrs attrs)
{
    MemTxResult res;
    int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
    AddressSpace *as = current_cpu->cpu_ases[asidx].as;

    check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE);
    switch (size) {
    case 1:
        address_space_stb(as, addr, val, attrs, &res);
        break;
    case 2:
        address_space_stw(as, addr, val, attrs, &res);
        break;
    case 4:
        address_space_stl(as, addr, val, attrs, &res);
        break;
    default:
        abort();
    }
    return res;
}

static const MemoryRegionOps watch_mem_ops = {
    .read_with_attrs = watch_mem_read,
    .write_with_attrs = watch_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
                                unsigned len, MemTxAttrs attrs)
{
    subpage_t *subpage = opaque;
    uint8_t buf[8];
    MemTxResult res;

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
           subpage, len, addr);
#endif
    res = address_space_read(subpage->as, addr + subpage->base,
                             attrs, buf, len);
    if (res) {
        return res;
    }
    switch (len) {
    case 1:
        *data = ldub_p(buf);
        return MEMTX_OK;
    case 2:
        *data = lduw_p(buf);
        return MEMTX_OK;
    case 4:
        *data = ldl_p(buf);
        return MEMTX_OK;
    case 8:
        *data = ldq_p(buf);
        return MEMTX_OK;
    default:
        abort();
    }
}

static MemTxResult subpage_write(void *opaque, hwaddr addr,
                                 uint64_t value, unsigned len, MemTxAttrs attrs)
{
    subpage_t *subpage = opaque;
    uint8_t buf[8];

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx
           " value %"PRIx64"\n",
           __func__, subpage, len, addr, value);
#endif
    switch (len) {
    case 1:
        stb_p(buf, value);
        break;
    case 2:
        stw_p(buf, value);
        break;
    case 4:
        stl_p(buf, value);
        break;
    case 8:
        stq_p(buf, value);
        break;
    default:
        abort();
    }
    return address_space_write(subpage->as, addr + subpage->base,
                               attrs, buf, len);
}

static bool subpage_accepts(void *opaque, hwaddr addr,
                            unsigned len, bool is_write)
{
    subpage_t *subpage = opaque;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
           __func__, subpage, is_write ? 'w' : 'r', len, addr);
#endif

    return address_space_access_valid(subpage->as, addr + subpage->base,
                                      len, is_write);
}

static const MemoryRegionOps subpage_ops = {
    .read_with_attrs = subpage_read,
    .write_with_attrs = subpage_write,
    .impl.min_access_size = 1,
    .impl.max_access_size = 8,
    .valid.min_access_size = 1,
    .valid.max_access_size = 8,
    .valid.accepts = subpage_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE) {
        return -1;
    }
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
           __func__, mmio, start, end, idx, eidx, section);
#endif
    for (; idx <= eidx; idx++) {
        mmio->sub_section[idx] = section;
    }

    return 0;
}

static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
{
    subpage_t *mmio;

    mmio = g_malloc0(sizeof(subpage_t));

    mmio->as = as;
    mmio->base = base;
    memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
                          NULL, TARGET_PAGE_SIZE);
    mmio->iomem.subpage = true;
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
           mmio, base, TARGET_PAGE_SIZE);
#endif
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);

    return mmio;
}
static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
                              MemoryRegion *mr)
{
    MemoryRegionSection section = {
        .address_space = as,
        .mr = mr,
        .offset_within_address_space = 0,
        .offset_within_region = 0,
        .size = int128_2_64(),
    };

    return phys_section_add(map, &section);
}

MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index, MemTxAttrs attrs)
{
    int asidx = cpu_asidx_from_attrs(cpu, attrs);
    CPUAddressSpace *cpuas = &cpu->cpu_ases[asidx];
    AddressSpaceDispatch *d = atomic_rcu_read(&cpuas->memory_dispatch);
    MemoryRegionSection *sections = d->map.sections;

    return sections[index & ~TARGET_PAGE_MASK].mr;
}
static void io_mem_init(void)
{
    memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
    memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
                          NULL, UINT64_MAX);
    memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
                          NULL, UINT64_MAX);
    memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
                          NULL, UINT64_MAX);
}
static void mem_begin(MemoryListener *listener)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
    uint16_t n;

    n = dummy_section(&d->map, as, &io_mem_unassigned);
    assert(n == PHYS_SECTION_UNASSIGNED);
    n = dummy_section(&d->map, as, &io_mem_notdirty);
    assert(n == PHYS_SECTION_NOTDIRTY);
    n = dummy_section(&d->map, as, &io_mem_rom);
    assert(n == PHYS_SECTION_ROM);
    n = dummy_section(&d->map, as, &io_mem_watch);
    assert(n == PHYS_SECTION_WATCH);

    d->phys_map  = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
    d->as = as;
    as->next_dispatch = d;
}

static void address_space_dispatch_free(AddressSpaceDispatch *d)
{
    phys_sections_free(&d->map);
    g_free(d);
}
static void mem_commit(MemoryListener *listener)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *cur = as->dispatch;
    AddressSpaceDispatch *next = as->next_dispatch;

    phys_page_compact_all(next, next->map.nodes_nb);

    atomic_rcu_set(&as->dispatch, next);
    if (cur) {
        call_rcu(cur, address_space_dispatch_free, rcu);
    }
}
static void tcg_commit(MemoryListener *listener)
{
    CPUAddressSpace *cpuas;
    AddressSpaceDispatch *d;

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener);
    cpu_reloading_memory_map();
    /* The CPU and TLB are protected by the iothread lock.
     * We reload the dispatch pointer now because cpu_reloading_memory_map()
     * may have split the RCU critical section.
     */
    d = atomic_rcu_read(&cpuas->as->dispatch);
    cpuas->memory_dispatch = d;
    tlb_flush(cpuas->cpu, 1);
}
void address_space_init_dispatch(AddressSpace *as)
{
    as->dispatch = NULL;
    as->dispatch_listener = (MemoryListener) {
        .begin = mem_begin,
        .commit = mem_commit,
        .region_add = mem_add,
        .region_nop = mem_add,
    };
    memory_listener_register(&as->dispatch_listener, as);
}

void address_space_unregister(AddressSpace *as)
{
    memory_listener_unregister(&as->dispatch_listener);
}

void address_space_destroy_dispatch(AddressSpace *as)
{
    AddressSpaceDispatch *d = as->dispatch;

    atomic_rcu_set(&as->dispatch, NULL);
    if (d) {
        call_rcu(d, address_space_dispatch_free, rcu);
    }
}
static void memory_map_init(void)
{
    system_memory = g_malloc(sizeof(*system_memory));

    memory_region_init(system_memory, NULL, "system", UINT64_MAX);
    address_space_init(&address_space_memory, system_memory, "memory");

    system_io = g_malloc(sizeof(*system_io));
    memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
                          65536);
    address_space_init(&address_space_io, system_io, "I/O");
}

MemoryRegion *get_system_memory(void)
{
    return system_memory;
}

MemoryRegion *get_system_io(void)
{
    return system_io;
}

#endif /* !defined(CONFIG_USER_ONLY) */
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
#endif
static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
                                     hwaddr length)
{
    uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
    /* No early return if dirty_log_mask is or becomes 0, because
     * cpu_physical_memory_set_dirty_range will still call
     * xen_modified_memory.
     */
    if (dirty_log_mask) {
        dirty_log_mask =
            cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
    }
    if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
        tb_invalidate_phys_range(addr, addr + length);
        dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
    }
    cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
}
static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
{
    unsigned access_size_max = mr->ops->valid.max_access_size;

    /* Regions are assumed to support 1-4 byte accesses unless
       otherwise specified.  */
    if (access_size_max == 0) {
        access_size_max = 4;
    }

    /* Bound the maximum access by the alignment of the address.  */
    if (!mr->ops->impl.unaligned) {
        unsigned align_size_max = addr & -addr;
        if (align_size_max != 0 && align_size_max < access_size_max) {
            access_size_max = align_size_max;
        }
    }

    /* Don't attempt accesses larger than the maximum.  */
    if (l > access_size_max) {
        l = access_size_max;
    }
    l = pow2floor(l);

    return l;
}
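
/*
 * Illustrative example (not part of the original code): for an MMIO region
 * whose ops declare no max_access_size and no unaligned support, an 8-byte
 * access at addr = 0x1006 is clipped as follows:
 *
 *     access_size_max = 4;                      // default for unspecified regions
 *     align_size_max  = 0x1006 & -0x1006 = 2;   // lowest set bit of the address
 *     access_size_max = 2;                      // bounded by the alignment
 *     l = 2;                                    // clamped, then pow2floor'd
 *
 * so the caller ends up issuing the 8-byte request as a sequence of smaller
 * accesses.
 */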
static bool prepare_mmio_access(MemoryRegion *mr)
{
    bool unlocked = !qemu_mutex_iothread_locked();
    bool release_lock = false;

    if (unlocked && mr->global_locking) {
        qemu_mutex_lock_iothread();
        unlocked = false;
        release_lock = true;
    }
    if (mr->flush_coalesced_mmio) {
        if (unlocked) {
            qemu_mutex_lock_iothread();
        }
        qemu_flush_coalesced_mmio_buffer();
        if (unlocked) {
            qemu_mutex_unlock_iothread();
        }
    }

    return release_lock;
}
/* Called within RCU critical section.  */
static MemTxResult address_space_write_continue(AddressSpace *as, hwaddr addr,
                                                MemTxAttrs attrs,
                                                const uint8_t *buf,
                                                int len, hwaddr addr1,
                                                hwaddr l, MemoryRegion *mr)
{
    uint8_t *ptr;
    uint64_t val;
    MemTxResult result = MEMTX_OK;
    bool release_lock = false;

    for (;;) {
        if (!memory_access_is_direct(mr, true)) {
            release_lock |= prepare_mmio_access(mr);
            l = memory_access_size(mr, l, addr1);
            /* XXX: could force current_cpu to NULL to avoid
               potential bugs */
            switch (l) {
            case 8:
                /* 64 bit write access */
                val = ldq_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 8,
                                                       attrs);
                break;
            case 4:
                /* 32 bit write access */
                val = ldl_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 4,
                                                       attrs);
                break;
            case 2:
                /* 16 bit write access */
                val = lduw_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 2,
                                                       attrs);
                break;
            case 1:
                /* 8 bit write access */
                val = ldub_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 1,
                                                       attrs);
                break;
            default:
                abort();
            }
        } else {
            addr1 += memory_region_get_ram_addr(mr);
            /* RAM case */
            ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
            memcpy(ptr, buf, l);
            invalidate_and_set_dirty(mr, addr1, l);
        }

        if (release_lock) {
            qemu_mutex_unlock_iothread();
            release_lock = false;
        }

        len -= l;
        buf += l;
        addr += l;

        if (!len) {
            break;
        }

        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);
    }

    return result;
}
MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
                                const uint8_t *buf, int len)
{
    hwaddr l;
    hwaddr addr1;
    MemoryRegion *mr;
    MemTxResult result = MEMTX_OK;

    if (len > 0) {
        rcu_read_lock();
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);
        result = address_space_write_continue(as, addr, attrs, buf, len,
                                              addr1, l, mr);
        rcu_read_unlock();
    }

    return result;
}
/* Called within RCU critical section.  */
MemTxResult address_space_read_continue(AddressSpace *as, hwaddr addr,
                                        MemTxAttrs attrs, uint8_t *buf,
                                        int len, hwaddr addr1, hwaddr l,
                                        MemoryRegion *mr)
{
    uint8_t *ptr;
    uint64_t val;
    MemTxResult result = MEMTX_OK;
    bool release_lock = false;

    for (;;) {
        if (!memory_access_is_direct(mr, false)) {
            /* I/O case */
            release_lock |= prepare_mmio_access(mr);
            l = memory_access_size(mr, l, addr1);
            switch (l) {
            case 8:
                /* 64 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 8,
                                                      attrs);
                stq_p(buf, val);
                break;
            case 4:
                /* 32 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 4,
                                                      attrs);
                stl_p(buf, val);
                break;
            case 2:
                /* 16 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 2,
                                                      attrs);
                stw_p(buf, val);
                break;
            case 1:
                /* 8 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 1,
                                                      attrs);
                stb_p(buf, val);
                break;
            default:
                abort();
            }
        } else {
            /* RAM case */
            ptr = qemu_get_ram_ptr(mr->ram_block,
                                   memory_region_get_ram_addr(mr) + addr1);
            memcpy(buf, ptr, l);
        }

        if (release_lock) {
            qemu_mutex_unlock_iothread();
            release_lock = false;
        }

        len -= l;
        buf += l;
        addr += l;

        if (!len) {
            break;
        }

        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, false);
    }

    return result;
}
MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
                                    MemTxAttrs attrs, uint8_t *buf, int len)
{
    hwaddr l;
    hwaddr addr1;
    MemoryRegion *mr;
    MemTxResult result = MEMTX_OK;

    if (len > 0) {
        rcu_read_lock();
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, false);
        result = address_space_read_continue(as, addr, attrs, buf, len,
                                             addr1, l, mr);
        rcu_read_unlock();
    }

    return result;
}

MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
                             uint8_t *buf, int len, bool is_write)
{
    if (is_write) {
        return address_space_write(as, addr, attrs, (uint8_t *)buf, len);
    } else {
        return address_space_read(as, addr, attrs, (uint8_t *)buf, len);
    }
}
void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
                            int len, int is_write)
{
    address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
                     buf, len, is_write);
}
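
/*
 * Usage sketch (illustrative, not part of the original file; "gpa" stands
 * for a guest-physical address supplied by a device model):
 *
 *     uint8_t data[64];
 *     cpu_physical_memory_rw(gpa, data, sizeof(data), 0);   // read from guest
 *     // ... modify data ...
 *     cpu_physical_memory_rw(gpa, data, sizeof(data), 1);   // write it back
 *
 * Code that holds an explicit AddressSpace and cares about the MemTxResult
 * should call address_space_rw() directly instead.
 */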
enum write_rom_type {
    WRITE_DATA,
    FLUSH_CACHE,
};

static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
    hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
{
    hwaddr l;
    uint8_t *ptr;
    hwaddr addr1;
    MemoryRegion *mr;

    rcu_read_lock();
    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);

        if (!(memory_region_is_ram(mr) ||
              memory_region_is_romd(mr))) {
            l = memory_access_size(mr, l, addr1);
        } else {
            addr1 += memory_region_get_ram_addr(mr);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
            switch (type) {
            case WRITE_DATA:
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(mr, addr1, l);
                break;
            case FLUSH_CACHE:
                flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
                break;
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
    rcu_read_unlock();
}
/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
                                   const uint8_t *buf, int len)
{
    cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
}
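
/*
 * Illustrative use (sketch only; "blob", "blob_size" and "rom_base" are
 * assumed caller values, not from this file): board code loading a firmware
 * image into a ROM-backed region goes through this helper, because a normal
 * address_space_write() would be discarded by a read-only region:
 *
 *     cpu_physical_memory_write_rom(&address_space_memory, rom_base,
 *                                   blob, blob_size);
 */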
void cpu_flush_icache_range(hwaddr start, int len)
{
    /*
     * This function should do the same thing as an icache flush that was
     * triggered from within the guest. For TCG we are always cache coherent,
     * so there is no need to flush anything. For KVM / Xen we need to flush
     * the host's instruction cache at least.
     */
    if (tcg_enabled()) {
        return;
    }
    cpu_physical_memory_write_rom_internal(&address_space_memory,
                                           start, NULL, len, FLUSH_CACHE);
}

typedef struct {
    MemoryRegion *mr;
    void *buffer;
    hwaddr addr;
    hwaddr len;
    bool in_use;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    QEMUBH *bh;
    QLIST_ENTRY(MapClient) link;
} MapClient;

QemuMutex map_client_list_lock;
static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);
static void cpu_unregister_map_client_do(MapClient *client)
{
    QLIST_REMOVE(client, link);
    g_free(client);
}

static void cpu_notify_map_clients_locked(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        qemu_bh_schedule(client->bh);
        cpu_unregister_map_client_do(client);
    }
}

void cpu_register_map_client(QEMUBH *bh)
{
    MapClient *client = g_malloc(sizeof(*client));

    qemu_mutex_lock(&map_client_list_lock);
    client->bh = bh;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    if (!atomic_read(&bounce.in_use)) {
        cpu_notify_map_clients_locked();
    }
    qemu_mutex_unlock(&map_client_list_lock);
}

void cpu_exec_init_all(void)
{
    qemu_mutex_init(&ram_list.mutex);
    io_mem_init();
    memory_map_init();
    qemu_mutex_init(&map_client_list_lock);
}
void cpu_unregister_map_client(QEMUBH *bh)
{
    MapClient *client;

    qemu_mutex_lock(&map_client_list_lock);
    QLIST_FOREACH(client, &map_client_list, link) {
        if (client->bh == bh) {
            cpu_unregister_map_client_do(client);
            break;
        }
    }
    qemu_mutex_unlock(&map_client_list_lock);
}

static void cpu_notify_map_clients(void)
{
    qemu_mutex_lock(&map_client_list_lock);
    cpu_notify_map_clients_locked();
    qemu_mutex_unlock(&map_client_list_lock);
}
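
/*
 * Sketch of the retry protocol (illustrative only; "MyDMAState" and
 * "my_start_dma" are hypothetical): a caller that gets NULL from
 * address_space_map() because the single bounce buffer is busy can ask to be
 * woken up when it is released:
 *
 *     static void my_retry_bh(void *opaque)
 *     {
 *         MyDMAState *s = opaque;   // hypothetical device state
 *         my_start_dma(s);          // try address_space_map() again
 *     }
 *
 *     s->bh = qemu_bh_new(my_retry_bh, s);
 *     cpu_register_map_client(s->bh);
 *
 * cpu_notify_map_clients() schedules each registered BH once and removes it
 * from the list, so a client re-registers if its retry fails again.
 */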
bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
{
    MemoryRegion *mr;
    hwaddr l, xlat;

    rcu_read_lock();
    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (!memory_access_is_direct(mr, is_write)) {
            l = memory_access_size(mr, l, addr);
            if (!memory_region_access_valid(mr, xlat, l, is_write)) {
                rcu_read_unlock();
                return false;
            }
        }

        len -= l;
        addr += l;
    }
    rcu_read_unlock();
    return true;
}
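
/*
 * Illustrative check (not from the original file; "gpa" and "size" are
 * assumed caller values): a device model can probe whether a guest-supplied
 * DMA window is fully accessible before touching it:
 *
 *     if (!address_space_access_valid(&address_space_memory, gpa, size, true)) {
 *         return;   // e.g. raise a device-specific DMA error instead
 *     }
 */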
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *address_space_map(AddressSpace *as,
                        hwaddr addr,
                        hwaddr *plen,
                        bool is_write)
{
    hwaddr len = *plen;
    hwaddr done = 0;
    hwaddr l, xlat, base;
    MemoryRegion *mr, *this_mr;
    ram_addr_t raddr;
    void *ptr;

    if (len == 0) {
        return NULL;
    }

    l = len;
    rcu_read_lock();
    mr = address_space_translate(as, addr, &xlat, &l, is_write);

    if (!memory_access_is_direct(mr, is_write)) {
        if (atomic_xchg(&bounce.in_use, true)) {
            rcu_read_unlock();
            return NULL;
        }
        /* Avoid unbounded allocations */
        l = MIN(l, TARGET_PAGE_SIZE);
        bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
        bounce.addr = addr;
        bounce.len = l;

        memory_region_ref(mr);
        bounce.mr = mr;
        if (!is_write) {
            address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
                               bounce.buffer, l);
        }

        rcu_read_unlock();
        *plen = l;
        return bounce.buffer;
    }

    base = xlat;
    raddr = memory_region_get_ram_addr(mr);

    for (;;) {
        len -= l;
        addr += l;
        done += l;
        if (len == 0) {
            break;
        }

        l = len;
        this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (this_mr != mr || xlat != base + done) {
            break;
        }
    }

    memory_region_ref(mr);
    rcu_read_unlock();
    *plen = done;
    ptr = qemu_ram_ptr_length(mr->ram_block, raddr + base, plen);

    return ptr;
}
/* Unmaps a memory region previously mapped by address_space_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         int is_write, hwaddr access_len)
{
    if (buffer != bounce.buffer) {
        MemoryRegion *mr;
        ram_addr_t addr1;

        mr = qemu_ram_addr_from_host(buffer, &addr1);
        assert(mr != NULL);
        if (is_write) {
            invalidate_and_set_dirty(mr, addr1, access_len);
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        memory_region_unref(mr);
        return;
    }
    if (is_write) {
        address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
                            bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    memory_region_unref(bounce.mr);
    atomic_mb_set(&bounce.in_use, false);
    cpu_notify_map_clients();
}
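
/*
 * Typical map/unmap pairing (illustrative sketch; "gpa" and "want" are
 * assumed caller values): map as much of the range as possible, use the host
 * pointer directly, then unmap with the number of bytes actually written:
 *
 *     hwaddr plen = want;
 *     void *host = address_space_map(&address_space_memory, gpa, &plen, true);
 *     if (!host) {
 *         // resources exhausted: register a map client and retry later
 *     } else {
 *         hwaddr done = MIN(plen, want);
 *         memset(host, 0, done);    // stand-in for real device work
 *         address_space_unmap(&address_space_memory, host, plen, true, done);
 *     }
 *
 * Note that *plen may come back smaller than requested, so callers must be
 * prepared to loop over the remainder.
 */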
void *cpu_physical_memory_map(hwaddr addr,
                              hwaddr *plen,
                              int is_write)
{
    return address_space_map(&address_space_memory, addr, plen, is_write);
}

void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                               int is_write, hwaddr access_len)
{
    return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
}
/* warning: addr must be aligned */
static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
                                                  MemTxAttrs attrs,
                                                  MemTxResult *result,
                                                  enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, false);
    if (l < 4 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(mr->ram_block,
                               memory_region_get_ram_addr(mr) + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}
uint32_t address_space_ldl(AddressSpace *as, hwaddr addr,
                           MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_NATIVE_ENDIAN);
}

uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_LITTLE_ENDIAN);
}

uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_BIG_ENDIAN);
}

uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}
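
/*
 * Illustrative use of the fixed-endian loaders (not part of the original
 * file; "ring_base" is an assumed guest-physical address): a device that
 * keeps a little-endian descriptor field in guest RAM can read it back
 * regardless of the target's native byte order:
 *
 *     uint32_t desc = ldl_le_phys(&address_space_memory, ring_base + 4);
 *
 * ldl_phys() uses the target's native endianness, while the _le/_be variants
 * force a byte order; all of them funnel into address_space_ldl_internal().
 */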
/* warning: addr must be aligned */
static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
                                                  MemTxAttrs attrs,
                                                  MemTxResult *result,
                                                  enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 8;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 false);
    if (l < 8 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap64(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap64(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(mr->ram_block,
                               memory_region_get_ram_addr(mr) + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}
uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
                           MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_NATIVE_ENDIAN);
}

uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_LITTLE_ENDIAN);
}

uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_BIG_ENDIAN);
}

uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}
uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
                            MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t val;
    MemTxResult r;

    r = address_space_rw(as, addr, attrs, &val, 1, 0);
    if (result) {
        *result = r;
    }
    return val;
}

uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}
/* warning: addr must be aligned */
static inline uint32_t address_space_lduw_internal(AddressSpace *as,
                                                   hwaddr addr,
                                                   MemTxAttrs attrs,
                                                   MemTxResult *result,
                                                   enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 false);
    if (l < 2 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(mr->ram_block,
                               memory_region_get_ram_addr(mr) + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}
uint32_t address_space_lduw(AddressSpace *as, hwaddr addr,
                            MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_NATIVE_ENDIAN);
}

uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
                               MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_LITTLE_ENDIAN);
}

uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
                               MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_BIG_ENDIAN);
}

uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}
/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
                                MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    uint8_t dirty_log_mask;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

        r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
    } else {
        addr1 += memory_region_get_ram_addr(mr);
        ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
        stl_p(ptr, val);

        dirty_log_mask = memory_region_get_dirty_log_mask(mr);
        dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
        cpu_physical_memory_set_dirty_range(addr1, 4, dirty_log_mask);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}

void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_notdirty(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
/* warning: addr must be aligned */
static inline void address_space_stl_internal(AddressSpace *as,
                                              hwaddr addr, uint32_t val,
                                              MemTxAttrs attrs,
                                              MemTxResult *result,
                                              enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr);
        ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(mr, addr1, 4);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}
void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stl_internal(as, addr, val, attrs, result,
                               DEVICE_NATIVE_ENDIAN);
}

void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stl_internal(as, addr, val, attrs, result,
                               DEVICE_LITTLE_ENDIAN);
}

void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stl_internal(as, addr, val, attrs, result,
                               DEVICE_BIG_ENDIAN);
}

void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t v = val;
    MemTxResult r;

    r = address_space_rw(as, addr, attrs, &v, 1, 1);
    if (result) {
        *result = r;
    }
}

void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
/* warning: addr must be aligned */
static inline void address_space_stw_internal(AddressSpace *as,
                                              hwaddr addr, uint32_t val,
                                              MemTxAttrs attrs,
                                              MemTxResult *result,
                                              enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, true);
    if (l < 2 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr);
        ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(mr, addr1, 2);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}
void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_NATIVE_ENDIAN);
}

void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_LITTLE_ENDIAN);
}

void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_BIG_ENDIAN);
}

void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;
    val = tswap64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;
    val = cpu_to_le64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;
    val = cpu_to_be64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
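
/*
 * Illustrative store counterpart (sketch only; "ring_base", "next_desc_gpa",
 * "mmio_shadow" and "status" are assumed caller values): the same descriptor
 * layout can be updated with an explicit byte order, e.g.
 *
 *     stq_le_phys(&address_space_memory, ring_base + 8, next_desc_gpa);
 *     stl_be_phys(&address_space_memory, mmio_shadow, status);
 *
 * For error reporting, the address_space_st*() forms take MemTxAttrs and an
 * optional MemTxResult pointer instead of assuming MEMTXATTRS_UNSPECIFIED.
 */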
/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

    while (len > 0) {
        int asidx;
        MemTxAttrs attrs;

        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_attrs_debug(cpu, page, &attrs);
        asidx = cpu_asidx_from_attrs(cpu, attrs);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1) {
            return -1;
        }
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len) {
            l = len;
        }
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write) {
            cpu_physical_memory_write_rom(cpu->cpu_ases[asidx].as,
                                          phys_addr, buf, l);
        } else {
            address_space_rw(cpu->cpu_ases[asidx].as, phys_addr,
                             MEMTXATTRS_UNSPECIFIED,
                             buf, l, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
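
/*
 * Illustrative caller (a gdbstub-style sketch, not from this file): reading
 * guest virtual memory for a given CPU goes through this helper so that MMU
 * translation and ROM writes are handled transparently:
 *
 *     uint8_t insn[4];
 *     if (cpu_memory_rw_debug(cpu, pc, insn, sizeof(insn), 0) < 0) {
 *         // page not mapped: report an access error to the debugger
 *     }
 *
 * "pc" stands in for whatever guest virtual address the debugger asked for.
 */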
/*
 * Allows code that needs to deal with migration bitmaps etc to still be built
 * target independent.
 */
size_t qemu_target_page_bits(void)
{
    return TARGET_PAGE_BITS;
}

#endif

/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool target_words_bigendian(void);
bool target_words_bigendian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}
#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegion *mr;
    hwaddr l = 1;
    bool res;

    rcu_read_lock();
    mr = address_space_translate(&address_space_memory,
                                 phys_addr, &phys_addr, &l, false);

    res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
    rcu_read_unlock();
    return res;
}

int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
{
    RAMBlock *block;
    int ret = 0;

    rcu_read_lock();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        ret = func(block->idstr, block->host, block->offset,
                   block->used_length, opaque);