4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 #include <sys/types.h>
25 #include "qemu-common.h"
29 #if !defined(CONFIG_USER_ONLY)
30 #include "hw/boards.h"
33 #include "qemu/osdep.h"
34 #include "sysemu/kvm.h"
35 #include "sysemu/sysemu.h"
36 #include "hw/xen/xen.h"
37 #include "qemu/timer.h"
38 #include "qemu/config-file.h"
39 #include "qemu/error-report.h"
40 #include "exec/memory.h"
41 #include "sysemu/dma.h"
42 #include "exec/address-spaces.h"
43 #if defined(CONFIG_USER_ONLY)
45 #else /* !CONFIG_USER_ONLY */
46 #include "sysemu/xen-mapcache.h"
49 #include "exec/cpu-all.h"
50 #include "qemu/rcu_queue.h"
51 #include "qemu/main-loop.h"
52 #include "translate-all.h"
53 #include "sysemu/replay.h"
55 #include "exec/memory-internal.h"
56 #include "exec/ram_addr.h"
58 #include "qemu/range.h"
60 #include "qemu/mmap-alloc.h"
63 //#define DEBUG_SUBPAGE
65 #if !defined(CONFIG_USER_ONLY)
66 /* ram_list is read under rcu_read_lock()/rcu_read_unlock(). Writes
67 * are protected by the ramlist lock.
69 RAMList ram_list
= { .blocks
= QLIST_HEAD_INITIALIZER(ram_list
.blocks
) };
71 static MemoryRegion
*system_memory
;
72 static MemoryRegion
*system_io
;
74 AddressSpace address_space_io
;
75 AddressSpace address_space_memory
;
77 MemoryRegion io_mem_rom
, io_mem_notdirty
;
78 static MemoryRegion io_mem_unassigned
;
80 /* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
81 #define RAM_PREALLOC (1 << 0)
83 /* RAM is mmap-ed with MAP_SHARED */
84 #define RAM_SHARED (1 << 1)
86 /* Only a portion of RAM (used_length) is actually used, and migrated.
87 * This used_length size can change across reboots.
89 #define RAM_RESIZEABLE (1 << 2)
93 struct CPUTailQ cpus
= QTAILQ_HEAD_INITIALIZER(cpus
);
94 /* current CPU in the current thread. It is only valid inside
96 __thread CPUState
*current_cpu
;
97 /* 0 = Do not count executed instructions.
98 1 = Precise instruction counting.
99 2 = Adaptive rate instruction counting. */
102 #if !defined(CONFIG_USER_ONLY)
104 typedef struct PhysPageEntry PhysPageEntry
;
106 struct PhysPageEntry
{
107 /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */
109 /* index into phys_sections (!skip) or phys_map_nodes (skip) */
113 #define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)
115 /* Size of the L2 (and L3, etc) page tables. */
116 #define ADDR_SPACE_BITS 64
119 #define P_L2_SIZE (1 << P_L2_BITS)
121 #define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
123 typedef PhysPageEntry Node
[P_L2_SIZE
];
125 typedef struct PhysPageMap
{
128 unsigned sections_nb
;
129 unsigned sections_nb_alloc
;
131 unsigned nodes_nb_alloc
;
133 MemoryRegionSection
*sections
;
136 struct AddressSpaceDispatch
{
139 /* This is a multi-level map on the physical address space.
140 * The bottom level has pointers to MemoryRegionSections.
142 PhysPageEntry phys_map
;
147 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
148 typedef struct subpage_t
{
152 uint16_t sub_section
[TARGET_PAGE_SIZE
];
155 #define PHYS_SECTION_UNASSIGNED 0
156 #define PHYS_SECTION_NOTDIRTY 1
157 #define PHYS_SECTION_ROM 2
158 #define PHYS_SECTION_WATCH 3
160 static void io_mem_init(void);
161 static void memory_map_init(void);
162 static void tcg_commit(MemoryListener
*listener
);
164 static MemoryRegion io_mem_watch
;
167 * CPUAddressSpace: all the information a CPU needs about an AddressSpace
168 * @cpu: the CPU whose AddressSpace this is
169 * @as: the AddressSpace itself
170 * @memory_dispatch: its dispatch pointer (cached, RCU protected)
171 * @tcg_as_listener: listener for tracking changes to the AddressSpace
173 struct CPUAddressSpace
{
176 struct AddressSpaceDispatch
*memory_dispatch
;
177 MemoryListener tcg_as_listener
;
182 #if !defined(CONFIG_USER_ONLY)
184 static void phys_map_node_reserve(PhysPageMap
*map
, unsigned nodes
)
186 if (map
->nodes_nb
+ nodes
> map
->nodes_nb_alloc
) {
187 map
->nodes_nb_alloc
= MAX(map
->nodes_nb_alloc
* 2, 16);
188 map
->nodes_nb_alloc
= MAX(map
->nodes_nb_alloc
, map
->nodes_nb
+ nodes
);
189 map
->nodes
= g_renew(Node
, map
->nodes
, map
->nodes_nb_alloc
);
193 static uint32_t phys_map_node_alloc(PhysPageMap
*map
, bool leaf
)
200 ret
= map
->nodes_nb
++;
202 assert(ret
!= PHYS_MAP_NODE_NIL
);
203 assert(ret
!= map
->nodes_nb_alloc
);
205 e
.skip
= leaf
? 0 : 1;
206 e
.ptr
= leaf
? PHYS_SECTION_UNASSIGNED
: PHYS_MAP_NODE_NIL
;
207 for (i
= 0; i
< P_L2_SIZE
; ++i
) {
208 memcpy(&p
[i
], &e
, sizeof(e
));
213 static void phys_page_set_level(PhysPageMap
*map
, PhysPageEntry
*lp
,
214 hwaddr
*index
, hwaddr
*nb
, uint16_t leaf
,
218 hwaddr step
= (hwaddr
)1 << (level
* P_L2_BITS
);
220 if (lp
->skip
&& lp
->ptr
== PHYS_MAP_NODE_NIL
) {
221 lp
->ptr
= phys_map_node_alloc(map
, level
== 0);
223 p
= map
->nodes
[lp
->ptr
];
224 lp
= &p
[(*index
>> (level
* P_L2_BITS
)) & (P_L2_SIZE
- 1)];
226 while (*nb
&& lp
< &p
[P_L2_SIZE
]) {
227 if ((*index
& (step
- 1)) == 0 && *nb
>= step
) {
233 phys_page_set_level(map
, lp
, index
, nb
, leaf
, level
- 1);
239 static void phys_page_set(AddressSpaceDispatch
*d
,
240 hwaddr index
, hwaddr nb
,
243 /* Wildly overreserve - it doesn't matter much. */
244 phys_map_node_reserve(&d
->map
, 3 * P_L2_LEVELS
);
246 phys_page_set_level(&d
->map
, &d
->phys_map
, &index
, &nb
, leaf
, P_L2_LEVELS
- 1);
249 /* Compact a non leaf page entry. Simply detect that the entry has a single child,
250 * and update our entry so we can skip it and go directly to the destination.
252 static void phys_page_compact(PhysPageEntry
*lp
, Node
*nodes
, unsigned long *compacted
)
254 unsigned valid_ptr
= P_L2_SIZE
;
259 if (lp
->ptr
== PHYS_MAP_NODE_NIL
) {
264 for (i
= 0; i
< P_L2_SIZE
; i
++) {
265 if (p
[i
].ptr
== PHYS_MAP_NODE_NIL
) {
272 phys_page_compact(&p
[i
], nodes
, compacted
);
276 /* We can only compress if there's only one child. */
281 assert(valid_ptr
< P_L2_SIZE
);
283 /* Don't compress if it won't fit in the # of bits we have. */
284 if (lp
->skip
+ p
[valid_ptr
].skip
>= (1 << 3)) {
288 lp
->ptr
= p
[valid_ptr
].ptr
;
289 if (!p
[valid_ptr
].skip
) {
290 /* If our only child is a leaf, make this a leaf. */
291 /* By design, we should have made this node a leaf to begin with so we
292 * should never reach here.
293 * But since it's so simple to handle this, let's do it just in case we
298 lp
->skip
+= p
[valid_ptr
].skip
;
302 static void phys_page_compact_all(AddressSpaceDispatch
*d
, int nodes_nb
)
304 DECLARE_BITMAP(compacted
, nodes_nb
);
306 if (d
->phys_map
.skip
) {
307 phys_page_compact(&d
->phys_map
, d
->map
.nodes
, compacted
);
311 static MemoryRegionSection
*phys_page_find(PhysPageEntry lp
, hwaddr addr
,
312 Node
*nodes
, MemoryRegionSection
*sections
)
315 hwaddr index
= addr
>> TARGET_PAGE_BITS
;
318 for (i
= P_L2_LEVELS
; lp
.skip
&& (i
-= lp
.skip
) >= 0;) {
319 if (lp
.ptr
== PHYS_MAP_NODE_NIL
) {
320 return §ions
[PHYS_SECTION_UNASSIGNED
];
323 lp
= p
[(index
>> (i
* P_L2_BITS
)) & (P_L2_SIZE
- 1)];
326 if (sections
[lp
.ptr
].size
.hi
||
327 range_covers_byte(sections
[lp
.ptr
].offset_within_address_space
,
328 sections
[lp
.ptr
].size
.lo
, addr
)) {
329 return §ions
[lp
.ptr
];
331 return §ions
[PHYS_SECTION_UNASSIGNED
];
335 bool memory_region_is_unassigned(MemoryRegion
*mr
)
337 return mr
!= &io_mem_rom
&& mr
!= &io_mem_notdirty
&& !mr
->rom_device
338 && mr
!= &io_mem_watch
;
341 /* Called from RCU critical section */
342 static MemoryRegionSection
*address_space_lookup_region(AddressSpaceDispatch
*d
,
344 bool resolve_subpage
)
346 MemoryRegionSection
*section
;
349 section
= phys_page_find(d
->phys_map
, addr
, d
->map
.nodes
, d
->map
.sections
);
350 if (resolve_subpage
&& section
->mr
->subpage
) {
351 subpage
= container_of(section
->mr
, subpage_t
, iomem
);
352 section
= &d
->map
.sections
[subpage
->sub_section
[SUBPAGE_IDX(addr
)]];
357 /* Called from RCU critical section */
358 static MemoryRegionSection
*
359 address_space_translate_internal(AddressSpaceDispatch
*d
, hwaddr addr
, hwaddr
*xlat
,
360 hwaddr
*plen
, bool resolve_subpage
)
362 MemoryRegionSection
*section
;
366 section
= address_space_lookup_region(d
, addr
, resolve_subpage
);
367 /* Compute offset within MemoryRegionSection */
368 addr
-= section
->offset_within_address_space
;
370 /* Compute offset within MemoryRegion */
371 *xlat
= addr
+ section
->offset_within_region
;
375 /* MMIO registers can be expected to perform full-width accesses based only
376 * on their address, without considering adjacent registers that could
377 * decode to completely different MemoryRegions. When such registers
378 * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
379 * regions overlap wildly. For this reason we cannot clamp the accesses
382 * If the length is small (as is the case for address_space_ldl/stl),
383 * everything works fine. If the incoming length is large, however,
384 * the caller really has to do the clamping through memory_access_size.
386 if (memory_region_is_ram(mr
)) {
387 diff
= int128_sub(section
->size
, int128_make64(addr
));
388 *plen
= int128_get64(int128_min(diff
, int128_make64(*plen
)));
393 /* Called from RCU critical section */
394 MemoryRegion
*address_space_translate(AddressSpace
*as
, hwaddr addr
,
395 hwaddr
*xlat
, hwaddr
*plen
,
399 MemoryRegionSection
*section
;
403 AddressSpaceDispatch
*d
= atomic_rcu_read(&as
->dispatch
);
404 section
= address_space_translate_internal(d
, addr
, &addr
, plen
, true);
407 if (!mr
->iommu_ops
) {
411 iotlb
= mr
->iommu_ops
->translate(mr
, addr
, is_write
);
412 addr
= ((iotlb
.translated_addr
& ~iotlb
.addr_mask
)
413 | (addr
& iotlb
.addr_mask
));
414 *plen
= MIN(*plen
, (addr
| iotlb
.addr_mask
) - addr
+ 1);
415 if (!(iotlb
.perm
& (1 << is_write
))) {
416 mr
= &io_mem_unassigned
;
420 as
= iotlb
.target_as
;
423 if (xen_enabled() && memory_access_is_direct(mr
, is_write
)) {
424 hwaddr page
= ((addr
& TARGET_PAGE_MASK
) + TARGET_PAGE_SIZE
) - addr
;
425 *plen
= MIN(page
, *plen
);
432 /* Called from RCU critical section */
433 MemoryRegionSection
*
434 address_space_translate_for_iotlb(CPUState
*cpu
, int asidx
, hwaddr addr
,
435 hwaddr
*xlat
, hwaddr
*plen
)
437 MemoryRegionSection
*section
;
438 AddressSpaceDispatch
*d
= cpu
->cpu_ases
[asidx
].memory_dispatch
;
440 section
= address_space_translate_internal(d
, addr
, xlat
, plen
, false);
442 assert(!section
->mr
->iommu_ops
);
447 #if !defined(CONFIG_USER_ONLY)
449 static int cpu_common_post_load(void *opaque
, int version_id
)
451 CPUState
*cpu
= opaque
;
453 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
454 version_id is increased. */
455 cpu
->interrupt_request
&= ~0x01;
461 static int cpu_common_pre_load(void *opaque
)
463 CPUState
*cpu
= opaque
;
465 cpu
->exception_index
= -1;
470 static bool cpu_common_exception_index_needed(void *opaque
)
472 CPUState
*cpu
= opaque
;
474 return tcg_enabled() && cpu
->exception_index
!= -1;
477 static const VMStateDescription vmstate_cpu_common_exception_index
= {
478 .name
= "cpu_common/exception_index",
480 .minimum_version_id
= 1,
481 .needed
= cpu_common_exception_index_needed
,
482 .fields
= (VMStateField
[]) {
483 VMSTATE_INT32(exception_index
, CPUState
),
484 VMSTATE_END_OF_LIST()
488 static bool cpu_common_crash_occurred_needed(void *opaque
)
490 CPUState
*cpu
= opaque
;
492 return cpu
->crash_occurred
;
495 static const VMStateDescription vmstate_cpu_common_crash_occurred
= {
496 .name
= "cpu_common/crash_occurred",
498 .minimum_version_id
= 1,
499 .needed
= cpu_common_crash_occurred_needed
,
500 .fields
= (VMStateField
[]) {
501 VMSTATE_BOOL(crash_occurred
, CPUState
),
502 VMSTATE_END_OF_LIST()
506 const VMStateDescription vmstate_cpu_common
= {
507 .name
= "cpu_common",
509 .minimum_version_id
= 1,
510 .pre_load
= cpu_common_pre_load
,
511 .post_load
= cpu_common_post_load
,
512 .fields
= (VMStateField
[]) {
513 VMSTATE_UINT32(halted
, CPUState
),
514 VMSTATE_UINT32(interrupt_request
, CPUState
),
515 VMSTATE_END_OF_LIST()
517 .subsections
= (const VMStateDescription
*[]) {
518 &vmstate_cpu_common_exception_index
,
519 &vmstate_cpu_common_crash_occurred
,
526 CPUState
*qemu_get_cpu(int index
)
531 if (cpu
->cpu_index
== index
) {
539 #if !defined(CONFIG_USER_ONLY)
540 void cpu_address_space_init(CPUState
*cpu
, AddressSpace
*as
, int asidx
)
542 CPUAddressSpace
*newas
;
544 /* Target code should have set num_ases before calling us */
545 assert(asidx
< cpu
->num_ases
);
548 /* address space 0 gets the convenience alias */
552 /* KVM cannot currently support multiple address spaces. */
553 assert(asidx
== 0 || !kvm_enabled());
555 if (!cpu
->cpu_ases
) {
556 cpu
->cpu_ases
= g_new0(CPUAddressSpace
, cpu
->num_ases
);
559 newas
= &cpu
->cpu_ases
[asidx
];
563 newas
->tcg_as_listener
.commit
= tcg_commit
;
564 memory_listener_register(&newas
->tcg_as_listener
, as
);
568 AddressSpace
*cpu_get_address_space(CPUState
*cpu
, int asidx
)
570 /* Return the AddressSpace corresponding to the specified index */
571 return cpu
->cpu_ases
[asidx
].as
;
575 #ifndef CONFIG_USER_ONLY
576 static DECLARE_BITMAP(cpu_index_map
, MAX_CPUMASK_BITS
);
578 static int cpu_get_free_index(Error
**errp
)
580 int cpu
= find_first_zero_bit(cpu_index_map
, MAX_CPUMASK_BITS
);
582 if (cpu
>= MAX_CPUMASK_BITS
) {
583 error_setg(errp
, "Trying to use more CPUs than max of %d",
588 bitmap_set(cpu_index_map
, cpu
, 1);
592 void cpu_exec_exit(CPUState
*cpu
)
594 if (cpu
->cpu_index
== -1) {
595 /* cpu_index was never allocated by this @cpu or was already freed. */
599 bitmap_clear(cpu_index_map
, cpu
->cpu_index
, 1);
604 static int cpu_get_free_index(Error
**errp
)
609 CPU_FOREACH(some_cpu
) {
615 void cpu_exec_exit(CPUState
*cpu
)
620 void cpu_exec_init(CPUState
*cpu
, Error
**errp
)
622 CPUClass
*cc
= CPU_GET_CLASS(cpu
);
624 Error
*local_err
= NULL
;
629 #ifndef CONFIG_USER_ONLY
630 cpu
->thread_id
= qemu_get_thread_id();
632 /* This is a softmmu CPU object, so create a property for it
633 * so users can wire up its memory. (This can't go in qom/cpu.c
634 * because that file is compiled only once for both user-mode
635 * and system builds.) The default if no link is set up is to use
636 * the system address space.
638 object_property_add_link(OBJECT(cpu
), "memory", TYPE_MEMORY_REGION
,
639 (Object
**)&cpu
->memory
,
640 qdev_prop_allow_set_link_before_realize
,
641 OBJ_PROP_LINK_UNREF_ON_RELEASE
,
643 cpu
->memory
= system_memory
;
644 object_ref(OBJECT(cpu
->memory
));
647 #if defined(CONFIG_USER_ONLY)
650 cpu_index
= cpu
->cpu_index
= cpu_get_free_index(&local_err
);
652 error_propagate(errp
, local_err
);
653 #if defined(CONFIG_USER_ONLY)
658 QTAILQ_INSERT_TAIL(&cpus
, cpu
, node
);
659 #if defined(CONFIG_USER_ONLY)
662 if (qdev_get_vmsd(DEVICE(cpu
)) == NULL
) {
663 vmstate_register(NULL
, cpu_index
, &vmstate_cpu_common
, cpu
);
665 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
666 register_savevm(NULL
, "cpu", cpu_index
, CPU_SAVE_VERSION
,
667 cpu_save
, cpu_load
, cpu
->env_ptr
);
668 assert(cc
->vmsd
== NULL
);
669 assert(qdev_get_vmsd(DEVICE(cpu
)) == NULL
);
671 if (cc
->vmsd
!= NULL
) {
672 vmstate_register(NULL
, cpu_index
, cc
->vmsd
, cpu
);
676 #if defined(CONFIG_USER_ONLY)
677 static void breakpoint_invalidate(CPUState
*cpu
, target_ulong pc
)
679 tb_invalidate_phys_page_range(pc
, pc
+ 1, 0);
682 static void breakpoint_invalidate(CPUState
*cpu
, target_ulong pc
)
685 hwaddr phys
= cpu_get_phys_page_attrs_debug(cpu
, pc
, &attrs
);
686 int asidx
= cpu_asidx_from_attrs(cpu
, attrs
);
688 tb_invalidate_phys_addr(cpu
->cpu_ases
[asidx
].as
,
689 phys
| (pc
& ~TARGET_PAGE_MASK
));
694 #if defined(CONFIG_USER_ONLY)
695 void cpu_watchpoint_remove_all(CPUState
*cpu
, int mask
)
700 int cpu_watchpoint_remove(CPUState
*cpu
, vaddr addr
, vaddr len
,
706 void cpu_watchpoint_remove_by_ref(CPUState
*cpu
, CPUWatchpoint
*watchpoint
)
710 int cpu_watchpoint_insert(CPUState
*cpu
, vaddr addr
, vaddr len
,
711 int flags
, CPUWatchpoint
**watchpoint
)
716 /* Add a watchpoint. */
717 int cpu_watchpoint_insert(CPUState
*cpu
, vaddr addr
, vaddr len
,
718 int flags
, CPUWatchpoint
**watchpoint
)
722 /* forbid ranges which are empty or run off the end of the address space */
723 if (len
== 0 || (addr
+ len
- 1) < addr
) {
724 error_report("tried to set invalid watchpoint at %"
725 VADDR_PRIx
", len=%" VADDR_PRIu
, addr
, len
);
728 wp
= g_malloc(sizeof(*wp
));
734 /* keep all GDB-injected watchpoints in front */
735 if (flags
& BP_GDB
) {
736 QTAILQ_INSERT_HEAD(&cpu
->watchpoints
, wp
, entry
);
738 QTAILQ_INSERT_TAIL(&cpu
->watchpoints
, wp
, entry
);
741 tlb_flush_page(cpu
, addr
);
748 /* Remove a specific watchpoint. */
749 int cpu_watchpoint_remove(CPUState
*cpu
, vaddr addr
, vaddr len
,
754 QTAILQ_FOREACH(wp
, &cpu
->watchpoints
, entry
) {
755 if (addr
== wp
->vaddr
&& len
== wp
->len
756 && flags
== (wp
->flags
& ~BP_WATCHPOINT_HIT
)) {
757 cpu_watchpoint_remove_by_ref(cpu
, wp
);
764 /* Remove a specific watchpoint by reference. */
765 void cpu_watchpoint_remove_by_ref(CPUState
*cpu
, CPUWatchpoint
*watchpoint
)
767 QTAILQ_REMOVE(&cpu
->watchpoints
, watchpoint
, entry
);
769 tlb_flush_page(cpu
, watchpoint
->vaddr
);
774 /* Remove all matching watchpoints. */
775 void cpu_watchpoint_remove_all(CPUState
*cpu
, int mask
)
777 CPUWatchpoint
*wp
, *next
;
779 QTAILQ_FOREACH_SAFE(wp
, &cpu
->watchpoints
, entry
, next
) {
780 if (wp
->flags
& mask
) {
781 cpu_watchpoint_remove_by_ref(cpu
, wp
);
786 /* Return true if this watchpoint address matches the specified
787 * access (ie the address range covered by the watchpoint overlaps
788 * partially or completely with the address range covered by the
791 static inline bool cpu_watchpoint_address_matches(CPUWatchpoint
*wp
,
795 /* We know the lengths are non-zero, but a little caution is
796 * required to avoid errors in the case where the range ends
797 * exactly at the top of the address space and so addr + len
798 * wraps round to zero.
800 vaddr wpend
= wp
->vaddr
+ wp
->len
- 1;
801 vaddr addrend
= addr
+ len
- 1;
803 return !(addr
> wpend
|| wp
->vaddr
> addrend
);
808 /* Add a breakpoint. */
809 int cpu_breakpoint_insert(CPUState
*cpu
, vaddr pc
, int flags
,
810 CPUBreakpoint
**breakpoint
)
814 bp
= g_malloc(sizeof(*bp
));
819 /* keep all GDB-injected breakpoints in front */
820 if (flags
& BP_GDB
) {
821 QTAILQ_INSERT_HEAD(&cpu
->breakpoints
, bp
, entry
);
823 QTAILQ_INSERT_TAIL(&cpu
->breakpoints
, bp
, entry
);
826 breakpoint_invalidate(cpu
, pc
);
834 /* Remove a specific breakpoint. */
835 int cpu_breakpoint_remove(CPUState
*cpu
, vaddr pc
, int flags
)
839 QTAILQ_FOREACH(bp
, &cpu
->breakpoints
, entry
) {
840 if (bp
->pc
== pc
&& bp
->flags
== flags
) {
841 cpu_breakpoint_remove_by_ref(cpu
, bp
);
848 /* Remove a specific breakpoint by reference. */
849 void cpu_breakpoint_remove_by_ref(CPUState
*cpu
, CPUBreakpoint
*breakpoint
)
851 QTAILQ_REMOVE(&cpu
->breakpoints
, breakpoint
, entry
);
853 breakpoint_invalidate(cpu
, breakpoint
->pc
);
858 /* Remove all matching breakpoints. */
859 void cpu_breakpoint_remove_all(CPUState
*cpu
, int mask
)
861 CPUBreakpoint
*bp
, *next
;
863 QTAILQ_FOREACH_SAFE(bp
, &cpu
->breakpoints
, entry
, next
) {
864 if (bp
->flags
& mask
) {
865 cpu_breakpoint_remove_by_ref(cpu
, bp
);
870 /* enable or disable single step mode. EXCP_DEBUG is returned by the
871 CPU loop after each instruction */
872 void cpu_single_step(CPUState
*cpu
, int enabled
)
874 if (cpu
->singlestep_enabled
!= enabled
) {
875 cpu
->singlestep_enabled
= enabled
;
877 kvm_update_guest_debug(cpu
, 0);
879 /* must flush all the translated code to avoid inconsistencies */
880 /* XXX: only flush what is necessary */
886 void cpu_abort(CPUState
*cpu
, const char *fmt
, ...)
893 fprintf(stderr
, "qemu: fatal: ");
894 vfprintf(stderr
, fmt
, ap
);
895 fprintf(stderr
, "\n");
896 cpu_dump_state(cpu
, stderr
, fprintf
, CPU_DUMP_FPU
| CPU_DUMP_CCOP
);
897 if (qemu_log_separate()) {
898 qemu_log("qemu: fatal: ");
899 qemu_log_vprintf(fmt
, ap2
);
901 log_cpu_state(cpu
, CPU_DUMP_FPU
| CPU_DUMP_CCOP
);
908 #if defined(CONFIG_USER_ONLY)
910 struct sigaction act
;
911 sigfillset(&act
.sa_mask
);
912 act
.sa_handler
= SIG_DFL
;
913 sigaction(SIGABRT
, &act
, NULL
);
919 #if !defined(CONFIG_USER_ONLY)
920 /* Called from RCU critical section */
921 static RAMBlock
*qemu_get_ram_block(ram_addr_t addr
)
925 block
= atomic_rcu_read(&ram_list
.mru_block
);
926 if (block
&& addr
- block
->offset
< block
->max_length
) {
929 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
930 if (addr
- block
->offset
< block
->max_length
) {
935 fprintf(stderr
, "Bad ram offset %" PRIx64
"\n", (uint64_t)addr
);
939 /* It is safe to write mru_block outside the iothread lock. This
944 * xxx removed from list
948 * call_rcu(reclaim_ramblock, xxx);
951 * atomic_rcu_set is not needed here. The block was already published
952 * when it was placed into the list. Here we're just making an extra
953 * copy of the pointer.
955 ram_list
.mru_block
= block
;
959 static void tlb_reset_dirty_range_all(ram_addr_t start
, ram_addr_t length
)
966 end
= TARGET_PAGE_ALIGN(start
+ length
);
967 start
&= TARGET_PAGE_MASK
;
970 block
= qemu_get_ram_block(start
);
971 assert(block
== qemu_get_ram_block(end
- 1));
972 start1
= (uintptr_t)ramblock_ptr(block
, start
- block
->offset
);
974 tlb_reset_dirty(cpu
, start1
, length
);
979 /* Note: start and end must be within the same ram block. */
980 bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start
,
984 unsigned long end
, page
;
991 end
= TARGET_PAGE_ALIGN(start
+ length
) >> TARGET_PAGE_BITS
;
992 page
= start
>> TARGET_PAGE_BITS
;
993 dirty
= bitmap_test_and_clear_atomic(ram_list
.dirty_memory
[client
],
996 if (dirty
&& tcg_enabled()) {
997 tlb_reset_dirty_range_all(start
, length
);
1003 /* Called from RCU critical section */
1004 hwaddr
memory_region_section_get_iotlb(CPUState
*cpu
,
1005 MemoryRegionSection
*section
,
1007 hwaddr paddr
, hwaddr xlat
,
1009 target_ulong
*address
)
1014 if (memory_region_is_ram(section
->mr
)) {
1016 iotlb
= (memory_region_get_ram_addr(section
->mr
) & TARGET_PAGE_MASK
)
1018 if (!section
->readonly
) {
1019 iotlb
|= PHYS_SECTION_NOTDIRTY
;
1021 iotlb
|= PHYS_SECTION_ROM
;
1024 AddressSpaceDispatch
*d
;
1026 d
= atomic_rcu_read(§ion
->address_space
->dispatch
);
1027 iotlb
= section
- d
->map
.sections
;
1031 /* Make accesses to pages with watchpoints go via the
1032 watchpoint trap routines. */
1033 QTAILQ_FOREACH(wp
, &cpu
->watchpoints
, entry
) {
1034 if (cpu_watchpoint_address_matches(wp
, vaddr
, TARGET_PAGE_SIZE
)) {
1035 /* Avoid trapping reads of pages with a write breakpoint. */
1036 if ((prot
& PAGE_WRITE
) || (wp
->flags
& BP_MEM_READ
)) {
1037 iotlb
= PHYS_SECTION_WATCH
+ paddr
;
1038 *address
|= TLB_MMIO
;
1046 #endif /* defined(CONFIG_USER_ONLY) */
1048 #if !defined(CONFIG_USER_ONLY)
1050 static int subpage_register (subpage_t
*mmio
, uint32_t start
, uint32_t end
,
1052 static subpage_t
*subpage_init(AddressSpace
*as
, hwaddr base
);
1054 static void *(*phys_mem_alloc
)(size_t size
, uint64_t *align
) =
1055 qemu_anon_ram_alloc
;
1058 * Set a custom physical guest memory alloator.
1059 * Accelerators with unusual needs may need this. Hopefully, we can
1060 * get rid of it eventually.
1062 void phys_mem_set_alloc(void *(*alloc
)(size_t, uint64_t *align
))
1064 phys_mem_alloc
= alloc
;
1067 static uint16_t phys_section_add(PhysPageMap
*map
,
1068 MemoryRegionSection
*section
)
1070 /* The physical section number is ORed with a page-aligned
1071 * pointer to produce the iotlb entries. Thus it should
1072 * never overflow into the page-aligned value.
1074 assert(map
->sections_nb
< TARGET_PAGE_SIZE
);
1076 if (map
->sections_nb
== map
->sections_nb_alloc
) {
1077 map
->sections_nb_alloc
= MAX(map
->sections_nb_alloc
* 2, 16);
1078 map
->sections
= g_renew(MemoryRegionSection
, map
->sections
,
1079 map
->sections_nb_alloc
);
1081 map
->sections
[map
->sections_nb
] = *section
;
1082 memory_region_ref(section
->mr
);
1083 return map
->sections_nb
++;
1086 static void phys_section_destroy(MemoryRegion
*mr
)
1088 bool have_sub_page
= mr
->subpage
;
1090 memory_region_unref(mr
);
1092 if (have_sub_page
) {
1093 subpage_t
*subpage
= container_of(mr
, subpage_t
, iomem
);
1094 object_unref(OBJECT(&subpage
->iomem
));
1099 static void phys_sections_free(PhysPageMap
*map
)
1101 while (map
->sections_nb
> 0) {
1102 MemoryRegionSection
*section
= &map
->sections
[--map
->sections_nb
];
1103 phys_section_destroy(section
->mr
);
1105 g_free(map
->sections
);
1109 static void register_subpage(AddressSpaceDispatch
*d
, MemoryRegionSection
*section
)
1112 hwaddr base
= section
->offset_within_address_space
1114 MemoryRegionSection
*existing
= phys_page_find(d
->phys_map
, base
,
1115 d
->map
.nodes
, d
->map
.sections
);
1116 MemoryRegionSection subsection
= {
1117 .offset_within_address_space
= base
,
1118 .size
= int128_make64(TARGET_PAGE_SIZE
),
1122 assert(existing
->mr
->subpage
|| existing
->mr
== &io_mem_unassigned
);
1124 if (!(existing
->mr
->subpage
)) {
1125 subpage
= subpage_init(d
->as
, base
);
1126 subsection
.address_space
= d
->as
;
1127 subsection
.mr
= &subpage
->iomem
;
1128 phys_page_set(d
, base
>> TARGET_PAGE_BITS
, 1,
1129 phys_section_add(&d
->map
, &subsection
));
1131 subpage
= container_of(existing
->mr
, subpage_t
, iomem
);
1133 start
= section
->offset_within_address_space
& ~TARGET_PAGE_MASK
;
1134 end
= start
+ int128_get64(section
->size
) - 1;
1135 subpage_register(subpage
, start
, end
,
1136 phys_section_add(&d
->map
, section
));
1140 static void register_multipage(AddressSpaceDispatch
*d
,
1141 MemoryRegionSection
*section
)
1143 hwaddr start_addr
= section
->offset_within_address_space
;
1144 uint16_t section_index
= phys_section_add(&d
->map
, section
);
1145 uint64_t num_pages
= int128_get64(int128_rshift(section
->size
,
1149 phys_page_set(d
, start_addr
>> TARGET_PAGE_BITS
, num_pages
, section_index
);
1152 static void mem_add(MemoryListener
*listener
, MemoryRegionSection
*section
)
1154 AddressSpace
*as
= container_of(listener
, AddressSpace
, dispatch_listener
);
1155 AddressSpaceDispatch
*d
= as
->next_dispatch
;
1156 MemoryRegionSection now
= *section
, remain
= *section
;
1157 Int128 page_size
= int128_make64(TARGET_PAGE_SIZE
);
1159 if (now
.offset_within_address_space
& ~TARGET_PAGE_MASK
) {
1160 uint64_t left
= TARGET_PAGE_ALIGN(now
.offset_within_address_space
)
1161 - now
.offset_within_address_space
;
1163 now
.size
= int128_min(int128_make64(left
), now
.size
);
1164 register_subpage(d
, &now
);
1166 now
.size
= int128_zero();
1168 while (int128_ne(remain
.size
, now
.size
)) {
1169 remain
.size
= int128_sub(remain
.size
, now
.size
);
1170 remain
.offset_within_address_space
+= int128_get64(now
.size
);
1171 remain
.offset_within_region
+= int128_get64(now
.size
);
1173 if (int128_lt(remain
.size
, page_size
)) {
1174 register_subpage(d
, &now
);
1175 } else if (remain
.offset_within_address_space
& ~TARGET_PAGE_MASK
) {
1176 now
.size
= page_size
;
1177 register_subpage(d
, &now
);
1179 now
.size
= int128_and(now
.size
, int128_neg(page_size
));
1180 register_multipage(d
, &now
);
1185 void qemu_flush_coalesced_mmio_buffer(void)
1188 kvm_flush_coalesced_mmio_buffer();
1191 void qemu_mutex_lock_ramlist(void)
1193 qemu_mutex_lock(&ram_list
.mutex
);
1196 void qemu_mutex_unlock_ramlist(void)
1198 qemu_mutex_unlock(&ram_list
.mutex
);
1203 #include <sys/vfs.h>
1205 #define HUGETLBFS_MAGIC 0x958458f6
1207 static long gethugepagesize(const char *path
, Error
**errp
)
1213 ret
= statfs(path
, &fs
);
1214 } while (ret
!= 0 && errno
== EINTR
);
1217 error_setg_errno(errp
, errno
, "failed to get page size of file %s",
1225 static void *file_ram_alloc(RAMBlock
*block
,
1232 char *sanitized_name
;
1237 Error
*local_err
= NULL
;
1239 hpagesize
= gethugepagesize(path
, &local_err
);
1241 error_propagate(errp
, local_err
);
1244 block
->mr
->align
= hpagesize
;
1246 if (memory
< hpagesize
) {
1247 error_setg(errp
, "memory size 0x" RAM_ADDR_FMT
" must be equal to "
1248 "or larger than huge page size 0x%" PRIx64
,
1253 if (kvm_enabled() && !kvm_has_sync_mmu()) {
1255 "host lacks kvm mmu notifiers, -mem-path unsupported");
1259 if (!stat(path
, &st
) && S_ISDIR(st
.st_mode
)) {
1260 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
1261 sanitized_name
= g_strdup(memory_region_name(block
->mr
));
1262 for (c
= sanitized_name
; *c
!= '\0'; c
++) {
1268 filename
= g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path
,
1270 g_free(sanitized_name
);
1272 fd
= mkstemp(filename
);
1278 fd
= open(path
, O_RDWR
| O_CREAT
, 0644);
1282 error_setg_errno(errp
, errno
,
1283 "unable to create backing store for hugepages");
1287 memory
= ROUND_UP(memory
, hpagesize
);
1290 * ftruncate is not supported by hugetlbfs in older
1291 * hosts, so don't bother bailing out on errors.
1292 * If anything goes wrong with it under other filesystems,
1295 if (ftruncate(fd
, memory
)) {
1296 perror("ftruncate");
1299 area
= qemu_ram_mmap(fd
, memory
, hpagesize
, block
->flags
& RAM_SHARED
);
1300 if (area
== MAP_FAILED
) {
1301 error_setg_errno(errp
, errno
,
1302 "unable to map backing store for hugepages");
1308 os_mem_prealloc(fd
, area
, memory
);
1319 /* Called with the ramlist lock held. */
1320 static ram_addr_t
find_ram_offset(ram_addr_t size
)
1322 RAMBlock
*block
, *next_block
;
1323 ram_addr_t offset
= RAM_ADDR_MAX
, mingap
= RAM_ADDR_MAX
;
1325 assert(size
!= 0); /* it would hand out same offset multiple times */
1327 if (QLIST_EMPTY_RCU(&ram_list
.blocks
)) {
1331 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1332 ram_addr_t end
, next
= RAM_ADDR_MAX
;
1334 end
= block
->offset
+ block
->max_length
;
1336 QLIST_FOREACH_RCU(next_block
, &ram_list
.blocks
, next
) {
1337 if (next_block
->offset
>= end
) {
1338 next
= MIN(next
, next_block
->offset
);
1341 if (next
- end
>= size
&& next
- end
< mingap
) {
1343 mingap
= next
- end
;
1347 if (offset
== RAM_ADDR_MAX
) {
1348 fprintf(stderr
, "Failed to find gap of requested size: %" PRIu64
"\n",
1356 ram_addr_t
last_ram_offset(void)
1359 ram_addr_t last
= 0;
1362 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1363 last
= MAX(last
, block
->offset
+ block
->max_length
);
1369 static void qemu_ram_setup_dump(void *addr
, ram_addr_t size
)
1373 /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
1374 if (!machine_dump_guest_core(current_machine
)) {
1375 ret
= qemu_madvise(addr
, size
, QEMU_MADV_DONTDUMP
);
1377 perror("qemu_madvise");
1378 fprintf(stderr
, "madvise doesn't support MADV_DONTDUMP, "
1379 "but dump_guest_core=off specified\n");
1384 /* Called within an RCU critical section, or while the ramlist lock
1387 static RAMBlock
*find_ram_block(ram_addr_t addr
)
1391 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1392 if (block
->offset
== addr
) {
1400 const char *qemu_ram_get_idstr(RAMBlock
*rb
)
1405 /* Called with iothread lock held. */
1406 void qemu_ram_set_idstr(ram_addr_t addr
, const char *name
, DeviceState
*dev
)
1408 RAMBlock
*new_block
, *block
;
1411 new_block
= find_ram_block(addr
);
1413 assert(!new_block
->idstr
[0]);
1416 char *id
= qdev_get_dev_path(dev
);
1418 snprintf(new_block
->idstr
, sizeof(new_block
->idstr
), "%s/", id
);
1422 pstrcat(new_block
->idstr
, sizeof(new_block
->idstr
), name
);
1424 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1425 if (block
!= new_block
&& !strcmp(block
->idstr
, new_block
->idstr
)) {
1426 fprintf(stderr
, "RAMBlock \"%s\" already registered, abort!\n",
1434 /* Called with iothread lock held. */
1435 void qemu_ram_unset_idstr(ram_addr_t addr
)
1439 /* FIXME: arch_init.c assumes that this is not called throughout
1440 * migration. Ignore the problem since hot-unplug during migration
1441 * does not work anyway.
1445 block
= find_ram_block(addr
);
1447 memset(block
->idstr
, 0, sizeof(block
->idstr
));
1452 static int memory_try_enable_merging(void *addr
, size_t len
)
1454 if (!machine_mem_merge(current_machine
)) {
1455 /* disabled by the user */
1459 return qemu_madvise(addr
, len
, QEMU_MADV_MERGEABLE
);
1462 /* Only legal before guest might have detected the memory size: e.g. on
1463 * incoming migration, or right after reset.
1465 * As memory core doesn't know how is memory accessed, it is up to
1466 * resize callback to update device state and/or add assertions to detect
1467 * misuse, if necessary.
1469 int qemu_ram_resize(ram_addr_t base
, ram_addr_t newsize
, Error
**errp
)
1471 RAMBlock
*block
= find_ram_block(base
);
1475 newsize
= HOST_PAGE_ALIGN(newsize
);
1477 if (block
->used_length
== newsize
) {
1481 if (!(block
->flags
& RAM_RESIZEABLE
)) {
1482 error_setg_errno(errp
, EINVAL
,
1483 "Length mismatch: %s: 0x" RAM_ADDR_FMT
1484 " in != 0x" RAM_ADDR_FMT
, block
->idstr
,
1485 newsize
, block
->used_length
);
1489 if (block
->max_length
< newsize
) {
1490 error_setg_errno(errp
, EINVAL
,
1491 "Length too large: %s: 0x" RAM_ADDR_FMT
1492 " > 0x" RAM_ADDR_FMT
, block
->idstr
,
1493 newsize
, block
->max_length
);
1497 cpu_physical_memory_clear_dirty_range(block
->offset
, block
->used_length
);
1498 block
->used_length
= newsize
;
1499 cpu_physical_memory_set_dirty_range(block
->offset
, block
->used_length
,
1501 memory_region_set_size(block
->mr
, newsize
);
1502 if (block
->resized
) {
1503 block
->resized(block
->idstr
, newsize
, block
->host
);
1508 static ram_addr_t
ram_block_add(RAMBlock
*new_block
, Error
**errp
)
1511 RAMBlock
*last_block
= NULL
;
1512 ram_addr_t old_ram_size
, new_ram_size
;
1514 old_ram_size
= last_ram_offset() >> TARGET_PAGE_BITS
;
1516 qemu_mutex_lock_ramlist();
1517 new_block
->offset
= find_ram_offset(new_block
->max_length
);
1519 if (!new_block
->host
) {
1520 if (xen_enabled()) {
1521 xen_ram_alloc(new_block
->offset
, new_block
->max_length
,
1524 new_block
->host
= phys_mem_alloc(new_block
->max_length
,
1525 &new_block
->mr
->align
);
1526 if (!new_block
->host
) {
1527 error_setg_errno(errp
, errno
,
1528 "cannot set up guest memory '%s'",
1529 memory_region_name(new_block
->mr
));
1530 qemu_mutex_unlock_ramlist();
1533 memory_try_enable_merging(new_block
->host
, new_block
->max_length
);
1537 new_ram_size
= MAX(old_ram_size
,
1538 (new_block
->offset
+ new_block
->max_length
) >> TARGET_PAGE_BITS
);
1539 if (new_ram_size
> old_ram_size
) {
1540 migration_bitmap_extend(old_ram_size
, new_ram_size
);
1542 /* Keep the list sorted from biggest to smallest block. Unlike QTAILQ,
1543 * QLIST (which has an RCU-friendly variant) does not have insertion at
1544 * tail, so save the last element in last_block.
1546 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1548 if (block
->max_length
< new_block
->max_length
) {
1553 QLIST_INSERT_BEFORE_RCU(block
, new_block
, next
);
1554 } else if (last_block
) {
1555 QLIST_INSERT_AFTER_RCU(last_block
, new_block
, next
);
1556 } else { /* list is empty */
1557 QLIST_INSERT_HEAD_RCU(&ram_list
.blocks
, new_block
, next
);
1559 ram_list
.mru_block
= NULL
;
1561 /* Write list before version */
1564 qemu_mutex_unlock_ramlist();
1566 new_ram_size
= last_ram_offset() >> TARGET_PAGE_BITS
;
1568 if (new_ram_size
> old_ram_size
) {
1571 /* ram_list.dirty_memory[] is protected by the iothread lock. */
1572 for (i
= 0; i
< DIRTY_MEMORY_NUM
; i
++) {
1573 ram_list
.dirty_memory
[i
] =
1574 bitmap_zero_extend(ram_list
.dirty_memory
[i
],
1575 old_ram_size
, new_ram_size
);
1578 cpu_physical_memory_set_dirty_range(new_block
->offset
,
1579 new_block
->used_length
,
1582 if (new_block
->host
) {
1583 qemu_ram_setup_dump(new_block
->host
, new_block
->max_length
);
1584 qemu_madvise(new_block
->host
, new_block
->max_length
, QEMU_MADV_HUGEPAGE
);
1585 qemu_madvise(new_block
->host
, new_block
->max_length
, QEMU_MADV_DONTFORK
);
1586 if (kvm_enabled()) {
1587 kvm_setup_guest_memory(new_block
->host
, new_block
->max_length
);
1591 return new_block
->offset
;
1595 ram_addr_t
qemu_ram_alloc_from_file(ram_addr_t size
, MemoryRegion
*mr
,
1596 bool share
, const char *mem_path
,
1599 RAMBlock
*new_block
;
1601 Error
*local_err
= NULL
;
1603 if (xen_enabled()) {
1604 error_setg(errp
, "-mem-path not supported with Xen");
1608 if (phys_mem_alloc
!= qemu_anon_ram_alloc
) {
1610 * file_ram_alloc() needs to allocate just like
1611 * phys_mem_alloc, but we haven't bothered to provide
1615 "-mem-path not supported with this accelerator");
1619 size
= HOST_PAGE_ALIGN(size
);
1620 new_block
= g_malloc0(sizeof(*new_block
));
1622 new_block
->used_length
= size
;
1623 new_block
->max_length
= size
;
1624 new_block
->flags
= share
? RAM_SHARED
: 0;
1625 new_block
->host
= file_ram_alloc(new_block
, size
,
1627 if (!new_block
->host
) {
1632 addr
= ram_block_add(new_block
, &local_err
);
1635 error_propagate(errp
, local_err
);
1643 ram_addr_t
qemu_ram_alloc_internal(ram_addr_t size
, ram_addr_t max_size
,
1644 void (*resized
)(const char*,
1647 void *host
, bool resizeable
,
1648 MemoryRegion
*mr
, Error
**errp
)
1650 RAMBlock
*new_block
;
1652 Error
*local_err
= NULL
;
1654 size
= HOST_PAGE_ALIGN(size
);
1655 max_size
= HOST_PAGE_ALIGN(max_size
);
1656 new_block
= g_malloc0(sizeof(*new_block
));
1658 new_block
->resized
= resized
;
1659 new_block
->used_length
= size
;
1660 new_block
->max_length
= max_size
;
1661 assert(max_size
>= size
);
1663 new_block
->host
= host
;
1665 new_block
->flags
|= RAM_PREALLOC
;
1668 new_block
->flags
|= RAM_RESIZEABLE
;
1670 addr
= ram_block_add(new_block
, &local_err
);
1673 error_propagate(errp
, local_err
);
1679 ram_addr_t
qemu_ram_alloc_from_ptr(ram_addr_t size
, void *host
,
1680 MemoryRegion
*mr
, Error
**errp
)
1682 return qemu_ram_alloc_internal(size
, size
, NULL
, host
, false, mr
, errp
);
1685 ram_addr_t
qemu_ram_alloc(ram_addr_t size
, MemoryRegion
*mr
, Error
**errp
)
1687 return qemu_ram_alloc_internal(size
, size
, NULL
, NULL
, false, mr
, errp
);
1690 ram_addr_t
qemu_ram_alloc_resizeable(ram_addr_t size
, ram_addr_t maxsz
,
1691 void (*resized
)(const char*,
1694 MemoryRegion
*mr
, Error
**errp
)
1696 return qemu_ram_alloc_internal(size
, maxsz
, resized
, NULL
, true, mr
, errp
);
1699 static void reclaim_ramblock(RAMBlock
*block
)
1701 if (block
->flags
& RAM_PREALLOC
) {
1703 } else if (xen_enabled()) {
1704 xen_invalidate_map_cache_entry(block
->host
);
1706 } else if (block
->fd
>= 0) {
1707 qemu_ram_munmap(block
->host
, block
->max_length
);
1711 qemu_anon_ram_free(block
->host
, block
->max_length
);
1716 void qemu_ram_free(ram_addr_t addr
)
1720 qemu_mutex_lock_ramlist();
1721 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1722 if (addr
== block
->offset
) {
1723 QLIST_REMOVE_RCU(block
, next
);
1724 ram_list
.mru_block
= NULL
;
1725 /* Write list before version */
1728 call_rcu(block
, reclaim_ramblock
, rcu
);
1732 qemu_mutex_unlock_ramlist();
1736 void qemu_ram_remap(ram_addr_t addr
, ram_addr_t length
)
1743 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1744 offset
= addr
- block
->offset
;
1745 if (offset
< block
->max_length
) {
1746 vaddr
= ramblock_ptr(block
, offset
);
1747 if (block
->flags
& RAM_PREALLOC
) {
1749 } else if (xen_enabled()) {
1753 if (block
->fd
>= 0) {
1754 flags
|= (block
->flags
& RAM_SHARED
?
1755 MAP_SHARED
: MAP_PRIVATE
);
1756 area
= mmap(vaddr
, length
, PROT_READ
| PROT_WRITE
,
1757 flags
, block
->fd
, offset
);
1760 * Remap needs to match alloc. Accelerators that
1761 * set phys_mem_alloc never remap. If they did,
1762 * we'd need a remap hook here.
1764 assert(phys_mem_alloc
== qemu_anon_ram_alloc
);
1766 flags
|= MAP_PRIVATE
| MAP_ANONYMOUS
;
1767 area
= mmap(vaddr
, length
, PROT_READ
| PROT_WRITE
,
1770 if (area
!= vaddr
) {
1771 fprintf(stderr
, "Could not remap addr: "
1772 RAM_ADDR_FMT
"@" RAM_ADDR_FMT
"\n",
1776 memory_try_enable_merging(vaddr
, length
);
1777 qemu_ram_setup_dump(vaddr
, length
);
1782 #endif /* !_WIN32 */
1784 int qemu_get_ram_fd(ram_addr_t addr
)
1790 block
= qemu_get_ram_block(addr
);
1796 void qemu_set_ram_fd(ram_addr_t addr
, int fd
)
1801 block
= qemu_get_ram_block(addr
);
1806 void *qemu_get_ram_block_host_ptr(ram_addr_t addr
)
1812 block
= qemu_get_ram_block(addr
);
1813 ptr
= ramblock_ptr(block
, 0);
1818 /* Return a host pointer to ram allocated with qemu_ram_alloc.
1819 * This should not be used for general purpose DMA. Use address_space_map
1820 * or address_space_rw instead. For local memory (e.g. video ram) that the
1821 * device owns, use memory_region_get_ram_ptr.
1823 * Called within RCU critical section.
1825 void *qemu_get_ram_ptr(ram_addr_t addr
)
1827 RAMBlock
*block
= qemu_get_ram_block(addr
);
1829 if (xen_enabled() && block
->host
== NULL
) {
1830 /* We need to check if the requested address is in the RAM
1831 * because we don't want to map the entire memory in QEMU.
1832 * In that case just map until the end of the page.
1834 if (block
->offset
== 0) {
1835 return xen_map_cache(addr
, 0, 0);
1838 block
->host
= xen_map_cache(block
->offset
, block
->max_length
, 1);
1840 return ramblock_ptr(block
, addr
- block
->offset
);
1843 /* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
1844 * but takes a size argument.
1846 * Called within RCU critical section.
1848 static void *qemu_ram_ptr_length(ram_addr_t addr
, hwaddr
*size
)
1851 ram_addr_t offset_inside_block
;
1856 block
= qemu_get_ram_block(addr
);
1857 offset_inside_block
= addr
- block
->offset
;
1858 *size
= MIN(*size
, block
->max_length
- offset_inside_block
);
1860 if (xen_enabled() && block
->host
== NULL
) {
1861 /* We need to check if the requested address is in the RAM
1862 * because we don't want to map the entire memory in QEMU.
1863 * In that case just map the requested area.
1865 if (block
->offset
== 0) {
1866 return xen_map_cache(addr
, *size
, 1);
1869 block
->host
= xen_map_cache(block
->offset
, block
->max_length
, 1);
1872 return ramblock_ptr(block
, offset_inside_block
);
1876 * Translates a host ptr back to a RAMBlock, a ram_addr and an offset
1879 * ptr: Host pointer to look up
1880 * round_offset: If true round the result offset down to a page boundary
1881 * *ram_addr: set to result ram_addr
1882 * *offset: set to result offset within the RAMBlock
1884 * Returns: RAMBlock (or NULL if not found)
1886 * By the time this function returns, the returned pointer is not protected
1887 * by RCU anymore. If the caller is not within an RCU critical section and
1888 * does not hold the iothread lock, it must have other means of protecting the
1889 * pointer, such as a reference to the region that includes the incoming
1892 RAMBlock
*qemu_ram_block_from_host(void *ptr
, bool round_offset
,
1893 ram_addr_t
*ram_addr
,
1897 uint8_t *host
= ptr
;
1899 if (xen_enabled()) {
1901 *ram_addr
= xen_ram_addr_from_mapcache(ptr
);
1902 block
= qemu_get_ram_block(*ram_addr
);
1904 *offset
= (host
- block
->host
);
1911 block
= atomic_rcu_read(&ram_list
.mru_block
);
1912 if (block
&& block
->host
&& host
- block
->host
< block
->max_length
) {
1916 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1917 /* This case append when the block is not mapped. */
1918 if (block
->host
== NULL
) {
1921 if (host
- block
->host
< block
->max_length
) {
1930 *offset
= (host
- block
->host
);
1932 *offset
&= TARGET_PAGE_MASK
;
1934 *ram_addr
= block
->offset
+ *offset
;
1940 * Finds the named RAMBlock
1942 * name: The name of RAMBlock to find
1944 * Returns: RAMBlock (or NULL if not found)
1946 RAMBlock
*qemu_ram_block_by_name(const char *name
)
1950 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1951 if (!strcmp(name
, block
->idstr
)) {
1959 /* Some of the softmmu routines need to translate from a host pointer
1960 (typically a TLB entry) back to a ram offset. */
1961 MemoryRegion
*qemu_ram_addr_from_host(void *ptr
, ram_addr_t
*ram_addr
)
1964 ram_addr_t offset
; /* Not used */
1966 block
= qemu_ram_block_from_host(ptr
, false, ram_addr
, &offset
);
1975 /* Called within RCU critical section. */
1976 static void notdirty_mem_write(void *opaque
, hwaddr ram_addr
,
1977 uint64_t val
, unsigned size
)
1979 if (!cpu_physical_memory_get_dirty_flag(ram_addr
, DIRTY_MEMORY_CODE
)) {
1980 tb_invalidate_phys_page_fast(ram_addr
, size
);
1984 stb_p(qemu_get_ram_ptr(ram_addr
), val
);
1987 stw_p(qemu_get_ram_ptr(ram_addr
), val
);
1990 stl_p(qemu_get_ram_ptr(ram_addr
), val
);
1995 /* Set both VGA and migration bits for simplicity and to remove
1996 * the notdirty callback faster.
1998 cpu_physical_memory_set_dirty_range(ram_addr
, size
,
1999 DIRTY_CLIENTS_NOCODE
);
2000 /* we remove the notdirty callback only if the code has been
2002 if (!cpu_physical_memory_is_clean(ram_addr
)) {
2003 tlb_set_dirty(current_cpu
, current_cpu
->mem_io_vaddr
);
2007 static bool notdirty_mem_accepts(void *opaque
, hwaddr addr
,
2008 unsigned size
, bool is_write
)
2013 static const MemoryRegionOps notdirty_mem_ops
= {
2014 .write
= notdirty_mem_write
,
2015 .valid
.accepts
= notdirty_mem_accepts
,
2016 .endianness
= DEVICE_NATIVE_ENDIAN
,
2019 /* Generate a debug exception if a watchpoint has been hit. */
2020 static void check_watchpoint(int offset
, int len
, MemTxAttrs attrs
, int flags
)
2022 CPUState
*cpu
= current_cpu
;
2023 CPUArchState
*env
= cpu
->env_ptr
;
2024 target_ulong pc
, cs_base
;
2029 if (cpu
->watchpoint_hit
) {
2030 /* We re-entered the check after replacing the TB. Now raise
2031 * the debug interrupt so that is will trigger after the
2032 * current instruction. */
2033 cpu_interrupt(cpu
, CPU_INTERRUPT_DEBUG
);
2036 vaddr
= (cpu
->mem_io_vaddr
& TARGET_PAGE_MASK
) + offset
;
2037 QTAILQ_FOREACH(wp
, &cpu
->watchpoints
, entry
) {
2038 if (cpu_watchpoint_address_matches(wp
, vaddr
, len
)
2039 && (wp
->flags
& flags
)) {
2040 if (flags
== BP_MEM_READ
) {
2041 wp
->flags
|= BP_WATCHPOINT_HIT_READ
;
2043 wp
->flags
|= BP_WATCHPOINT_HIT_WRITE
;
2045 wp
->hitaddr
= vaddr
;
2046 wp
->hitattrs
= attrs
;
2047 if (!cpu
->watchpoint_hit
) {
2048 cpu
->watchpoint_hit
= wp
;
2049 tb_check_watchpoint(cpu
);
2050 if (wp
->flags
& BP_STOP_BEFORE_ACCESS
) {
2051 cpu
->exception_index
= EXCP_DEBUG
;
2054 cpu_get_tb_cpu_state(env
, &pc
, &cs_base
, &cpu_flags
);
2055 tb_gen_code(cpu
, pc
, cs_base
, cpu_flags
, 1);
2056 cpu_resume_from_signal(cpu
, NULL
);
2060 wp
->flags
&= ~BP_WATCHPOINT_HIT
;
2065 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2066 so these check for a hit then pass through to the normal out-of-line
2068 static MemTxResult
watch_mem_read(void *opaque
, hwaddr addr
, uint64_t *pdata
,
2069 unsigned size
, MemTxAttrs attrs
)
2073 int asidx
= cpu_asidx_from_attrs(current_cpu
, attrs
);
2074 AddressSpace
*as
= current_cpu
->cpu_ases
[asidx
].as
;
2076 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, size
, attrs
, BP_MEM_READ
);
2079 data
= address_space_ldub(as
, addr
, attrs
, &res
);
2082 data
= address_space_lduw(as
, addr
, attrs
, &res
);
2085 data
= address_space_ldl(as
, addr
, attrs
, &res
);
2093 static MemTxResult
watch_mem_write(void *opaque
, hwaddr addr
,
2094 uint64_t val
, unsigned size
,
2098 int asidx
= cpu_asidx_from_attrs(current_cpu
, attrs
);
2099 AddressSpace
*as
= current_cpu
->cpu_ases
[asidx
].as
;
2101 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, size
, attrs
, BP_MEM_WRITE
);
2104 address_space_stb(as
, addr
, val
, attrs
, &res
);
2107 address_space_stw(as
, addr
, val
, attrs
, &res
);
2110 address_space_stl(as
, addr
, val
, attrs
, &res
);
2117 static const MemoryRegionOps watch_mem_ops
= {
2118 .read_with_attrs
= watch_mem_read
,
2119 .write_with_attrs
= watch_mem_write
,
2120 .endianness
= DEVICE_NATIVE_ENDIAN
,
2123 static MemTxResult
subpage_read(void *opaque
, hwaddr addr
, uint64_t *data
,
2124 unsigned len
, MemTxAttrs attrs
)
2126 subpage_t
*subpage
= opaque
;
2130 #if defined(DEBUG_SUBPAGE)
2131 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
"\n", __func__
,
2132 subpage
, len
, addr
);
2134 res
= address_space_read(subpage
->as
, addr
+ subpage
->base
,
2141 *data
= ldub_p(buf
);
2144 *data
= lduw_p(buf
);
2157 static MemTxResult
subpage_write(void *opaque
, hwaddr addr
,
2158 uint64_t value
, unsigned len
, MemTxAttrs attrs
)
2160 subpage_t
*subpage
= opaque
;
2163 #if defined(DEBUG_SUBPAGE)
2164 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
2165 " value %"PRIx64
"\n",
2166 __func__
, subpage
, len
, addr
, value
);
2184 return address_space_write(subpage
->as
, addr
+ subpage
->base
,
2188 static bool subpage_accepts(void *opaque
, hwaddr addr
,
2189 unsigned len
, bool is_write
)
2191 subpage_t
*subpage
= opaque
;
2192 #if defined(DEBUG_SUBPAGE)
2193 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx
"\n",
2194 __func__
, subpage
, is_write
? 'w' : 'r', len
, addr
);
2197 return address_space_access_valid(subpage
->as
, addr
+ subpage
->base
,
2201 static const MemoryRegionOps subpage_ops
= {
2202 .read_with_attrs
= subpage_read
,
2203 .write_with_attrs
= subpage_write
,
2204 .impl
.min_access_size
= 1,
2205 .impl
.max_access_size
= 8,
2206 .valid
.min_access_size
= 1,
2207 .valid
.max_access_size
= 8,
2208 .valid
.accepts
= subpage_accepts
,
2209 .endianness
= DEVICE_NATIVE_ENDIAN
,
2212 static int subpage_register (subpage_t
*mmio
, uint32_t start
, uint32_t end
,
2217 if (start
>= TARGET_PAGE_SIZE
|| end
>= TARGET_PAGE_SIZE
)
2219 idx
= SUBPAGE_IDX(start
);
2220 eidx
= SUBPAGE_IDX(end
);
2221 #if defined(DEBUG_SUBPAGE)
2222 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
2223 __func__
, mmio
, start
, end
, idx
, eidx
, section
);
2225 for (; idx
<= eidx
; idx
++) {
2226 mmio
->sub_section
[idx
] = section
;
2232 static subpage_t
*subpage_init(AddressSpace
*as
, hwaddr base
)
2236 mmio
= g_malloc0(sizeof(subpage_t
));
2240 memory_region_init_io(&mmio
->iomem
, NULL
, &subpage_ops
, mmio
,
2241 NULL
, TARGET_PAGE_SIZE
);
2242 mmio
->iomem
.subpage
= true;
2243 #if defined(DEBUG_SUBPAGE)
2244 printf("%s: %p base " TARGET_FMT_plx
" len %08x\n", __func__
,
2245 mmio
, base
, TARGET_PAGE_SIZE
);
2247 subpage_register(mmio
, 0, TARGET_PAGE_SIZE
-1, PHYS_SECTION_UNASSIGNED
);
2252 static uint16_t dummy_section(PhysPageMap
*map
, AddressSpace
*as
,
2256 MemoryRegionSection section
= {
2257 .address_space
= as
,
2259 .offset_within_address_space
= 0,
2260 .offset_within_region
= 0,
2261 .size
= int128_2_64(),
2264 return phys_section_add(map
, §ion
);
2267 MemoryRegion
*iotlb_to_region(CPUState
*cpu
, hwaddr index
, MemTxAttrs attrs
)
2269 int asidx
= cpu_asidx_from_attrs(cpu
, attrs
);
2270 CPUAddressSpace
*cpuas
= &cpu
->cpu_ases
[asidx
];
2271 AddressSpaceDispatch
*d
= atomic_rcu_read(&cpuas
->memory_dispatch
);
2272 MemoryRegionSection
*sections
= d
->map
.sections
;
2274 return sections
[index
& ~TARGET_PAGE_MASK
].mr
;
2277 static void io_mem_init(void)
2279 memory_region_init_io(&io_mem_rom
, NULL
, &unassigned_mem_ops
, NULL
, NULL
, UINT64_MAX
);
2280 memory_region_init_io(&io_mem_unassigned
, NULL
, &unassigned_mem_ops
, NULL
,
2282 memory_region_init_io(&io_mem_notdirty
, NULL
, ¬dirty_mem_ops
, NULL
,
2284 memory_region_init_io(&io_mem_watch
, NULL
, &watch_mem_ops
, NULL
,
2288 static void mem_begin(MemoryListener
*listener
)
2290 AddressSpace
*as
= container_of(listener
, AddressSpace
, dispatch_listener
);
2291 AddressSpaceDispatch
*d
= g_new0(AddressSpaceDispatch
, 1);
2294 n
= dummy_section(&d
->map
, as
, &io_mem_unassigned
);
2295 assert(n
== PHYS_SECTION_UNASSIGNED
);
2296 n
= dummy_section(&d
->map
, as
, &io_mem_notdirty
);
2297 assert(n
== PHYS_SECTION_NOTDIRTY
);
2298 n
= dummy_section(&d
->map
, as
, &io_mem_rom
);
2299 assert(n
== PHYS_SECTION_ROM
);
2300 n
= dummy_section(&d
->map
, as
, &io_mem_watch
);
2301 assert(n
== PHYS_SECTION_WATCH
);
2303 d
->phys_map
= (PhysPageEntry
) { .ptr
= PHYS_MAP_NODE_NIL
, .skip
= 1 };
2305 as
->next_dispatch
= d
;
2308 static void address_space_dispatch_free(AddressSpaceDispatch
*d
)
2310 phys_sections_free(&d
->map
);
2314 static void mem_commit(MemoryListener
*listener
)
2316 AddressSpace
*as
= container_of(listener
, AddressSpace
, dispatch_listener
);
2317 AddressSpaceDispatch
*cur
= as
->dispatch
;
2318 AddressSpaceDispatch
*next
= as
->next_dispatch
;
2320 phys_page_compact_all(next
, next
->map
.nodes_nb
);
2322 atomic_rcu_set(&as
->dispatch
, next
);
2324 call_rcu(cur
, address_space_dispatch_free
, rcu
);
2328 static void tcg_commit(MemoryListener
*listener
)
2330 CPUAddressSpace
*cpuas
;
2331 AddressSpaceDispatch
*d
;
2333 /* since each CPU stores ram addresses in its TLB cache, we must
2334 reset the modified entries */
2335 cpuas
= container_of(listener
, CPUAddressSpace
, tcg_as_listener
);
2336 cpu_reloading_memory_map();
2337 /* The CPU and TLB are protected by the iothread lock.
2338 * We reload the dispatch pointer now because cpu_reloading_memory_map()
2339 * may have split the RCU critical section.
2341 d
= atomic_rcu_read(&cpuas
->as
->dispatch
);
2342 cpuas
->memory_dispatch
= d
;
2343 tlb_flush(cpuas
->cpu
, 1);
2346 void address_space_init_dispatch(AddressSpace
*as
)
2348 as
->dispatch
= NULL
;
2349 as
->dispatch_listener
= (MemoryListener
) {
2351 .commit
= mem_commit
,
2352 .region_add
= mem_add
,
2353 .region_nop
= mem_add
,
2356 memory_listener_register(&as
->dispatch_listener
, as
);
2359 void address_space_unregister(AddressSpace
*as
)
2361 memory_listener_unregister(&as
->dispatch_listener
);
2364 void address_space_destroy_dispatch(AddressSpace
*as
)
2366 AddressSpaceDispatch
*d
= as
->dispatch
;
2368 atomic_rcu_set(&as
->dispatch
, NULL
);
2370 call_rcu(d
, address_space_dispatch_free
, rcu
);
2374 static void memory_map_init(void)
2376 system_memory
= g_malloc(sizeof(*system_memory
));
2378 memory_region_init(system_memory
, NULL
, "system", UINT64_MAX
);
2379 address_space_init(&address_space_memory
, system_memory
, "memory");
2381 system_io
= g_malloc(sizeof(*system_io
));
2382 memory_region_init_io(system_io
, NULL
, &unassigned_io_ops
, NULL
, "io",
2384 address_space_init(&address_space_io
, system_io
, "I/O");
2387 MemoryRegion
*get_system_memory(void)
2389 return system_memory
;
2392 MemoryRegion
*get_system_io(void)
2397 #endif /* !defined(CONFIG_USER_ONLY) */
2399 /* physical memory access (slow version, mainly for debug) */
2400 #if defined(CONFIG_USER_ONLY)
2401 int cpu_memory_rw_debug(CPUState
*cpu
, target_ulong addr
,
2402 uint8_t *buf
, int len
, int is_write
)
2409 page
= addr
& TARGET_PAGE_MASK
;
2410 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
2413 flags
= page_get_flags(page
);
2414 if (!(flags
& PAGE_VALID
))
2417 if (!(flags
& PAGE_WRITE
))
2419 /* XXX: this code should not depend on lock_user */
2420 if (!(p
= lock_user(VERIFY_WRITE
, addr
, l
, 0)))
2423 unlock_user(p
, addr
, l
);
2425 if (!(flags
& PAGE_READ
))
2427 /* XXX: this code should not depend on lock_user */
2428 if (!(p
= lock_user(VERIFY_READ
, addr
, l
, 1)))
2431 unlock_user(p
, addr
, 0);
2442 static void invalidate_and_set_dirty(MemoryRegion
*mr
, hwaddr addr
,
2445 uint8_t dirty_log_mask
= memory_region_get_dirty_log_mask(mr
);
2446 /* No early return if dirty_log_mask is or becomes 0, because
2447 * cpu_physical_memory_set_dirty_range will still call
2448 * xen_modified_memory.
2450 if (dirty_log_mask
) {
2452 cpu_physical_memory_range_includes_clean(addr
, length
, dirty_log_mask
);
2454 if (dirty_log_mask
& (1 << DIRTY_MEMORY_CODE
)) {
2455 tb_invalidate_phys_range(addr
, addr
+ length
);
2456 dirty_log_mask
&= ~(1 << DIRTY_MEMORY_CODE
);
2458 cpu_physical_memory_set_dirty_range(addr
, length
, dirty_log_mask
);
2461 static int memory_access_size(MemoryRegion
*mr
, unsigned l
, hwaddr addr
)
2463 unsigned access_size_max
= mr
->ops
->valid
.max_access_size
;
2465 /* Regions are assumed to support 1-4 byte accesses unless
2466 otherwise specified. */
2467 if (access_size_max
== 0) {
2468 access_size_max
= 4;
2471 /* Bound the maximum access by the alignment of the address. */
2472 if (!mr
->ops
->impl
.unaligned
) {
2473 unsigned align_size_max
= addr
& -addr
;
2474 if (align_size_max
!= 0 && align_size_max
< access_size_max
) {
2475 access_size_max
= align_size_max
;
2479 /* Don't attempt accesses larger than the maximum. */
2480 if (l
> access_size_max
) {
2481 l
= access_size_max
;
/* Take the BQL for MMIO regions that require global locking and flush any
 * coalesced MMIO before the access is dispatched; returns true if the caller
 * must drop the lock again once the access is done.
 */
static bool prepare_mmio_access(MemoryRegion *mr)
{
    bool unlocked = !qemu_mutex_iothread_locked();
    bool release_lock = false;

    if (unlocked && mr->global_locking) {
        qemu_mutex_lock_iothread();
        unlocked = false;
        release_lock = true;
    }
    if (mr->flush_coalesced_mmio) {
        if (unlocked) {
            qemu_mutex_lock_iothread();
        }
        qemu_flush_coalesced_mmio_buffer();
        if (unlocked) {
            qemu_mutex_unlock_iothread();
        }
    }

    return release_lock;
}
/* Called within RCU critical section.  */
static MemTxResult address_space_write_continue(AddressSpace *as, hwaddr addr,
                                                MemTxAttrs attrs,
                                                const uint8_t *buf,
                                                int len, hwaddr addr1,
                                                hwaddr l, MemoryRegion *mr)
{
    uint8_t *ptr;
    uint64_t val;
    MemTxResult result = MEMTX_OK;
    bool release_lock = false;

    for (;;) {
        if (!memory_access_is_direct(mr, true)) {
            release_lock |= prepare_mmio_access(mr);
            l = memory_access_size(mr, l, addr1);
            /* XXX: could force current_cpu to NULL to avoid
               potential bugs */
            switch (l) {
            case 8:
                /* 64 bit write access */
                val = ldq_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 8,
                                                       attrs);
                break;
            case 4:
                /* 32 bit write access */
                val = ldl_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 4,
                                                       attrs);
                break;
            case 2:
                /* 16 bit write access */
                val = lduw_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 2,
                                                       attrs);
                break;
            case 1:
                /* 8 bit write access */
                val = ldub_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 1,
                                                       attrs);
                break;
            default:
                abort();
            }
        } else {
            addr1 += memory_region_get_ram_addr(mr);
            /* RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
            invalidate_and_set_dirty(mr, addr1, l);
        }

        if (release_lock) {
            qemu_mutex_unlock_iothread();
            release_lock = false;
        }

        len -= l;
        buf += l;
        addr += l;

        if (!len) {
            break;
        }

        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);
    }

    return result;
}
MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
                                const uint8_t *buf, int len)
{
    hwaddr l;
    hwaddr addr1;
    MemoryRegion *mr;
    MemTxResult result = MEMTX_OK;

    if (len > 0) {
        rcu_read_lock();
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);
        result = address_space_write_continue(as, addr, attrs, buf, len,
                                              addr1, l, mr);
        rcu_read_unlock();
    }

    return result;
}
/* Called within RCU critical section.  */
MemTxResult address_space_read_continue(AddressSpace *as, hwaddr addr,
                                        MemTxAttrs attrs, uint8_t *buf,
                                        int len, hwaddr addr1, hwaddr l,
                                        MemoryRegion *mr)
{
    uint8_t *ptr;
    uint64_t val;
    MemTxResult result = MEMTX_OK;
    bool release_lock = false;

    for (;;) {
        if (!memory_access_is_direct(mr, false)) {
            /* I/O case */
            release_lock |= prepare_mmio_access(mr);
            l = memory_access_size(mr, l, addr1);
            switch (l) {
            case 8:
                /* 64 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 8,
                                                      attrs);
                stq_p(buf, val);
                break;
            case 4:
                /* 32 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 4,
                                                      attrs);
                stl_p(buf, val);
                break;
            case 2:
                /* 16 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 2,
                                                      attrs);
                stw_p(buf, val);
                break;
            case 1:
                /* 8 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 1,
                                                      attrs);
                stb_p(buf, val);
                break;
            default:
                abort();
            }
        } else {
            /* RAM case */
            ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
            memcpy(buf, ptr, l);
        }

        if (release_lock) {
            qemu_mutex_unlock_iothread();
            release_lock = false;
        }

        len -= l;
        buf += l;
        addr += l;

        if (!len) {
            break;
        }

        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, false);
    }

    return result;
}
MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
                                    MemTxAttrs attrs, uint8_t *buf, int len)
{
    hwaddr l;
    hwaddr addr1;
    MemoryRegion *mr;
    MemTxResult result = MEMTX_OK;

    if (len > 0) {
        rcu_read_lock();
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, false);
        result = address_space_read_continue(as, addr, attrs, buf, len,
                                             addr1, l, mr);
        rcu_read_unlock();
    }

    return result;
}
MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
                             uint8_t *buf, int len, bool is_write)
{
    if (is_write) {
        return address_space_write(as, addr, attrs, (uint8_t *)buf, len);
    } else {
        return address_space_read(as, addr, attrs, (uint8_t *)buf, len);
    }
}
void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
                            int len, int is_write)
{
    address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
                     buf, len, is_write);
}
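
/* Illustrative sketch (not in the original source): a device model doing a
 * DMA-style copy into guest memory through address_space_rw() and checking
 * the transaction result.  The destination address and payload below are
 * hypothetical.
 */
static void __attribute__((unused)) example_dma_write(void)
{
    uint8_t payload[4] = { 0xde, 0xad, 0xbe, 0xef };  /* hypothetical data */
    hwaddr dma_addr = 0x1000;                         /* hypothetical GPA */

    MemTxResult res = address_space_rw(&address_space_memory, dma_addr,
                                       MEMTXATTRS_UNSPECIFIED,
                                       payload, sizeof(payload), true);
    if (res != MEMTX_OK) {
        /* The write hit unassigned memory or a device reported an error. */
    }
}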
enum write_rom_type {
    WRITE_DATA,
    FLUSH_CACHE,
};

static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
    hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
{
    hwaddr l;
    uint8_t *ptr;
    hwaddr addr1;
    MemoryRegion *mr;

    rcu_read_lock();
    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);

        if (!(memory_region_is_ram(mr) ||
              memory_region_is_romd(mr))) {
            l = memory_access_size(mr, l, addr1);
        } else {
            addr1 += memory_region_get_ram_addr(mr);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            switch (type) {
            case WRITE_DATA:
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(mr, addr1, l);
                break;
            case FLUSH_CACHE:
                flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
                break;
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
    rcu_read_unlock();
}
/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
                                   const uint8_t *buf, int len)
{
    cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
}
void cpu_flush_icache_range(hwaddr start, int len)
{
    /*
     * This function should do the same thing as an icache flush that was
     * triggered from within the guest. For TCG we are always cache coherent,
     * so there is no need to flush anything. For KVM / Xen we need to flush
     * the host's instruction cache at least.
     */
    if (tcg_enabled()) {
        return;
    }
    cpu_physical_memory_write_rom_internal(&address_space_memory,
                                           start, NULL, len, FLUSH_CACHE);
}
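
/* Illustrative sketch (not in the original source): loading a small firmware
 * blob with cpu_physical_memory_write_rom(), which succeeds even when the
 * destination is a ROM region that would reject ordinary writes.  The blob
 * contents and load address are hypothetical.
 */
static void __attribute__((unused)) example_load_rom_blob(void)
{
    static const uint8_t blob[] = { 0x12, 0x34, 0x56, 0x78 }; /* hypothetical */
    hwaddr rom_base = 0xfffc0000;                             /* hypothetical */

    cpu_physical_memory_write_rom(&address_space_memory, rom_base,
                                  blob, sizeof(blob));
}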
static BounceBuffer bounce;

typedef struct MapClient {
    QEMUBH *bh;
    QLIST_ENTRY(MapClient) link;
} MapClient;

QemuMutex map_client_list_lock;
static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);
static void cpu_unregister_map_client_do(MapClient *client)
{
    QLIST_REMOVE(client, link);
    g_free(client);
}

static void cpu_notify_map_clients_locked(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        qemu_bh_schedule(client->bh);
        cpu_unregister_map_client_do(client);
    }
}

void cpu_register_map_client(QEMUBH *bh)
{
    MapClient *client = g_malloc(sizeof(*client));

    qemu_mutex_lock(&map_client_list_lock);
    client->bh = bh;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    if (!atomic_read(&bounce.in_use)) {
        cpu_notify_map_clients_locked();
    }
    qemu_mutex_unlock(&map_client_list_lock);
}
void cpu_exec_init_all(void)
{
    qemu_mutex_init(&ram_list.mutex);
    io_mem_init();
    memory_map_init();
    qemu_mutex_init(&map_client_list_lock);
}
void cpu_unregister_map_client(QEMUBH *bh)
{
    MapClient *client;

    qemu_mutex_lock(&map_client_list_lock);
    QLIST_FOREACH(client, &map_client_list, link) {
        if (client->bh == bh) {
            cpu_unregister_map_client_do(client);
            break;
        }
    }
    qemu_mutex_unlock(&map_client_list_lock);
}

static void cpu_notify_map_clients(void)
{
    qemu_mutex_lock(&map_client_list_lock);
    cpu_notify_map_clients_locked();
    qemu_mutex_unlock(&map_client_list_lock);
}
bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
{
    MemoryRegion *mr;
    hwaddr l, xlat;

    rcu_read_lock();
    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (!memory_access_is_direct(mr, is_write)) {
            l = memory_access_size(mr, l, addr);
            if (!memory_region_access_valid(mr, xlat, l, is_write)) {
                return false;
            }
        }

        len -= l;
        addr += l;
    }
    rcu_read_unlock();
    return true;
}
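
/* Illustrative sketch (not in the original source): probing whether a guest
 * physical range can be accessed before issuing the access itself, as a
 * device model might do while validating a descriptor ring.  The address and
 * length are hypothetical.
 */
static bool __attribute__((unused)) example_probe_range(void)
{
    hwaddr ring_base = 0x2000;  /* hypothetical descriptor ring address */
    int ring_len = 64;          /* hypothetical length in bytes */

    return address_space_access_valid(&address_space_memory, ring_base,
                                      ring_len, false);
}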
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *address_space_map(AddressSpace *as,
                        hwaddr addr,
                        hwaddr *plen,
                        bool is_write)
{
    hwaddr len = *plen;
    hwaddr done = 0;
    hwaddr l, xlat, base;
    MemoryRegion *mr, *this_mr;
    ram_addr_t raddr;
    void *ptr;

    if (len == 0) {
        return NULL;
    }

    l = len;
    rcu_read_lock();
    mr = address_space_translate(as, addr, &xlat, &l, is_write);

    if (!memory_access_is_direct(mr, is_write)) {
        if (atomic_xchg(&bounce.in_use, true)) {
            rcu_read_unlock();
            return NULL;
        }
        /* Avoid unbounded allocations */
        l = MIN(l, TARGET_PAGE_SIZE);
        bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
        bounce.addr = addr;
        bounce.len = l;

        memory_region_ref(mr);
        bounce.mr = mr;
        if (!is_write) {
            address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
                               bounce.buffer, l);
        }

        rcu_read_unlock();
        *plen = l;
        return bounce.buffer;
    }

    base = xlat;
    raddr = memory_region_get_ram_addr(mr);

    for (;;) {
        len -= l;
        addr += l;
        done += l;
        if (len == 0) {
            break;
        }

        l = len;
        this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (this_mr != mr || xlat != base + done) {
            break;
        }
    }

    memory_region_ref(mr);
    *plen = done;
    ptr = qemu_ram_ptr_length(raddr + base, plen);
    rcu_read_unlock();

    return ptr;
}
/* Unmaps a memory region previously mapped by address_space_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         int is_write, hwaddr access_len)
{
    if (buffer != bounce.buffer) {
        MemoryRegion *mr;
        ram_addr_t addr1;

        mr = qemu_ram_addr_from_host(buffer, &addr1);
        assert(mr != NULL);
        if (is_write) {
            invalidate_and_set_dirty(mr, addr1, access_len);
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        memory_region_unref(mr);
        return;
    }
    if (is_write) {
        address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
                            bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    memory_region_unref(bounce.mr);
    atomic_mb_set(&bounce.in_use, false);
    cpu_notify_map_clients();
}
void *cpu_physical_memory_map(hwaddr addr,
                              hwaddr *plen,
                              int is_write)
{
    return address_space_map(&address_space_memory, addr, plen, is_write);
}
void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                               int is_write, hwaddr access_len)
{
    return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
}
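
/* Illustrative sketch (not in the original source): the usual map/modify/
 * unmap pattern for zero-copy access to guest memory, falling back gracefully
 * when the mapping cannot be established (for example when the single bounce
 * buffer is already in use).  Address and length are hypothetical; a real
 * caller would register a map client to retry when NULL is returned.
 */
static void __attribute__((unused)) example_map_and_fill(void)
{
    hwaddr gpa = 0x3000;   /* hypothetical guest physical address */
    hwaddr plen = 256;     /* requested length; may come back smaller */
    void *host;

    host = cpu_physical_memory_map(gpa, &plen, true /* is_write */);
    if (!host) {
        return;            /* resources exhausted, retry later */
    }
    memset(host, 0, plen); /* modify the mapped bytes directly */
    cpu_physical_memory_unmap(host, plen, true /* is_write */, plen);
}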
/* warning: addr must be aligned */
static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
                                                  MemTxAttrs attrs,
                                                  MemTxResult *result,
                                                  enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, false);
    if (l < 4 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}
uint32_t address_space_ldl(AddressSpace *as, hwaddr addr,
                           MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_NATIVE_ENDIAN);
}

uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_LITTLE_ENDIAN);
}

uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_BIG_ENDIAN);
}
uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}
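
/* Illustrative sketch (not in the original source): reading a 32-bit
 * little-endian value from guest memory with explicit attributes and checking
 * the transaction result, instead of discarding it as the ld*_phys() wrappers
 * do.  The caller-supplied address is arbitrary.
 */
static uint32_t __attribute__((unused)) example_read_le32(hwaddr addr)
{
    MemTxResult res;
    uint32_t val = address_space_ldl_le(&address_space_memory, addr,
                                        MEMTXATTRS_UNSPECIFIED, &res);

    return res == MEMTX_OK ? val : 0;
}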
/* warning: addr must be aligned */
static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
                                                  MemTxAttrs attrs,
                                                  MemTxResult *result,
                                                  enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 8;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 false);
    if (l < 8 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap64(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap64(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}
uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
                           MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_NATIVE_ENDIAN);
}

uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_LITTLE_ENDIAN);
}

uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_BIG_ENDIAN);
}
uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}
uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
                            MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t val;
    MemTxResult r;

    r = address_space_rw(as, addr, attrs, &val, 1, 0);
    if (result) {
        *result = r;
    }
    return val;
}

uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}
/* warning: addr must be aligned */
static inline uint32_t address_space_lduw_internal(AddressSpace *as,
                                                   hwaddr addr,
                                                   MemTxAttrs attrs,
                                                   MemTxResult *result,
                                                   enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 false);
    if (l < 2 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}
uint32_t address_space_lduw(AddressSpace *as, hwaddr addr,
                            MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_NATIVE_ENDIAN);
}

uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
                               MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_LITTLE_ENDIAN);
}

uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
                               MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_BIG_ENDIAN);
}
uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}
/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
                                MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    uint8_t dirty_log_mask;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

        r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
    } else {
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        dirty_log_mask = memory_region_get_dirty_log_mask(mr);
        dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
        cpu_physical_memory_set_dirty_range(addr1, 4, dirty_log_mask);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}
void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_notdirty(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
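
/* Illustrative sketch (not in the original source): how a target MMU helper
 * might set an accessed bit in a guest page-table entry without marking the
 * RAM page dirty for code invalidation, which is exactly the case the comment
 * above describes.  The PTE address and flag value are hypothetical.
 */
static void __attribute__((unused)) example_update_pte(AddressSpace *as,
                                                       hwaddr pte_addr)
{
    const uint32_t accessed_flag = 1u << 5;   /* hypothetical PTE flag */
    uint32_t pte = ldl_phys(as, pte_addr);

    stl_phys_notdirty(as, pte_addr, pte | accessed_flag);
}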
/* warning: addr must be aligned */
static inline void address_space_stl_internal(AddressSpace *as,
                                              hwaddr addr, uint32_t val,
                                              MemTxAttrs attrs,
                                              MemTxResult *result,
                                              enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(mr, addr1, 4);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}
void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stl_internal(as, addr, val, attrs, result,
                               DEVICE_NATIVE_ENDIAN);
}

void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stl_internal(as, addr, val, attrs, result,
                               DEVICE_LITTLE_ENDIAN);
}

void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stl_internal(as, addr, val, attrs, result,
                               DEVICE_BIG_ENDIAN);
}
void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t v = val;
    MemTxResult r;

    r = address_space_rw(as, addr, attrs, &v, 1, 1);
    if (result) {
        *result = r;
    }
}

void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
/* warning: addr must be aligned */
static inline void address_space_stw_internal(AddressSpace *as,
                                              hwaddr addr, uint32_t val,
                                              MemTxAttrs attrs,
                                              MemTxResult *result,
                                              enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, true);
    if (l < 2 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(mr, addr1, 2);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}
void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_NATIVE_ENDIAN);
}

void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_LITTLE_ENDIAN);
}

void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_BIG_ENDIAN);
}
void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;

    val = tswap64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;

    val = cpu_to_le64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;

    val = cpu_to_be64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}
void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
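
/* Illustrative sketch (not in the original source): writing a 64-bit
 * little-endian descriptor field into guest memory, as a virtio-style device
 * model might do.  The destination address and value are hypothetical.
 */
static void __attribute__((unused)) example_write_le64_descriptor(void)
{
    hwaddr desc_addr = 0x4000;              /* hypothetical descriptor address */
    uint64_t desc = 0x123456789abcdef0ULL;  /* hypothetical field value */

    stq_le_phys(&address_space_memory, desc_addr, desc);
}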
/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

    while (len > 0) {
        int asidx;
        MemTxAttrs attrs;

        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_attrs_debug(cpu, page, &attrs);
        asidx = cpu_asidx_from_attrs(cpu, attrs);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write) {
            cpu_physical_memory_write_rom(cpu->cpu_ases[asidx].as,
                                          phys_addr, buf, l);
        } else {
            address_space_rw(cpu->cpu_ases[asidx].as, phys_addr,
                             MEMTXATTRS_UNSPECIFIED,
                             buf, l, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
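
/* Illustrative sketch (not in the original source): a monitor/gdbstub-style
 * helper reading guest-virtual memory through cpu_memory_rw_debug().  The CPU
 * pointer is whichever CPU the caller is inspecting; the length and buffer
 * come from the caller.
 */
static int __attribute__((unused)) example_debug_read(CPUState *cpu,
                                                      target_ulong vaddr,
                                                      uint8_t *out, int len)
{
    /* is_write == 0: read guest memory into 'out'; returns -1 if unmapped. */
    return cpu_memory_rw_debug(cpu, vaddr, out, len, 0);
}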
/*
 * Allows code that needs to deal with migration bitmaps etc to still be built
 * target independent.
 */
size_t qemu_target_page_bits(void)
{
    return TARGET_PAGE_BITS;
}

#endif
/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool target_words_bigendian(void);
bool target_words_bigendian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}
#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegion *mr;
    hwaddr l = 1;
    bool res;

    rcu_read_lock();
    mr = address_space_translate(&address_space_memory,
                                 phys_addr, &phys_addr, &l, false);

    res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
    rcu_read_unlock();
    return res;
}
int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
{
    RAMBlock *block;
    int ret = 0;

    rcu_read_lock();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        ret = func(block->idstr, block->host, block->offset,
                   block->used_length, opaque);