/*
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <sys/types.h>

#include "qemu-common.h"

#if !defined(CONFIG_USER_ONLY)
#include "hw/boards.h"
#endif

#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#endif
#include "exec/cpu-all.h"
#include "qemu/rcu_queue.h"
#include "qemu/main-loop.h"
#include "translate-all.h"
#include "sysemu/replay.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"

#include "qemu/range.h"
#include "qemu/mmap-alloc.h"

//#define DEBUG_SUBPAGE
#if !defined(CONFIG_USER_ONLY)
/* ram_list is read under rcu_read_lock()/rcu_read_unlock().  Writes
 * are protected by the ramlist lock.
 */
RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC   (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED     (1 << 1)

/* Only a portion of RAM (used_length) is actually used, and migrated.
 * This used_length size can change across reboots.
 */
#define RAM_RESIZEABLE (1 << 2)

#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
__thread CPUState *current_cpu;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;
#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits to skip to the next level (in units of L2_SIZE).
     * 0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables.  */
#define ADDR_SPACE_BITS 64

#define P_L2_BITS 9
#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)

typedef PhysPageEntry Node[P_L2_SIZE];

typedef struct PhysPageMap {
    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;
};
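/*
 * Illustrative sketch, not part of the original file: with the usual
 * TARGET_PAGE_BITS of 12 and the definitions above, the radix tree has
 * ceil((64 - 12) / 9) = (64 - 12 - 1) / 9 + 1 = 6 levels, each level
 * indexed by 9 bits of the page number.
 */
#if 0 /* example only */
QEMU_BUILD_BUG_ON((64 - 12 - 1) / 9 + 1 != 6); /* P_L2_LEVELS for 4K pages */
#endif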
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;

/**
 * CPUAddressSpace: all the information a CPU needs about an AddressSpace
 * @cpu: the CPU whose AddressSpace this is
 * @as: the AddressSpace itself
 * @memory_dispatch: its dispatch pointer (cached, RCU protected)
 * @tcg_as_listener: listener for tracking changes to the AddressSpace
 */
struct CPUAddressSpace {
    CPUState *cpu;
    AddressSpace *as;
    struct AddressSpaceDispatch *memory_dispatch;
    MemoryListener tcg_as_listener;
};

#endif
#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
    }
}

static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
{
    unsigned i;
    uint32_t ret;
    PhysPageEntry e;
    PhysPageEntry *p;

    ret = map->nodes_nb++;
    p = map->nodes[ret];
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);

    e.skip = leaf ? 0 : 1;
    e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
    for (i = 0; i < P_L2_SIZE; ++i) {
        memcpy(&p[i], &e, sizeof(e));
    }
    return ret;
}

static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map, level == 0);
    }
    p = map->nodes[lp->ptr];
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}
/* Compact a non-leaf page entry. Simply detect that the entry has a single child,
 * and update our entry so we can skip it and go directly to the destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes, compacted);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}

static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    DECLARE_BITMAP(compacted, nodes_nb);

    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes, compacted);
    }
}
static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (sections[lp.ptr].size.hi ||
        range_covers_byte(sections[lp.ptr].offset_within_address_space,
                          sections[lp.ptr].size.lo, addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}
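/*
 * Illustrative sketch, not part of the original file: resolving a single
 * address against the multi-level map above.  Each loop iteration consumes
 * lp.skip levels (9 address bits per level), so a fully populated tree is
 * walked in at most P_L2_LEVELS loads; compacted chains take fewer.
 */
#if 0 /* example only; example_lookup is a hypothetical caller */
static MemoryRegionSection *example_lookup(AddressSpaceDispatch *d, hwaddr addr)
{
    /* returns &d->map.sections[PHYS_SECTION_UNASSIGNED] if nothing is mapped */
    return phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
}
#endif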
bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

/* Called from RCU critical section */
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section;
    subpage_t *subpage;

    section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    return section;
}
/* Called from RCU critical section */
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    MemoryRegion *mr;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    mr = section->mr;

    /* MMIO registers can be expected to perform full-width accesses based only
     * on their address, without considering adjacent registers that could
     * decode to completely different MemoryRegions.  When such registers
     * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
     * regions overlap wildly.  For this reason we cannot clamp the accesses
     * here.
     *
     * If the length is small (as is the case for address_space_ldl/stl),
     * everything works fine.  If the incoming length is large, however,
     * the caller really has to do the clamping through memory_access_size.
     */
    if (memory_region_is_ram(mr)) {
        diff = int128_sub(section->size, int128_make64(addr));
        *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    }
    return section;
}

/* Called from RCU critical section */
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;

    for (;;) {
        AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
        section = address_space_translate_internal(d, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        *plen = MIN(page, *plen);
    }

    *xlat = addr;
    return mr;
}
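/*
 * Illustrative sketch, not part of the original file: a caller must hold
 * the RCU read lock around the translate + access sequence, roughly:
 */
#if 0 /* example only; example_peek_byte is a hypothetical caller */
static uint8_t example_peek_byte(AddressSpace *as, hwaddr addr)
{
    hwaddr xlat, len = 1;
    MemoryRegion *mr;
    uint8_t val = 0;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &xlat, &len, false);
    if (memory_access_is_direct(mr, false)) {
        val = ldub_p(qemu_get_ram_ptr(memory_region_get_ram_addr(mr) + xlat));
    }
    rcu_read_unlock();
    return val;
}
#endif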
/* Called from RCU critical section */
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen)
{
    MemoryRegionSection *section;
    AddressSpaceDispatch *d = cpu->cpu_ases[asidx].memory_dispatch;

    section = address_space_translate_internal(d, addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif
#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu, 1);

    return 0;
}

static int cpu_common_pre_load(void *opaque)
{
    CPUState *cpu = opaque;

    cpu->exception_index = -1;

    return 0;
}

static bool cpu_common_exception_index_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return tcg_enabled() && cpu->exception_index != -1;
}

static const VMStateDescription vmstate_cpu_common_exception_index = {
    .name = "cpu_common/exception_index",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_exception_index_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(exception_index, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

static bool cpu_common_crash_occurred_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return cpu->crash_occurred;
}

static const VMStateDescription vmstate_cpu_common_crash_occurred = {
    .name = "cpu_common/crash_occurred",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_crash_occurred_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(crash_occurred, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_cpu_common_exception_index,
        &vmstate_cpu_common_crash_occurred,
        NULL
    }
};

#endif

CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}
#if !defined(CONFIG_USER_ONLY)
void cpu_address_space_init(CPUState *cpu, AddressSpace *as, int asidx)
{
    CPUAddressSpace *newas;

    /* Target code should have set num_ases before calling us */
    assert(asidx < cpu->num_ases);

    if (asidx == 0) {
        /* address space 0 gets the convenience alias */
        cpu->as = as;
    }

    /* KVM cannot currently support multiple address spaces. */
    assert(asidx == 0 || !kvm_enabled());

    if (!cpu->cpu_ases) {
        cpu->cpu_ases = g_new0(CPUAddressSpace, cpu->num_ases);
    }

    newas = &cpu->cpu_ases[asidx];
    newas->cpu = cpu;
    newas->as = as;
    if (tcg_enabled()) {
        newas->tcg_as_listener.commit = tcg_commit;
        memory_listener_register(&newas->tcg_as_listener, as);
    }
}

AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx)
{
    /* Return the AddressSpace corresponding to the specified index */
    return cpu->cpu_ases[asidx].as;
}
#endif
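/*
 * Illustrative sketch, not part of the original file: a target CPU with
 * two address spaces (e.g. secure and non-secure memory) would set
 * num_ases before realize and register each one.  example_wire_cpu and
 * secure_mr are hypothetical names.
 */
#if 0 /* example only */
static void example_wire_cpu(CPUState *cs, MemoryRegion *secure_mr)
{
    AddressSpace *secure_as = g_new0(AddressSpace, 1);

    address_space_init(secure_as, secure_mr, "cpu-secure-memory");
    cs->num_ases = 2;
    cpu_address_space_init(cs, &address_space_memory, 0);
    cpu_address_space_init(cs, secure_as, 1);
}
#endif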
#ifndef CONFIG_USER_ONLY
static DECLARE_BITMAP(cpu_index_map, MAX_CPUMASK_BITS);

static int cpu_get_free_index(Error **errp)
{
    int cpu = find_first_zero_bit(cpu_index_map, MAX_CPUMASK_BITS);

    if (cpu >= MAX_CPUMASK_BITS) {
        error_setg(errp, "Trying to use more CPUs than max of %d",
                   MAX_CPUMASK_BITS);
        return -1;
    }

    bitmap_set(cpu_index_map, cpu, 1);
    return cpu;
}

void cpu_exec_exit(CPUState *cpu)
{
    if (cpu->cpu_index == -1) {
        /* cpu_index was never allocated by this @cpu or was already freed. */
        return;
    }

    bitmap_clear(cpu_index_map, cpu->cpu_index, 1);
    cpu->cpu_index = -1;
}
#else

static int cpu_get_free_index(Error **errp)
{
    CPUState *some_cpu;
    int cpu_index = 0;

    CPU_FOREACH(some_cpu) {
        cpu_index++;
    }
    return cpu_index;
}

void cpu_exec_exit(CPUState *cpu)
{
}
#endif
void cpu_exec_init(CPUState *cpu, Error **errp)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    int cpu_index;
    Error *local_err = NULL;

#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();

    /* This is a softmmu CPU object, so create a property for it
     * so users can wire up its memory. (This can't go in qom/cpu.c
     * because that file is compiled only once for both user-mode
     * and system builds.) The default if no link is set up is to use
     * the system address space.
     */
    object_property_add_link(OBJECT(cpu), "memory", TYPE_MEMORY_REGION,
                             (Object **)&cpu->memory,
                             qdev_prop_allow_set_link_before_realize,
                             OBJ_PROP_LINK_UNREF_ON_RELEASE,
                             &error_abort);
    cpu->memory = system_memory;
    object_ref(OBJECT(cpu->memory));
#endif

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    cpu_index = cpu->cpu_index = cpu_get_free_index(&local_err);
    if (local_err) {
        error_propagate(errp, local_err);
#if defined(CONFIG_USER_ONLY)
        cpu_list_unlock();
#endif
        return;
    }
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
    }
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, cpu->env_ptr);
    assert(cc->vmsd == NULL);
    assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    MemTxAttrs attrs;
    hwaddr phys = cpu_get_phys_page_attrs_debug(cpu, pc, &attrs);
    int asidx = cpu_asidx_from_attrs(cpu, attrs);
    if (phys != -1) {
        tb_invalidate_phys_addr(cpu->cpu_ases[asidx].as,
                                phys | (pc & ~TARGET_PAGE_MASK));
    }
}
#endif
#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
}

int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    return -ENOSYS;
}

void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
}

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    CPUWatchpoint *wp;

    /* forbid ranges which are empty or run off the end of the address space */
    if (len == 0 || (addr + len - 1) < addr) {
        error_report("tried to set invalid watchpoint at %"
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len = len;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
    }

    tlb_flush_page(cpu, addr);

    if (watchpoint) {
        *watchpoint = wp;
    }
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (addr == wp->vaddr && len == wp->len
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);

    tlb_flush_page(cpu, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
        }
    }
}

/* Return true if this watchpoint address matches the specified
 * access (ie the address range covered by the watchpoint overlaps
 * partially or completely with the address range covered by the
 * access).
 */
static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
                                                  vaddr addr,
                                                  vaddr len)
{
    /* We know the lengths are non-zero, but a little caution is
     * required to avoid errors in the case where the range ends
     * exactly at the top of the address space and so addr + len
     * wraps round to zero.
     */
    vaddr wpend = wp->vaddr + wp->len - 1;
    vaddr addrend = addr + len - 1;

    return !(addr > wpend || wp->vaddr > addrend);
}
#endif
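/*
 * Illustrative sketch, not part of the original file: why the inclusive
 * end points above matter.  For a watchpoint at the very top of a 64-bit
 * address space, wp->vaddr + wp->len would wrap to zero, but the -1 form
 * keeps the overlap test correct:
 */
#if 0 /* example only */
/* access [0x1000, 0x1fff] vs watchpoint [0x1ff0, 0x200f]: they overlap,
 * and indeed !(0x1000 > 0x200f || 0x1ff0 > 0x1fff) is true. */
#endif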
/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
{
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
    }

    breakpoint_invalidate(cpu, pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
{
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
{
    QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);

    breakpoint_invalidate(cpu, breakpoint->pc);

    g_free(breakpoint);
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
{
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
        }
    }
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(cpu);
        }
    }
}
void cpu_abort(CPUState *cpu, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_separate()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}
#if !defined(CONFIG_USER_ONLY)
/* Called from RCU critical section */
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && addr - block->offset < block->max_length) {
        return block;
    }
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->max_length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    /* It is safe to write mru_block outside the iothread lock.  This
     * is what happens:
     *
     *     mru_block = xxx
     *     rcu_read_unlock()
     *                                        xxx removed from list
     *                  rcu_read_lock()
     *                  read mru_block
     *                                        mru_block = NULL;
     *                                        call_rcu(reclaim_ramblock, xxx);
     *                  rcu_read_unlock()
     *
     * atomic_rcu_set is not needed here.  The block was already published
     * when it was placed into the list.  Here we're just making an extra
     * copy of the pointer.
     */
    ram_list.mru_block = block;
    return block;
}

static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
    CPUState *cpu;
    ram_addr_t start1;
    RAMBlock *block;
    ram_addr_t end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    rcu_read_lock();
    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
    CPU_FOREACH(cpu) {
        tlb_reset_dirty(cpu, start1, length);
    }
    rcu_read_unlock();
}

/* Note: start and end must be within the same ram block.  */
bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
                                              ram_addr_t length,
                                              unsigned client)
{
    unsigned long end, page;
    bool dirty;

    if (length == 0) {
        return false;
    }

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;
    dirty = bitmap_test_and_clear_atomic(ram_list.dirty_memory[client],
                                         page, end - page);

    if (dirty && tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
    }

    return dirty;
}
/* Called from RCU critical section */
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        AddressSpaceDispatch *d;

        d = atomic_rcu_read(&section->address_space->dispatch);
        iotlb = section - d->map.sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* defined(CONFIG_USER_ONLY) */
#if !defined(CONFIG_USER_ONLY)

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
                               qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this.  Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
{
    phys_mem_alloc = alloc;
}
static uint16_t phys_section_add(PhysPageMap *map,
                                 MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(map->sections_nb < TARGET_PAGE_SIZE);

    if (map->sections_nb == map->sections_nb_alloc) {
        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
        map->sections = g_renew(MemoryRegionSection, map->sections,
                                map->sections_nb_alloc);
    }
    map->sections[map->sections_nb] = *section;
    memory_region_ref(section->mr);
    return map->sections_nb++;
}

static void phys_section_destroy(MemoryRegion *mr)
{
    bool have_sub_page = mr->subpage;

    memory_region_unref(mr);

    if (have_sub_page) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        object_unref(OBJECT(&subpage->iomem));
        g_free(subpage);
    }
}

static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->sections);
}
static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
                                                   d->map.nodes, d->map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.address_space = d->as;
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&d->map, &subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end,
                     phys_section_add(&d->map, section));
}


static void register_multipage(AddressSpaceDispatch *d,
                               MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(&d->map, section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,
                                                    TARGET_PAGE_BITS));

    assert(num_pages);
    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
}
static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = as->next_dispatch;
    MemoryRegionSection now = *section, remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
                       - now.offset_within_address_space;

        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(d, &now);
    } else {
        now.size = int128_zero();
    }
    while (int128_ne(remain.size, now.size)) {
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);
        now = remain;
        if (int128_lt(remain.size, page_size)) {
            register_subpage(d, &now);
        } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
            now.size = page_size;
            register_subpage(d, &now);
        } else {
            now.size = int128_and(now.size, int128_neg(page_size));
            register_multipage(d, &now);
        }
    }
}
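/*
 * Illustrative worked example, not part of the original file: with 4K
 * pages, a section covering [0x0800, 0x3400) is split by mem_add into a
 * leading subpage [0x0800, 0x1000), a page-aligned multipage region
 * [0x1000, 0x3000), and a trailing subpage [0x3000, 0x3400).
 */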
void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled()) {
        kvm_flush_coalesced_mmio_buffer();
    }
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}
#ifdef __linux__

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC       0x958458f6

static long gethugepagesize(const char *path, Error **errp)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        error_setg_errno(errp, errno, "failed to get page size of file %s",
                         path);
        return 0;
    }

    return fs.f_bsize;
}

static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path,
                            Error **errp)
{
    struct stat st;
    char *filename;
    char *sanitized_name;
    char *c;
    void *area;
    int fd;
    uint64_t hpagesize;
    Error *local_err = NULL;

    hpagesize = gethugepagesize(path, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        goto error;
    }
    block->mr->align = hpagesize;

    if (memory < hpagesize) {
        error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
                   "or larger than huge page size 0x%" PRIx64,
                   memory, hpagesize);
        goto error;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        error_setg(errp,
                   "host lacks kvm mmu notifiers, -mem-path unsupported");
        goto error;
    }

    if (!stat(path, &st) && S_ISDIR(st.st_mode)) {
        /* Make name safe to use with mkstemp by replacing '/' with '_'. */
        sanitized_name = g_strdup(memory_region_name(block->mr));
        for (c = sanitized_name; *c != '\0'; c++) {
            if (*c == '/') {
                *c = '_';
            }
        }

        filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
                                   sanitized_name);
        g_free(sanitized_name);

        fd = mkstemp(filename);
        if (fd >= 0) {
            unlink(filename);
        }
        g_free(filename);
    } else {
        fd = open(path, O_RDWR | O_CREAT, 0644);
    }

    if (fd < 0) {
        error_setg_errno(errp, errno,
                         "unable to create backing store for hugepages");
        goto error;
    }

    memory = ROUND_UP(memory, hpagesize);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory)) {
        perror("ftruncate");
    }

    area = qemu_ram_mmap(fd, memory, hpagesize, block->flags & RAM_SHARED);
    if (area == MAP_FAILED) {
        error_setg_errno(errp, errno,
                         "unable to map backing store for hugepages");
        close(fd);
        goto error;
    }

    if (mem_prealloc) {
        os_mem_prealloc(fd, area, memory);
    }

    block->fd = fd;
    return area;

error:
    return NULL;
}
#endif
/* Called with the ramlist lock held.  */
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    assert(size != 0); /* it would hand out same offset multiple times */

    if (QLIST_EMPTY_RCU(&ram_list.blocks)) {
        return 0;
    }

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->max_length;

        QLIST_FOREACH_RCU(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}
last_ram_offset(void)
1359 ram_addr_t last
= 0;
1362 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1363 last
= MAX(last
, block
->offset
+ block
->max_length
);
1369 static void qemu_ram_setup_dump(void *addr
, ram_addr_t size
)
1373 /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
1374 if (!machine_dump_guest_core(current_machine
)) {
1375 ret
= qemu_madvise(addr
, size
, QEMU_MADV_DONTDUMP
);
1377 perror("qemu_madvise");
1378 fprintf(stderr
, "madvise doesn't support MADV_DONTDUMP, "
1379 "but dump_guest_core=off specified\n");
1384 /* Called within an RCU critical section, or while the ramlist lock
1387 static RAMBlock
*find_ram_block(ram_addr_t addr
)
1391 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1392 if (block
->offset
== addr
) {
1400 const char *qemu_ram_get_idstr(RAMBlock
*rb
)
1405 /* Called with iothread lock held. */
1406 void qemu_ram_set_idstr(ram_addr_t addr
, const char *name
, DeviceState
*dev
)
1408 RAMBlock
*new_block
, *block
;
1411 new_block
= find_ram_block(addr
);
1413 assert(!new_block
->idstr
[0]);
1416 char *id
= qdev_get_dev_path(dev
);
1418 snprintf(new_block
->idstr
, sizeof(new_block
->idstr
), "%s/", id
);
1422 pstrcat(new_block
->idstr
, sizeof(new_block
->idstr
), name
);
1424 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1425 if (block
!= new_block
&& !strcmp(block
->idstr
, new_block
->idstr
)) {
1426 fprintf(stderr
, "RAMBlock \"%s\" already registered, abort!\n",
1434 /* Called with iothread lock held. */
1435 void qemu_ram_unset_idstr(ram_addr_t addr
)
1439 /* FIXME: arch_init.c assumes that this is not called throughout
1440 * migration. Ignore the problem since hot-unplug during migration
1441 * does not work anyway.
1445 block
= find_ram_block(addr
);
1447 memset(block
->idstr
, 0, sizeof(block
->idstr
));
1452 static int memory_try_enable_merging(void *addr
, size_t len
)
1454 if (!machine_mem_merge(current_machine
)) {
1455 /* disabled by the user */
1459 return qemu_madvise(addr
, len
, QEMU_MADV_MERGEABLE
);
/* Only legal before guest might have detected the memory size: e.g. on
 * incoming migration, or right after reset.
 *
 * As the memory core doesn't know how memory is accessed, it is up to the
 * resize callback to update device state and/or add assertions to detect
 * misuse, if necessary.
 */
int qemu_ram_resize(ram_addr_t base, ram_addr_t newsize, Error **errp)
{
    RAMBlock *block = find_ram_block(base);

    assert(block);

    newsize = HOST_PAGE_ALIGN(newsize);

    if (block->used_length == newsize) {
        return 0;
    }

    if (!(block->flags & RAM_RESIZEABLE)) {
        error_setg_errno(errp, EINVAL,
                         "Length mismatch: %s: 0x" RAM_ADDR_FMT
                         " in != 0x" RAM_ADDR_FMT, block->idstr,
                         newsize, block->used_length);
        return -EINVAL;
    }

    if (block->max_length < newsize) {
        error_setg_errno(errp, EINVAL,
                         "Length too large: %s: 0x" RAM_ADDR_FMT
                         " > 0x" RAM_ADDR_FMT, block->idstr,
                         newsize, block->max_length);
        return -EINVAL;
    }

    cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
    block->used_length = newsize;
    cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
                                        DIRTY_CLIENTS_ALL);
    memory_region_set_size(block->mr, newsize);
    if (block->resized) {
        block->resized(block->idstr, newsize, block->host);
    }
    return 0;
}
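/*
 * Illustrative sketch, not part of the original file: a block allocated
 * with qemu_ram_alloc_resizeable() can be grown later, e.g. on incoming
 * migration.  block_base is a hypothetical name.
 */
#if 0 /* example only */
Error *err = NULL;
if (qemu_ram_resize(block_base, 2 * 1024 * 1024, &err) < 0) {
    error_report_err(err);
}
#endif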
static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp)
{
    RAMBlock *block;
    RAMBlock *last_block = NULL;
    ram_addr_t old_ram_size, new_ram_size;
    Error *err = NULL;

    old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;

    qemu_mutex_lock_ramlist();
    new_block->offset = find_ram_offset(new_block->max_length);

    if (!new_block->host) {
        if (xen_enabled()) {
            xen_ram_alloc(new_block->offset, new_block->max_length,
                          new_block->mr, &err);
            if (err) {
                error_propagate(errp, err);
                qemu_mutex_unlock_ramlist();
                return -1;
            }
        } else {
            new_block->host = phys_mem_alloc(new_block->max_length,
                                             &new_block->mr->align);
            if (!new_block->host) {
                error_setg_errno(errp, errno,
                                 "cannot set up guest memory '%s'",
                                 memory_region_name(new_block->mr));
                qemu_mutex_unlock_ramlist();
                return -1;
            }
            memory_try_enable_merging(new_block->host, new_block->max_length);
        }
    }

    new_ram_size = MAX(old_ram_size,
              (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS);
    if (new_ram_size > old_ram_size) {
        migration_bitmap_extend(old_ram_size, new_ram_size);
    }
    /* Keep the list sorted from biggest to smallest block.  Unlike QTAILQ,
     * QLIST (which has an RCU-friendly variant) does not have insertion at
     * tail, so save the last element in last_block.
     */
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        last_block = block;
        if (block->max_length < new_block->max_length) {
            break;
        }
    }
    if (block) {
        QLIST_INSERT_BEFORE_RCU(block, new_block, next);
    } else if (last_block) {
        QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
    } else { /* list is empty */
        QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
    }
    ram_list.mru_block = NULL;

    /* Write list before version */
    smp_wmb();
    ram_list.version++;
    qemu_mutex_unlock_ramlist();

    new_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;

    if (new_ram_size > old_ram_size) {
        int i;

        /* ram_list.dirty_memory[] is protected by the iothread lock.  */
        for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
            ram_list.dirty_memory[i] =
                bitmap_zero_extend(ram_list.dirty_memory[i],
                                   old_ram_size, new_ram_size);
        }
    }
    cpu_physical_memory_set_dirty_range(new_block->offset,
                                        new_block->used_length,
                                        DIRTY_CLIENTS_ALL);

    if (new_block->host) {
        qemu_ram_setup_dump(new_block->host, new_block->max_length);
        qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
        qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
        if (kvm_enabled()) {
            kvm_setup_guest_memory(new_block->host, new_block->max_length);
        }
    }

    return new_block->offset;
}
#ifdef __linux__
ram_addr_t qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
                                    bool share, const char *mem_path,
                                    Error **errp)
{
    RAMBlock *new_block;
    ram_addr_t addr;
    Error *local_err = NULL;

    if (xen_enabled()) {
        error_setg(errp, "-mem-path not supported with Xen");
        return -1;
    }

    if (phys_mem_alloc != qemu_anon_ram_alloc) {
        /*
         * file_ram_alloc() needs to allocate just like
         * phys_mem_alloc, but we haven't bothered to provide
         * a hook there.
         */
        error_setg(errp,
                   "-mem-path not supported with this accelerator");
        return -1;
    }

    size = HOST_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));
    new_block->mr = mr;
    new_block->used_length = size;
    new_block->max_length = size;
    new_block->flags = share ? RAM_SHARED : 0;
    new_block->host = file_ram_alloc(new_block, size,
                                     mem_path, errp);
    if (!new_block->host) {
        g_free(new_block);
        return -1;
    }

    addr = ram_block_add(new_block, &local_err);
    if (local_err) {
        g_free(new_block);
        error_propagate(errp, local_err);
        return -1;
    }
    return addr;
}
#endif

static ram_addr_t qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
                                          void (*resized)(const char*,
                                                          uint64_t length,
                                                          void *host),
                                          void *host, bool resizeable,
                                          MemoryRegion *mr, Error **errp)
{
    RAMBlock *new_block;
    ram_addr_t addr;
    Error *local_err = NULL;

    size = HOST_PAGE_ALIGN(size);
    max_size = HOST_PAGE_ALIGN(max_size);
    new_block = g_malloc0(sizeof(*new_block));
    new_block->mr = mr;
    new_block->resized = resized;
    new_block->used_length = size;
    new_block->max_length = max_size;
    assert(max_size >= size);
    new_block->fd = -1;
    new_block->host = host;
    if (host) {
        new_block->flags |= RAM_PREALLOC;
    }
    if (resizeable) {
        new_block->flags |= RAM_RESIZEABLE;
    }
    addr = ram_block_add(new_block, &local_err);
    if (local_err) {
        g_free(new_block);
        error_propagate(errp, local_err);
        return -1;
    }
    return addr;
}

ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr, Error **errp)
{
    return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
}

ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
{
    return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
}

ram_addr_t qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
                                     void (*resized)(const char*,
                                                     uint64_t length,
                                                     void *host),
                                     MemoryRegion *mr, Error **errp)
{
    return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
}
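/*
 * Illustrative sketch, not part of the original file: device code normally
 * reaches these allocators indirectly through the MemoryRegion API rather
 * than calling them directly.  example_init_vram is a hypothetical name.
 */
#if 0 /* example only */
static void example_init_vram(Object *owner, MemoryRegion *vram)
{
    memory_region_init_ram(vram, owner, "example-vram",
                           8 * 1024 * 1024, &error_fatal);
}
#endif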
static void reclaim_ramblock(RAMBlock *block)
{
    if (block->flags & RAM_PREALLOC) {
        ;
    } else if (xen_enabled()) {
        xen_invalidate_map_cache_entry(block->host);
#ifndef _WIN32
    } else if (block->fd >= 0) {
        qemu_ram_munmap(block->host, block->max_length);
        close(block->fd);
#endif
    } else {
        qemu_anon_ram_free(block->host, block->max_length);
    }
    g_free(block);
}

void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    qemu_mutex_lock_ramlist();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE_RCU(block, next);
            ram_list.mru_block = NULL;
            /* Write list before version */
            smp_wmb();
            ram_list.version++;
            call_rcu(block, reclaim_ramblock, rcu);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}
#ifndef _WIN32
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->max_length) {
            vaddr = ramblock_ptr(block, offset);
            if (block->flags & RAM_PREALLOC) {
                ;
            } else if (xen_enabled()) {
                abort();
            } else {
                flags = MAP_FIXED;
                if (block->fd >= 0) {
                    flags |= (block->flags & RAM_SHARED ?
                              MAP_SHARED : MAP_PRIVATE);
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, block->fd, offset);
                } else {
                    /*
                     * Remap needs to match alloc.  Accelerators that
                     * set phys_mem_alloc never remap.  If they did,
                     * we'd need a remap hook here.
                     */
                    assert(phys_mem_alloc == qemu_anon_ram_alloc);

                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                memory_try_enable_merging(vaddr, length);
                qemu_ram_setup_dump(vaddr, length);
            }
        }
    }
}
#endif /* !_WIN32 */

int qemu_get_ram_fd(ram_addr_t addr)
{
    RAMBlock *block;
    int fd;

    rcu_read_lock();
    block = qemu_get_ram_block(addr);
    fd = block->fd;
    rcu_read_unlock();
    return fd;
}

void qemu_set_ram_fd(ram_addr_t addr, int fd)
{
    RAMBlock *block;

    rcu_read_lock();
    block = qemu_get_ram_block(addr);
    block->fd = fd;
    rcu_read_unlock();
}

void *qemu_get_ram_block_host_ptr(ram_addr_t addr)
{
    RAMBlock *block;
    void *ptr;

    rcu_read_lock();
    block = qemu_get_ram_block(addr);
    ptr = ramblock_ptr(block, 0);
    rcu_read_unlock();
    return ptr;
}
/* Return a host pointer to ram allocated with qemu_ram_alloc.
 * This should not be used for general purpose DMA.  Use address_space_map
 * or address_space_rw instead. For local memory (e.g. video ram) that the
 * device owns, use memory_region_get_ram_ptr.
 *
 * Called within RCU critical section.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block = qemu_get_ram_block(addr);

    if (xen_enabled() && block->host == NULL) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map until the end of the page.
         */
        if (block->offset == 0) {
            return xen_map_cache(addr, 0, 0);
        }

        block->host = xen_map_cache(block->offset, block->max_length, 1);
    }
    return ramblock_ptr(block, addr - block->offset);
}
/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
 * but takes a size argument.
 *
 * Called within RCU critical section.
 */
static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
{
    RAMBlock *block;
    ram_addr_t offset_inside_block;

    if (*size == 0) {
        return NULL;
    }

    block = qemu_get_ram_block(addr);
    offset_inside_block = addr - block->offset;
    *size = MIN(*size, block->max_length - offset_inside_block);

    if (xen_enabled() && block->host == NULL) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map the requested area.
         */
        if (block->offset == 0) {
            return xen_map_cache(addr, *size, 1);
        }

        block->host = xen_map_cache(block->offset, block->max_length, 1);
    }

    return ramblock_ptr(block, offset_inside_block);
}
/*
 * Translates a host ptr back to a RAMBlock, a ram_addr and an offset
 * in that RAMBlock.
 *
 * ptr: Host pointer to look up
 * round_offset: If true round the result offset down to a page boundary
 * *ram_addr: set to result ram_addr
 * *offset: set to result offset within the RAMBlock
 *
 * Returns: RAMBlock (or NULL if not found)
 *
 * By the time this function returns, the returned pointer is not protected
 * by RCU anymore.  If the caller is not within an RCU critical section and
 * does not hold the iothread lock, it must have other means of protecting the
 * pointer, such as a reference to the region that includes the incoming
 * ram_addr.
 */
RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
                                   ram_addr_t *ram_addr,
                                   ram_addr_t *offset)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_enabled()) {
        rcu_read_lock();
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
        block = qemu_get_ram_block(*ram_addr);
        if (block) {
            *offset = (host - block->host);
        }
        rcu_read_unlock();
        return block;
    }

    rcu_read_lock();
    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && block->host && host - block->host < block->max_length) {
        goto found;
    }

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        /* This case appears when the block is not mapped. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->max_length) {
            goto found;
        }
    }

    rcu_read_unlock();
    return NULL;

found:
    *offset = (host - block->host);
    if (round_offset) {
        *offset &= TARGET_PAGE_MASK;
    }
    *ram_addr = block->offset + *offset;
    rcu_read_unlock();
    return block;
}
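/*
 * Illustrative sketch, not part of the original file: mapping a TLB host
 * pointer back to its page-aligned ram_addr.  host_ptr is a hypothetical
 * name.
 */
#if 0 /* example only */
ram_addr_t ram_addr, offset;
RAMBlock *rb = qemu_ram_block_from_host(host_ptr, true, &ram_addr, &offset);
if (rb == NULL) {
    /* host_ptr does not point into guest RAM */
}
#endif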
/*
 * Finds the named RAMBlock
 *
 * name: The name of RAMBlock to find
 *
 * Returns: RAMBlock (or NULL if not found)
 */
RAMBlock *qemu_ram_block_by_name(const char *name)
{
    RAMBlock *block;

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (!strcmp(name, block->idstr)) {
            return block;
        }
    }

    return NULL;
}

/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    ram_addr_t offset; /* Not used */

    block = qemu_ram_block_from_host(ptr, false, ram_addr, &offset);

    if (!block) {
        return NULL;
    }

    return block->mr;
}
/* Called within RCU critical section.  */
static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
                               uint64_t val, unsigned size)
{
    if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
        tb_invalidate_phys_page_fast(ram_addr, size);
    }
    switch (size) {
    case 1:
        stb_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 2:
        stw_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 4:
        stl_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    default:
        abort();
    }
    /* Set both VGA and migration bits for simplicity and to remove
     * the notdirty callback faster.
     */
    cpu_physical_memory_set_dirty_range(ram_addr, size,
                                        DIRTY_CLIENTS_NOCODE);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (!cpu_physical_memory_is_clean(ram_addr)) {
        tlb_set_dirty(current_cpu, current_cpu->mem_io_vaddr);
    }
}

static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
                                 unsigned size, bool is_write)
{
    return is_write;
}

static const MemoryRegionOps notdirty_mem_ops = {
    .write = notdirty_mem_write,
    .valid.accepts = notdirty_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
{
    CPUState *cpu = current_cpu;
    CPUArchState *env = cpu->env_ptr;
    target_ulong pc, cs_base;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (cpu->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, len)
            && (wp->flags & flags)) {
            if (flags == BP_MEM_READ) {
                wp->flags |= BP_WATCHPOINT_HIT_READ;
            } else {
                wp->flags |= BP_WATCHPOINT_HIT_WRITE;
            }
            wp->hitaddr = vaddr;
            wp->hitattrs = attrs;
            if (!cpu->watchpoint_hit) {
                cpu->watchpoint_hit = wp;
                tb_check_watchpoint(cpu);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    cpu->exception_index = EXCP_DEBUG;
                    cpu_loop_exit(cpu);
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
                    cpu_resume_from_signal(cpu, NULL);
                }
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   load/store functions.  */
static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata,
                                  unsigned size, MemTxAttrs attrs)
{
    MemTxResult res;
    uint64_t data;
    int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
    AddressSpace *as = current_cpu->cpu_ases[asidx].as;

    check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ);
    switch (size) {
    case 1:
        data = address_space_ldub(as, addr, attrs, &res);
        break;
    case 2:
        data = address_space_lduw(as, addr, attrs, &res);
        break;
    case 4:
        data = address_space_ldl(as, addr, attrs, &res);
        break;
    default: abort();
    }
    *pdata = data;
    return res;
}

static MemTxResult watch_mem_write(void *opaque, hwaddr addr,
                                   uint64_t val, unsigned size,
                                   MemTxAttrs attrs)
{
    MemTxResult res;
    int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
    AddressSpace *as = current_cpu->cpu_ases[asidx].as;

    check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE);
    switch (size) {
    case 1:
        address_space_stb(as, addr, val, attrs, &res);
        break;
    case 2:
        address_space_stw(as, addr, val, attrs, &res);
        break;
    case 4:
        address_space_stl(as, addr, val, attrs, &res);
        break;
    default: abort();
    }
    return res;
}

static const MemoryRegionOps watch_mem_ops = {
    .read_with_attrs = watch_mem_read,
    .write_with_attrs = watch_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
                                unsigned len, MemTxAttrs attrs)
{
    subpage_t *subpage = opaque;
    uint8_t buf[8];
    MemTxResult res;

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
           subpage, len, addr);
#endif
    res = address_space_read(subpage->as, addr + subpage->base,
                             attrs, buf, len);
    if (res) {
        return res;
    }
    switch (len) {
    case 1:
        *data = ldub_p(buf);
        return MEMTX_OK;
    case 2:
        *data = lduw_p(buf);
        return MEMTX_OK;
    case 4:
        *data = ldl_p(buf);
        return MEMTX_OK;
    case 8:
        *data = ldq_p(buf);
        return MEMTX_OK;
    default:
        abort();
    }
}

static MemTxResult subpage_write(void *opaque, hwaddr addr,
                                 uint64_t value, unsigned len, MemTxAttrs attrs)
{
    subpage_t *subpage = opaque;
    uint8_t buf[8];

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx
           " value %"PRIx64"\n",
           __func__, subpage, len, addr, value);
#endif
    switch (len) {
    case 1:
        stb_p(buf, value);
        break;
    case 2:
        stw_p(buf, value);
        break;
    case 4:
        stl_p(buf, value);
        break;
    case 8:
        stq_p(buf, value);
        break;
    default:
        abort();
    }
    return address_space_write(subpage->as, addr + subpage->base,
                               attrs, buf, len);
}

static bool subpage_accepts(void *opaque, hwaddr addr,
                            unsigned len, bool is_write)
{
    subpage_t *subpage = opaque;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
           __func__, subpage, is_write ? 'w' : 'r', len, addr);
#endif

    return address_space_access_valid(subpage->as, addr + subpage->base,
                                      len, is_write);
}

static const MemoryRegionOps subpage_ops = {
    .read_with_attrs = subpage_read,
    .write_with_attrs = subpage_write,
    .impl.min_access_size = 1,
    .impl.max_access_size = 8,
    .valid.min_access_size = 1,
    .valid.max_access_size = 8,
    .valid.accepts = subpage_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE) {
        return -1;
    }
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
           __func__, mmio, start, end, idx, eidx, section);
#endif
    for (; idx <= eidx; idx++) {
        mmio->sub_section[idx] = section;
    }

    return 0;
}

static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
{
    subpage_t *mmio;

    mmio = g_malloc0(sizeof(subpage_t));

    mmio->as = as;
    mmio->base = base;
    memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
                          NULL, TARGET_PAGE_SIZE);
    mmio->iomem.subpage = true;
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
           mmio, base, TARGET_PAGE_SIZE);
#endif
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);

    return mmio;
}
static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
                              MemoryRegion *mr)
{
    assert(as);
    MemoryRegionSection section = {
        .address_space = as,
        .mr = mr,
        .offset_within_address_space = 0,
        .offset_within_region = 0,
        .size = int128_2_64(),
    };

    return phys_section_add(map, &section);
}

MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index, MemTxAttrs attrs)
{
    int asidx = cpu_asidx_from_attrs(cpu, attrs);
    CPUAddressSpace *cpuas = &cpu->cpu_ases[asidx];
    AddressSpaceDispatch *d = atomic_rcu_read(&cpuas->memory_dispatch);
    MemoryRegionSection *sections = d->map.sections;

    return sections[index & ~TARGET_PAGE_MASK].mr;
}

static void io_mem_init(void)
{
    memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
    memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
                          NULL, UINT64_MAX);
    memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
                          NULL, UINT64_MAX);
    memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
                          NULL, UINT64_MAX);
}
static void mem_begin(MemoryListener *listener)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
    uint16_t n;

    n = dummy_section(&d->map, as, &io_mem_unassigned);
    assert(n == PHYS_SECTION_UNASSIGNED);
    n = dummy_section(&d->map, as, &io_mem_notdirty);
    assert(n == PHYS_SECTION_NOTDIRTY);
    n = dummy_section(&d->map, as, &io_mem_rom);
    assert(n == PHYS_SECTION_ROM);
    n = dummy_section(&d->map, as, &io_mem_watch);
    assert(n == PHYS_SECTION_WATCH);

    d->phys_map  = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
    d->as = as;
    as->next_dispatch = d;
}

static void address_space_dispatch_free(AddressSpaceDispatch *d)
{
    phys_sections_free(&d->map);
    g_free(d);
}

static void mem_commit(MemoryListener *listener)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *cur = as->dispatch;
    AddressSpaceDispatch *next = as->next_dispatch;

    phys_page_compact_all(next, next->map.nodes_nb);

    atomic_rcu_set(&as->dispatch, next);
    if (cur) {
        call_rcu(cur, address_space_dispatch_free, rcu);
    }
}
*listener
)
2336 CPUAddressSpace
*cpuas
;
2337 AddressSpaceDispatch
*d
;
2339 /* since each CPU stores ram addresses in its TLB cache, we must
2340 reset the modified entries */
2341 cpuas
= container_of(listener
, CPUAddressSpace
, tcg_as_listener
);
2342 cpu_reloading_memory_map();
2343 /* The CPU and TLB are protected by the iothread lock.
2344 * We reload the dispatch pointer now because cpu_reloading_memory_map()
2345 * may have split the RCU critical section.
2347 d
= atomic_rcu_read(&cpuas
->as
->dispatch
);
2348 cpuas
->memory_dispatch
= d
;
2349 tlb_flush(cpuas
->cpu
, 1);
void address_space_init_dispatch(AddressSpace *as)
{
    as->dispatch = NULL;
    as->dispatch_listener = (MemoryListener) {
        .begin = mem_begin,
        .commit = mem_commit,
        .region_add = mem_add,
        .region_nop = mem_add,
        .priority = 0,
    };
    memory_listener_register(&as->dispatch_listener, as);
}

void address_space_unregister(AddressSpace *as)
{
    memory_listener_unregister(&as->dispatch_listener);
}

void address_space_destroy_dispatch(AddressSpace *as)
{
    AddressSpaceDispatch *d = as->dispatch;

    atomic_rcu_set(&as->dispatch, NULL);
    if (d) {
        call_rcu(d, address_space_dispatch_free, rcu);
    }
}

static void memory_map_init(void)
{
    system_memory = g_malloc(sizeof(*system_memory));

    memory_region_init(system_memory, NULL, "system", UINT64_MAX);
    address_space_init(&address_space_memory, system_memory, "memory");

    system_io = g_malloc(sizeof(*system_io));
    memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
                          65536);
    address_space_init(&address_space_io, system_io, "I/O");
}

MemoryRegion *get_system_memory(void)
{
    return system_memory;
}

MemoryRegion *get_system_io(void)
{
    return system_io;
}

#endif /* !defined(CONFIG_USER_ONLY) */
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

#else
static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
                                     hwaddr length)
{
    uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
    /* No early return if dirty_log_mask is or becomes 0, because
     * cpu_physical_memory_set_dirty_range will still call
     * xen_modified_memory.
     */
    if (dirty_log_mask) {
        dirty_log_mask =
            cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
    }
    if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
        tb_invalidate_phys_range(addr, addr + length);
        dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
    }
    cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
}
static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
{
    unsigned access_size_max = mr->ops->valid.max_access_size;

    /* Regions are assumed to support 1-4 byte accesses unless
       otherwise specified.  */
    if (access_size_max == 0) {
        access_size_max = 4;
    }

    /* Bound the maximum access by the alignment of the address.  */
    if (!mr->ops->impl.unaligned) {
        unsigned align_size_max = addr & -addr;
        if (align_size_max != 0 && align_size_max < access_size_max) {
            access_size_max = align_size_max;
        }
    }

    /* Don't attempt accesses larger than the maximum.  */
    if (l > access_size_max) {
        l = access_size_max;
    }
    l = pow2floor(l);

    return l;
}
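
/* Worked example for memory_access_size(): for a region whose ops declare
 * valid.max_access_size == 4 and impl.unaligned == false, a 6-byte access
 * at addr 0x1002 is first capped to 4 bytes, then the alignment bound
 * (0x1002 & -0x1002 == 2) shrinks it to 2, and pow2floor() leaves it at 2.
 * The dispatch loops below then come back around for the remaining bytes.
 */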
static bool prepare_mmio_access(MemoryRegion *mr)
{
    bool unlocked = !qemu_mutex_iothread_locked();
    bool release_lock = false;

    if (unlocked && mr->global_locking) {
        qemu_mutex_lock_iothread();
        unlocked = false;
        release_lock = true;
    }
    if (mr->flush_coalesced_mmio) {
        if (unlocked) {
            qemu_mutex_lock_iothread();
        }
        qemu_flush_coalesced_mmio_buffer();
        if (unlocked) {
            qemu_mutex_unlock_iothread();
        }
    }

    return release_lock;
}
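
/* Usage pattern for prepare_mmio_access() (illustrative): the dispatch
 * loops below OR the result into a local flag and drop the lock once per
 * iteration, e.g.:
 *
 *     release_lock |= prepare_mmio_access(mr);
 *     ...dispatch the MMIO access...
 *     if (release_lock) {
 *         qemu_mutex_unlock_iothread();
 *         release_lock = false;
 *     }
 */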
/* Called within RCU critical section.  */
static MemTxResult address_space_write_continue(AddressSpace *as, hwaddr addr,
                                                MemTxAttrs attrs,
                                                const uint8_t *buf,
                                                int len, hwaddr addr1,
                                                hwaddr l, MemoryRegion *mr)
{
    uint8_t *ptr;
    uint64_t val;
    MemTxResult result = MEMTX_OK;
    bool release_lock = false;

    for (;;) {
        if (!memory_access_is_direct(mr, true)) {
            release_lock |= prepare_mmio_access(mr);
            l = memory_access_size(mr, l, addr1);
            /* XXX: could force current_cpu to NULL to avoid
               potential bugs */
            switch (l) {
            case 8:
                /* 64 bit write access */
                val = ldq_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 8,
                                                       attrs);
                break;
            case 4:
                /* 32 bit write access */
                val = ldl_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 4,
                                                       attrs);
                break;
            case 2:
                /* 16 bit write access */
                val = lduw_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 2,
                                                       attrs);
                break;
            case 1:
                /* 8 bit write access */
                val = ldub_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 1,
                                                       attrs);
                break;
            default:
                abort();
            }
        } else {
            addr1 += memory_region_get_ram_addr(mr);
            /* RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
            invalidate_and_set_dirty(mr, addr1, l);
        }

        if (release_lock) {
            qemu_mutex_unlock_iothread();
            release_lock = false;
        }

        len -= l;
        buf += l;
        addr += l;

        if (!len) {
            break;
        }

        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);
    }

    return result;
}
MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
                                const uint8_t *buf, int len)
{
    hwaddr l;
    hwaddr addr1;
    MemoryRegion *mr;
    MemTxResult result = MEMTX_OK;

    if (len > 0) {
        rcu_read_lock();
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);
        result = address_space_write_continue(as, addr, attrs, buf, len,
                                              addr1, l, mr);
        rcu_read_unlock();
    }

    return result;
}
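
/* Illustrative sketch (not part of the original file): a DMA-capable device
 * model might push a completion flag back to guest RAM like this.  The
 * one-byte "done" status and the descriptor address are invented.
 */
static MemTxResult example_dma_writeback(AddressSpace *as, hwaddr desc_addr)
{
    uint8_t status = 0x01;    /* hypothetical "done" flag */

    /* A failed translation or a device error shows up in the result. */
    return address_space_write(as, desc_addr, MEMTXATTRS_UNSPECIFIED,
                               &status, sizeof(status));
}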
/* Called within RCU critical section.  */
MemTxResult address_space_read_continue(AddressSpace *as, hwaddr addr,
                                        MemTxAttrs attrs, uint8_t *buf,
                                        int len, hwaddr addr1, hwaddr l,
                                        MemoryRegion *mr)
{
    uint8_t *ptr;
    uint64_t val;
    MemTxResult result = MEMTX_OK;
    bool release_lock = false;

    for (;;) {
        if (!memory_access_is_direct(mr, false)) {
            /* I/O case */
            release_lock |= prepare_mmio_access(mr);
            l = memory_access_size(mr, l, addr1);
            switch (l) {
            case 8:
                /* 64 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 8,
                                                      attrs);
                stq_p(buf, val);
                break;
            case 4:
                /* 32 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 4,
                                                      attrs);
                stl_p(buf, val);
                break;
            case 2:
                /* 16 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 2,
                                                      attrs);
                stw_p(buf, val);
                break;
            case 1:
                /* 8 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 1,
                                                      attrs);
                stb_p(buf, val);
                break;
            default:
                abort();
            }
        } else {
            /* RAM case */
            ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
            memcpy(buf, ptr, l);
        }

        if (release_lock) {
            qemu_mutex_unlock_iothread();
            release_lock = false;
        }

        len -= l;
        buf += l;
        addr += l;

        if (!len) {
            break;
        }

        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, false);
    }

    return result;
}
MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
                                    MemTxAttrs attrs, uint8_t *buf, int len)
{
    hwaddr l;
    hwaddr addr1;
    MemoryRegion *mr;
    MemTxResult result = MEMTX_OK;

    if (len > 0) {
        rcu_read_lock();
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, false);
        result = address_space_read_continue(as, addr, attrs, buf, len,
                                             addr1, l, mr);
        rcu_read_unlock();
    }

    return result;
}
MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
                             uint8_t *buf, int len, bool is_write)
{
    if (is_write) {
        return address_space_write(as, addr, attrs, (uint8_t *)buf, len);
    } else {
        return address_space_read(as, addr, attrs, (uint8_t *)buf, len);
    }
}
void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
                            int len, int is_write)
{
    address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
                     buf, len, is_write);
}
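
/* Illustrative sketch: legacy callers that predate MemTxAttrs can still go
 * through cpu_physical_memory_rw(); it is simply a fixed binding to
 * address_space_memory with unspecified attributes.  The guest address
 * here is invented.
 */
static void example_legacy_copy_out(uint8_t *host_buf, int size)
{
    cpu_physical_memory_rw(0x1000, host_buf, size, 0 /* read */);
}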
enum write_rom_type {
    WRITE_DATA,
    FLUSH_CACHE,
};
static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
    hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
{
    hwaddr l;
    uint8_t *ptr;
    hwaddr addr1;
    MemoryRegion *mr;

    rcu_read_lock();
    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);

        if (!(memory_region_is_ram(mr) ||
              memory_region_is_romd(mr))) {
            l = memory_access_size(mr, l, addr1);
        } else {
            addr1 += memory_region_get_ram_addr(mr);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            switch (type) {
            case WRITE_DATA:
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(mr, addr1, l);
                break;
            case FLUSH_CACHE:
                flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
                break;
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
    rcu_read_unlock();
}
/* used for ROM loading; can write to RAM and ROM */
void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
                                   const uint8_t *buf, int len)
{
    cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
}
void cpu_flush_icache_range(hwaddr start, int len)
{
    /*
     * This function should do the same thing as an icache flush that was
     * triggered from within the guest. For TCG we are always cache coherent,
     * so there is no need to flush anything. For KVM / Xen we need to flush
     * the host's instruction cache at least.
     */
    if (tcg_enabled()) {
        return;
    }
    cpu_physical_memory_write_rom_internal(&address_space_memory,
                                           start, NULL, len, FLUSH_CACHE);
}
typedef struct {
    MemoryRegion *mr;
    void *buffer;
    hwaddr addr;
    hwaddr len;
    bool in_use;
} BounceBuffer;

static BounceBuffer bounce;
typedef struct MapClient {
    QEMUBH *bh;
    QLIST_ENTRY(MapClient) link;
} MapClient;

QemuMutex map_client_list_lock;
static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);
static void cpu_unregister_map_client_do(MapClient *client)
{
    QLIST_REMOVE(client, link);
    g_free(client);
}
static void cpu_notify_map_clients_locked(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        qemu_bh_schedule(client->bh);
        cpu_unregister_map_client_do(client);
    }
}
void cpu_register_map_client(QEMUBH *bh)
{
    MapClient *client = g_malloc(sizeof(*client));

    qemu_mutex_lock(&map_client_list_lock);
    client->bh = bh;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    if (!atomic_read(&bounce.in_use)) {
        cpu_notify_map_clients_locked();
    }
    qemu_mutex_unlock(&map_client_list_lock);
}
void cpu_exec_init_all(void)
{
    qemu_mutex_init(&ram_list.mutex);
    io_mem_init();
    memory_map_init();
    qemu_mutex_init(&map_client_list_lock);
}
void cpu_unregister_map_client(QEMUBH *bh)
{
    MapClient *client;

    qemu_mutex_lock(&map_client_list_lock);
    QLIST_FOREACH(client, &map_client_list, link) {
        if (client->bh == bh) {
            cpu_unregister_map_client_do(client);
            break;
        }
    }
    qemu_mutex_unlock(&map_client_list_lock);
}
static void cpu_notify_map_clients(void)
{
    qemu_mutex_lock(&map_client_list_lock);
    cpu_notify_map_clients_locked();
    qemu_mutex_unlock(&map_client_list_lock);
}
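
/* Illustrative sketch: how a caller uses the map-client machinery to retry
 * once the (single) bounce buffer frees up.  "example_retry_cb" and its
 * lack of state are invented for the example.
 */
static void example_retry_cb(void *opaque)
{
    /* Fires after address_space_unmap() releases the bounce buffer;
     * retry the failed address_space_map() call from here.
     */
}

static void example_map_with_retry(void)
{
    QEMUBH *bh = qemu_bh_new(example_retry_cb, NULL);

    /* Schedules bh immediately if the bounce buffer is already free. */
    cpu_register_map_client(bh);
}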
bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
{
    MemoryRegion *mr;
    hwaddr l, xlat;

    rcu_read_lock();
    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (!memory_access_is_direct(mr, is_write)) {
            l = memory_access_size(mr, l, addr);
            if (!memory_region_access_valid(mr, xlat, l, is_write)) {
                /* Don't leak the RCU critical section on the error path. */
                rcu_read_unlock();
                return false;
            }
        }

        len -= l;
        addr += l;
    }
    rcu_read_unlock();
    return true;
}
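
/* Illustrative sketch: probing a range before committing to an access.
 * The address and length are invented.
 */
static bool example_can_dma(AddressSpace *as)
{
    return address_space_access_valid(as, 0x2000, 512, true /* write */);
}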
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *address_space_map(AddressSpace *as,
                        hwaddr addr,
                        hwaddr *plen,
                        bool is_write)
{
    hwaddr len = *plen;
    hwaddr done = 0;
    hwaddr l, xlat, base;
    MemoryRegion *mr, *this_mr;
    ram_addr_t raddr;
    void *ptr;

    if (len == 0) {
        return NULL;
    }

    l = len;
    rcu_read_lock();
    mr = address_space_translate(as, addr, &xlat, &l, is_write);

    if (!memory_access_is_direct(mr, is_write)) {
        if (atomic_xchg(&bounce.in_use, true)) {
            rcu_read_unlock();
            return NULL;
        }
        /* Avoid unbounded allocations */
        l = MIN(l, TARGET_PAGE_SIZE);
        bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
        bounce.addr = addr;
        bounce.len = l;

        memory_region_ref(mr);
        bounce.mr = mr;
        if (!is_write) {
            address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
                               bounce.buffer, l);
        }

        rcu_read_unlock();
        *plen = l;
        return bounce.buffer;
    }

    base = xlat;
    raddr = memory_region_get_ram_addr(mr);

    for (;;) {
        len -= l;
        addr += l;
        done += l;
        if (len == 0) {
            break;
        }

        l = len;
        this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (this_mr != mr || xlat != base + done) {
            break;
        }
    }

    memory_region_ref(mr);
    rcu_read_unlock();
    *plen = done;
    ptr = qemu_ram_ptr_length(raddr + base, plen);

    return ptr;
}
/* Unmaps a memory region previously mapped by address_space_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         int is_write, hwaddr access_len)
{
    if (buffer != bounce.buffer) {
        MemoryRegion *mr;
        ram_addr_t addr1;

        mr = qemu_ram_addr_from_host(buffer, &addr1);
        assert(mr != NULL);
        if (is_write) {
            invalidate_and_set_dirty(mr, addr1, access_len);
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        memory_region_unref(mr);
        return;
    }
    if (is_write) {
        address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
                            bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    memory_region_unref(bounce.mr);
    atomic_mb_set(&bounce.in_use, false);
    cpu_notify_map_clients();
}
void *cpu_physical_memory_map(hwaddr addr,
                              hwaddr *plen,
                              int is_write)
{
    return address_space_map(&address_space_memory, addr, plen, is_write);
}
void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                               int is_write, hwaddr access_len)
{
    return address_space_unmap(&address_space_memory, buffer, len,
                               is_write, access_len);
}
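
/* Illustrative sketch: the zero-copy map/unmap pattern.  Note that the
 * mapped length may come back smaller than requested, and NULL means the
 * bounce buffer is busy (see cpu_register_map_client() above).  The guest
 * address is invented.
 */
static void example_zero_copy_fill(void)
{
    hwaddr len = 4096;
    void *host = cpu_physical_memory_map(0x3000, &len, 1 /* write */);

    if (host) {
        memset(host, 0, len);                 /* touch only 'len' bytes */
        cpu_physical_memory_unmap(host, len, 1, len);
    }
}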
/* warning: addr must be aligned */
static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
                                                  MemTxAttrs attrs,
                                                  MemTxResult *result,
                                                  enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, false);
    if (l < 4 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}
uint32_t address_space_ldl(AddressSpace *as, hwaddr addr,
                           MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_NATIVE_ENDIAN);
}

uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_LITTLE_ENDIAN);
}

uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_BIG_ENDIAN);
}

uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}
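
/* Illustrative sketch: a device model reading a 32-bit little-endian field
 * out of a guest-physical control block.  The address is invented; the
 * _le/_be/_phys variants differ only in byte order and in whether
 * MemTxAttrs and a MemTxResult are threaded through.
 */
static uint32_t example_read_ctrl_word(AddressSpace *as, MemTxResult *res)
{
    return address_space_ldl_le(as, 0x4000, MEMTXATTRS_UNSPECIFIED, res);
}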
/* warning: addr must be aligned */
static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
                                                  MemTxAttrs attrs,
                                                  MemTxResult *result,
                                                  enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 8;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 false);
    if (l < 8 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap64(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap64(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}
uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
                           MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_NATIVE_ENDIAN);
}

uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_LITTLE_ENDIAN);
}

uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_BIG_ENDIAN);
}

uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}
uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
                            MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t val;
    MemTxResult r;

    r = address_space_rw(as, addr, attrs, &val, 1, 0);
    if (result) {
        *result = r;
    }
    return val;
}

uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}
/* warning: addr must be aligned */
static inline uint32_t address_space_lduw_internal(AddressSpace *as,
                                                   hwaddr addr,
                                                   MemTxAttrs attrs,
                                                   MemTxResult *result,
                                                   enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 false);
    if (l < 2 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}
uint32_t address_space_lduw(AddressSpace *as, hwaddr addr,
                            MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_NATIVE_ENDIAN);
}

uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
                               MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_LITTLE_ENDIAN);
}

uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
                               MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_BIG_ENDIAN);
}

uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}
/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
                                MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    uint8_t dirty_log_mask;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

        r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
    } else {
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        dirty_log_mask = memory_region_get_dirty_log_mask(mr);
        dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
        cpu_physical_memory_set_dirty_range(addr1, 4, dirty_log_mask);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}

void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_notdirty(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
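
/* Illustrative sketch: stl_phys_notdirty() suits page table walkers that
 * set accessed/dirty bits in guest PTEs, where flagging the store as
 * DIRTY_MEMORY_CODE would needlessly invalidate translated code.  The
 * 0x20 "accessed" bit is a hypothetical PTE layout, not a real one.
 */
static void example_set_pte_accessed(AddressSpace *as, hwaddr pte_addr,
                                     uint32_t pte)
{
    stl_phys_notdirty(as, pte_addr, pte | 0x20 /* hypothetical A bit */);
}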
/* warning: addr must be aligned */
static inline void address_space_stl_internal(AddressSpace *as,
                                              hwaddr addr, uint32_t val,
                                              MemTxAttrs attrs,
                                              MemTxResult *result,
                                              enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(mr, addr1, 4);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}
void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stl_internal(as, addr, val, attrs, result,
                               DEVICE_NATIVE_ENDIAN);
}

void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stl_internal(as, addr, val, attrs, result,
                               DEVICE_LITTLE_ENDIAN);
}

void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stl_internal(as, addr, val, attrs, result,
                               DEVICE_BIG_ENDIAN);
}

void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t v = val;
    MemTxResult r;

    r = address_space_rw(as, addr, attrs, &v, 1, 1);
    if (result) {
        *result = r;
    }
}

void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
/* warning: addr must be aligned */
static inline void address_space_stw_internal(AddressSpace *as,
                                              hwaddr addr, uint32_t val,
                                              MemTxAttrs attrs,
                                              MemTxResult *result,
                                              enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, true);
    if (l < 2 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(mr, addr1, 2);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}
void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_NATIVE_ENDIAN);
}

void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_LITTLE_ENDIAN);
}

void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_BIG_ENDIAN);
}

void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;
    val = tswap64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;
    val = cpu_to_le64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;
    val = cpu_to_be64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}
void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
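
/* Illustrative sketch: writing one 64-bit big-endian field, e.g. for a
 * device whose registers are defined big-endian regardless of the target.
 * Address and value are invented.
 */
static void example_write_be_counter(AddressSpace *as)
{
    stq_be_phys(as, 0x5000, 0x0123456789abcdefULL);
}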
/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

    while (len > 0) {
        int asidx;
        MemTxAttrs attrs;

        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_attrs_debug(cpu, page, &attrs);
        asidx = cpu_asidx_from_attrs(cpu, attrs);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write) {
            cpu_physical_memory_write_rom(cpu->cpu_ases[asidx].as,
                                          phys_addr, buf, l);
        } else {
            address_space_rw(cpu->cpu_ases[asidx].as, phys_addr,
                             MEMTXATTRS_UNSPECIFIED,
                             buf, l, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
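
/* Illustrative sketch: this debug path is what the gdbstub uses to peek at
 * guest virtual memory; unlike the normal paths it walks the page tables
 * one page at a time and may write to ROM.  The virtual address is
 * invented.
 */
static int example_peek_guest_u32(CPUState *cpu, uint32_t *out)
{
    return cpu_memory_rw_debug(cpu, 0x8000, (uint8_t *)out,
                               sizeof(*out), 0 /* read */);
}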
/*
 * Allows code that needs to deal with migration bitmaps etc to still be built
 * target independent.
 */
size_t qemu_target_page_bits(void)
{
    return TARGET_PAGE_BITS;
}

#endif
/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool target_words_bigendian(void);
bool target_words_bigendian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}
#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegion *mr;
    hwaddr l = 1;
    bool res;

    rcu_read_lock();
    mr = address_space_translate(&address_space_memory,
                                 phys_addr, &phys_addr, &l, false);

    res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
    rcu_read_unlock();
    return res;
}
int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
{
    RAMBlock *block;
    int ret = 0;

    rcu_read_lock();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        ret = func(block->idstr, block->host, block->offset,
                   block->used_length, opaque);
        if (ret) {
            break;
        }
    }
    rcu_read_unlock();
    return ret;
}
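
/* Illustrative sketch: a RAMBlockIterFunc that sums block sizes, as a
 * migration helper might.  Returning non-zero would stop the walk early.
 */
static int example_count_ram(const char *idstr, void *host_addr,
                             ram_addr_t offset, ram_addr_t length,
                             void *opaque)
{
    *(uint64_t *)opaque += length;
    return 0;
}

/* ...used as: qemu_ram_foreach_block(example_count_ram, &total); */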