 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu-common.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/boards.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "exec/cpu-all.h"
#include "qemu/rcu_queue.h"
#include "qemu/main-loop.h"
#include "translate-all.h"
#include "sysemu/replay.h"
#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "qemu/range.h"
#include "qemu/mmap-alloc.h"

//#define DEBUG_SUBPAGE
#if !defined(CONFIG_USER_ONLY)
/* ram_list is read under rcu_read_lock()/rcu_read_unlock().  Writes
 * are protected by the ramlist lock.
 */
RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;
/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC   (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED     (1 << 1)

/* Only a portion of RAM (used_length) is actually used, and migrated.
 * This used_length size can change across reboots.
 */
#define RAM_RESIZEABLE (1 << 2)

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside cpu_exec() */
__thread CPUState *current_cpu;

/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables.  */
#define ADDR_SPACE_BITS 64

#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)

typedef PhysPageEntry Node[P_L2_SIZE];
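/* The phys map is therefore a radix tree: each interior Node holds P_L2_SIZE
 * entries, and P_L2_LEVELS levels are enough to cover the ADDR_SPACE_BITS-wide
 * physical address space down to target-page granularity.
 */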
typedef struct PhysPageMap {
    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb_alloc;
    MemoryRegionSection *sections;

struct AddressSpaceDispatch {
    MemoryRegionSection *mru_section;
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    uint16_t sub_section[TARGET_PAGE_SIZE];

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3
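/* The four indexes above are reserved slots in every dispatch's sections[]
 * array; mem_begin() below installs dummy sections for them in exactly this
 * order.
 */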
static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;

/**
 * CPUAddressSpace: all the information a CPU needs about an AddressSpace
 * @cpu: the CPU whose AddressSpace this is
 * @as: the AddressSpace itself
 * @memory_dispatch: its dispatch pointer (cached, RCU protected)
 * @tcg_as_listener: listener for tracking changes to the AddressSpace
 */
struct CPUAddressSpace {
    struct AddressSpaceDispatch *memory_dispatch;
    MemoryListener tcg_as_listener;
#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
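/* Hand out the next free node from the pool and initialise every entry,
 * either as an unassigned leaf or as an empty pointer to a lower level.
 */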
static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
{
    ret = map->nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);

    e.skip = leaf ? 0 : 1;
    e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
    for (i = 0; i < P_L2_SIZE; ++i) {
        memcpy(&p[i], &e, sizeof(e));
static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map, level == 0);
    p = map->nodes[lp->ptr];
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
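/* Register section index 'leaf' for 'nb' pages starting at page 'index'
 * in dispatch 'd', allocating intermediate nodes on demand.
 */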
static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
/* Compact a non leaf page entry. Simply detect that the entry has a single child,
 * and update our entry so we can skip it and go directly to the destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
{
    unsigned valid_ptr = P_L2_SIZE;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
        phys_page_compact(&p[i], nodes, compacted);

    /* We can only compress if there's only one child. */
    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
        lp->skip += p[valid_ptr].skip;
static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    DECLARE_BITMAP(compacted, nodes_nb);

    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes, compacted);
static inline bool section_covers_addr(const MemoryRegionSection *section,
                                       hwaddr addr)
{
    /* Memory topology clips a memory region to [0, 2^64); size.hi > 0 means
     * the section must cover the entire address space.
     */
    return section->size.hi ||
           range_covers_byte(section->offset_within_address_space,
                             section->size.lo, addr);
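/* Walk the radix tree from 'lp' down to the leaf covering 'addr' and return
 * the matching section, or the unassigned section if the page is not mapped.
 */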
static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    hwaddr index = addr >> TARGET_PAGE_BITS;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];

    if (section_covers_addr(&sections[lp.ptr], addr)) {
        return &sections[lp.ptr];
    return &sections[PHYS_SECTION_UNASSIGNED];
bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
/* Called from RCU critical section */
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section = atomic_read(&d->mru_section);
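    /* Fast path: reuse the most-recently-used section if it still covers
     * this address before falling back to a full phys map walk.
     */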
    if (section && section != &d->map.sections[PHYS_SECTION_UNASSIGNED] &&
        section_covers_addr(section, addr)) {
        section = phys_page_find(d->phys_map, addr, d->map.nodes,

    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    atomic_set(&d->mru_section, section);
/* Called from RCU critical section */
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    /* MMIO registers can be expected to perform full-width accesses based only
     * on their address, without considering adjacent registers that could
     * decode to completely different MemoryRegions.  When such registers
     * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
     * regions overlap wildly.  For this reason we cannot clamp the accesses
     *
     * If the length is small (as is the case for address_space_ldl/stl),
     * everything works fine.  If the incoming length is large, however,
     * the caller really has to do the clamping through memory_access_size.
     */
    if (memory_region_is_ram(mr)) {
        diff = int128_sub(section->size, int128_make64(addr));
        *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
/* Called from RCU critical section */
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    MemoryRegionSection *section;

    AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
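    /* Resolve the section for 'addr', then follow any IOMMU translations,
     * narrowing *plen to the largest run the final mapping allows.
     */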
    section = address_space_translate_internal(d, addr, &addr, plen, true);

    if (!mr->iommu_ops) {

    iotlb = mr->iommu_ops->translate(mr, addr, is_write);
    addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
            | (addr & iotlb.addr_mask));
    *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
    if (!(iotlb.perm & (1 << is_write))) {
        mr = &io_mem_unassigned;

    as = iotlb.target_as;

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        *plen = MIN(page, *plen);
/* Called from RCU critical section */
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen)
{
    MemoryRegionSection *section;
    AddressSpaceDispatch *d = cpu->cpu_ases[asidx].memory_dispatch;

    section = address_space_translate_internal(d, addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;

static int cpu_common_pre_load(void *opaque)
{
    CPUState *cpu = opaque;

    cpu->exception_index = -1;

static bool cpu_common_exception_index_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return tcg_enabled() && cpu->exception_index != -1;

static const VMStateDescription vmstate_cpu_common_exception_index = {
    .name = "cpu_common/exception_index",
    .minimum_version_id = 1,
    .needed = cpu_common_exception_index_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(exception_index, CPUState),
        VMSTATE_END_OF_LIST()
static bool cpu_common_crash_occurred_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return cpu->crash_occurred;

static const VMStateDescription vmstate_cpu_common_crash_occurred = {
    .name = "cpu_common/crash_occurred",
    .minimum_version_id = 1,
    .needed = cpu_common_crash_occurred_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(crash_occurred, CPUState),
        VMSTATE_END_OF_LIST()

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .minimum_version_id = 1,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    .subsections = (const VMStateDescription*[]) {
        &vmstate_cpu_common_exception_index,
        &vmstate_cpu_common_crash_occurred,
CPUState *qemu_get_cpu(int index)
{
    if (cpu->cpu_index == index) {
#if !defined(CONFIG_USER_ONLY)
void cpu_address_space_init(CPUState *cpu, AddressSpace *as, int asidx)
{
    CPUAddressSpace *newas;

    /* Target code should have set num_ases before calling us */
    assert(asidx < cpu->num_ases);

    /* address space 0 gets the convenience alias */
    /* KVM cannot currently support multiple address spaces. */
    assert(asidx == 0 || !kvm_enabled());

    if (!cpu->cpu_ases) {
        cpu->cpu_ases = g_new0(CPUAddressSpace, cpu->num_ases);

    newas = &cpu->cpu_ases[asidx];
    newas->tcg_as_listener.commit = tcg_commit;
    memory_listener_register(&newas->tcg_as_listener, as);

AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx)
{
    /* Return the AddressSpace corresponding to the specified index */
    return cpu->cpu_ases[asidx].as;
#ifndef CONFIG_USER_ONLY
static DECLARE_BITMAP(cpu_index_map, MAX_CPUMASK_BITS);

static int cpu_get_free_index(Error **errp)
{
    int cpu = find_first_zero_bit(cpu_index_map, MAX_CPUMASK_BITS);

    if (cpu >= MAX_CPUMASK_BITS) {
        error_setg(errp, "Trying to use more CPUs than max of %d",

    bitmap_set(cpu_index_map, cpu, 1);

void cpu_exec_exit(CPUState *cpu)
{
    if (cpu->cpu_index == -1) {
        /* cpu_index was never allocated by this @cpu or was already freed. */

    bitmap_clear(cpu_index_map, cpu->cpu_index, 1);

static int cpu_get_free_index(Error **errp)
{
    CPU_FOREACH(some_cpu) {

void cpu_exec_exit(CPUState *cpu)
void cpu_exec_init(CPUState *cpu, Error **errp)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    Error *local_err = NULL;

#ifdef TARGET_WORDS_BIGENDIAN
    cpu->bigendian = true;
    cpu->bigendian = false;

#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();

    /* This is a softmmu CPU object, so create a property for it
     * so users can wire up its memory. (This can't go in qom/cpu.c
     * because that file is compiled only once for both user-mode
     * and system builds.) The default if no link is set up is to use
     * the system address space.
     */
    object_property_add_link(OBJECT(cpu), "memory", TYPE_MEMORY_REGION,
                             (Object **)&cpu->memory,
                             qdev_prop_allow_set_link_before_realize,
                             OBJ_PROP_LINK_UNREF_ON_RELEASE,
    cpu->memory = system_memory;
    object_ref(OBJECT(cpu->memory));

#if defined(CONFIG_USER_ONLY)
    cpu_index = cpu->cpu_index = cpu_get_free_index(&local_err);
        error_propagate(errp, local_err);
#if defined(CONFIG_USER_ONLY)
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);

static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    hwaddr phys = cpu_get_phys_page_attrs_debug(cpu, pc, &attrs);
    int asidx = cpu_asidx_from_attrs(cpu, attrs);

    tb_invalidate_phys_addr(cpu->cpu_ases[asidx].as,
                            phys | (pc & ~TARGET_PAGE_MASK));
#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)

int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,

void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)

/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    /* forbid ranges which are empty or run off the end of the address space */
    if (len == 0 || (addr + len - 1) < addr) {
        error_report("tried to set invalid watchpoint at %"
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);

    wp = g_malloc(sizeof(*wp));

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);

    tlb_flush_page(cpu, addr);
/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (addr == wp->vaddr && len == wp->len
            && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(cpu, wp);

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);

    tlb_flush_page(cpu, watchpoint->vaddr);
/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(cpu, wp);

/* Return true if this watchpoint address matches the specified
 * access (ie the address range covered by the watchpoint overlaps
 * partially or completely with the address range covered by the
 * access).
 */
static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
                                                  vaddr addr, vaddr len)
{
    /* We know the lengths are non-zero, but a little caution is
     * required to avoid errors in the case where the range ends
     * exactly at the top of the address space and so addr + len
     * wraps round to zero.
     */
    vaddr wpend = wp->vaddr + wp->len - 1;
    vaddr addrend = addr + len - 1;

    return !(addr > wpend || wp->vaddr > addrend);
/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
{
    bp = g_malloc(sizeof(*bp));

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);

    breakpoint_invalidate(cpu, pc);

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
{
    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(cpu, bp);

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
{
    QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);

    breakpoint_invalidate(cpu, breakpoint->pc);

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
{
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        kvm_update_guest_debug(cpu, 0);
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */

void QEMU_NORETURN cpu_abort(CPUState *cpu, const char *fmt, ...)
{
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_separate()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
#if defined(CONFIG_USER_ONLY)
    struct sigaction act;
    sigfillset(&act.sa_mask);
    act.sa_handler = SIG_DFL;
    sigaction(SIGABRT, &act, NULL);
#if !defined(CONFIG_USER_ONLY)
/* Called from RCU critical section */
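/* Map a guest ram_addr_t back to its RAMBlock, checking the MRU block cache
 * first and falling back to a walk of ram_list.blocks.
 */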
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && addr - block->offset < block->max_length) {
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->max_length) {
    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);

    /* It is safe to write mru_block outside the iothread lock.  This
     *     xxx removed from list
     *     call_rcu(reclaim_ramblock, xxx);
     * atomic_rcu_set is not needed here.  The block was already published
     * when it was placed into the list.  Here we're just making an extra
     * copy of the pointer.
     */
    ram_list.mru_block = block;
static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
    tlb_reset_dirty(cpu, start1, length);
/* Note: start and end must be within the same ram block.  */
bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
    DirtyMemoryBlocks *blocks;
    unsigned long end, page;

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);

    while (page < end) {
        unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long num = MIN(end - page, DIRTY_MEMORY_BLOCK_SIZE - offset);

        dirty |= bitmap_test_and_clear_atomic(blocks->blocks[idx],

    if (dirty && tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
/* Called from RCU critical section */
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       hwaddr paddr, hwaddr xlat,
                                       target_ulong *address)
{
    if (memory_region_is_ram(section->mr)) {
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
            iotlb |= PHYS_SECTION_ROM;
        AddressSpaceDispatch *d;

        d = atomic_rcu_read(&section->address_space->dispatch);
        iotlb = section - d->map.sections;

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;

#endif /* defined(CONFIG_USER_ONLY) */
#if !defined(CONFIG_USER_ONLY)

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
    qemu_anon_ram_alloc;
/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this.  Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
{
    phys_mem_alloc = alloc;
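/* Append 'section' to the dispatch map's sections[] array, growing it as
 * needed, and return its index; the index stays below TARGET_PAGE_SIZE so it
 * can be OR-ed into page-aligned iotlb values.
 */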
static uint16_t phys_section_add(PhysPageMap *map,
                                 MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(map->sections_nb < TARGET_PAGE_SIZE);

    if (map->sections_nb == map->sections_nb_alloc) {
        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
        map->sections = g_renew(MemoryRegionSection, map->sections,
                                map->sections_nb_alloc);
    map->sections[map->sections_nb] = *section;
    memory_region_ref(section->mr);
    return map->sections_nb++;
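/* Release the reference taken in phys_section_add(); if the region was a
 * subpage container, the subpage_t that owns it is torn down as well.
 */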
static void phys_section_destroy(MemoryRegion *mr)
{
    bool have_sub_page = mr->subpage;

    memory_region_unref(mr);

    if (have_sub_page) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        object_unref(OBJECT(&subpage->iomem));

static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    g_free(map->sections);
1143 static void register_subpage(AddressSpaceDispatch
*d
, MemoryRegionSection
*section
)
1146 hwaddr base
= section
->offset_within_address_space
1148 MemoryRegionSection
*existing
= phys_page_find(d
->phys_map
, base
,
1149 d
->map
.nodes
, d
->map
.sections
);
1150 MemoryRegionSection subsection
= {
1151 .offset_within_address_space
= base
,
1152 .size
= int128_make64(TARGET_PAGE_SIZE
),
1156 assert(existing
->mr
->subpage
|| existing
->mr
== &io_mem_unassigned
);
1158 if (!(existing
->mr
->subpage
)) {
1159 subpage
= subpage_init(d
->as
, base
);
1160 subsection
.address_space
= d
->as
;
1161 subsection
.mr
= &subpage
->iomem
;
1162 phys_page_set(d
, base
>> TARGET_PAGE_BITS
, 1,
1163 phys_section_add(&d
->map
, &subsection
));
1165 subpage
= container_of(existing
->mr
, subpage_t
, iomem
);
1167 start
= section
->offset_within_address_space
& ~TARGET_PAGE_MASK
;
1168 end
= start
+ int128_get64(section
->size
) - 1;
1169 subpage_register(subpage
, start
, end
,
1170 phys_section_add(&d
->map
, section
));
1174 static void register_multipage(AddressSpaceDispatch
*d
,
1175 MemoryRegionSection
*section
)
1177 hwaddr start_addr
= section
->offset_within_address_space
;
1178 uint16_t section_index
= phys_section_add(&d
->map
, section
);
1179 uint64_t num_pages
= int128_get64(int128_rshift(section
->size
,
1183 phys_page_set(d
, start_addr
>> TARGET_PAGE_BITS
, num_pages
, section_index
);
1186 static void mem_add(MemoryListener
*listener
, MemoryRegionSection
*section
)
1188 AddressSpace
*as
= container_of(listener
, AddressSpace
, dispatch_listener
);
1189 AddressSpaceDispatch
*d
= as
->next_dispatch
;
1190 MemoryRegionSection now
= *section
, remain
= *section
;
1191 Int128 page_size
= int128_make64(TARGET_PAGE_SIZE
);
1193 if (now
.offset_within_address_space
& ~TARGET_PAGE_MASK
) {
1194 uint64_t left
= TARGET_PAGE_ALIGN(now
.offset_within_address_space
)
1195 - now
.offset_within_address_space
;
1197 now
.size
= int128_min(int128_make64(left
), now
.size
);
1198 register_subpage(d
, &now
);
1200 now
.size
= int128_zero();
1202 while (int128_ne(remain
.size
, now
.size
)) {
1203 remain
.size
= int128_sub(remain
.size
, now
.size
);
1204 remain
.offset_within_address_space
+= int128_get64(now
.size
);
1205 remain
.offset_within_region
+= int128_get64(now
.size
);
1207 if (int128_lt(remain
.size
, page_size
)) {
1208 register_subpage(d
, &now
);
1209 } else if (remain
.offset_within_address_space
& ~TARGET_PAGE_MASK
) {
1210 now
.size
= page_size
;
1211 register_subpage(d
, &now
);
1213 now
.size
= int128_and(now
.size
, int128_neg(page_size
));
1214 register_multipage(d
, &now
);
1219 void qemu_flush_coalesced_mmio_buffer(void)
1222 kvm_flush_coalesced_mmio_buffer();
1225 void qemu_mutex_lock_ramlist(void)
1227 qemu_mutex_lock(&ram_list
.mutex
);
1230 void qemu_mutex_unlock_ramlist(void)
1232 qemu_mutex_unlock(&ram_list
.mutex
);
1236 static void *file_ram_alloc(RAMBlock
*block
,
1241 bool unlink_on_error
= false;
1243 char *sanitized_name
;
1245 void * volatile area
= NULL
;
1249 if (kvm_enabled() && !kvm_has_sync_mmu()) {
1251 "host lacks kvm mmu notifiers, -mem-path unsupported");
1256 fd
= open(path
, O_RDWR
);
1258 /* @path names an existing file, use it */
1261 if (errno
== ENOENT
) {
1262 /* @path names a file that doesn't exist, create it */
1263 fd
= open(path
, O_RDWR
| O_CREAT
| O_EXCL
, 0644);
1265 unlink_on_error
= true;
1268 } else if (errno
== EISDIR
) {
1269 /* @path names a directory, create a file there */
1270 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
1271 sanitized_name
= g_strdup(memory_region_name(block
->mr
));
1272 for (c
= sanitized_name
; *c
!= '\0'; c
++) {
1278 filename
= g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path
,
1280 g_free(sanitized_name
);
1282 fd
= mkstemp(filename
);
1290 if (errno
!= EEXIST
&& errno
!= EINTR
) {
1291 error_setg_errno(errp
, errno
,
1292 "can't open backing store %s for guest RAM",
1297 * Try again on EINTR and EEXIST. The latter happens when
1298 * something else creates the file between our two open().
1302 page_size
= qemu_fd_getpagesize(fd
);
1303 block
->mr
->align
= page_size
;
1305 if (memory
< page_size
) {
1306 error_setg(errp
, "memory size 0x" RAM_ADDR_FMT
" must be equal to "
1307 "or larger than page size 0x%" PRIx64
,
1312 memory
= ROUND_UP(memory
, page_size
);
1315 * ftruncate is not supported by hugetlbfs in older
1316 * hosts, so don't bother bailing out on errors.
1317 * If anything goes wrong with it under other filesystems,
1320 if (ftruncate(fd
, memory
)) {
1321 perror("ftruncate");
1324 area
= qemu_ram_mmap(fd
, memory
, page_size
, block
->flags
& RAM_SHARED
);
1325 if (area
== MAP_FAILED
) {
1326 error_setg_errno(errp
, errno
,
1327 "unable to map backing store for guest RAM");
1333 os_mem_prealloc(fd
, area
, memory
);
1340 if (unlink_on_error
) {
1348 /* Called with the ramlist lock held. */
1349 static ram_addr_t
find_ram_offset(ram_addr_t size
)
1351 RAMBlock
*block
, *next_block
;
1352 ram_addr_t offset
= RAM_ADDR_MAX
, mingap
= RAM_ADDR_MAX
;
1354 assert(size
!= 0); /* it would hand out same offset multiple times */
1356 if (QLIST_EMPTY_RCU(&ram_list
.blocks
)) {
1360 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1361 ram_addr_t end
, next
= RAM_ADDR_MAX
;
1363 end
= block
->offset
+ block
->max_length
;
1365 QLIST_FOREACH_RCU(next_block
, &ram_list
.blocks
, next
) {
1366 if (next_block
->offset
>= end
) {
1367 next
= MIN(next
, next_block
->offset
);
1370 if (next
- end
>= size
&& next
- end
< mingap
) {
1372 mingap
= next
- end
;
1376 if (offset
== RAM_ADDR_MAX
) {
1377 fprintf(stderr
, "Failed to find gap of requested size: %" PRIu64
"\n",
1385 ram_addr_t
last_ram_offset(void)
1388 ram_addr_t last
= 0;
1391 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1392 last
= MAX(last
, block
->offset
+ block
->max_length
);
1398 static void qemu_ram_setup_dump(void *addr
, ram_addr_t size
)
1402 /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
1403 if (!machine_dump_guest_core(current_machine
)) {
1404 ret
= qemu_madvise(addr
, size
, QEMU_MADV_DONTDUMP
);
1406 perror("qemu_madvise");
1407 fprintf(stderr
, "madvise doesn't support MADV_DONTDUMP, "
1408 "but dump_guest_core=off specified\n");
1413 /* Called within an RCU critical section, or while the ramlist lock
1416 static RAMBlock
*find_ram_block(ram_addr_t addr
)
1420 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1421 if (block
->offset
== addr
) {
1429 const char *qemu_ram_get_idstr(RAMBlock
*rb
)
1434 /* Called with iothread lock held. */
1435 void qemu_ram_set_idstr(ram_addr_t addr
, const char *name
, DeviceState
*dev
)
1437 RAMBlock
*new_block
, *block
;
1440 new_block
= find_ram_block(addr
);
1442 assert(!new_block
->idstr
[0]);
1445 char *id
= qdev_get_dev_path(dev
);
1447 snprintf(new_block
->idstr
, sizeof(new_block
->idstr
), "%s/", id
);
1451 pstrcat(new_block
->idstr
, sizeof(new_block
->idstr
), name
);
1453 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1454 if (block
!= new_block
&& !strcmp(block
->idstr
, new_block
->idstr
)) {
1455 fprintf(stderr
, "RAMBlock \"%s\" already registered, abort!\n",
1463 /* Called with iothread lock held. */
1464 void qemu_ram_unset_idstr(ram_addr_t addr
)
1468 /* FIXME: arch_init.c assumes that this is not called throughout
1469 * migration. Ignore the problem since hot-unplug during migration
1470 * does not work anyway.
1474 block
= find_ram_block(addr
);
1476 memset(block
->idstr
, 0, sizeof(block
->idstr
));
1481 static int memory_try_enable_merging(void *addr
, size_t len
)
1483 if (!machine_mem_merge(current_machine
)) {
1484 /* disabled by the user */
1488 return qemu_madvise(addr
, len
, QEMU_MADV_MERGEABLE
);
1491 /* Only legal before guest might have detected the memory size: e.g. on
1492 * incoming migration, or right after reset.
1494 * As memory core doesn't know how is memory accessed, it is up to
1495 * resize callback to update device state and/or add assertions to detect
1496 * misuse, if necessary.
1498 int qemu_ram_resize(ram_addr_t base
, ram_addr_t newsize
, Error
**errp
)
1500 RAMBlock
*block
= find_ram_block(base
);
1504 newsize
= HOST_PAGE_ALIGN(newsize
);
1506 if (block
->used_length
== newsize
) {
1510 if (!(block
->flags
& RAM_RESIZEABLE
)) {
1511 error_setg_errno(errp
, EINVAL
,
1512 "Length mismatch: %s: 0x" RAM_ADDR_FMT
1513 " in != 0x" RAM_ADDR_FMT
, block
->idstr
,
1514 newsize
, block
->used_length
);
1518 if (block
->max_length
< newsize
) {
1519 error_setg_errno(errp
, EINVAL
,
1520 "Length too large: %s: 0x" RAM_ADDR_FMT
1521 " > 0x" RAM_ADDR_FMT
, block
->idstr
,
1522 newsize
, block
->max_length
);
1526 cpu_physical_memory_clear_dirty_range(block
->offset
, block
->used_length
);
1527 block
->used_length
= newsize
;
1528 cpu_physical_memory_set_dirty_range(block
->offset
, block
->used_length
,
1530 memory_region_set_size(block
->mr
, newsize
);
1531 if (block
->resized
) {
1532 block
->resized(block
->idstr
, newsize
, block
->host
);
1537 /* Called with ram_list.mutex held */
1538 static void dirty_memory_extend(ram_addr_t old_ram_size
,
1539 ram_addr_t new_ram_size
)
1541 ram_addr_t old_num_blocks
= DIV_ROUND_UP(old_ram_size
,
1542 DIRTY_MEMORY_BLOCK_SIZE
);
1543 ram_addr_t new_num_blocks
= DIV_ROUND_UP(new_ram_size
,
1544 DIRTY_MEMORY_BLOCK_SIZE
);
1547 /* Only need to extend if block count increased */
1548 if (new_num_blocks
<= old_num_blocks
) {
1552 for (i
= 0; i
< DIRTY_MEMORY_NUM
; i
++) {
1553 DirtyMemoryBlocks
*old_blocks
;
1554 DirtyMemoryBlocks
*new_blocks
;
1557 old_blocks
= atomic_rcu_read(&ram_list
.dirty_memory
[i
]);
1558 new_blocks
= g_malloc(sizeof(*new_blocks
) +
1559 sizeof(new_blocks
->blocks
[0]) * new_num_blocks
);
1561 if (old_num_blocks
) {
1562 memcpy(new_blocks
->blocks
, old_blocks
->blocks
,
1563 old_num_blocks
* sizeof(old_blocks
->blocks
[0]));
1566 for (j
= old_num_blocks
; j
< new_num_blocks
; j
++) {
1567 new_blocks
->blocks
[j
] = bitmap_new(DIRTY_MEMORY_BLOCK_SIZE
);
1570 atomic_rcu_set(&ram_list
.dirty_memory
[i
], new_blocks
);
1573 g_free_rcu(old_blocks
, rcu
);
1578 static void ram_block_add(RAMBlock
*new_block
, Error
**errp
)
1581 RAMBlock
*last_block
= NULL
;
1582 ram_addr_t old_ram_size
, new_ram_size
;
1585 old_ram_size
= last_ram_offset() >> TARGET_PAGE_BITS
;
1587 qemu_mutex_lock_ramlist();
1588 new_block
->offset
= find_ram_offset(new_block
->max_length
);
1590 if (!new_block
->host
) {
1591 if (xen_enabled()) {
1592 xen_ram_alloc(new_block
->offset
, new_block
->max_length
,
1593 new_block
->mr
, &err
);
1595 error_propagate(errp
, err
);
1596 qemu_mutex_unlock_ramlist();
1600 new_block
->host
= phys_mem_alloc(new_block
->max_length
,
1601 &new_block
->mr
->align
);
1602 if (!new_block
->host
) {
1603 error_setg_errno(errp
, errno
,
1604 "cannot set up guest memory '%s'",
1605 memory_region_name(new_block
->mr
));
1606 qemu_mutex_unlock_ramlist();
1609 memory_try_enable_merging(new_block
->host
, new_block
->max_length
);
1613 new_ram_size
= MAX(old_ram_size
,
1614 (new_block
->offset
+ new_block
->max_length
) >> TARGET_PAGE_BITS
);
1615 if (new_ram_size
> old_ram_size
) {
1616 migration_bitmap_extend(old_ram_size
, new_ram_size
);
1617 dirty_memory_extend(old_ram_size
, new_ram_size
);
1619 /* Keep the list sorted from biggest to smallest block. Unlike QTAILQ,
1620 * QLIST (which has an RCU-friendly variant) does not have insertion at
1621 * tail, so save the last element in last_block.
1623 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1625 if (block
->max_length
< new_block
->max_length
) {
1630 QLIST_INSERT_BEFORE_RCU(block
, new_block
, next
);
1631 } else if (last_block
) {
1632 QLIST_INSERT_AFTER_RCU(last_block
, new_block
, next
);
1633 } else { /* list is empty */
1634 QLIST_INSERT_HEAD_RCU(&ram_list
.blocks
, new_block
, next
);
1636 ram_list
.mru_block
= NULL
;
1638 /* Write list before version */
1641 qemu_mutex_unlock_ramlist();
1643 cpu_physical_memory_set_dirty_range(new_block
->offset
,
1644 new_block
->used_length
,
1647 if (new_block
->host
) {
1648 qemu_ram_setup_dump(new_block
->host
, new_block
->max_length
);
1649 qemu_madvise(new_block
->host
, new_block
->max_length
, QEMU_MADV_HUGEPAGE
);
1650 qemu_madvise(new_block
->host
, new_block
->max_length
, QEMU_MADV_DONTFORK
);
1651 if (kvm_enabled()) {
1652 kvm_setup_guest_memory(new_block
->host
, new_block
->max_length
);
1658 RAMBlock
*qemu_ram_alloc_from_file(ram_addr_t size
, MemoryRegion
*mr
,
1659 bool share
, const char *mem_path
,
1662 RAMBlock
*new_block
;
1663 Error
*local_err
= NULL
;
1665 if (xen_enabled()) {
1666 error_setg(errp
, "-mem-path not supported with Xen");
1670 if (phys_mem_alloc
!= qemu_anon_ram_alloc
) {
1672 * file_ram_alloc() needs to allocate just like
1673 * phys_mem_alloc, but we haven't bothered to provide
1677 "-mem-path not supported with this accelerator");
1681 size
= HOST_PAGE_ALIGN(size
);
1682 new_block
= g_malloc0(sizeof(*new_block
));
1684 new_block
->used_length
= size
;
1685 new_block
->max_length
= size
;
1686 new_block
->flags
= share
? RAM_SHARED
: 0;
1687 new_block
->host
= file_ram_alloc(new_block
, size
,
1689 if (!new_block
->host
) {
1694 ram_block_add(new_block
, &local_err
);
1697 error_propagate(errp
, local_err
);
1705 RAMBlock
*qemu_ram_alloc_internal(ram_addr_t size
, ram_addr_t max_size
,
1706 void (*resized
)(const char*,
1709 void *host
, bool resizeable
,
1710 MemoryRegion
*mr
, Error
**errp
)
1712 RAMBlock
*new_block
;
1713 Error
*local_err
= NULL
;
1715 size
= HOST_PAGE_ALIGN(size
);
1716 max_size
= HOST_PAGE_ALIGN(max_size
);
1717 new_block
= g_malloc0(sizeof(*new_block
));
1719 new_block
->resized
= resized
;
1720 new_block
->used_length
= size
;
1721 new_block
->max_length
= max_size
;
1722 assert(max_size
>= size
);
1724 new_block
->host
= host
;
1726 new_block
->flags
|= RAM_PREALLOC
;
1729 new_block
->flags
|= RAM_RESIZEABLE
;
1731 ram_block_add(new_block
, &local_err
);
1734 error_propagate(errp
, local_err
);
1740 RAMBlock
*qemu_ram_alloc_from_ptr(ram_addr_t size
, void *host
,
1741 MemoryRegion
*mr
, Error
**errp
)
1743 return qemu_ram_alloc_internal(size
, size
, NULL
, host
, false, mr
, errp
);
1746 RAMBlock
*qemu_ram_alloc(ram_addr_t size
, MemoryRegion
*mr
, Error
**errp
)
1748 return qemu_ram_alloc_internal(size
, size
, NULL
, NULL
, false, mr
, errp
);
1751 RAMBlock
*qemu_ram_alloc_resizeable(ram_addr_t size
, ram_addr_t maxsz
,
1752 void (*resized
)(const char*,
1755 MemoryRegion
*mr
, Error
**errp
)
1757 return qemu_ram_alloc_internal(size
, maxsz
, resized
, NULL
, true, mr
, errp
);
1760 static void reclaim_ramblock(RAMBlock
*block
)
1762 if (block
->flags
& RAM_PREALLOC
) {
1764 } else if (xen_enabled()) {
1765 xen_invalidate_map_cache_entry(block
->host
);
1767 } else if (block
->fd
>= 0) {
1768 qemu_ram_munmap(block
->host
, block
->max_length
);
1772 qemu_anon_ram_free(block
->host
, block
->max_length
);
1777 void qemu_ram_free(RAMBlock
*block
)
1779 qemu_mutex_lock_ramlist();
1780 QLIST_REMOVE_RCU(block
, next
);
1781 ram_list
.mru_block
= NULL
;
1782 /* Write list before version */
1785 call_rcu(block
, reclaim_ramblock
, rcu
);
1786 qemu_mutex_unlock_ramlist();
1790 void qemu_ram_remap(ram_addr_t addr
, ram_addr_t length
)
1797 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1798 offset
= addr
- block
->offset
;
1799 if (offset
< block
->max_length
) {
1800 vaddr
= ramblock_ptr(block
, offset
);
1801 if (block
->flags
& RAM_PREALLOC
) {
1803 } else if (xen_enabled()) {
1807 if (block
->fd
>= 0) {
1808 flags
|= (block
->flags
& RAM_SHARED
?
1809 MAP_SHARED
: MAP_PRIVATE
);
1810 area
= mmap(vaddr
, length
, PROT_READ
| PROT_WRITE
,
1811 flags
, block
->fd
, offset
);
1814 * Remap needs to match alloc. Accelerators that
1815 * set phys_mem_alloc never remap. If they did,
1816 * we'd need a remap hook here.
1818 assert(phys_mem_alloc
== qemu_anon_ram_alloc
);
1820 flags
|= MAP_PRIVATE
| MAP_ANONYMOUS
;
1821 area
= mmap(vaddr
, length
, PROT_READ
| PROT_WRITE
,
1824 if (area
!= vaddr
) {
1825 fprintf(stderr
, "Could not remap addr: "
1826 RAM_ADDR_FMT
"@" RAM_ADDR_FMT
"\n",
1830 memory_try_enable_merging(vaddr
, length
);
1831 qemu_ram_setup_dump(vaddr
, length
);
1836 #endif /* !_WIN32 */
1838 int qemu_get_ram_fd(ram_addr_t addr
)
1844 block
= qemu_get_ram_block(addr
);
1850 void qemu_set_ram_fd(ram_addr_t addr
, int fd
)
1855 block
= qemu_get_ram_block(addr
);
1860 void *qemu_get_ram_block_host_ptr(ram_addr_t addr
)
1866 block
= qemu_get_ram_block(addr
);
1867 ptr
= ramblock_ptr(block
, 0);
1872 /* Return a host pointer to ram allocated with qemu_ram_alloc.
1873 * This should not be used for general purpose DMA. Use address_space_map
1874 * or address_space_rw instead. For local memory (e.g. video ram) that the
1875 * device owns, use memory_region_get_ram_ptr.
1877 * Called within RCU critical section.
1879 void *qemu_get_ram_ptr(RAMBlock
*ram_block
, ram_addr_t addr
)
1881 RAMBlock
*block
= ram_block
;
1883 if (block
== NULL
) {
1884 block
= qemu_get_ram_block(addr
);
1887 if (xen_enabled() && block
->host
== NULL
) {
1888 /* We need to check if the requested address is in the RAM
1889 * because we don't want to map the entire memory in QEMU.
1890 * In that case just map until the end of the page.
1892 if (block
->offset
== 0) {
1893 return xen_map_cache(addr
, 0, 0);
1896 block
->host
= xen_map_cache(block
->offset
, block
->max_length
, 1);
1898 return ramblock_ptr(block
, addr
- block
->offset
);
1901 /* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
1902 * but takes a size argument.
1904 * Called within RCU critical section.
1906 static void *qemu_ram_ptr_length(RAMBlock
*ram_block
, ram_addr_t addr
,
1909 RAMBlock
*block
= ram_block
;
1910 ram_addr_t offset_inside_block
;
1915 if (block
== NULL
) {
1916 block
= qemu_get_ram_block(addr
);
1918 offset_inside_block
= addr
- block
->offset
;
1919 *size
= MIN(*size
, block
->max_length
- offset_inside_block
);
1921 if (xen_enabled() && block
->host
== NULL
) {
1922 /* We need to check if the requested address is in the RAM
1923 * because we don't want to map the entire memory in QEMU.
1924 * In that case just map the requested area.
1926 if (block
->offset
== 0) {
1927 return xen_map_cache(addr
, *size
, 1);
1930 block
->host
= xen_map_cache(block
->offset
, block
->max_length
, 1);
1933 return ramblock_ptr(block
, offset_inside_block
);
1937 * Translates a host ptr back to a RAMBlock, a ram_addr and an offset
1940 * ptr: Host pointer to look up
1941 * round_offset: If true round the result offset down to a page boundary
1942 * *ram_addr: set to result ram_addr
1943 * *offset: set to result offset within the RAMBlock
1945 * Returns: RAMBlock (or NULL if not found)
1947 * By the time this function returns, the returned pointer is not protected
1948 * by RCU anymore. If the caller is not within an RCU critical section and
1949 * does not hold the iothread lock, it must have other means of protecting the
1950 * pointer, such as a reference to the region that includes the incoming
1953 RAMBlock
*qemu_ram_block_from_host(void *ptr
, bool round_offset
,
1954 ram_addr_t
*ram_addr
,
1958 uint8_t *host
= ptr
;
1960 if (xen_enabled()) {
1962 *ram_addr
= xen_ram_addr_from_mapcache(ptr
);
1963 block
= qemu_get_ram_block(*ram_addr
);
1965 *offset
= (host
- block
->host
);
1972 block
= atomic_rcu_read(&ram_list
.mru_block
);
1973 if (block
&& block
->host
&& host
- block
->host
< block
->max_length
) {
1977 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
        /* This case appears when the block is not mapped. */
1979 if (block
->host
== NULL
) {
1982 if (host
- block
->host
< block
->max_length
) {
1991 *offset
= (host
- block
->host
);
1993 *offset
&= TARGET_PAGE_MASK
;
1995 *ram_addr
= block
->offset
+ *offset
;
2001 * Finds the named RAMBlock
2003 * name: The name of RAMBlock to find
2005 * Returns: RAMBlock (or NULL if not found)
2007 RAMBlock
*qemu_ram_block_by_name(const char *name
)
2011 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
2012 if (!strcmp(name
, block
->idstr
)) {
2020 /* Some of the softmmu routines need to translate from a host pointer
2021 (typically a TLB entry) back to a ram offset. */
2022 MemoryRegion
*qemu_ram_addr_from_host(void *ptr
, ram_addr_t
*ram_addr
)
2025 ram_addr_t offset
; /* Not used */
2027 block
= qemu_ram_block_from_host(ptr
, false, ram_addr
, &offset
);
2036 /* Called within RCU critical section. */
2037 static void notdirty_mem_write(void *opaque
, hwaddr ram_addr
,
2038 uint64_t val
, unsigned size
)
2040 if (!cpu_physical_memory_get_dirty_flag(ram_addr
, DIRTY_MEMORY_CODE
)) {
2041 tb_invalidate_phys_page_fast(ram_addr
, size
);
2045 stb_p(qemu_get_ram_ptr(NULL
, ram_addr
), val
);
2048 stw_p(qemu_get_ram_ptr(NULL
, ram_addr
), val
);
2051 stl_p(qemu_get_ram_ptr(NULL
, ram_addr
), val
);
2056 /* Set both VGA and migration bits for simplicity and to remove
2057 * the notdirty callback faster.
2059 cpu_physical_memory_set_dirty_range(ram_addr
, size
,
2060 DIRTY_CLIENTS_NOCODE
);
2061 /* we remove the notdirty callback only if the code has been
2063 if (!cpu_physical_memory_is_clean(ram_addr
)) {
2064 tlb_set_dirty(current_cpu
, current_cpu
->mem_io_vaddr
);
2068 static bool notdirty_mem_accepts(void *opaque
, hwaddr addr
,
2069 unsigned size
, bool is_write
)
2074 static const MemoryRegionOps notdirty_mem_ops
= {
2075 .write
= notdirty_mem_write
,
2076 .valid
.accepts
= notdirty_mem_accepts
,
2077 .endianness
= DEVICE_NATIVE_ENDIAN
,
2080 /* Generate a debug exception if a watchpoint has been hit. */
2081 static void check_watchpoint(int offset
, int len
, MemTxAttrs attrs
, int flags
)
2083 CPUState
*cpu
= current_cpu
;
2084 CPUClass
*cc
= CPU_GET_CLASS(cpu
);
2085 CPUArchState
*env
= cpu
->env_ptr
;
2086 target_ulong pc
, cs_base
;
2091 if (cpu
->watchpoint_hit
) {
2092 /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
2094 * current instruction. */
2095 cpu_interrupt(cpu
, CPU_INTERRUPT_DEBUG
);
2098 vaddr
= (cpu
->mem_io_vaddr
& TARGET_PAGE_MASK
) + offset
;
2099 QTAILQ_FOREACH(wp
, &cpu
->watchpoints
, entry
) {
2100 if (cpu_watchpoint_address_matches(wp
, vaddr
, len
)
2101 && (wp
->flags
& flags
)) {
2102 if (flags
== BP_MEM_READ
) {
2103 wp
->flags
|= BP_WATCHPOINT_HIT_READ
;
2105 wp
->flags
|= BP_WATCHPOINT_HIT_WRITE
;
2107 wp
->hitaddr
= vaddr
;
2108 wp
->hitattrs
= attrs
;
2109 if (!cpu
->watchpoint_hit
) {
2110 if (wp
->flags
& BP_CPU
&&
2111 !cc
->debug_check_watchpoint(cpu
, wp
)) {
2112 wp
->flags
&= ~BP_WATCHPOINT_HIT
;
2115 cpu
->watchpoint_hit
= wp
;
2116 tb_check_watchpoint(cpu
);
2117 if (wp
->flags
& BP_STOP_BEFORE_ACCESS
) {
2118 cpu
->exception_index
= EXCP_DEBUG
;
2121 cpu_get_tb_cpu_state(env
, &pc
, &cs_base
, &cpu_flags
);
2122 tb_gen_code(cpu
, pc
, cs_base
, cpu_flags
, 1);
2123 cpu_resume_from_signal(cpu
, NULL
);
2127 wp
->flags
&= ~BP_WATCHPOINT_HIT
;
2132 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2133 so these check for a hit then pass through to the normal out-of-line
2135 static MemTxResult
watch_mem_read(void *opaque
, hwaddr addr
, uint64_t *pdata
,
2136 unsigned size
, MemTxAttrs attrs
)
2140 int asidx
= cpu_asidx_from_attrs(current_cpu
, attrs
);
2141 AddressSpace
*as
= current_cpu
->cpu_ases
[asidx
].as
;
2143 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, size
, attrs
, BP_MEM_READ
);
2146 data
= address_space_ldub(as
, addr
, attrs
, &res
);
2149 data
= address_space_lduw(as
, addr
, attrs
, &res
);
2152 data
= address_space_ldl(as
, addr
, attrs
, &res
);
2160 static MemTxResult
watch_mem_write(void *opaque
, hwaddr addr
,
2161 uint64_t val
, unsigned size
,
2165 int asidx
= cpu_asidx_from_attrs(current_cpu
, attrs
);
2166 AddressSpace
*as
= current_cpu
->cpu_ases
[asidx
].as
;
2168 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, size
, attrs
, BP_MEM_WRITE
);
2171 address_space_stb(as
, addr
, val
, attrs
, &res
);
2174 address_space_stw(as
, addr
, val
, attrs
, &res
);
2177 address_space_stl(as
, addr
, val
, attrs
, &res
);
2184 static const MemoryRegionOps watch_mem_ops
= {
2185 .read_with_attrs
= watch_mem_read
,
2186 .write_with_attrs
= watch_mem_write
,
2187 .endianness
= DEVICE_NATIVE_ENDIAN
,
2190 static MemTxResult
subpage_read(void *opaque
, hwaddr addr
, uint64_t *data
,
2191 unsigned len
, MemTxAttrs attrs
)
2193 subpage_t
*subpage
= opaque
;
2197 #if defined(DEBUG_SUBPAGE)
2198 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
"\n", __func__
,
2199 subpage
, len
, addr
);
2201 res
= address_space_read(subpage
->as
, addr
+ subpage
->base
,
2208 *data
= ldub_p(buf
);
2211 *data
= lduw_p(buf
);
2224 static MemTxResult
subpage_write(void *opaque
, hwaddr addr
,
2225 uint64_t value
, unsigned len
, MemTxAttrs attrs
)
2227 subpage_t
*subpage
= opaque
;
2230 #if defined(DEBUG_SUBPAGE)
2231 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
2232 " value %"PRIx64
"\n",
2233 __func__
, subpage
, len
, addr
, value
);
2251 return address_space_write(subpage
->as
, addr
+ subpage
->base
,
2255 static bool subpage_accepts(void *opaque
, hwaddr addr
,
2256 unsigned len
, bool is_write
)
2258 subpage_t
*subpage
= opaque
;
2259 #if defined(DEBUG_SUBPAGE)
2260 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx
"\n",
2261 __func__
, subpage
, is_write
? 'w' : 'r', len
, addr
);
2264 return address_space_access_valid(subpage
->as
, addr
+ subpage
->base
,
2268 static const MemoryRegionOps subpage_ops
= {
2269 .read_with_attrs
= subpage_read
,
2270 .write_with_attrs
= subpage_write
,
2271 .impl
.min_access_size
= 1,
2272 .impl
.max_access_size
= 8,
2273 .valid
.min_access_size
= 1,
2274 .valid
.max_access_size
= 8,
2275 .valid
.accepts
= subpage_accepts
,
2276 .endianness
= DEVICE_NATIVE_ENDIAN
,
2279 static int subpage_register (subpage_t
*mmio
, uint32_t start
, uint32_t end
,
2284 if (start
>= TARGET_PAGE_SIZE
|| end
>= TARGET_PAGE_SIZE
)
2286 idx
= SUBPAGE_IDX(start
);
2287 eidx
= SUBPAGE_IDX(end
);
2288 #if defined(DEBUG_SUBPAGE)
2289 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
2290 __func__
, mmio
, start
, end
, idx
, eidx
, section
);
2292 for (; idx
<= eidx
; idx
++) {
2293 mmio
->sub_section
[idx
] = section
;
2299 static subpage_t
*subpage_init(AddressSpace
*as
, hwaddr base
)
2303 mmio
= g_malloc0(sizeof(subpage_t
));
2307 memory_region_init_io(&mmio
->iomem
, NULL
, &subpage_ops
, mmio
,
2308 NULL
, TARGET_PAGE_SIZE
);
2309 mmio
->iomem
.subpage
= true;
2310 #if defined(DEBUG_SUBPAGE)
2311 printf("%s: %p base " TARGET_FMT_plx
" len %08x\n", __func__
,
2312 mmio
, base
, TARGET_PAGE_SIZE
);
2314 subpage_register(mmio
, 0, TARGET_PAGE_SIZE
-1, PHYS_SECTION_UNASSIGNED
);
2319 static uint16_t dummy_section(PhysPageMap
*map
, AddressSpace
*as
,
2323 MemoryRegionSection section
= {
2324 .address_space
= as
,
2326 .offset_within_address_space
= 0,
2327 .offset_within_region
= 0,
2328 .size
= int128_2_64(),
2331 return phys_section_add(map
, §ion
);
2334 MemoryRegion
*iotlb_to_region(CPUState
*cpu
, hwaddr index
, MemTxAttrs attrs
)
2336 int asidx
= cpu_asidx_from_attrs(cpu
, attrs
);
2337 CPUAddressSpace
*cpuas
= &cpu
->cpu_ases
[asidx
];
2338 AddressSpaceDispatch
*d
= atomic_rcu_read(&cpuas
->memory_dispatch
);
2339 MemoryRegionSection
*sections
= d
->map
.sections
;
2341 return sections
[index
& ~TARGET_PAGE_MASK
].mr
;
2344 static void io_mem_init(void)
2346 memory_region_init_io(&io_mem_rom
, NULL
, &unassigned_mem_ops
, NULL
, NULL
, UINT64_MAX
);
2347 memory_region_init_io(&io_mem_unassigned
, NULL
, &unassigned_mem_ops
, NULL
,
    memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
2351 memory_region_init_io(&io_mem_watch
, NULL
, &watch_mem_ops
, NULL
,
2355 static void mem_begin(MemoryListener
*listener
)
2357 AddressSpace
*as
= container_of(listener
, AddressSpace
, dispatch_listener
);
2358 AddressSpaceDispatch
*d
= g_new0(AddressSpaceDispatch
, 1);
2361 n
= dummy_section(&d
->map
, as
, &io_mem_unassigned
);
2362 assert(n
== PHYS_SECTION_UNASSIGNED
);
2363 n
= dummy_section(&d
->map
, as
, &io_mem_notdirty
);
2364 assert(n
== PHYS_SECTION_NOTDIRTY
);
2365 n
= dummy_section(&d
->map
, as
, &io_mem_rom
);
2366 assert(n
== PHYS_SECTION_ROM
);
2367 n
= dummy_section(&d
->map
, as
, &io_mem_watch
);
2368 assert(n
== PHYS_SECTION_WATCH
);
2370 d
->phys_map
= (PhysPageEntry
) { .ptr
= PHYS_MAP_NODE_NIL
, .skip
= 1 };
2372 as
->next_dispatch
= d
;
2375 static void address_space_dispatch_free(AddressSpaceDispatch
*d
)
2377 phys_sections_free(&d
->map
);
2381 static void mem_commit(MemoryListener
*listener
)
2383 AddressSpace
*as
= container_of(listener
, AddressSpace
, dispatch_listener
);
2384 AddressSpaceDispatch
*cur
= as
->dispatch
;
2385 AddressSpaceDispatch
*next
= as
->next_dispatch
;
2387 phys_page_compact_all(next
, next
->map
.nodes_nb
);
2389 atomic_rcu_set(&as
->dispatch
, next
);
2391 call_rcu(cur
, address_space_dispatch_free
, rcu
);
2395 static void tcg_commit(MemoryListener
*listener
)
2397 CPUAddressSpace
*cpuas
;
2398 AddressSpaceDispatch
*d
;
2400 /* since each CPU stores ram addresses in its TLB cache, we must
2401 reset the modified entries */
2402 cpuas
= container_of(listener
, CPUAddressSpace
, tcg_as_listener
);
2403 cpu_reloading_memory_map();
2404 /* The CPU and TLB are protected by the iothread lock.
2405 * We reload the dispatch pointer now because cpu_reloading_memory_map()
2406 * may have split the RCU critical section.
2408 d
= atomic_rcu_read(&cpuas
->as
->dispatch
);
2409 cpuas
->memory_dispatch
= d
;
2410 tlb_flush(cpuas
->cpu
, 1);
2413 void address_space_init_dispatch(AddressSpace
*as
)
2415 as
->dispatch
= NULL
;
2416 as
->dispatch_listener
= (MemoryListener
) {
2418 .commit
= mem_commit
,
2419 .region_add
= mem_add
,
2420 .region_nop
= mem_add
,
2423 memory_listener_register(&as
->dispatch_listener
, as
);
2426 void address_space_unregister(AddressSpace
*as
)
2428 memory_listener_unregister(&as
->dispatch_listener
);
2431 void address_space_destroy_dispatch(AddressSpace
*as
)
2433 AddressSpaceDispatch
*d
= as
->dispatch
;
2435 atomic_rcu_set(&as
->dispatch
, NULL
);
2437 call_rcu(d
, address_space_dispatch_free
, rcu
);
2441 static void memory_map_init(void)
2443 system_memory
= g_malloc(sizeof(*system_memory
));
2445 memory_region_init(system_memory
, NULL
, "system", UINT64_MAX
);
2446 address_space_init(&address_space_memory
, system_memory
, "memory");
2448 system_io
= g_malloc(sizeof(*system_io
));
2449 memory_region_init_io(system_io
, NULL
, &unassigned_io_ops
, NULL
, "io",
2451 address_space_init(&address_space_io
, system_io
, "I/O");
2454 MemoryRegion
*get_system_memory(void)
2456 return system_memory
;
2459 MemoryRegion
*get_system_io(void)
2464 #endif /* !defined(CONFIG_USER_ONLY) */
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

#else
static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
                                     hwaddr length)
{
    uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
    /* No early return if dirty_log_mask is or becomes 0, because
     * cpu_physical_memory_set_dirty_range will still call
     * xen_modified_memory.
     */
    if (dirty_log_mask) {
        dirty_log_mask =
            cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
    }
    if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
        tb_invalidate_phys_range(addr, addr + length);
        dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
    }
    cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
}
static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
{
    unsigned access_size_max = mr->ops->valid.max_access_size;

    /* Regions are assumed to support 1-4 byte accesses unless
       otherwise specified.  */
    if (access_size_max == 0) {
        access_size_max = 4;
    }

    /* Bound the maximum access by the alignment of the address.  */
    if (!mr->ops->impl.unaligned) {
        unsigned align_size_max = addr & -addr;
        if (align_size_max != 0 && align_size_max < access_size_max) {
            access_size_max = align_size_max;
        }
    }

    /* Don't attempt accesses larger than the maximum.  */
    if (l > access_size_max) {
        l = access_size_max;
    }
    l = pow2floor(l);

    return l;
}
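
/*
 * Illustrative worked example (not part of the original file): assume a
 * region whose ops report valid.max_access_size == 8 and impl.unaligned ==
 * false.  For a 16-byte request at addr 0x1004, "addr & -addr" is 4, so the
 * access is first clamped to 4 bytes; pow2floor() then leaves it at 4, and
 * the caller loops to issue the remaining bytes as further accesses.
 */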
static bool prepare_mmio_access(MemoryRegion *mr)
{
    bool unlocked = !qemu_mutex_iothread_locked();
    bool release_lock = false;

    if (unlocked && mr->global_locking) {
        qemu_mutex_lock_iothread();
        unlocked = false;
        release_lock = true;
    }
    if (mr->flush_coalesced_mmio) {
        if (unlocked) {
            qemu_mutex_lock_iothread();
        }
        qemu_flush_coalesced_mmio_buffer();
        if (unlocked) {
            qemu_mutex_unlock_iothread();
        }
    }

    return release_lock;
}
/* Called within RCU critical section.  */
static MemTxResult address_space_write_continue(AddressSpace *as, hwaddr addr,
                                                MemTxAttrs attrs,
                                                const uint8_t *buf,
                                                int len, hwaddr addr1,
                                                hwaddr l, MemoryRegion *mr)
{
    uint8_t *ptr;
    uint64_t val;
    MemTxResult result = MEMTX_OK;
    bool release_lock = false;

    for (;;) {
        if (!memory_access_is_direct(mr, true)) {
            release_lock |= prepare_mmio_access(mr);
            l = memory_access_size(mr, l, addr1);
            /* XXX: could force current_cpu to NULL to avoid
               potential bugs */
            switch (l) {
            case 8:
                /* 64 bit write access */
                val = ldq_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 8,
                                                       attrs);
                break;
            case 4:
                /* 32 bit write access */
                val = ldl_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 4,
                                                       attrs);
                break;
            case 2:
                /* 16 bit write access */
                val = lduw_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 2,
                                                       attrs);
                break;
            case 1:
                /* 8 bit write access */
                val = ldub_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 1,
                                                       attrs);
                break;
            default:
                abort();
            }
        } else {
            addr1 += memory_region_get_ram_addr(mr);
            /* RAM case */
            ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
            memcpy(ptr, buf, l);
            invalidate_and_set_dirty(mr, addr1, l);
        }

        if (release_lock) {
            qemu_mutex_unlock_iothread();
            release_lock = false;
        }

        len -= l;
        buf += l;
        addr += l;

        if (!len) {
            break;
        }

        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);
    }

    return result;
}
MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
                                const uint8_t *buf, int len)
{
    hwaddr l;
    hwaddr addr1;
    MemoryRegion *mr;
    MemTxResult result = MEMTX_OK;

    if (len > 0) {
        rcu_read_lock();
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);
        result = address_space_write_continue(as, addr, attrs, buf, len,
                                              addr1, l, mr);
        rcu_read_unlock();
    }

    return result;
}
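
/*
 * Usage sketch (illustrative only, not part of the original file): a device
 * model writing a small buffer into guest memory through the system address
 * space.  The guest physical address and payload are hypothetical.
 */
#if 0
static void example_write_guest_buffer(void)
{
    const uint8_t payload[4] = { 0xde, 0xad, 0xbe, 0xef };

    if (address_space_write(&address_space_memory, 0x1000,
                            MEMTXATTRS_UNSPECIFIED,
                            payload, sizeof(payload)) != MEMTX_OK) {
        /* the write hit an unassigned or erroring region */
    }
}
#endif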
/* Called within RCU critical section.  */
MemTxResult address_space_read_continue(AddressSpace *as, hwaddr addr,
                                        MemTxAttrs attrs, uint8_t *buf,
                                        int len, hwaddr addr1, hwaddr l,
                                        MemoryRegion *mr)
{
    uint8_t *ptr;
    uint64_t val;
    MemTxResult result = MEMTX_OK;
    bool release_lock = false;

    for (;;) {
        if (!memory_access_is_direct(mr, false)) {
            /* I/O case */
            release_lock |= prepare_mmio_access(mr);
            l = memory_access_size(mr, l, addr1);
            switch (l) {
            case 8:
                /* 64 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 8,
                                                      attrs);
                stq_p(buf, val);
                break;
            case 4:
                /* 32 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 4,
                                                      attrs);
                stl_p(buf, val);
                break;
            case 2:
                /* 16 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 2,
                                                      attrs);
                stw_p(buf, val);
                break;
            case 1:
                /* 8 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 1,
                                                      attrs);
                stb_p(buf, val);
                break;
            default:
                abort();
            }
        } else {
            /* RAM case */
            ptr = qemu_get_ram_ptr(mr->ram_block,
                                   memory_region_get_ram_addr(mr) + addr1);
            memcpy(buf, ptr, l);
        }

        if (release_lock) {
            qemu_mutex_unlock_iothread();
            release_lock = false;
        }

        len -= l;
        buf += l;
        addr += l;

        if (!len) {
            break;
        }

        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, false);
    }

    return result;
}
MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
                                    MemTxAttrs attrs, uint8_t *buf, int len)
{
    hwaddr l;
    hwaddr addr1;
    MemoryRegion *mr;
    MemTxResult result = MEMTX_OK;

    if (len > 0) {
        rcu_read_lock();
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, false);
        result = address_space_read_continue(as, addr, attrs, buf, len,
                                             addr1, l, mr);
        rcu_read_unlock();
    }

    return result;
}
MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
                             uint8_t *buf, int len, bool is_write)
{
    if (is_write) {
        return address_space_write(as, addr, attrs, (uint8_t *)buf, len);
    } else {
        return address_space_read(as, addr, attrs, (uint8_t *)buf, len);
    }
}
void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
                            int len, int is_write)
{
    address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
                     buf, len, is_write);
}
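
/*
 * Usage sketch (illustrative only, not part of the original file): reading
 * guest memory either through the legacy cpu_physical_memory_rw() wrapper or
 * directly via address_space_rw() when the transaction result matters.  The
 * guest physical address is hypothetical.
 */
#if 0
static void example_read_guest_word(void)
{
    uint8_t buf[4];

    /* legacy helper: no transaction result is reported */
    cpu_physical_memory_rw(0x2000, buf, sizeof(buf), 0);

    /* explicit form: same access, but the result can be checked */
    if (address_space_rw(&address_space_memory, 0x2000,
                         MEMTXATTRS_UNSPECIFIED, buf, sizeof(buf),
                         false) != MEMTX_OK) {
        /* handle a failed read */
    }
}
#endif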
enum write_rom_type {
    WRITE_DATA,
    FLUSH_CACHE,
};

static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
    hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
{
    hwaddr l;
    uint8_t *ptr;
    hwaddr addr1;
    MemoryRegion *mr;

    rcu_read_lock();
    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);

        if (!(memory_region_is_ram(mr) ||
              memory_region_is_romd(mr))) {
            l = memory_access_size(mr, l, addr1);
        } else {
            addr1 += memory_region_get_ram_addr(mr);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
            switch (type) {
            case WRITE_DATA:
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(mr, addr1, l);
                break;
            case FLUSH_CACHE:
                flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
                break;
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
    rcu_read_unlock();
}
/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
                                   const uint8_t *buf, int len)
{
    cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
}
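
/*
 * Usage sketch (illustrative only, not part of the original file): copying a
 * firmware blob into a region that may be ROM-backed, as a board-level
 * loader might do.  The address is hypothetical; real loaders normally go
 * through the rom_add_*()/load_image_*() helpers rather than calling this
 * directly.
 */
#if 0
static void example_load_firmware(const uint8_t *blob, int size)
{
    /* unlike address_space_write(), this also succeeds for ROM regions */
    cpu_physical_memory_write_rom(&address_space_memory, 0xfffc0000,
                                  blob, size);
}
#endif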
void cpu_flush_icache_range(hwaddr start, int len)
{
    /*
     * This function should do the same thing as an icache flush that was
     * triggered from within the guest. For TCG we are always cache coherent,
     * so there is no need to flush anything. For KVM / Xen we need to flush
     * the host's instruction cache at least.
     */
    if (tcg_enabled()) {
        return;
    }
    cpu_physical_memory_write_rom_internal(&address_space_memory,
                                           start, NULL, len, FLUSH_CACHE);
}
typedef struct {
    MemoryRegion *mr;
    void *buffer;
    hwaddr addr;
    hwaddr len;
    bool in_use;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    QEMUBH *bh;
    QLIST_ENTRY(MapClient) link;
} MapClient;

QemuMutex map_client_list_lock;
static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);
static void cpu_unregister_map_client_do(MapClient *client)
{
    QLIST_REMOVE(client, link);
    g_free(client);
}
static void cpu_notify_map_clients_locked(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        qemu_bh_schedule(client->bh);
        cpu_unregister_map_client_do(client);
    }
}
void cpu_register_map_client(QEMUBH *bh)
{
    MapClient *client = g_malloc(sizeof(*client));

    qemu_mutex_lock(&map_client_list_lock);
    client->bh = bh;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    if (!atomic_read(&bounce.in_use)) {
        cpu_notify_map_clients_locked();
    }
    qemu_mutex_unlock(&map_client_list_lock);
}
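
/*
 * Usage sketch (illustrative only, not part of the original file): how a
 * caller whose address_space_map() attempt was refused (the bounce buffer
 * was busy) might arrange to be retried.  The bottom half and retry function
 * are hypothetical.
 */
#if 0
static QEMUBH *example_retry_bh;

static void example_retry_fn(void *opaque)
{
    /* re-attempt the address_space_map() that previously returned NULL */
}

static void example_schedule_retry(void)
{
    example_retry_bh = qemu_bh_new(example_retry_fn, NULL);
    cpu_register_map_client(example_retry_bh);
    /* example_retry_fn runs once the bounce buffer becomes free again */
}
#endif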
void cpu_exec_init_all(void)
{
    qemu_mutex_init(&ram_list.mutex);
    io_mem_init();
    memory_map_init();
    qemu_mutex_init(&map_client_list_lock);
}
void cpu_unregister_map_client(QEMUBH *bh)
{
    MapClient *client;

    qemu_mutex_lock(&map_client_list_lock);
    QLIST_FOREACH(client, &map_client_list, link) {
        if (client->bh == bh) {
            cpu_unregister_map_client_do(client);
            break;
        }
    }
    qemu_mutex_unlock(&map_client_list_lock);
}
static void cpu_notify_map_clients(void)
{
    qemu_mutex_lock(&map_client_list_lock);
    cpu_notify_map_clients_locked();
    qemu_mutex_unlock(&map_client_list_lock);
}
bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
{
    MemoryRegion *mr;
    hwaddr l, xlat;

    rcu_read_lock();
    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (!memory_access_is_direct(mr, is_write)) {
            l = memory_access_size(mr, l, addr);
            if (!memory_region_access_valid(mr, xlat, l, is_write)) {
                return false;
            }
        }

        len -= l;
        addr += l;
    }
    rcu_read_unlock();
    return true;
}
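
/*
 * Usage sketch (illustrative only, not part of the original file): probing
 * whether a DMA window is fully backed by RAM or well-behaved MMIO before
 * committing to a transfer.  The window base and size are hypothetical.
 */
#if 0
static bool example_dma_window_ok(hwaddr base, int size)
{
    return address_space_access_valid(&address_space_memory, base, size,
                                      true /* is_write */);
}
#endif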
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *address_space_map(AddressSpace *as,
                        hwaddr addr,
                        hwaddr *plen,
                        bool is_write)
{
    hwaddr len = *plen;
    hwaddr done = 0;
    hwaddr l, xlat, base;
    MemoryRegion *mr, *this_mr;
    ram_addr_t raddr;
    void *ptr;

    if (len == 0) {
        return NULL;
    }

    l = len;
    rcu_read_lock();
    mr = address_space_translate(as, addr, &xlat, &l, is_write);

    if (!memory_access_is_direct(mr, is_write)) {
        if (atomic_xchg(&bounce.in_use, true)) {
            rcu_read_unlock();
            return NULL;
        }
        /* Avoid unbounded allocations */
        l = MIN(l, TARGET_PAGE_SIZE);
        bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
        bounce.addr = addr;
        bounce.len = l;

        memory_region_ref(mr);
        bounce.mr = mr;
        if (!is_write) {
            address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
                               bounce.buffer, l);
        }

        rcu_read_unlock();
        *plen = l;
        return bounce.buffer;
    }

    base = xlat;
    raddr = memory_region_get_ram_addr(mr);

    for (;;) {
        len -= l;
        addr += l;
        done += l;
        if (len == 0) {
            break;
        }

        l = len;
        this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (this_mr != mr || xlat != base + done) {
            break;
        }
    }

    memory_region_ref(mr);
    rcu_read_unlock();
    *plen = done;
    ptr = qemu_ram_ptr_length(mr->ram_block, raddr + base, plen);

    return ptr;
}
/* Unmaps a memory region previously mapped by address_space_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         int is_write, hwaddr access_len)
{
    if (buffer != bounce.buffer) {
        MemoryRegion *mr;
        ram_addr_t addr1;

        mr = qemu_ram_addr_from_host(buffer, &addr1);
        assert(mr != NULL);
        if (is_write) {
            invalidate_and_set_dirty(mr, addr1, access_len);
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        memory_region_unref(mr);
        return;
    }
    if (is_write) {
        address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
                            bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    memory_region_unref(bounce.mr);
    atomic_mb_set(&bounce.in_use, false);
    cpu_notify_map_clients();
}
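
/*
 * Usage sketch (illustrative only, not part of the original file): the
 * map/unmap pairing described above, falling back to a copying read when the
 * mapping is refused (e.g. the bounce buffer is already in use).  The guest
 * address and helper name are hypothetical.
 */
#if 0
static void example_read_via_map(hwaddr addr, uint8_t *dst, hwaddr size)
{
    hwaddr plen = size;
    void *host = address_space_map(&address_space_memory, addr, &plen, false);

    if (host) {
        /* only plen bytes were mapped; it may be less than requested */
        memcpy(dst, host, plen);
        address_space_unmap(&address_space_memory, host, plen, false, plen);
    } else {
        /* mapping unavailable right now; fall back to a copying read */
        cpu_physical_memory_rw(addr, dst, size, 0);
    }
}
#endif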
void *cpu_physical_memory_map(hwaddr addr,
                              hwaddr *plen,
                              int is_write)
{
    return address_space_map(&address_space_memory, addr, plen, is_write);
}

void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                               int is_write, hwaddr access_len)
{
    return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
}
/* warning: addr must be aligned */
static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
                                                  MemTxAttrs attrs,
                                                  MemTxResult *result,
                                                  enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, false);
    if (l < 4 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(mr->ram_block,
                               (memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}
uint32_t address_space_ldl(AddressSpace *as, hwaddr addr,
                           MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_NATIVE_ENDIAN);
}

uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_LITTLE_ENDIAN);
}

uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_BIG_ENDIAN);
}

uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}
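
/*
 * Usage sketch (illustrative only, not part of the original file): the
 * ld*_phys convenience wrappers above are what device and board code
 * typically use for one-off aligned reads of guest-physical memory.  The
 * descriptor layout here is hypothetical.
 */
#if 0
static uint32_t example_read_descriptor_flags(AddressSpace *as, hwaddr desc)
{
    /* a 32-bit little-endian field at offset 4 of a hypothetical descriptor */
    return ldl_le_phys(as, desc + 4);
}
#endif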
/* warning: addr must be aligned */
static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
                                                  MemTxAttrs attrs,
                                                  MemTxResult *result,
                                                  enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 8;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 false);
    if (l < 8 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap64(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap64(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(mr->ram_block,
                               (memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}
uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
                           MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_NATIVE_ENDIAN);
}

uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_LITTLE_ENDIAN);
}

uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_BIG_ENDIAN);
}

uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}
uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
                            MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t val;
    MemTxResult r;

    r = address_space_rw(as, addr, attrs, &val, 1, 0);
    if (result) {
        *result = r;
    }
    return val;
}

uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}
/* warning: addr must be aligned */
static inline uint32_t address_space_lduw_internal(AddressSpace *as,
                                                   hwaddr addr,
                                                   MemTxAttrs attrs,
                                                   MemTxResult *result,
                                                   enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 false);
    if (l < 2 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(mr->ram_block,
                               (memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}
uint32_t address_space_lduw(AddressSpace *as, hwaddr addr,
                            MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_NATIVE_ENDIAN);
}

uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
                               MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_LITTLE_ENDIAN);
}

uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
                               MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_BIG_ENDIAN);
}

uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}
/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
                                MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    uint8_t dirty_log_mask;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

        r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
    } else {
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
        stl_p(ptr, val);

        dirty_log_mask = memory_region_get_dirty_log_mask(mr);
        dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
        cpu_physical_memory_set_dirty_range(addr1, 4, dirty_log_mask);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}

void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_notdirty(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
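
/*
 * Usage sketch (illustrative only, not part of the original file): the
 * notdirty variant is intended for target code that rewrites guest
 * page-table entries (e.g. setting accessed/dirty bits) without flagging
 * the page as guest-dirty for code invalidation.  The PTE address and value
 * are hypothetical.
 */
#if 0
static void example_update_pte(AddressSpace *as, hwaddr pte_addr,
                               uint32_t new_pte)
{
    stl_phys_notdirty(as, pte_addr, new_pte);
}
#endif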
/* warning: addr must be aligned */
static inline void address_space_stl_internal(AddressSpace *as,
                                              hwaddr addr, uint32_t val,
                                              MemTxAttrs attrs,
                                              MemTxResult *result,
                                              enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(mr, addr1, 4);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}
void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stl_internal(as, addr, val, attrs, result,
                               DEVICE_NATIVE_ENDIAN);
}

void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stl_internal(as, addr, val, attrs, result,
                               DEVICE_LITTLE_ENDIAN);
}

void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stl_internal(as, addr, val, attrs, result,
                               DEVICE_BIG_ENDIAN);
}

void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t v = val;
    MemTxResult r;

    r = address_space_rw(as, addr, attrs, &v, 1, 1);
    if (result) {
        *result = r;
    }
}

void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
/* warning: addr must be aligned */
static inline void address_space_stw_internal(AddressSpace *as,
                                              hwaddr addr, uint32_t val,
                                              MemTxAttrs attrs,
                                              MemTxResult *result,
                                              enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, true);
    if (l < 2 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(mr, addr1, 2);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}
void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_NATIVE_ENDIAN);
}

void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_LITTLE_ENDIAN);
}

void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_BIG_ENDIAN);
}

void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;
    val = tswap64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;
    val = cpu_to_le64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;
    val = cpu_to_be64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

    while (len > 0) {
        int asidx;
        MemTxAttrs attrs;

        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_attrs_debug(cpu, page, &attrs);
        asidx = cpu_asidx_from_attrs(cpu, attrs);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write) {
            cpu_physical_memory_write_rom(cpu->cpu_ases[asidx].as,
                                          phys_addr, buf, l);
        } else {
            address_space_rw(cpu->cpu_ases[asidx].as, phys_addr,
                             MEMTXATTRS_UNSPECIFIED,
                             buf, l, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
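
/*
 * Usage sketch (illustrative only, not part of the original file): this is
 * the path a debugger stub takes, e.g. poking a breakpoint byte into guest
 * code through the CPU's virtual address space.  The virtual address is
 * hypothetical.
 */
#if 0
static void example_insert_breakpoint_byte(CPUState *cpu, target_ulong vaddr)
{
    uint8_t brk = 0xcc;   /* x86 INT3, purely as an illustration */

    /* is_write != 0: may also patch ROM-backed pages */
    cpu_memory_rw_debug(cpu, vaddr, &brk, 1, 1);
}
#endif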
/*
 * Allows code that needs to deal with migration bitmaps etc to still be built
 * target independent.
 */
size_t qemu_target_page_bits(void)
{
    return TARGET_PAGE_BITS;
}
/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool target_words_bigendian(void);
bool target_words_bigendian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}
#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegion*mr;
    hwaddr l = 1;
    bool res;

    rcu_read_lock();
    mr = address_space_translate(&address_space_memory,
                                 phys_addr, &phys_addr, &l, false);

    res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
    rcu_read_unlock();
    return res;
}
int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
{
    RAMBlock *block;
    int ret = 0;

    rcu_read_lock();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        ret = func(block->idstr, block->host, block->offset,
                   block->used_length, opaque);