/*
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu-common.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/boards.h"
#endif
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#endif
#include "exec/cpu-all.h"

#include "qemu/rcu_queue.h"
#include "qemu/main-loop.h"
#include "translate-all.h"
#include "sysemu/replay.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"

#include "qemu/range.h"
#include "qemu/mmap-alloc.h"

//#define DEBUG_SUBPAGE
#if !defined(CONFIG_USER_ONLY)
/* ram_list is read under rcu_read_lock()/rcu_read_unlock().  Writes
 * are protected by the ramlist lock.
 */
RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC   (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED     (1 << 1)

/* Only a portion of RAM (used_length) is actually used, and migrated.
 * This used_length size can change across reboots.
 */
#define RAM_RESIZEABLE (1 << 2)

#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside cpu_exec() */
__thread CPUState *current_cpu;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables.  */
#define ADDR_SPACE_BITS 64

#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)

typedef PhysPageEntry Node[P_L2_SIZE];
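
/*
 * Note (added): phys_map below is a radix tree over the guest physical
 * address space.  Each non-leaf node is a Node, i.e. an array of P_L2_SIZE
 * entries, and each entry either points at another node (skip != 0) or
 * carries a MemoryRegionSection index (a leaf).  As an illustration, with a
 * 4 KiB target page and 9-bit levels, P_L2_LEVELS works out to
 * ((64 - 12 - 1) / 9) + 1 = 6, so a lookup touches at most six nodes; the
 * exact constants depend on TARGET_PAGE_BITS and P_L2_BITS for the build.
 */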
typedef struct PhysPageMap {
    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    struct rcu_head rcu;

    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3
static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;

/**
 * CPUAddressSpace: all the information a CPU needs about an AddressSpace
 * @cpu: the CPU whose AddressSpace this is
 * @as: the AddressSpace itself
 * @memory_dispatch: its dispatch pointer (cached, RCU protected)
 * @tcg_as_listener: listener for tracking changes to the AddressSpace
 */
struct CPUAddressSpace {
    CPUState *cpu;
    AddressSpace *as;
    struct AddressSpaceDispatch *memory_dispatch;
    MemoryListener tcg_as_listener;
};
#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
    }
}

static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
{
    ret = map->nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);

    e.skip = leaf ? 0 : 1;
    e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
    for (i = 0; i < P_L2_SIZE; ++i) {
        memcpy(&p[i], &e, sizeof(e));
    }
    return ret;
}
static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map, level == 0);
    }
    p = map->nodes[lp->ptr];
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}
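
/*
 * Note (added): phys_page_set() maps the page range [index, index + nb) to a
 * single section number.  phys_page_set_level() descends from the top level;
 * whenever a whole aligned block of "step" pages falls inside the range it
 * writes the leaf directly at that level, otherwise it recurses one level
 * down, so large regions need only a handful of entries.
 */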
/* Compact a non leaf page entry. Simply detect that the entry has a single child,
 * and update our entry so we can skip it and go directly to the destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
{
    unsigned valid_ptr = P_L2_SIZE;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }
        phys_page_compact(&p[i], nodes, compacted);
    }

    /* We can only compress if there's only one child. */
    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}

static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    DECLARE_BITMAP(compacted, nodes_nb);

    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes, compacted);
    }
}
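
/*
 * Note (added): compaction folds chains of single-child nodes into their
 * parent by accumulating the child's skip count into lp->skip, so a later
 * phys_page_find() can jump several levels in one step instead of walking
 * every intermediate node.
 */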
static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    hwaddr index = addr >> TARGET_PAGE_BITS;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (sections[lp.ptr].size.hi ||
        range_covers_byte(sections[lp.ptr].offset_within_address_space,
                          sections[lp.ptr].size.lo, addr)) {
        return &sections[lp.ptr];
    }
    return &sections[PHYS_SECTION_UNASSIGNED];
}

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}
/* Called from RCU critical section */
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section;
    subpage_t *subpage;

    section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    return section;
}

/* Called from RCU critical section */
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    /* MMIO registers can be expected to perform full-width accesses based only
     * on their address, without considering adjacent registers that could
     * decode to completely different MemoryRegions.  When such registers
     * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
     * regions overlap wildly.  For this reason we cannot clamp the accesses
     * here.
     *
     * If the length is small (as is the case for address_space_ldl/stl),
     * everything works fine.  If the incoming length is large, however,
     * the caller really has to do the clamping through memory_access_size.
     */
    if (memory_region_is_ram(mr)) {
        diff = int128_sub(section->size, int128_make64(addr));
        *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    }
    return section;
}
/* Called from RCU critical section */
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    MemoryRegionSection *section;

    for (;;) {
        AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
        section = address_space_translate_internal(d, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        *plen = MIN(page, *plen);
    }
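
/*
 * Note (added): the loop above exists because a MemoryRegion may be an IOMMU;
 * each iteration translates the address through one IOMMU and restarts the
 * lookup in the IOMMU's target AddressSpace, until a terminal (non-IOMMU)
 * region is reached or the permission check fails.
 */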
/* Called from RCU critical section */
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen)
{
    MemoryRegionSection *section;
    AddressSpaceDispatch *d = cpu->cpu_ases[asidx].memory_dispatch;

    section = address_space_translate_internal(d, addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
}

static int cpu_common_pre_load(void *opaque)
{
    CPUState *cpu = opaque;

    cpu->exception_index = -1;
}

static bool cpu_common_exception_index_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return tcg_enabled() && cpu->exception_index != -1;
}
static const VMStateDescription vmstate_cpu_common_exception_index = {
    .name = "cpu_common/exception_index",
    .minimum_version_id = 1,
    .needed = cpu_common_exception_index_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(exception_index, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

static bool cpu_common_crash_occurred_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return cpu->crash_occurred;
}

static const VMStateDescription vmstate_cpu_common_crash_occurred = {
    .name = "cpu_common/crash_occurred",
    .minimum_version_id = 1,
    .needed = cpu_common_crash_occurred_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(crash_occurred, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .minimum_version_id = 1,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription *[]) {
        &vmstate_cpu_common_exception_index,
        &vmstate_cpu_common_crash_occurred,
        NULL
    }
};
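
/*
 * Note (added): the exception_index and crash_occurred subsections are only
 * put on the wire when their .needed callback returns true, so migration
 * streams from older QEMU versions that lack these fields still load cleanly.
 */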
CPUState *qemu_get_cpu(int index)
{
    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }
}
#if !defined(CONFIG_USER_ONLY)
void cpu_address_space_init(CPUState *cpu, AddressSpace *as, int asidx)
{
    CPUAddressSpace *newas;

    /* Target code should have set num_ases before calling us */
    assert(asidx < cpu->num_ases);

    /* address space 0 gets the convenience alias */

    /* KVM cannot currently support multiple address spaces. */
    assert(asidx == 0 || !kvm_enabled());

    if (!cpu->cpu_ases) {
        cpu->cpu_ases = g_new0(CPUAddressSpace, cpu->num_ases);
    }

    newas = &cpu->cpu_ases[asidx];
    newas->tcg_as_listener.commit = tcg_commit;
    memory_listener_register(&newas->tcg_as_listener, as);
}

AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx)
{
    /* Return the AddressSpace corresponding to the specified index */
    return cpu->cpu_ases[asidx].as;
}
#ifndef CONFIG_USER_ONLY
static DECLARE_BITMAP(cpu_index_map, MAX_CPUMASK_BITS);

static int cpu_get_free_index(Error **errp)
{
    int cpu = find_first_zero_bit(cpu_index_map, MAX_CPUMASK_BITS);

    if (cpu >= MAX_CPUMASK_BITS) {
        error_setg(errp, "Trying to use more CPUs than max of %d",
                   MAX_CPUMASK_BITS);
        return -1;
    }

    bitmap_set(cpu_index_map, cpu, 1);
    return cpu;
}

void cpu_exec_exit(CPUState *cpu)
{
    if (cpu->cpu_index == -1) {
        /* cpu_index was never allocated by this @cpu or was already freed. */
        return;
    }

    bitmap_clear(cpu_index_map, cpu->cpu_index, 1);
}
#else

static int cpu_get_free_index(Error **errp)
{
    CPU_FOREACH(some_cpu) {
    }
}

void cpu_exec_exit(CPUState *cpu)
{
}
#endif
void cpu_exec_init(CPUState *cpu, Error **errp)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    int cpu_index;
    Error *local_err = NULL;

#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();

    /* This is a softmmu CPU object, so create a property for it
     * so users can wire up its memory. (This can't go in qom/cpu.c
     * because that file is compiled only once for both user-mode
     * and system builds.) The default if no link is set up is to use
     * the system address space.
     */
    object_property_add_link(OBJECT(cpu), "memory", TYPE_MEMORY_REGION,
                             (Object **)&cpu->memory,
                             qdev_prop_allow_set_link_before_realize,
                             OBJ_PROP_LINK_UNREF_ON_RELEASE,
                             &error_abort);
    cpu->memory = system_memory;
    object_ref(OBJECT(cpu->memory));
#endif

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    cpu_index = cpu->cpu_index = cpu_get_free_index(&local_err);
    if (local_err) {
        error_propagate(errp, local_err);
#if defined(CONFIG_USER_ONLY)
        cpu_list_unlock();
#endif
        return;
    }
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
    }
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, cpu->env_ptr);
    assert(cc->vmsd == NULL);
    assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    MemTxAttrs attrs;
    hwaddr phys = cpu_get_phys_page_attrs_debug(cpu, pc, &attrs);
    int asidx = cpu_asidx_from_attrs(cpu, attrs);
    if (phys != -1) {
        tb_invalidate_phys_addr(cpu->cpu_ases[asidx].as,
                                phys | (pc & ~TARGET_PAGE_MASK));
    }
}
#endif
#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
}

int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    return -ENOSYS;
}

void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
}

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    CPUWatchpoint *wp;

    /* forbid ranges which are empty or run off the end of the address space */
    if (len == 0 || (addr + len - 1) < addr) {
        error_report("tried to set invalid watchpoint at %"
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
    }

    tlb_flush_page(cpu, addr);

    if (watchpoint) {
        *watchpoint = wp;
    }
    return 0;
}
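
/*
 * Note (added): tlb_flush_page() is what arms the watchpoint: the stale TLB
 * entry for the page is dropped, and the refill path (see
 * memory_region_section_get_iotlb) marks the page TLB_MMIO so every access
 * goes through the slow path where check_watchpoint() can run.
 */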
/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (addr == wp->vaddr && len == wp->len
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);

    tlb_flush_page(cpu, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
        }
    }
}
/* Return true if this watchpoint address matches the specified
 * access (ie the address range covered by the watchpoint overlaps
 * partially or completely with the address range covered by the
 * access).
 */
static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
                                                  vaddr addr, vaddr len)
{
    /* We know the lengths are non-zero, but a little caution is
     * required to avoid errors in the case where the range ends
     * exactly at the top of the address space and so addr + len
     * wraps round to zero.
     */
    vaddr wpend = wp->vaddr + wp->len - 1;
    vaddr addrend = addr + len - 1;

    return !(addr > wpend || wp->vaddr > addrend);
}
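
/*
 * Example (added): a 4-byte watchpoint at 0x1000 covers [0x1000, 0x1003].
 * A 2-byte access at 0x1002 gives addrend = 0x1003, so neither
 * "addr > wpend" (0x1002 > 0x1003) nor "wp->vaddr > addrend"
 * (0x1000 > 0x1003) holds, and the ranges are reported as overlapping.
 * Computing the inclusive ends first is what keeps the test correct even
 * when a range ends at the very top of the address space.
 */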
807 /* Add a breakpoint. */
808 int cpu_breakpoint_insert(CPUState
*cpu
, vaddr pc
, int flags
,
809 CPUBreakpoint
**breakpoint
)
813 bp
= g_malloc(sizeof(*bp
));
818 /* keep all GDB-injected breakpoints in front */
819 if (flags
& BP_GDB
) {
820 QTAILQ_INSERT_HEAD(&cpu
->breakpoints
, bp
, entry
);
822 QTAILQ_INSERT_TAIL(&cpu
->breakpoints
, bp
, entry
);
825 breakpoint_invalidate(cpu
, pc
);
833 /* Remove a specific breakpoint. */
834 int cpu_breakpoint_remove(CPUState
*cpu
, vaddr pc
, int flags
)
838 QTAILQ_FOREACH(bp
, &cpu
->breakpoints
, entry
) {
839 if (bp
->pc
== pc
&& bp
->flags
== flags
) {
840 cpu_breakpoint_remove_by_ref(cpu
, bp
);
847 /* Remove a specific breakpoint by reference. */
848 void cpu_breakpoint_remove_by_ref(CPUState
*cpu
, CPUBreakpoint
*breakpoint
)
850 QTAILQ_REMOVE(&cpu
->breakpoints
, breakpoint
, entry
);
852 breakpoint_invalidate(cpu
, breakpoint
->pc
);
857 /* Remove all matching breakpoints. */
858 void cpu_breakpoint_remove_all(CPUState
*cpu
, int mask
)
860 CPUBreakpoint
*bp
, *next
;
862 QTAILQ_FOREACH_SAFE(bp
, &cpu
->breakpoints
, entry
, next
) {
863 if (bp
->flags
& mask
) {
864 cpu_breakpoint_remove_by_ref(cpu
, bp
);
869 /* enable or disable single step mode. EXCP_DEBUG is returned by the
870 CPU loop after each instruction */
871 void cpu_single_step(CPUState
*cpu
, int enabled
)
873 if (cpu
->singlestep_enabled
!= enabled
) {
874 cpu
->singlestep_enabled
= enabled
;
876 kvm_update_guest_debug(cpu
, 0);
878 /* must flush all the translated code to avoid inconsistencies */
879 /* XXX: only flush what is necessary */
885 void cpu_abort(CPUState
*cpu
, const char *fmt
, ...)
892 fprintf(stderr
, "qemu: fatal: ");
893 vfprintf(stderr
, fmt
, ap
);
894 fprintf(stderr
, "\n");
895 cpu_dump_state(cpu
, stderr
, fprintf
, CPU_DUMP_FPU
| CPU_DUMP_CCOP
);
896 if (qemu_log_separate()) {
897 qemu_log("qemu: fatal: ");
898 qemu_log_vprintf(fmt
, ap2
);
900 log_cpu_state(cpu
, CPU_DUMP_FPU
| CPU_DUMP_CCOP
);
907 #if defined(CONFIG_USER_ONLY)
909 struct sigaction act
;
910 sigfillset(&act
.sa_mask
);
911 act
.sa_handler
= SIG_DFL
;
912 sigaction(SIGABRT
, &act
, NULL
);
#if !defined(CONFIG_USER_ONLY)
/* Called from RCU critical section */
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && addr - block->offset < block->max_length) {
        return block;
    }
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->max_length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    /* It is safe to write mru_block outside the iothread lock.  This
     * is what happens:
     *
     *                                        xxx removed from list
     *                                        call_rcu(reclaim_ramblock, xxx);
     *
     * atomic_rcu_set is not needed here.  The block was already published
     * when it was placed into the list.  Here we're just making an extra
     * copy of the pointer.
     */
    ram_list.mru_block = block;
    return block;
}
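
/*
 * Note (added): mru_block is a single-entry cache in front of the RAMBlock
 * list.  Most lookups hit the same block repeatedly (e.g. main guest RAM),
 * so checking it first avoids walking the RCU list on the hot path; a miss
 * falls back to the full scan and refreshes the cache.
 */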
958 static void tlb_reset_dirty_range_all(ram_addr_t start
, ram_addr_t length
)
965 end
= TARGET_PAGE_ALIGN(start
+ length
);
966 start
&= TARGET_PAGE_MASK
;
969 block
= qemu_get_ram_block(start
);
970 assert(block
== qemu_get_ram_block(end
- 1));
971 start1
= (uintptr_t)ramblock_ptr(block
, start
- block
->offset
);
973 tlb_reset_dirty(cpu
, start1
, length
);
978 /* Note: start and end must be within the same ram block. */
979 bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start
,
983 DirtyMemoryBlocks
*blocks
;
984 unsigned long end
, page
;
991 end
= TARGET_PAGE_ALIGN(start
+ length
) >> TARGET_PAGE_BITS
;
992 page
= start
>> TARGET_PAGE_BITS
;
996 blocks
= atomic_rcu_read(&ram_list
.dirty_memory
[client
]);
999 unsigned long idx
= page
/ DIRTY_MEMORY_BLOCK_SIZE
;
1000 unsigned long offset
= page
% DIRTY_MEMORY_BLOCK_SIZE
;
1001 unsigned long num
= MIN(end
- page
, DIRTY_MEMORY_BLOCK_SIZE
- offset
);
1003 dirty
|= bitmap_test_and_clear_atomic(blocks
->blocks
[idx
],
1010 if (dirty
&& tcg_enabled()) {
1011 tlb_reset_dirty_range_all(start
, length
);
1017 /* Called from RCU critical section */
1018 hwaddr
memory_region_section_get_iotlb(CPUState
*cpu
,
1019 MemoryRegionSection
*section
,
1021 hwaddr paddr
, hwaddr xlat
,
1023 target_ulong
*address
)
1028 if (memory_region_is_ram(section
->mr
)) {
1030 iotlb
= (memory_region_get_ram_addr(section
->mr
) & TARGET_PAGE_MASK
)
1032 if (!section
->readonly
) {
1033 iotlb
|= PHYS_SECTION_NOTDIRTY
;
1035 iotlb
|= PHYS_SECTION_ROM
;
1038 AddressSpaceDispatch
*d
;
1040 d
= atomic_rcu_read(§ion
->address_space
->dispatch
);
1041 iotlb
= section
- d
->map
.sections
;
1045 /* Make accesses to pages with watchpoints go via the
1046 watchpoint trap routines. */
1047 QTAILQ_FOREACH(wp
, &cpu
->watchpoints
, entry
) {
1048 if (cpu_watchpoint_address_matches(wp
, vaddr
, TARGET_PAGE_SIZE
)) {
1049 /* Avoid trapping reads of pages with a write breakpoint. */
1050 if ((prot
& PAGE_WRITE
) || (wp
->flags
& BP_MEM_READ
)) {
1051 iotlb
= PHYS_SECTION_WATCH
+ paddr
;
1052 *address
|= TLB_MMIO
;
1060 #endif /* defined(CONFIG_USER_ONLY) */
1062 #if !defined(CONFIG_USER_ONLY)
1064 static int subpage_register (subpage_t
*mmio
, uint32_t start
, uint32_t end
,
1066 static subpage_t
*subpage_init(AddressSpace
*as
, hwaddr base
);
1068 static void *(*phys_mem_alloc
)(size_t size
, uint64_t *align
) =
1069 qemu_anon_ram_alloc
;
1072 * Set a custom physical guest memory alloator.
1073 * Accelerators with unusual needs may need this. Hopefully, we can
1074 * get rid of it eventually.
1076 void phys_mem_set_alloc(void *(*alloc
)(size_t, uint64_t *align
))
1078 phys_mem_alloc
= alloc
;
1081 static uint16_t phys_section_add(PhysPageMap
*map
,
1082 MemoryRegionSection
*section
)
1084 /* The physical section number is ORed with a page-aligned
1085 * pointer to produce the iotlb entries. Thus it should
1086 * never overflow into the page-aligned value.
1088 assert(map
->sections_nb
< TARGET_PAGE_SIZE
);
1090 if (map
->sections_nb
== map
->sections_nb_alloc
) {
1091 map
->sections_nb_alloc
= MAX(map
->sections_nb_alloc
* 2, 16);
1092 map
->sections
= g_renew(MemoryRegionSection
, map
->sections
,
1093 map
->sections_nb_alloc
);
1095 map
->sections
[map
->sections_nb
] = *section
;
1096 memory_region_ref(section
->mr
);
1097 return map
->sections_nb
++;
1100 static void phys_section_destroy(MemoryRegion
*mr
)
1102 bool have_sub_page
= mr
->subpage
;
1104 memory_region_unref(mr
);
1106 if (have_sub_page
) {
1107 subpage_t
*subpage
= container_of(mr
, subpage_t
, iomem
);
1108 object_unref(OBJECT(&subpage
->iomem
));
1113 static void phys_sections_free(PhysPageMap
*map
)
1115 while (map
->sections_nb
> 0) {
1116 MemoryRegionSection
*section
= &map
->sections
[--map
->sections_nb
];
1117 phys_section_destroy(section
->mr
);
1119 g_free(map
->sections
);
1123 static void register_subpage(AddressSpaceDispatch
*d
, MemoryRegionSection
*section
)
1126 hwaddr base
= section
->offset_within_address_space
1128 MemoryRegionSection
*existing
= phys_page_find(d
->phys_map
, base
,
1129 d
->map
.nodes
, d
->map
.sections
);
1130 MemoryRegionSection subsection
= {
1131 .offset_within_address_space
= base
,
1132 .size
= int128_make64(TARGET_PAGE_SIZE
),
1136 assert(existing
->mr
->subpage
|| existing
->mr
== &io_mem_unassigned
);
1138 if (!(existing
->mr
->subpage
)) {
1139 subpage
= subpage_init(d
->as
, base
);
1140 subsection
.address_space
= d
->as
;
1141 subsection
.mr
= &subpage
->iomem
;
1142 phys_page_set(d
, base
>> TARGET_PAGE_BITS
, 1,
1143 phys_section_add(&d
->map
, &subsection
));
1145 subpage
= container_of(existing
->mr
, subpage_t
, iomem
);
1147 start
= section
->offset_within_address_space
& ~TARGET_PAGE_MASK
;
1148 end
= start
+ int128_get64(section
->size
) - 1;
1149 subpage_register(subpage
, start
, end
,
1150 phys_section_add(&d
->map
, section
));
1154 static void register_multipage(AddressSpaceDispatch
*d
,
1155 MemoryRegionSection
*section
)
1157 hwaddr start_addr
= section
->offset_within_address_space
;
1158 uint16_t section_index
= phys_section_add(&d
->map
, section
);
1159 uint64_t num_pages
= int128_get64(int128_rshift(section
->size
,
1163 phys_page_set(d
, start_addr
>> TARGET_PAGE_BITS
, num_pages
, section_index
);
1166 static void mem_add(MemoryListener
*listener
, MemoryRegionSection
*section
)
1168 AddressSpace
*as
= container_of(listener
, AddressSpace
, dispatch_listener
);
1169 AddressSpaceDispatch
*d
= as
->next_dispatch
;
1170 MemoryRegionSection now
= *section
, remain
= *section
;
1171 Int128 page_size
= int128_make64(TARGET_PAGE_SIZE
);
1173 if (now
.offset_within_address_space
& ~TARGET_PAGE_MASK
) {
1174 uint64_t left
= TARGET_PAGE_ALIGN(now
.offset_within_address_space
)
1175 - now
.offset_within_address_space
;
1177 now
.size
= int128_min(int128_make64(left
), now
.size
);
1178 register_subpage(d
, &now
);
1180 now
.size
= int128_zero();
1182 while (int128_ne(remain
.size
, now
.size
)) {
1183 remain
.size
= int128_sub(remain
.size
, now
.size
);
1184 remain
.offset_within_address_space
+= int128_get64(now
.size
);
1185 remain
.offset_within_region
+= int128_get64(now
.size
);
1187 if (int128_lt(remain
.size
, page_size
)) {
1188 register_subpage(d
, &now
);
1189 } else if (remain
.offset_within_address_space
& ~TARGET_PAGE_MASK
) {
1190 now
.size
= page_size
;
1191 register_subpage(d
, &now
);
1193 now
.size
= int128_and(now
.size
, int128_neg(page_size
));
1194 register_multipage(d
, &now
);
1199 void qemu_flush_coalesced_mmio_buffer(void)
1202 kvm_flush_coalesced_mmio_buffer();
1205 void qemu_mutex_lock_ramlist(void)
1207 qemu_mutex_lock(&ram_list
.mutex
);
1210 void qemu_mutex_unlock_ramlist(void)
1212 qemu_mutex_unlock(&ram_list
.mutex
);
1217 #include <sys/vfs.h>
1219 #define HUGETLBFS_MAGIC 0x958458f6
1221 static long gethugepagesize(const char *path
, Error
**errp
)
1227 ret
= statfs(path
, &fs
);
1228 } while (ret
!= 0 && errno
== EINTR
);
1231 error_setg_errno(errp
, errno
, "failed to get page size of file %s",
1239 static void *file_ram_alloc(RAMBlock
*block
,
1246 char *sanitized_name
;
1251 Error
*local_err
= NULL
;
1253 hpagesize
= gethugepagesize(path
, &local_err
);
1255 error_propagate(errp
, local_err
);
1258 block
->mr
->align
= hpagesize
;
1260 if (memory
< hpagesize
) {
1261 error_setg(errp
, "memory size 0x" RAM_ADDR_FMT
" must be equal to "
1262 "or larger than huge page size 0x%" PRIx64
,
1267 if (kvm_enabled() && !kvm_has_sync_mmu()) {
1269 "host lacks kvm mmu notifiers, -mem-path unsupported");
1273 if (!stat(path
, &st
) && S_ISDIR(st
.st_mode
)) {
1274 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
1275 sanitized_name
= g_strdup(memory_region_name(block
->mr
));
1276 for (c
= sanitized_name
; *c
!= '\0'; c
++) {
1282 filename
= g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path
,
1284 g_free(sanitized_name
);
1286 fd
= mkstemp(filename
);
1292 fd
= open(path
, O_RDWR
| O_CREAT
, 0644);
1296 error_setg_errno(errp
, errno
,
1297 "unable to create backing store for hugepages");
1301 memory
= ROUND_UP(memory
, hpagesize
);
1304 * ftruncate is not supported by hugetlbfs in older
1305 * hosts, so don't bother bailing out on errors.
1306 * If anything goes wrong with it under other filesystems,
1309 if (ftruncate(fd
, memory
)) {
1310 perror("ftruncate");
1313 area
= qemu_ram_mmap(fd
, memory
, hpagesize
, block
->flags
& RAM_SHARED
);
1314 if (area
== MAP_FAILED
) {
1315 error_setg_errno(errp
, errno
,
1316 "unable to map backing store for hugepages");
1322 os_mem_prealloc(fd
, area
, memory
);
1333 /* Called with the ramlist lock held. */
1334 static ram_addr_t
find_ram_offset(ram_addr_t size
)
1336 RAMBlock
*block
, *next_block
;
1337 ram_addr_t offset
= RAM_ADDR_MAX
, mingap
= RAM_ADDR_MAX
;
1339 assert(size
!= 0); /* it would hand out same offset multiple times */
1341 if (QLIST_EMPTY_RCU(&ram_list
.blocks
)) {
1345 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1346 ram_addr_t end
, next
= RAM_ADDR_MAX
;
1348 end
= block
->offset
+ block
->max_length
;
1350 QLIST_FOREACH_RCU(next_block
, &ram_list
.blocks
, next
) {
1351 if (next_block
->offset
>= end
) {
1352 next
= MIN(next
, next_block
->offset
);
1355 if (next
- end
>= size
&& next
- end
< mingap
) {
1357 mingap
= next
- end
;
1361 if (offset
== RAM_ADDR_MAX
) {
1362 fprintf(stderr
, "Failed to find gap of requested size: %" PRIu64
"\n",
1370 ram_addr_t
last_ram_offset(void)
1373 ram_addr_t last
= 0;
1376 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1377 last
= MAX(last
, block
->offset
+ block
->max_length
);
1383 static void qemu_ram_setup_dump(void *addr
, ram_addr_t size
)
1387 /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
1388 if (!machine_dump_guest_core(current_machine
)) {
1389 ret
= qemu_madvise(addr
, size
, QEMU_MADV_DONTDUMP
);
1391 perror("qemu_madvise");
1392 fprintf(stderr
, "madvise doesn't support MADV_DONTDUMP, "
1393 "but dump_guest_core=off specified\n");
1398 /* Called within an RCU critical section, or while the ramlist lock
1401 static RAMBlock
*find_ram_block(ram_addr_t addr
)
1405 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1406 if (block
->offset
== addr
) {
1414 const char *qemu_ram_get_idstr(RAMBlock
*rb
)
1419 /* Called with iothread lock held. */
1420 void qemu_ram_set_idstr(ram_addr_t addr
, const char *name
, DeviceState
*dev
)
1422 RAMBlock
*new_block
, *block
;
1425 new_block
= find_ram_block(addr
);
1427 assert(!new_block
->idstr
[0]);
1430 char *id
= qdev_get_dev_path(dev
);
1432 snprintf(new_block
->idstr
, sizeof(new_block
->idstr
), "%s/", id
);
1436 pstrcat(new_block
->idstr
, sizeof(new_block
->idstr
), name
);
1438 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1439 if (block
!= new_block
&& !strcmp(block
->idstr
, new_block
->idstr
)) {
1440 fprintf(stderr
, "RAMBlock \"%s\" already registered, abort!\n",
1448 /* Called with iothread lock held. */
1449 void qemu_ram_unset_idstr(ram_addr_t addr
)
1453 /* FIXME: arch_init.c assumes that this is not called throughout
1454 * migration. Ignore the problem since hot-unplug during migration
1455 * does not work anyway.
1459 block
= find_ram_block(addr
);
1461 memset(block
->idstr
, 0, sizeof(block
->idstr
));
1466 static int memory_try_enable_merging(void *addr
, size_t len
)
1468 if (!machine_mem_merge(current_machine
)) {
1469 /* disabled by the user */
1473 return qemu_madvise(addr
, len
, QEMU_MADV_MERGEABLE
);
1476 /* Only legal before guest might have detected the memory size: e.g. on
1477 * incoming migration, or right after reset.
1479 * As memory core doesn't know how is memory accessed, it is up to
1480 * resize callback to update device state and/or add assertions to detect
1481 * misuse, if necessary.
1483 int qemu_ram_resize(ram_addr_t base
, ram_addr_t newsize
, Error
**errp
)
1485 RAMBlock
*block
= find_ram_block(base
);
1489 newsize
= HOST_PAGE_ALIGN(newsize
);
1491 if (block
->used_length
== newsize
) {
1495 if (!(block
->flags
& RAM_RESIZEABLE
)) {
1496 error_setg_errno(errp
, EINVAL
,
1497 "Length mismatch: %s: 0x" RAM_ADDR_FMT
1498 " in != 0x" RAM_ADDR_FMT
, block
->idstr
,
1499 newsize
, block
->used_length
);
1503 if (block
->max_length
< newsize
) {
1504 error_setg_errno(errp
, EINVAL
,
1505 "Length too large: %s: 0x" RAM_ADDR_FMT
1506 " > 0x" RAM_ADDR_FMT
, block
->idstr
,
1507 newsize
, block
->max_length
);
1511 cpu_physical_memory_clear_dirty_range(block
->offset
, block
->used_length
);
1512 block
->used_length
= newsize
;
1513 cpu_physical_memory_set_dirty_range(block
->offset
, block
->used_length
,
1515 memory_region_set_size(block
->mr
, newsize
);
1516 if (block
->resized
) {
1517 block
->resized(block
->idstr
, newsize
, block
->host
);
1522 /* Called with ram_list.mutex held */
1523 static void dirty_memory_extend(ram_addr_t old_ram_size
,
1524 ram_addr_t new_ram_size
)
1526 ram_addr_t old_num_blocks
= DIV_ROUND_UP(old_ram_size
,
1527 DIRTY_MEMORY_BLOCK_SIZE
);
1528 ram_addr_t new_num_blocks
= DIV_ROUND_UP(new_ram_size
,
1529 DIRTY_MEMORY_BLOCK_SIZE
);
1532 /* Only need to extend if block count increased */
1533 if (new_num_blocks
<= old_num_blocks
) {
1537 for (i
= 0; i
< DIRTY_MEMORY_NUM
; i
++) {
1538 DirtyMemoryBlocks
*old_blocks
;
1539 DirtyMemoryBlocks
*new_blocks
;
1542 old_blocks
= atomic_rcu_read(&ram_list
.dirty_memory
[i
]);
1543 new_blocks
= g_malloc(sizeof(*new_blocks
) +
1544 sizeof(new_blocks
->blocks
[0]) * new_num_blocks
);
1546 if (old_num_blocks
) {
1547 memcpy(new_blocks
->blocks
, old_blocks
->blocks
,
1548 old_num_blocks
* sizeof(old_blocks
->blocks
[0]));
1551 for (j
= old_num_blocks
; j
< new_num_blocks
; j
++) {
1552 new_blocks
->blocks
[j
] = bitmap_new(DIRTY_MEMORY_BLOCK_SIZE
);
1555 atomic_rcu_set(&ram_list
.dirty_memory
[i
], new_blocks
);
1558 g_free_rcu(old_blocks
, rcu
);
1563 static ram_addr_t
ram_block_add(RAMBlock
*new_block
, Error
**errp
)
1566 RAMBlock
*last_block
= NULL
;
1567 ram_addr_t old_ram_size
, new_ram_size
;
1570 old_ram_size
= last_ram_offset() >> TARGET_PAGE_BITS
;
1572 qemu_mutex_lock_ramlist();
1573 new_block
->offset
= find_ram_offset(new_block
->max_length
);
1575 if (!new_block
->host
) {
1576 if (xen_enabled()) {
1577 xen_ram_alloc(new_block
->offset
, new_block
->max_length
,
1578 new_block
->mr
, &err
);
1580 error_propagate(errp
, err
);
1581 qemu_mutex_unlock_ramlist();
1585 new_block
->host
= phys_mem_alloc(new_block
->max_length
,
1586 &new_block
->mr
->align
);
1587 if (!new_block
->host
) {
1588 error_setg_errno(errp
, errno
,
1589 "cannot set up guest memory '%s'",
1590 memory_region_name(new_block
->mr
));
1591 qemu_mutex_unlock_ramlist();
1594 memory_try_enable_merging(new_block
->host
, new_block
->max_length
);
1598 new_ram_size
= MAX(old_ram_size
,
1599 (new_block
->offset
+ new_block
->max_length
) >> TARGET_PAGE_BITS
);
1600 if (new_ram_size
> old_ram_size
) {
1601 migration_bitmap_extend(old_ram_size
, new_ram_size
);
1602 dirty_memory_extend(old_ram_size
, new_ram_size
);
1604 /* Keep the list sorted from biggest to smallest block. Unlike QTAILQ,
1605 * QLIST (which has an RCU-friendly variant) does not have insertion at
1606 * tail, so save the last element in last_block.
1608 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1610 if (block
->max_length
< new_block
->max_length
) {
1615 QLIST_INSERT_BEFORE_RCU(block
, new_block
, next
);
1616 } else if (last_block
) {
1617 QLIST_INSERT_AFTER_RCU(last_block
, new_block
, next
);
1618 } else { /* list is empty */
1619 QLIST_INSERT_HEAD_RCU(&ram_list
.blocks
, new_block
, next
);
1621 ram_list
.mru_block
= NULL
;
1623 /* Write list before version */
1626 qemu_mutex_unlock_ramlist();
1628 cpu_physical_memory_set_dirty_range(new_block
->offset
,
1629 new_block
->used_length
,
1632 if (new_block
->host
) {
1633 qemu_ram_setup_dump(new_block
->host
, new_block
->max_length
);
1634 qemu_madvise(new_block
->host
, new_block
->max_length
, QEMU_MADV_HUGEPAGE
);
1635 qemu_madvise(new_block
->host
, new_block
->max_length
, QEMU_MADV_DONTFORK
);
1636 if (kvm_enabled()) {
1637 kvm_setup_guest_memory(new_block
->host
, new_block
->max_length
);
1641 return new_block
->offset
;
1645 ram_addr_t
qemu_ram_alloc_from_file(ram_addr_t size
, MemoryRegion
*mr
,
1646 bool share
, const char *mem_path
,
1649 RAMBlock
*new_block
;
1651 Error
*local_err
= NULL
;
1653 if (xen_enabled()) {
1654 error_setg(errp
, "-mem-path not supported with Xen");
1658 if (phys_mem_alloc
!= qemu_anon_ram_alloc
) {
1660 * file_ram_alloc() needs to allocate just like
1661 * phys_mem_alloc, but we haven't bothered to provide
1665 "-mem-path not supported with this accelerator");
1669 size
= HOST_PAGE_ALIGN(size
);
1670 new_block
= g_malloc0(sizeof(*new_block
));
1672 new_block
->used_length
= size
;
1673 new_block
->max_length
= size
;
1674 new_block
->flags
= share
? RAM_SHARED
: 0;
1675 new_block
->host
= file_ram_alloc(new_block
, size
,
1677 if (!new_block
->host
) {
1682 addr
= ram_block_add(new_block
, &local_err
);
1685 error_propagate(errp
, local_err
);
1693 ram_addr_t
qemu_ram_alloc_internal(ram_addr_t size
, ram_addr_t max_size
,
1694 void (*resized
)(const char*,
1697 void *host
, bool resizeable
,
1698 MemoryRegion
*mr
, Error
**errp
)
1700 RAMBlock
*new_block
;
1702 Error
*local_err
= NULL
;
1704 size
= HOST_PAGE_ALIGN(size
);
1705 max_size
= HOST_PAGE_ALIGN(max_size
);
1706 new_block
= g_malloc0(sizeof(*new_block
));
1708 new_block
->resized
= resized
;
1709 new_block
->used_length
= size
;
1710 new_block
->max_length
= max_size
;
1711 assert(max_size
>= size
);
1713 new_block
->host
= host
;
1715 new_block
->flags
|= RAM_PREALLOC
;
1718 new_block
->flags
|= RAM_RESIZEABLE
;
1720 addr
= ram_block_add(new_block
, &local_err
);
1723 error_propagate(errp
, local_err
);
1729 ram_addr_t
qemu_ram_alloc_from_ptr(ram_addr_t size
, void *host
,
1730 MemoryRegion
*mr
, Error
**errp
)
1732 return qemu_ram_alloc_internal(size
, size
, NULL
, host
, false, mr
, errp
);
1735 ram_addr_t
qemu_ram_alloc(ram_addr_t size
, MemoryRegion
*mr
, Error
**errp
)
1737 return qemu_ram_alloc_internal(size
, size
, NULL
, NULL
, false, mr
, errp
);
1740 ram_addr_t
qemu_ram_alloc_resizeable(ram_addr_t size
, ram_addr_t maxsz
,
1741 void (*resized
)(const char*,
1744 MemoryRegion
*mr
, Error
**errp
)
1746 return qemu_ram_alloc_internal(size
, maxsz
, resized
, NULL
, true, mr
, errp
);
1749 static void reclaim_ramblock(RAMBlock
*block
)
1751 if (block
->flags
& RAM_PREALLOC
) {
1753 } else if (xen_enabled()) {
1754 xen_invalidate_map_cache_entry(block
->host
);
1756 } else if (block
->fd
>= 0) {
1757 qemu_ram_munmap(block
->host
, block
->max_length
);
1761 qemu_anon_ram_free(block
->host
, block
->max_length
);
1766 void qemu_ram_free(ram_addr_t addr
)
1770 qemu_mutex_lock_ramlist();
1771 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1772 if (addr
== block
->offset
) {
1773 QLIST_REMOVE_RCU(block
, next
);
1774 ram_list
.mru_block
= NULL
;
1775 /* Write list before version */
1778 call_rcu(block
, reclaim_ramblock
, rcu
);
1782 qemu_mutex_unlock_ramlist();
1786 void qemu_ram_remap(ram_addr_t addr
, ram_addr_t length
)
1793 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1794 offset
= addr
- block
->offset
;
1795 if (offset
< block
->max_length
) {
1796 vaddr
= ramblock_ptr(block
, offset
);
1797 if (block
->flags
& RAM_PREALLOC
) {
1799 } else if (xen_enabled()) {
1803 if (block
->fd
>= 0) {
1804 flags
|= (block
->flags
& RAM_SHARED
?
1805 MAP_SHARED
: MAP_PRIVATE
);
1806 area
= mmap(vaddr
, length
, PROT_READ
| PROT_WRITE
,
1807 flags
, block
->fd
, offset
);
1810 * Remap needs to match alloc. Accelerators that
1811 * set phys_mem_alloc never remap. If they did,
1812 * we'd need a remap hook here.
1814 assert(phys_mem_alloc
== qemu_anon_ram_alloc
);
1816 flags
|= MAP_PRIVATE
| MAP_ANONYMOUS
;
1817 area
= mmap(vaddr
, length
, PROT_READ
| PROT_WRITE
,
1820 if (area
!= vaddr
) {
1821 fprintf(stderr
, "Could not remap addr: "
1822 RAM_ADDR_FMT
"@" RAM_ADDR_FMT
"\n",
1826 memory_try_enable_merging(vaddr
, length
);
1827 qemu_ram_setup_dump(vaddr
, length
);
1832 #endif /* !_WIN32 */
1834 int qemu_get_ram_fd(ram_addr_t addr
)
1840 block
= qemu_get_ram_block(addr
);
1846 void qemu_set_ram_fd(ram_addr_t addr
, int fd
)
1851 block
= qemu_get_ram_block(addr
);
1856 void *qemu_get_ram_block_host_ptr(ram_addr_t addr
)
1862 block
= qemu_get_ram_block(addr
);
1863 ptr
= ramblock_ptr(block
, 0);
1868 /* Return a host pointer to ram allocated with qemu_ram_alloc.
1869 * This should not be used for general purpose DMA. Use address_space_map
1870 * or address_space_rw instead. For local memory (e.g. video ram) that the
1871 * device owns, use memory_region_get_ram_ptr.
1873 * Called within RCU critical section.
1875 void *qemu_get_ram_ptr(ram_addr_t addr
)
1877 RAMBlock
*block
= qemu_get_ram_block(addr
);
1879 if (xen_enabled() && block
->host
== NULL
) {
1880 /* We need to check if the requested address is in the RAM
1881 * because we don't want to map the entire memory in QEMU.
1882 * In that case just map until the end of the page.
1884 if (block
->offset
== 0) {
1885 return xen_map_cache(addr
, 0, 0);
1888 block
->host
= xen_map_cache(block
->offset
, block
->max_length
, 1);
1890 return ramblock_ptr(block
, addr
- block
->offset
);
1893 /* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
1894 * but takes a size argument.
1896 * Called within RCU critical section.
1898 static void *qemu_ram_ptr_length(ram_addr_t addr
, hwaddr
*size
)
1901 ram_addr_t offset_inside_block
;
1906 block
= qemu_get_ram_block(addr
);
1907 offset_inside_block
= addr
- block
->offset
;
1908 *size
= MIN(*size
, block
->max_length
- offset_inside_block
);
1910 if (xen_enabled() && block
->host
== NULL
) {
1911 /* We need to check if the requested address is in the RAM
1912 * because we don't want to map the entire memory in QEMU.
1913 * In that case just map the requested area.
1915 if (block
->offset
== 0) {
1916 return xen_map_cache(addr
, *size
, 1);
1919 block
->host
= xen_map_cache(block
->offset
, block
->max_length
, 1);
1922 return ramblock_ptr(block
, offset_inside_block
);
1926 * Translates a host ptr back to a RAMBlock, a ram_addr and an offset
1929 * ptr: Host pointer to look up
1930 * round_offset: If true round the result offset down to a page boundary
1931 * *ram_addr: set to result ram_addr
1932 * *offset: set to result offset within the RAMBlock
1934 * Returns: RAMBlock (or NULL if not found)
1936 * By the time this function returns, the returned pointer is not protected
1937 * by RCU anymore. If the caller is not within an RCU critical section and
1938 * does not hold the iothread lock, it must have other means of protecting the
1939 * pointer, such as a reference to the region that includes the incoming
1942 RAMBlock
*qemu_ram_block_from_host(void *ptr
, bool round_offset
,
1943 ram_addr_t
*ram_addr
,
1947 uint8_t *host
= ptr
;
1949 if (xen_enabled()) {
1951 *ram_addr
= xen_ram_addr_from_mapcache(ptr
);
1952 block
= qemu_get_ram_block(*ram_addr
);
1954 *offset
= (host
- block
->host
);
1961 block
= atomic_rcu_read(&ram_list
.mru_block
);
1962 if (block
&& block
->host
&& host
- block
->host
< block
->max_length
) {
1966 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1967 /* This case append when the block is not mapped. */
1968 if (block
->host
== NULL
) {
1971 if (host
- block
->host
< block
->max_length
) {
1980 *offset
= (host
- block
->host
);
1982 *offset
&= TARGET_PAGE_MASK
;
1984 *ram_addr
= block
->offset
+ *offset
;
1990 * Finds the named RAMBlock
1992 * name: The name of RAMBlock to find
1994 * Returns: RAMBlock (or NULL if not found)
1996 RAMBlock
*qemu_ram_block_by_name(const char *name
)
2000 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
2001 if (!strcmp(name
, block
->idstr
)) {
2009 /* Some of the softmmu routines need to translate from a host pointer
2010 (typically a TLB entry) back to a ram offset. */
2011 MemoryRegion
*qemu_ram_addr_from_host(void *ptr
, ram_addr_t
*ram_addr
)
2014 ram_addr_t offset
; /* Not used */
2016 block
= qemu_ram_block_from_host(ptr
, false, ram_addr
, &offset
);
2025 /* Called within RCU critical section. */
2026 static void notdirty_mem_write(void *opaque
, hwaddr ram_addr
,
2027 uint64_t val
, unsigned size
)
2029 if (!cpu_physical_memory_get_dirty_flag(ram_addr
, DIRTY_MEMORY_CODE
)) {
2030 tb_invalidate_phys_page_fast(ram_addr
, size
);
2034 stb_p(qemu_get_ram_ptr(ram_addr
), val
);
2037 stw_p(qemu_get_ram_ptr(ram_addr
), val
);
2040 stl_p(qemu_get_ram_ptr(ram_addr
), val
);
2045 /* Set both VGA and migration bits for simplicity and to remove
2046 * the notdirty callback faster.
2048 cpu_physical_memory_set_dirty_range(ram_addr
, size
,
2049 DIRTY_CLIENTS_NOCODE
);
2050 /* we remove the notdirty callback only if the code has been
2052 if (!cpu_physical_memory_is_clean(ram_addr
)) {
2053 tlb_set_dirty(current_cpu
, current_cpu
->mem_io_vaddr
);
2057 static bool notdirty_mem_accepts(void *opaque
, hwaddr addr
,
2058 unsigned size
, bool is_write
)
2063 static const MemoryRegionOps notdirty_mem_ops
= {
2064 .write
= notdirty_mem_write
,
2065 .valid
.accepts
= notdirty_mem_accepts
,
2066 .endianness
= DEVICE_NATIVE_ENDIAN
,
2069 /* Generate a debug exception if a watchpoint has been hit. */
2070 static void check_watchpoint(int offset
, int len
, MemTxAttrs attrs
, int flags
)
2072 CPUState
*cpu
= current_cpu
;
2073 CPUClass
*cc
= CPU_GET_CLASS(cpu
);
2074 CPUArchState
*env
= cpu
->env_ptr
;
2075 target_ulong pc
, cs_base
;
2080 if (cpu
->watchpoint_hit
) {
2081 /* We re-entered the check after replacing the TB. Now raise
2082 * the debug interrupt so that is will trigger after the
2083 * current instruction. */
2084 cpu_interrupt(cpu
, CPU_INTERRUPT_DEBUG
);
2087 vaddr
= (cpu
->mem_io_vaddr
& TARGET_PAGE_MASK
) + offset
;
2088 QTAILQ_FOREACH(wp
, &cpu
->watchpoints
, entry
) {
2089 if (cpu_watchpoint_address_matches(wp
, vaddr
, len
)
2090 && (wp
->flags
& flags
)) {
2091 if (flags
== BP_MEM_READ
) {
2092 wp
->flags
|= BP_WATCHPOINT_HIT_READ
;
2094 wp
->flags
|= BP_WATCHPOINT_HIT_WRITE
;
2096 wp
->hitaddr
= vaddr
;
2097 wp
->hitattrs
= attrs
;
2098 if (!cpu
->watchpoint_hit
) {
2099 if (wp
->flags
& BP_CPU
&&
2100 !cc
->debug_check_watchpoint(cpu
, wp
)) {
2101 wp
->flags
&= ~BP_WATCHPOINT_HIT
;
2104 cpu
->watchpoint_hit
= wp
;
2105 tb_check_watchpoint(cpu
);
2106 if (wp
->flags
& BP_STOP_BEFORE_ACCESS
) {
2107 cpu
->exception_index
= EXCP_DEBUG
;
2110 cpu_get_tb_cpu_state(env
, &pc
, &cs_base
, &cpu_flags
);
2111 tb_gen_code(cpu
, pc
, cs_base
, cpu_flags
, 1);
2112 cpu_resume_from_signal(cpu
, NULL
);
2116 wp
->flags
&= ~BP_WATCHPOINT_HIT
;
2121 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2122 so these check for a hit then pass through to the normal out-of-line
2124 static MemTxResult
watch_mem_read(void *opaque
, hwaddr addr
, uint64_t *pdata
,
2125 unsigned size
, MemTxAttrs attrs
)
2129 int asidx
= cpu_asidx_from_attrs(current_cpu
, attrs
);
2130 AddressSpace
*as
= current_cpu
->cpu_ases
[asidx
].as
;
2132 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, size
, attrs
, BP_MEM_READ
);
2135 data
= address_space_ldub(as
, addr
, attrs
, &res
);
2138 data
= address_space_lduw(as
, addr
, attrs
, &res
);
2141 data
= address_space_ldl(as
, addr
, attrs
, &res
);
2149 static MemTxResult
watch_mem_write(void *opaque
, hwaddr addr
,
2150 uint64_t val
, unsigned size
,
2154 int asidx
= cpu_asidx_from_attrs(current_cpu
, attrs
);
2155 AddressSpace
*as
= current_cpu
->cpu_ases
[asidx
].as
;
2157 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, size
, attrs
, BP_MEM_WRITE
);
2160 address_space_stb(as
, addr
, val
, attrs
, &res
);
2163 address_space_stw(as
, addr
, val
, attrs
, &res
);
2166 address_space_stl(as
, addr
, val
, attrs
, &res
);
2173 static const MemoryRegionOps watch_mem_ops
= {
2174 .read_with_attrs
= watch_mem_read
,
2175 .write_with_attrs
= watch_mem_write
,
2176 .endianness
= DEVICE_NATIVE_ENDIAN
,
2179 static MemTxResult
subpage_read(void *opaque
, hwaddr addr
, uint64_t *data
,
2180 unsigned len
, MemTxAttrs attrs
)
2182 subpage_t
*subpage
= opaque
;
2186 #if defined(DEBUG_SUBPAGE)
2187 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
"\n", __func__
,
2188 subpage
, len
, addr
);
2190 res
= address_space_read(subpage
->as
, addr
+ subpage
->base
,
2197 *data
= ldub_p(buf
);
2200 *data
= lduw_p(buf
);
2213 static MemTxResult
subpage_write(void *opaque
, hwaddr addr
,
2214 uint64_t value
, unsigned len
, MemTxAttrs attrs
)
2216 subpage_t
*subpage
= opaque
;
2219 #if defined(DEBUG_SUBPAGE)
2220 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
2221 " value %"PRIx64
"\n",
2222 __func__
, subpage
, len
, addr
, value
);
2240 return address_space_write(subpage
->as
, addr
+ subpage
->base
,
2244 static bool subpage_accepts(void *opaque
, hwaddr addr
,
2245 unsigned len
, bool is_write
)
2247 subpage_t
*subpage
= opaque
;
2248 #if defined(DEBUG_SUBPAGE)
2249 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx
"\n",
2250 __func__
, subpage
, is_write
? 'w' : 'r', len
, addr
);
2253 return address_space_access_valid(subpage
->as
, addr
+ subpage
->base
,
2257 static const MemoryRegionOps subpage_ops
= {
2258 .read_with_attrs
= subpage_read
,
2259 .write_with_attrs
= subpage_write
,
2260 .impl
.min_access_size
= 1,
2261 .impl
.max_access_size
= 8,
2262 .valid
.min_access_size
= 1,
2263 .valid
.max_access_size
= 8,
2264 .valid
.accepts
= subpage_accepts
,
2265 .endianness
= DEVICE_NATIVE_ENDIAN
,
2268 static int subpage_register (subpage_t
*mmio
, uint32_t start
, uint32_t end
,
2273 if (start
>= TARGET_PAGE_SIZE
|| end
>= TARGET_PAGE_SIZE
)
2275 idx
= SUBPAGE_IDX(start
);
2276 eidx
= SUBPAGE_IDX(end
);
2277 #if defined(DEBUG_SUBPAGE)
2278 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
2279 __func__
, mmio
, start
, end
, idx
, eidx
, section
);
2281 for (; idx
<= eidx
; idx
++) {
2282 mmio
->sub_section
[idx
] = section
;
2288 static subpage_t
*subpage_init(AddressSpace
*as
, hwaddr base
)
2292 mmio
= g_malloc0(sizeof(subpage_t
));
2296 memory_region_init_io(&mmio
->iomem
, NULL
, &subpage_ops
, mmio
,
2297 NULL
, TARGET_PAGE_SIZE
);
2298 mmio
->iomem
.subpage
= true;
2299 #if defined(DEBUG_SUBPAGE)
2300 printf("%s: %p base " TARGET_FMT_plx
" len %08x\n", __func__
,
2301 mmio
, base
, TARGET_PAGE_SIZE
);
2303 subpage_register(mmio
, 0, TARGET_PAGE_SIZE
-1, PHYS_SECTION_UNASSIGNED
);
2308 static uint16_t dummy_section(PhysPageMap
*map
, AddressSpace
*as
,
2312 MemoryRegionSection section
= {
2313 .address_space
= as
,
2315 .offset_within_address_space
= 0,
2316 .offset_within_region
= 0,
2317 .size
= int128_2_64(),
2320 return phys_section_add(map
, §ion
);
2323 MemoryRegion
*iotlb_to_region(CPUState
*cpu
, hwaddr index
, MemTxAttrs attrs
)
2325 int asidx
= cpu_asidx_from_attrs(cpu
, attrs
);
2326 CPUAddressSpace
*cpuas
= &cpu
->cpu_ases
[asidx
];
2327 AddressSpaceDispatch
*d
= atomic_rcu_read(&cpuas
->memory_dispatch
);
2328 MemoryRegionSection
*sections
= d
->map
.sections
;
2330 return sections
[index
& ~TARGET_PAGE_MASK
].mr
;
2333 static void io_mem_init(void)
2335 memory_region_init_io(&io_mem_rom
, NULL
, &unassigned_mem_ops
, NULL
, NULL
, UINT64_MAX
);
2336 memory_region_init_io(&io_mem_unassigned
, NULL
, &unassigned_mem_ops
, NULL
,
2338 memory_region_init_io(&io_mem_notdirty
, NULL
, ¬dirty_mem_ops
, NULL
,
2340 memory_region_init_io(&io_mem_watch
, NULL
, &watch_mem_ops
, NULL
,
2344 static void mem_begin(MemoryListener
*listener
)
2346 AddressSpace
*as
= container_of(listener
, AddressSpace
, dispatch_listener
);
2347 AddressSpaceDispatch
*d
= g_new0(AddressSpaceDispatch
, 1);
2350 n
= dummy_section(&d
->map
, as
, &io_mem_unassigned
);
2351 assert(n
== PHYS_SECTION_UNASSIGNED
);
2352 n
= dummy_section(&d
->map
, as
, &io_mem_notdirty
);
2353 assert(n
== PHYS_SECTION_NOTDIRTY
);
2354 n
= dummy_section(&d
->map
, as
, &io_mem_rom
);
2355 assert(n
== PHYS_SECTION_ROM
);
2356 n
= dummy_section(&d
->map
, as
, &io_mem_watch
);
2357 assert(n
== PHYS_SECTION_WATCH
);
2359 d
->phys_map
= (PhysPageEntry
) { .ptr
= PHYS_MAP_NODE_NIL
, .skip
= 1 };
2361 as
->next_dispatch
= d
;
2364 static void address_space_dispatch_free(AddressSpaceDispatch
*d
)
2366 phys_sections_free(&d
->map
);
2370 static void mem_commit(MemoryListener
*listener
)
2372 AddressSpace
*as
= container_of(listener
, AddressSpace
, dispatch_listener
);
2373 AddressSpaceDispatch
*cur
= as
->dispatch
;
2374 AddressSpaceDispatch
*next
= as
->next_dispatch
;
2376 phys_page_compact_all(next
, next
->map
.nodes_nb
);
2378 atomic_rcu_set(&as
->dispatch
, next
);
2380 call_rcu(cur
, address_space_dispatch_free
, rcu
);
static void tcg_commit(MemoryListener *listener)
{
    CPUAddressSpace *cpuas;
    AddressSpaceDispatch *d;

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener);
    cpu_reloading_memory_map();
    /* The CPU and TLB are protected by the iothread lock.
     * We reload the dispatch pointer now because cpu_reloading_memory_map()
     * may have split the RCU critical section.
     */
    d = atomic_rcu_read(&cpuas->as->dispatch);
    cpuas->memory_dispatch = d;
    tlb_flush(cpuas->cpu, 1);
}
2402 void address_space_init_dispatch(AddressSpace
*as
)
2404 as
->dispatch
= NULL
;
2405 as
->dispatch_listener
= (MemoryListener
) {
2407 .commit
= mem_commit
,
2408 .region_add
= mem_add
,
2409 .region_nop
= mem_add
,
2412 memory_listener_register(&as
->dispatch_listener
, as
);
2415 void address_space_unregister(AddressSpace
*as
)
2417 memory_listener_unregister(&as
->dispatch_listener
);
2420 void address_space_destroy_dispatch(AddressSpace
*as
)
2422 AddressSpaceDispatch
*d
= as
->dispatch
;
2424 atomic_rcu_set(&as
->dispatch
, NULL
);
2426 call_rcu(d
, address_space_dispatch_free
, rcu
);
static void memory_map_init(void)
{
    system_memory = g_malloc(sizeof(*system_memory));

    memory_region_init(system_memory, NULL, "system", UINT64_MAX);
    address_space_init(&address_space_memory, system_memory, "memory");

    system_io = g_malloc(sizeof(*system_io));
    memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
                          65536);
    address_space_init(&address_space_io, system_io, "I/O");
}

MemoryRegion *get_system_memory(void)
{
    return system_memory;
}

MemoryRegion *get_system_io(void)
{
    return system_io;
}
2453 #endif /* !defined(CONFIG_USER_ONLY) */
2455 /* physical memory access (slow version, mainly for debug) */
2456 #if defined(CONFIG_USER_ONLY)
2457 int cpu_memory_rw_debug(CPUState
*cpu
, target_ulong addr
,
2458 uint8_t *buf
, int len
, int is_write
)
2465 page
= addr
& TARGET_PAGE_MASK
;
2466 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
2469 flags
= page_get_flags(page
);
2470 if (!(flags
& PAGE_VALID
))
2473 if (!(flags
& PAGE_WRITE
))
2475 /* XXX: this code should not depend on lock_user */
2476 if (!(p
= lock_user(VERIFY_WRITE
, addr
, l
, 0)))
2479 unlock_user(p
, addr
, l
);
2481 if (!(flags
& PAGE_READ
))
2483 /* XXX: this code should not depend on lock_user */
2484 if (!(p
= lock_user(VERIFY_READ
, addr
, l
, 1)))
2487 unlock_user(p
, addr
, 0);
static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
                                     hwaddr length)
{
    uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
    /* No early return if dirty_log_mask is or becomes 0, because
     * cpu_physical_memory_set_dirty_range will still call
     * xen_modified_memory.
     */
    if (dirty_log_mask) {
        dirty_log_mask =
            cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
    }
    if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
        tb_invalidate_phys_range(addr, addr + length);
        dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
    }
    cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
}
static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
{
    unsigned access_size_max = mr->ops->valid.max_access_size;

    /* Regions are assumed to support 1-4 byte accesses unless
       otherwise specified.  */
    if (access_size_max == 0) {
        access_size_max = 4;
    }

    /* Bound the maximum access by the alignment of the address.  */
    if (!mr->ops->impl.unaligned) {
        unsigned align_size_max = addr & -addr;
        if (align_size_max != 0 && align_size_max < access_size_max) {
            access_size_max = align_size_max;
        }
    }

    /* Don't attempt accesses larger than the maximum.  */
    if (l > access_size_max) {
        l = access_size_max;
    }
    l = pow2floor(l);

    return l;
}
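
/* Worked example (illustrative, not from the original file): for a region
 * whose ops declare valid.max_access_size == 4 and impl.unaligned == false,
 * a 6-byte access at address 0x1002 is first bounded by the address
 * alignment (0x1002 & -0x1002 == 2) and then clamped to l == 2, so the
 * caller issues a 2-byte access and loops for the remainder.
 */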
static bool prepare_mmio_access(MemoryRegion *mr)
{
    bool unlocked = !qemu_mutex_iothread_locked();
    bool release_lock = false;

    if (unlocked && mr->global_locking) {
        qemu_mutex_lock_iothread();
        unlocked = false;
        release_lock = true;
    }
    if (mr->flush_coalesced_mmio) {
        if (unlocked) {
            qemu_mutex_lock_iothread();
        }
        qemu_flush_coalesced_mmio_buffer();
        if (unlocked) {
            qemu_mutex_unlock_iothread();
        }
    }

    return release_lock;
}
/* Called within RCU critical section.  */
static MemTxResult address_space_write_continue(AddressSpace *as, hwaddr addr,
                                                MemTxAttrs attrs,
                                                const uint8_t *buf,
                                                int len, hwaddr addr1,
                                                hwaddr l, MemoryRegion *mr)
{
    uint8_t *ptr;
    uint64_t val;
    MemTxResult result = MEMTX_OK;
    bool release_lock = false;

    for (;;) {
        if (!memory_access_is_direct(mr, true)) {
            release_lock |= prepare_mmio_access(mr);
            l = memory_access_size(mr, l, addr1);
            /* XXX: could force current_cpu to NULL to avoid
               potential bugs */
            switch (l) {
            case 8:
                /* 64 bit write access */
                val = ldq_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 8,
                                                       attrs);
                break;
            case 4:
                /* 32 bit write access */
                val = ldl_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 4,
                                                       attrs);
                break;
            case 2:
                /* 16 bit write access */
                val = lduw_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 2,
                                                       attrs);
                break;
            case 1:
                /* 8 bit write access */
                val = ldub_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 1,
                                                       attrs);
                break;
            default:
                abort();
            }
        } else {
            addr1 += memory_region_get_ram_addr(mr);
            /* RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
            invalidate_and_set_dirty(mr, addr1, l);
        }

        if (release_lock) {
            qemu_mutex_unlock_iothread();
            release_lock = false;
        }

        len -= l;
        buf += l;
        addr += l;

        if (!len) {
            break;
        }

        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);
    }

    return result;
}

MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
                                const uint8_t *buf, int len)
{
    hwaddr l;
    hwaddr addr1;
    MemoryRegion *mr;
    MemTxResult result = MEMTX_OK;

    if (len > 0) {
        rcu_read_lock();
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);
        result = address_space_write_continue(as, addr, attrs, buf, len,
                                              addr1, l, mr);
        rcu_read_unlock();
    }

    return result;
}
/* Called within RCU critical section.  */
MemTxResult address_space_read_continue(AddressSpace *as, hwaddr addr,
                                        MemTxAttrs attrs, uint8_t *buf,
                                        int len, hwaddr addr1, hwaddr l,
                                        MemoryRegion *mr)
{
    uint8_t *ptr;
    uint64_t val;
    MemTxResult result = MEMTX_OK;
    bool release_lock = false;

    for (;;) {
        if (!memory_access_is_direct(mr, false)) {
            /* I/O case */
            release_lock |= prepare_mmio_access(mr);
            l = memory_access_size(mr, l, addr1);
            switch (l) {
            case 8:
                /* 64 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 8,
                                                      attrs);
                stq_p(buf, val);
                break;
            case 4:
                /* 32 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 4,
                                                      attrs);
                stl_p(buf, val);
                break;
            case 2:
                /* 16 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 2,
                                                      attrs);
                stw_p(buf, val);
                break;
            case 1:
                /* 8 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 1,
                                                      attrs);
                stb_p(buf, val);
                break;
            default:
                abort();
            }
        } else {
            /* RAM case */
            ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
            memcpy(buf, ptr, l);
        }

        if (release_lock) {
            qemu_mutex_unlock_iothread();
            release_lock = false;
        }

        len -= l;
        buf += l;
        addr += l;

        if (!len) {
            break;
        }

        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, false);
    }

    return result;
}

MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
                                    MemTxAttrs attrs, uint8_t *buf, int len)
{
    hwaddr l;
    hwaddr addr1;
    MemoryRegion *mr;
    MemTxResult result = MEMTX_OK;

    if (len > 0) {
        rcu_read_lock();
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, false);
        result = address_space_read_continue(as, addr, attrs, buf, len,
                                             addr1, l, mr);
        rcu_read_unlock();
    }

    return result;
}
MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
                             uint8_t *buf, int len, bool is_write)
{
    if (is_write) {
        return address_space_write(as, addr, attrs, (uint8_t *)buf, len);
    } else {
        return address_space_read(as, addr, attrs, (uint8_t *)buf, len);
    }
}

void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
                            int len, int is_write)
{
    address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
                     buf, len, is_write);
}
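
/* Illustrative sketch, not part of the original file: a DMA-style copy that
 * goes through the memory API above.  The helper name, the chunk size and
 * the use of MEMTXATTRS_UNSPECIFIED are assumptions; error handling is
 * reduced to accumulating the MemTxResult.
 */
static inline MemTxResult example_dma_copy(AddressSpace *as, hwaddr src,
                                           hwaddr dst, int len)
{
    uint8_t buf[64];
    MemTxResult res = MEMTX_OK;

    while (len > 0) {
        int l = MIN(len, (int)sizeof(buf));

        res |= address_space_read(as, src, MEMTXATTRS_UNSPECIFIED, buf, l);
        res |= address_space_write(as, dst, MEMTXATTRS_UNSPECIFIED, buf, l);
        src += l;
        dst += l;
        len -= l;
    }
    return res;
}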
enum write_rom_type {
    WRITE_DATA,
    FLUSH_CACHE,
};

static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
    hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
{
    hwaddr l;
    uint8_t *ptr;
    hwaddr addr1;
    MemoryRegion *mr;

    rcu_read_lock();
    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);

        if (!(memory_region_is_ram(mr) ||
              memory_region_is_romd(mr))) {
            l = memory_access_size(mr, l, addr1);
        } else {
            addr1 += memory_region_get_ram_addr(mr);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            switch (type) {
            case WRITE_DATA:
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(mr, addr1, l);
                break;
            case FLUSH_CACHE:
                flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
                break;
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
    rcu_read_unlock();
}

/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
                                   const uint8_t *buf, int len)
{
    cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
}

void cpu_flush_icache_range(hwaddr start, int len)
{
    /*
     * This function should do the same thing as an icache flush that was
     * triggered from within the guest. For TCG we are always cache coherent,
     * so there is no need to flush anything. For KVM / Xen we need to flush
     * the host's instruction cache at least.
     */
    if (tcg_enabled()) {
        return;
    }

    cpu_physical_memory_write_rom_internal(&address_space_memory,
                                           start, NULL, len, FLUSH_CACHE);
}
typedef struct {
    MemoryRegion *mr;
    void *buffer;
    hwaddr addr;
    hwaddr len;
    bool in_use;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    QEMUBH *bh;
    QLIST_ENTRY(MapClient) link;
} MapClient;

QemuMutex map_client_list_lock;
static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

static void cpu_unregister_map_client_do(MapClient *client)
{
    QLIST_REMOVE(client, link);
    g_free(client);
}

static void cpu_notify_map_clients_locked(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        qemu_bh_schedule(client->bh);
        cpu_unregister_map_client_do(client);
    }
}

void cpu_register_map_client(QEMUBH *bh)
{
    MapClient *client = g_malloc(sizeof(*client));

    qemu_mutex_lock(&map_client_list_lock);
    client->bh = bh;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    if (!atomic_read(&bounce.in_use)) {
        cpu_notify_map_clients_locked();
    }
    qemu_mutex_unlock(&map_client_list_lock);
}

void cpu_exec_init_all(void)
{
    qemu_mutex_init(&ram_list.mutex);
    io_mem_init();
    memory_map_init();
    qemu_mutex_init(&map_client_list_lock);
}

void cpu_unregister_map_client(QEMUBH *bh)
{
    MapClient *client;

    qemu_mutex_lock(&map_client_list_lock);
    QLIST_FOREACH(client, &map_client_list, link) {
        if (client->bh == bh) {
            cpu_unregister_map_client_do(client);
            break;
        }
    }
    qemu_mutex_unlock(&map_client_list_lock);
}

static void cpu_notify_map_clients(void)
{
    qemu_mutex_lock(&map_client_list_lock);
    cpu_notify_map_clients_locked();
    qemu_mutex_unlock(&map_client_list_lock);
}
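
/* Illustrative sketch, not part of the original file: a caller whose
 * address_space_map() attempt failed because the bounce buffer was busy can
 * register a bottom half to learn when a retry is worthwhile.  The callback
 * and helper names are hypothetical.
 */
static void example_retry_map(void *opaque)
{
    /* re-issue the failed address_space_map() attempt here */
}

static inline void example_wait_for_map(void)
{
    QEMUBH *bh = qemu_bh_new(example_retry_map, NULL);

    cpu_register_map_client(bh);
}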
bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
{
    MemoryRegion *mr;
    hwaddr l, xlat;

    rcu_read_lock();
    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (!memory_access_is_direct(mr, is_write)) {
            l = memory_access_size(mr, l, addr);
            if (!memory_region_access_valid(mr, xlat, l, is_write)) {
                rcu_read_unlock();
                return false;
            }
        }

        len -= l;
        addr += l;
    }
    rcu_read_unlock();
    return true;
}
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *address_space_map(AddressSpace *as,
                        hwaddr addr,
                        hwaddr *plen,
                        bool is_write)
{
    hwaddr len = *plen;
    hwaddr done = 0;
    hwaddr l, xlat, base;
    MemoryRegion *mr, *this_mr;
    ram_addr_t raddr;
    void *ptr;

    if (len == 0) {
        return NULL;
    }

    l = len;
    rcu_read_lock();
    mr = address_space_translate(as, addr, &xlat, &l, is_write);

    if (!memory_access_is_direct(mr, is_write)) {
        if (atomic_xchg(&bounce.in_use, true)) {
            rcu_read_unlock();
            return NULL;
        }
        /* Avoid unbounded allocations */
        l = MIN(l, TARGET_PAGE_SIZE);
        bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
        bounce.addr = addr;
        bounce.len = l;

        memory_region_ref(mr);
        bounce.mr = mr;
        if (!is_write) {
            address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
                               bounce.buffer, l);
        }

        rcu_read_unlock();
        *plen = l;
        return bounce.buffer;
    }

    base = xlat;
    raddr = memory_region_get_ram_addr(mr);

    for (;;) {
        len -= l;
        addr += l;
        done += l;
        if (len == 0) {
            break;
        }

        l = len;
        this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (this_mr != mr || xlat != base + done) {
            break;
        }
    }

    memory_region_ref(mr);
    *plen = done;
    ptr = qemu_ram_ptr_length(raddr + base, plen);
    rcu_read_unlock();

    return ptr;
}
/* Unmaps a memory region previously mapped by address_space_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         int is_write, hwaddr access_len)
{
    if (buffer != bounce.buffer) {
        MemoryRegion *mr;
        ram_addr_t addr1;

        mr = qemu_ram_addr_from_host(buffer, &addr1);
        assert(mr != NULL);
        if (is_write) {
            invalidate_and_set_dirty(mr, addr1, access_len);
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        memory_region_unref(mr);
        return;
    }
    if (is_write) {
        address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
                            bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    memory_region_unref(bounce.mr);
    atomic_mb_set(&bounce.in_use, false);
    cpu_notify_map_clients();
}
void *cpu_physical_memory_map(hwaddr addr,
                              hwaddr *plen,
                              int is_write)
{
    return address_space_map(&address_space_memory, addr, plen, is_write);
}

void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                               int is_write, hwaddr access_len)
{
    return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
}
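
/* Illustrative sketch, not part of the original file: the intended
 * map/access/unmap pattern for address_space_map().  The helper name and the
 * fill pattern are hypothetical; the mapping may come back shorter than
 * requested or fail entirely, in which case the caller should fall back to
 * address_space_rw() or retry via cpu_register_map_client().
 */
static inline bool example_fill_guest_buffer(AddressSpace *as, hwaddr addr,
                                             hwaddr len, uint8_t pattern)
{
    hwaddr mapped = len;
    void *p = address_space_map(as, addr, &mapped, true);

    if (!p) {
        return false;
    }
    if (mapped < len) {
        /* nothing written yet, give the partial mapping back */
        address_space_unmap(as, p, mapped, true, 0);
        return false;
    }
    memset(p, pattern, mapped);
    address_space_unmap(as, p, mapped, true, mapped);
    return true;
}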
/* warning: addr must be aligned */
static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
                                                  MemTxAttrs attrs,
                                                  MemTxResult *result,
                                                  enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, false);
    if (l < 4 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}

uint32_t address_space_ldl(AddressSpace *as, hwaddr addr,
                           MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_NATIVE_ENDIAN);
}

uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_LITTLE_ENDIAN);
}

uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_BIG_ENDIAN);
}

uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}
/* warning: addr must be aligned */
static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
                                                  MemTxAttrs attrs,
                                                  MemTxResult *result,
                                                  enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 8;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 false);
    if (l < 8 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap64(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap64(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}

uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
                           MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_NATIVE_ENDIAN);
}

uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_LITTLE_ENDIAN);
}

uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_BIG_ENDIAN);
}

uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}
uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
                            MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t val;
    MemTxResult r;

    r = address_space_rw(as, addr, attrs, &val, 1, 0);
    if (result) {
        *result = r;
    }
    return val;
}

uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* warning: addr must be aligned */
static inline uint32_t address_space_lduw_internal(AddressSpace *as,
                                                   hwaddr addr,
                                                   MemTxAttrs attrs,
                                                   MemTxResult *result,
                                                   enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 false);
    if (l < 2 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}

uint32_t address_space_lduw(AddressSpace *as, hwaddr addr,
                            MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_NATIVE_ENDIAN);
}

uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
                               MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_LITTLE_ENDIAN);
}

uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
                               MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_BIG_ENDIAN);
}

uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}
/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
                                MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    uint8_t dirty_log_mask;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

        r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
    } else {
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        dirty_log_mask = memory_region_get_dirty_log_mask(mr);
        dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
        cpu_physical_memory_set_dirty_range(addr1, 4, dirty_log_mask);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}

void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_notdirty(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
/* warning: addr must be aligned */
static inline void address_space_stl_internal(AddressSpace *as,
                                              hwaddr addr, uint32_t val,
                                              MemTxAttrs attrs,
                                              MemTxResult *result,
                                              enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(mr, addr1, 4);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}

void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stl_internal(as, addr, val, attrs, result,
                               DEVICE_NATIVE_ENDIAN);
}

void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stl_internal(as, addr, val, attrs, result,
                               DEVICE_LITTLE_ENDIAN);
}

void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stl_internal(as, addr, val, attrs, result,
                               DEVICE_BIG_ENDIAN);
}

void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t v = val;
    MemTxResult r;

    r = address_space_rw(as, addr, attrs, &v, 1, 1);
    if (result) {
        *result = r;
    }
}

void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
/* warning: addr must be aligned */
static inline void address_space_stw_internal(AddressSpace *as,
                                              hwaddr addr, uint32_t val,
                                              MemTxAttrs attrs,
                                              MemTxResult *result,
                                              enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, true);
    if (l < 2 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(mr, addr1, 2);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}

void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_NATIVE_ENDIAN);
}

void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_LITTLE_ENDIAN);
}

void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_BIG_ENDIAN);
}

void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;

    val = tswap64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;

    val = cpu_to_le64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;

    val = cpu_to_be64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
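
/* Illustrative sketch, not part of the original file: device emulation often
 * reads a small guest-physical structure with the fixed-width, fixed-endian
 * helpers above.  The descriptor layout (a 64-bit buffer address followed by
 * a 32-bit length, both little-endian) is hypothetical.
 */
static inline void example_read_desc(AddressSpace *as, hwaddr desc,
                                     uint64_t *buf_addr, uint32_t *buf_len)
{
    *buf_addr = ldq_le_phys(as, desc);
    *buf_len = ldl_le_phys(as, desc + 8);
}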
/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

    while (len > 0) {
        int asidx;
        MemTxAttrs attrs;

        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_attrs_debug(cpu, page, &attrs);
        asidx = cpu_asidx_from_attrs(cpu, attrs);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1) {
            return -1;
        }
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len) {
            l = len;
        }
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write) {
            cpu_physical_memory_write_rom(cpu->cpu_ases[asidx].as,
                                          phys_addr, buf, l);
        } else {
            address_space_rw(cpu->cpu_ases[asidx].as, phys_addr,
                             MEMTXATTRS_UNSPECIFIED,
                             buf, l, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

/*
 * Allows code that needs to deal with migration bitmaps etc to still be built
 * target independent.
 */
size_t qemu_target_page_bits(void)
{
    return TARGET_PAGE_BITS;
}

#endif

/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool target_words_bigendian(void);
bool target_words_bigendian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}

#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegion *mr;
    hwaddr l = 1;
    bool res;

    rcu_read_lock();
    mr = address_space_translate(&address_space_memory,
                                 phys_addr, &phys_addr, &l, false);

    res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
    rcu_read_unlock();
    return res;
}

int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
{
    RAMBlock *block;
    int ret = 0;

    rcu_read_lock();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        ret = func(block->idstr, block->host, block->offset,
                   block->used_length, opaque);