/*
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/cutils.h"
#include "exec/exec-all.h"
#include "hw/qdev-core.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/boards.h"
#include "hw/xen/xen.h"
#endif
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#if defined(CONFIG_USER_ONLY)
#include "qemu.h"
#else /* !CONFIG_USER_ONLY */
#include "exec/memory.h"
#include "exec/ioport.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#include "sysemu/xen-mapcache.h"
#endif
#include "exec/cpu-all.h"
#include "qemu/rcu_queue.h"
#include "qemu/main-loop.h"
#include "translate-all.h"
#include "sysemu/replay.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"

#include "migration/vmstate.h"

#include "qemu/range.h"
#include "qemu/mmap-alloc.h"
//#define DEBUG_SUBPAGE
#if !defined(CONFIG_USER_ONLY)
/* ram_list is read under rcu_read_lock()/rcu_read_unlock().  Writes
 * are protected by the ramlist lock.
 */
RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;
/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC   (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED     (1 << 1)

/* Only a portion of RAM (used_length) is actually used, and migrated.
 * This used_length size can change across reboots.
 */
#define RAM_RESIZEABLE (1 << 2)

#endif
struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
__thread CPUState *current_cpu;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;
#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables.  */
#define ADDR_SPACE_BITS 64

#define P_L2_BITS 9
#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
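/* Worked example (illustrative addition, assuming the common
 * TARGET_PAGE_BITS of 12): P_L2_LEVELS = ((64 - 12 - 1) / 9) + 1
 * = (51 / 9) + 1 = 5 + 1 = 6, i.e. six radix-512 levels cover the
 * 52-bit page-frame-number space, with the top level only partly used.
 */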
typedef PhysPageEntry Node[P_L2_SIZE];
typedef struct PhysPageMap {
    struct rcu_head rcu;

    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    struct rcu_head rcu;

    AddressSpace *as;
    MemoryRegionSection *mru_section;
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
};
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
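/* Illustrative (assuming 4KiB target pages, TARGET_PAGE_MASK == ~0xfff):
 * SUBPAGE_IDX(0x12345) == 0x345, the byte offset within the page, which
 * is what indexes the sub_section[] table below.
 */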
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;
#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3
static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;
/**
 * CPUAddressSpace: all the information a CPU needs about an AddressSpace
 * @cpu: the CPU whose AddressSpace this is
 * @as: the AddressSpace itself
 * @memory_dispatch: its dispatch pointer (cached, RCU protected)
 * @tcg_as_listener: listener for tracking changes to the AddressSpace
 */
struct CPUAddressSpace {
    CPUState *cpu;
    AddressSpace *as;
    struct AddressSpaceDispatch *memory_dispatch;
    MemoryListener tcg_as_listener;
};

#endif
#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    static unsigned alloc_hint = 16;
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, alloc_hint);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
        alloc_hint = map->nodes_nb_alloc;
    }
}
static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
{
    unsigned i;
    uint32_t ret;
    PhysPageEntry e;
    PhysPageEntry *p;

    ret = map->nodes_nb++;
    p = map->nodes[ret];
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);

    e.skip = leaf ? 0 : 1;
    e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
    for (i = 0; i < P_L2_SIZE; ++i) {
        memcpy(&p[i], &e, sizeof(e));
    }
    return ret;
}
static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map, level == 0);
    }
    p = map->nodes[lp->ptr];
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}
static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}
/* Compact a non leaf page entry. Simply detect that the entry has a single child,
 * and update our entry so we can skip it and go directly to the destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes, compacted);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}
static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    DECLARE_BITMAP(compacted, nodes_nb);

    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes, compacted);
    }
}
static inline bool section_covers_addr(const MemoryRegionSection *section,
                                       hwaddr addr)
{
    /* Memory topology clips a memory region to [0, 2^64); size.hi > 0 means
     * the section must cover the entire address space.
     */
    return section->size.hi ||
           range_covers_byte(section->offset_within_address_space,
                             section->size.lo, addr);
}
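/* Illustrative example (not from the original source): a section at
 * offset_within_address_space == 0x1000 with size.lo == 0x2000 covers
 * addresses 0x1000..0x2fff, so section_covers_addr() is true for 0x2fff
 * and false for 0x3000; a section with size.hi != 0 is larger than 2^64
 * and therefore covers any addr.
 */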
static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (section_covers_addr(&sections[lp.ptr], addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}
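/* Illustrative walk (hypothetical values, not from the original source):
 * for addr 0x1234000 with 4KiB pages, index == 0x1234; starting from
 * i = P_L2_LEVELS, each iteration subtracts the node's skip (a node with
 * skip == 2 jumps two radix levels at once) and
 * (index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1) selects the slot, until a
 * leaf (skip == 0) names a section.
 */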
bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}
/* Called from RCU critical section */
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section = atomic_read(&d->mru_section);
    subpage_t *subpage;
    bool update;

    if (section && section != &d->map.sections[PHYS_SECTION_UNASSIGNED] &&
        section_covers_addr(section, addr)) {
        update = false;
    } else {
        section = phys_page_find(d->phys_map, addr, d->map.nodes,
                                 d->map.sections);
        update = true;
    }
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    if (update) {
        atomic_set(&d->mru_section, section);
    }
    return section;
}
/* Called from RCU critical section */
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    MemoryRegion *mr;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    mr = section->mr;

    /* MMIO registers can be expected to perform full-width accesses based only
     * on their address, without considering adjacent registers that could
     * decode to completely different MemoryRegions.  When such registers
     * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
     * regions overlap wildly.  For this reason we cannot clamp the accesses
     * here.
     *
     * If the length is small (as is the case for address_space_ldl/stl),
     * everything works fine.  If the incoming length is large, however,
     * the caller really has to do the clamping through memory_access_size.
     */
    if (memory_region_is_ram(mr)) {
        diff = int128_sub(section->size, int128_make64(addr));
        *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    }
    return section;
}
/* Called from RCU critical section */
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;

    for (;;) {
        AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
        section = address_space_translate_internal(d, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        *plen = MIN(page, *plen);
    }

    *xlat = addr;
    return mr;
}
/* Called from RCU critical section */
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen)
{
    MemoryRegionSection *section;
    AddressSpaceDispatch *d = cpu->cpu_ases[asidx].memory_dispatch;

    section = address_space_translate_internal(d, addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif
#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu, 1);

    return 0;
}
static int cpu_common_pre_load(void *opaque)
{
    CPUState *cpu = opaque;

    cpu->exception_index = -1;

    return 0;
}
)
498 CPUState
*cpu
= opaque
;
500 return tcg_enabled() && cpu
->exception_index
!= -1;
static const VMStateDescription vmstate_cpu_common_exception_index = {
    .name = "cpu_common/exception_index",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_exception_index_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(exception_index, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
static bool cpu_common_crash_occurred_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return cpu->crash_occurred;
}
static const VMStateDescription vmstate_cpu_common_crash_occurred = {
    .name = "cpu_common/crash_occurred",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_crash_occurred_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(crash_occurred, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_cpu_common_exception_index,
        &vmstate_cpu_common_crash_occurred,
        NULL
    }
};

#endif
CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}
#if !defined(CONFIG_USER_ONLY)
void cpu_address_space_init(CPUState *cpu, AddressSpace *as, int asidx)
{
    CPUAddressSpace *newas;

    /* Target code should have set num_ases before calling us */
    assert(asidx < cpu->num_ases);

    if (asidx == 0) {
        /* address space 0 gets the convenience alias */
        cpu->as = as;
    }

    /* KVM cannot currently support multiple address spaces. */
    assert(asidx == 0 || !kvm_enabled());

    if (!cpu->cpu_ases) {
        cpu->cpu_ases = g_new0(CPUAddressSpace, cpu->num_ases);
    }

    newas = &cpu->cpu_ases[asidx];
    newas->cpu = cpu;
    newas->as = as;
    if (tcg_enabled()) {
        newas->tcg_as_listener.commit = tcg_commit;
        memory_listener_register(&newas->tcg_as_listener, as);
    }
}
AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx)
{
    /* Return the AddressSpace corresponding to the specified index */
    return cpu->cpu_ases[asidx].as;
}
#endif
#ifndef CONFIG_USER_ONLY
static DECLARE_BITMAP(cpu_index_map, MAX_CPUMASK_BITS);

static int cpu_get_free_index(Error **errp)
{
    int cpu = find_first_zero_bit(cpu_index_map, MAX_CPUMASK_BITS);

    if (cpu >= MAX_CPUMASK_BITS) {
        error_setg(errp, "Trying to use more CPUs than max of %d",
                   MAX_CPUMASK_BITS);
        return -1;
    }

    bitmap_set(cpu_index_map, cpu, 1);
    return cpu;
}

static void cpu_release_index(CPUState *cpu)
{
    bitmap_clear(cpu_index_map, cpu->cpu_index, 1);
}
#else
static int cpu_get_free_index(Error **errp)
{
    CPUState *some_cpu;
    int cpu_index = 0;

    CPU_FOREACH(some_cpu) {
        cpu_index++;
    }
    return cpu_index;
}

static void cpu_release_index(CPUState *cpu)
{
    return;
}
#endif
void cpu_exec_exit(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    if (cpu->cpu_index == -1) {
        /* cpu_index was never allocated by this @cpu or was already freed. */
#if defined(CONFIG_USER_ONLY)
        cpu_list_unlock();
#endif
        return;
    }

    QTAILQ_REMOVE(&cpus, cpu, node);
    cpu_release_index(cpu);
    cpu->cpu_index = -1;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif

    if (cc->vmsd != NULL) {
        vmstate_unregister(NULL, cc->vmsd, cpu);
    }
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_unregister(NULL, &vmstate_cpu_common, cpu);
    }
}
void cpu_exec_init(CPUState *cpu, Error **errp)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    Error *local_err = NULL;

#ifdef TARGET_WORDS_BIGENDIAN
    cpu->bigendian = true;
#else
    cpu->bigendian = false;
#endif

#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();

    /* This is a softmmu CPU object, so create a property for it
     * so users can wire up its memory. (This can't go in qom/cpu.c
     * because that file is compiled only once for both user-mode
     * and system builds.) The default if no link is set up is to use
     * the system address space.
     */
    object_property_add_link(OBJECT(cpu), "memory", TYPE_MEMORY_REGION,
                             (Object **)&cpu->memory,
                             qdev_prop_allow_set_link_before_realize,
                             OBJ_PROP_LINK_UNREF_ON_RELEASE,
                             &error_abort);
    cpu->memory = system_memory;
    object_ref(OBJECT(cpu->memory));
#endif

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    cpu->cpu_index = cpu_get_free_index(&local_err);
    if (local_err) {
        error_propagate(errp, local_err);
#if defined(CONFIG_USER_ONLY)
        cpu_list_unlock();
#endif
        return;
    }
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif

#ifndef CONFIG_USER_ONLY
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu->cpu_index, &vmstate_cpu_common, cpu);
    }
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu->cpu_index, cc->vmsd, cpu);
    }
#endif
}
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    MemTxAttrs attrs;
    hwaddr phys = cpu_get_phys_page_attrs_debug(cpu, pc, &attrs);
    int asidx = cpu_asidx_from_attrs(cpu, attrs);
    if (phys != -1) {
        tb_invalidate_phys_addr(cpu->cpu_ases[asidx].as,
                                phys | (pc & ~TARGET_PAGE_MASK));
    }
}
#endif
#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
}

int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    return -ENOSYS;
}

void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
}

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    CPUWatchpoint *wp;

    /* forbid ranges which are empty or run off the end of the address space */
    if (len == 0 || (addr + len - 1) < addr) {
        error_report("tried to set invalid watchpoint at %"
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len = len;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
    }

    tlb_flush_page(cpu, addr);

    if (watchpoint) {
        *watchpoint = wp;
    }
    return 0;
}
/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (addr == wp->vaddr && len == wp->len
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
            return 0;
        }
    }
    return -ENOENT;
}
816 void cpu_watchpoint_remove_by_ref(CPUState
*cpu
, CPUWatchpoint
*watchpoint
)
818 QTAILQ_REMOVE(&cpu
->watchpoints
, watchpoint
, entry
);
820 tlb_flush_page(cpu
, watchpoint
->vaddr
);
825 /* Remove all matching watchpoints. */
826 void cpu_watchpoint_remove_all(CPUState
*cpu
, int mask
)
828 CPUWatchpoint
*wp
, *next
;
830 QTAILQ_FOREACH_SAFE(wp
, &cpu
->watchpoints
, entry
, next
) {
831 if (wp
->flags
& mask
) {
832 cpu_watchpoint_remove_by_ref(cpu
, wp
);
/* Return true if this watchpoint address matches the specified
 * access (ie the address range covered by the watchpoint overlaps
 * partially or completely with the address range covered by the
 * access).
 */
static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
                                                  vaddr addr,
                                                  vaddr len)
{
    /* We know the lengths are non-zero, but a little caution is
     * required to avoid errors in the case where the range ends
     * exactly at the top of the address space and so addr + len
     * wraps round to zero.
     */
    vaddr wpend = wp->vaddr + wp->len - 1;
    vaddr addrend = addr + len - 1;

    return !(addr > wpend || wp->vaddr > addrend);
}
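/* Illustrative (not from the original source): for a 4-byte watchpoint
 * at the very top of a 64-bit address space, wp->vaddr + wp->len would
 * wrap to 0, but wpend = 0xfffffffffffffffc + 4 - 1 = 0xffffffffffffffff
 * does not, so the overlap test above stays correct at the boundary.
 */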
#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
{
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
    }

    breakpoint_invalidate(cpu, pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
}
/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
{
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
            return 0;
        }
    }
    return -ENOENT;
}
/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
{
    QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);

    breakpoint_invalidate(cpu, breakpoint->pc);

    g_free(breakpoint);
}
/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
{
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
        }
    }
}
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(cpu);
        }
    }
}
void QEMU_NORETURN cpu_abort(CPUState *cpu, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_separate()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}
#if !defined(CONFIG_USER_ONLY)
/* Called from RCU critical section */
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && addr - block->offset < block->max_length) {
        return block;
    }
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->max_length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    /* It is safe to write mru_block outside the iothread lock.  This
     * is what happens:
     *
     *     mru_block = xxx
     *     rcu_read_unlock()
     *                                        xxx removed from list
     *                  rcu_read_lock()
     *                  read mru_block
     *                                        mru_block = NULL;
     *                                        call_rcu(reclaim_ramblock, xxx);
     *                  rcu_read_unlock()
     *
     * atomic_rcu_set is not needed here.  The block was already published
     * when it was placed into the list.  Here we're just making an extra
     * copy of the pointer.
     */
    ram_list.mru_block = block;
    return block;
}
static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
    CPUState *cpu;
    ram_addr_t start1;
    RAMBlock *block;
    ram_addr_t end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    rcu_read_lock();
    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
    CPU_FOREACH(cpu) {
        tlb_reset_dirty(cpu, start1, length);
    }
    rcu_read_unlock();
}
/* Note: start and end must be within the same ram block.  */
bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
                                              ram_addr_t length,
                                              unsigned client)
{
    DirtyMemoryBlocks *blocks;
    unsigned long end, page;
    bool dirty = false;

    if (length == 0) {
        return false;
    }

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    rcu_read_lock();

    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);

    while (page < end) {
        unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long num = MIN(end - page, DIRTY_MEMORY_BLOCK_SIZE - offset);

        dirty |= bitmap_test_and_clear_atomic(blocks->blocks[idx],
                                              offset, num);
        page += num;
    }

    rcu_read_unlock();

    if (dirty && tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
    }

    return dirty;
}
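/* Illustrative arithmetic (assuming DIRTY_MEMORY_BLOCK_SIZE == 262144,
 * i.e. 2^18 pages, its usual value): clearing pages [262100, 262200)
 * touches idx 0 with offset 262100 and num 44, then idx 1 with offset 0
 * and num 56, which is exactly how the loop above splits a range across
 * dirty-bitmap blocks.
 */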
/* Called from RCU critical section */
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = memory_region_get_ram_addr(section->mr) + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        AddressSpaceDispatch *d;

        d = atomic_rcu_read(&section->address_space->dispatch);
        iotlb = section - d->map.sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* defined(CONFIG_USER_ONLY) */
#if !defined(CONFIG_USER_ONLY)

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
                               qemu_anon_ram_alloc;
/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this.  Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
{
    phys_mem_alloc = alloc;
}
static uint16_t phys_section_add(PhysPageMap *map,
                                 MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(map->sections_nb < TARGET_PAGE_SIZE);

    if (map->sections_nb == map->sections_nb_alloc) {
        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
        map->sections = g_renew(MemoryRegionSection, map->sections,
                                map->sections_nb_alloc);
    }
    map->sections[map->sections_nb] = *section;
    memory_region_ref(section->mr);
    return map->sections_nb++;
}
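/* Illustrative (not from the original source): because sections_nb stays
 * below TARGET_PAGE_SIZE, a section number such as PHYS_SECTION_NOTDIRTY
 * (1) can be ORed into the low bits of a page-aligned ram address, e.g.
 * 0x40000000 | 1 == 0x40000001, and the two values can later be
 * separated again using TARGET_PAGE_MASK.
 */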
static void phys_section_destroy(MemoryRegion *mr)
{
    bool have_sub_page = mr->subpage;

    memory_region_unref(mr);

    if (have_sub_page) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        object_unref(OBJECT(&subpage->iomem));
        g_free(subpage);
    }
}
static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->sections);
    g_free(map->nodes);
}
static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
                                                   d->map.nodes, d->map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.address_space = d->as;
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&d->map, &subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end,
                     phys_section_add(&d->map, section));
}
static void register_multipage(AddressSpaceDispatch *d,
                               MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(&d->map, section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,
                                                    TARGET_PAGE_BITS));

    assert(num_pages);
    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
}
static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = as->next_dispatch;
    MemoryRegionSection now = *section, remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
                        - now.offset_within_address_space;

        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(d, &now);
    } else {
        now.size = int128_zero();
    }
    while (int128_ne(remain.size, now.size)) {
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);
        now = remain;
        if (int128_lt(remain.size, page_size)) {
            register_subpage(d, &now);
        } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
            now.size = page_size;
            register_subpage(d, &now);
        } else {
            now.size = int128_and(now.size, int128_neg(page_size));
            register_multipage(d, &now);
        }
    }
}
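/* Illustrative walk-through (hypothetical numbers, assuming 4KiB pages):
 * a section covering [0x0800, 0x3000) is split by mem_add() into a
 * subpage head [0x0800, 0x1000) and a multipage middle [0x1000, 0x3000);
 * had the end been unaligned, the tail would have gone through
 * register_subpage() as well.
 */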
void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled()) {
        kvm_flush_coalesced_mmio_buffer();
    }
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}
#ifdef __linux__
static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path,
                            Error **errp)
{
    bool unlink_on_error = false;
    char *filename;
    char *sanitized_name;
    char *c;
    void * volatile area = NULL;
    int fd = -1;
    int64_t page_size;

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        error_setg(errp,
                   "host lacks kvm mmu notifiers, -mem-path unsupported");
        return NULL;
    }

    for (;;) {
        fd = open(path, O_RDWR);
        if (fd >= 0) {
            /* @path names an existing file, use it */
            break;
        }
        if (errno == ENOENT) {
            /* @path names a file that doesn't exist, create it */
            fd = open(path, O_RDWR | O_CREAT | O_EXCL, 0644);
            if (fd >= 0) {
                unlink_on_error = true;
                break;
            }
        } else if (errno == EISDIR) {
            /* @path names a directory, create a file there */
            /* Make name safe to use with mkstemp by replacing '/' with '_'. */
            sanitized_name = g_strdup(memory_region_name(block->mr));
            for (c = sanitized_name; *c != '\0'; c++) {
                if (*c == '/') {
                    *c = '_';
                }
            }

            filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
                                       sanitized_name);
            g_free(sanitized_name);

            fd = mkstemp(filename);
            if (fd >= 0) {
                unlink_on_error = true;
                g_free(filename);
                break;
            }
            g_free(filename);
        }
        if (errno != EEXIST && errno != EINTR) {
            error_setg_errno(errp, errno,
                             "can't open backing store %s for guest RAM",
                             path);
            goto error;
        }
        /*
         * Try again on EINTR and EEXIST.  The latter happens when
         * something else creates the file between our two open().
         */
    }

    page_size = qemu_fd_getpagesize(fd);
    block->mr->align = MAX(page_size, QEMU_VMALLOC_ALIGN);

    if (memory < page_size) {
        error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
                   "or larger than page size 0x%" PRIx64,
                   memory, page_size);
        goto error;
    }

    memory = ROUND_UP(memory, page_size);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory)) {
        perror("ftruncate");
    }

    area = qemu_ram_mmap(fd, memory, block->mr->align,
                         block->flags & RAM_SHARED);
    if (area == MAP_FAILED) {
        error_setg_errno(errp, errno,
                         "unable to map backing store for guest RAM");
        goto error;
    }

    if (mem_prealloc) {
        os_mem_prealloc(fd, area, memory);
    }

    block->fd = fd;
    return area;

error:
    if (unlink_on_error) {
        unlink(path);
    }
    if (fd != -1) {
        close(fd);
    }
    return NULL;
}
#endif
/* Called with the ramlist lock held.  */
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    assert(size != 0); /* it would hand out same offset multiple times */

    if (QLIST_EMPTY_RCU(&ram_list.blocks)) {
        return 0;
    }

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->max_length;

        QLIST_FOREACH_RCU(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}
last_ram_offset(void)
1421 ram_addr_t last
= 0;
1424 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1425 last
= MAX(last
, block
->offset
+ block
->max_length
);
1431 static void qemu_ram_setup_dump(void *addr
, ram_addr_t size
)
1435 /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
1436 if (!machine_dump_guest_core(current_machine
)) {
1437 ret
= qemu_madvise(addr
, size
, QEMU_MADV_DONTDUMP
);
1439 perror("qemu_madvise");
1440 fprintf(stderr
, "madvise doesn't support MADV_DONTDUMP, "
1441 "but dump_guest_core=off specified\n");
1446 const char *qemu_ram_get_idstr(RAMBlock
*rb
)
/* Called with iothread lock held.  */
void qemu_ram_set_idstr(RAMBlock *new_block, const char *name, DeviceState *dev)
{
    RAMBlock *block;

    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev) {
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    rcu_read_lock();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (block != new_block &&
            !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
    rcu_read_unlock();
}
1481 void qemu_ram_unset_idstr(RAMBlock
*block
)
1483 /* FIXME: arch_init.c assumes that this is not called throughout
1484 * migration. Ignore the problem since hot-unplug during migration
1485 * does not work anyway.
1488 memset(block
->idstr
, 0, sizeof(block
->idstr
));
static int memory_try_enable_merging(void *addr, size_t len)
{
    if (!machine_mem_merge(current_machine)) {
        /* disabled by the user */
        return 0;
    }

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
}
/* Only legal before guest might have detected the memory size: e.g. on
 * incoming migration, or right after reset.
 *
 * As memory core doesn't know how is memory accessed, it is up to
 * resize callback to update device state and/or add assertions to detect
 * misuse, if necessary.
 */
int qemu_ram_resize(RAMBlock *block, ram_addr_t newsize, Error **errp)
{
    assert(block);

    newsize = HOST_PAGE_ALIGN(newsize);

    if (block->used_length == newsize) {
        return 0;
    }

    if (!(block->flags & RAM_RESIZEABLE)) {
        error_setg_errno(errp, EINVAL,
                         "Length mismatch: %s: 0x" RAM_ADDR_FMT
                         " in != 0x" RAM_ADDR_FMT, block->idstr,
                         newsize, block->used_length);
        return -EINVAL;
    }

    if (block->max_length < newsize) {
        error_setg_errno(errp, EINVAL,
                         "Length too large: %s: 0x" RAM_ADDR_FMT
                         " > 0x" RAM_ADDR_FMT, block->idstr,
                         newsize, block->max_length);
        return -EINVAL;
    }

    cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
    block->used_length = newsize;
    cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
                                        DIRTY_CLIENTS_ALL);
    memory_region_set_size(block->mr, newsize);
    if (block->resized) {
        block->resized(block->idstr, newsize, block->host);
    }
    return 0;
}
1547 static void dirty_memory_extend(ram_addr_t old_ram_size
,
1548 ram_addr_t new_ram_size
)
1550 ram_addr_t old_num_blocks
= DIV_ROUND_UP(old_ram_size
,
1551 DIRTY_MEMORY_BLOCK_SIZE
);
1552 ram_addr_t new_num_blocks
= DIV_ROUND_UP(new_ram_size
,
1553 DIRTY_MEMORY_BLOCK_SIZE
);
1556 /* Only need to extend if block count increased */
1557 if (new_num_blocks
<= old_num_blocks
) {
1561 for (i
= 0; i
< DIRTY_MEMORY_NUM
; i
++) {
1562 DirtyMemoryBlocks
*old_blocks
;
1563 DirtyMemoryBlocks
*new_blocks
;
1566 old_blocks
= atomic_rcu_read(&ram_list
.dirty_memory
[i
]);
1567 new_blocks
= g_malloc(sizeof(*new_blocks
) +
1568 sizeof(new_blocks
->blocks
[0]) * new_num_blocks
);
1570 if (old_num_blocks
) {
1571 memcpy(new_blocks
->blocks
, old_blocks
->blocks
,
1572 old_num_blocks
* sizeof(old_blocks
->blocks
[0]));
1575 for (j
= old_num_blocks
; j
< new_num_blocks
; j
++) {
1576 new_blocks
->blocks
[j
] = bitmap_new(DIRTY_MEMORY_BLOCK_SIZE
);
1579 atomic_rcu_set(&ram_list
.dirty_memory
[i
], new_blocks
);
1582 g_free_rcu(old_blocks
, rcu
);
static void ram_block_add(RAMBlock *new_block, Error **errp)
{
    RAMBlock *block;
    RAMBlock *last_block = NULL;
    ram_addr_t old_ram_size, new_ram_size;
    Error *err = NULL;

    old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;

    qemu_mutex_lock_ramlist();
    new_block->offset = find_ram_offset(new_block->max_length);

    if (!new_block->host) {
        if (xen_enabled()) {
            xen_ram_alloc(new_block->offset, new_block->max_length,
                          new_block->mr, &err);
            if (err) {
                error_propagate(errp, err);
                qemu_mutex_unlock_ramlist();
                return;
            }
        } else {
            new_block->host = phys_mem_alloc(new_block->max_length,
                                             &new_block->mr->align);
            if (!new_block->host) {
                error_setg_errno(errp, errno,
                                 "cannot set up guest memory '%s'",
                                 memory_region_name(new_block->mr));
                qemu_mutex_unlock_ramlist();
                return;
            }
            memory_try_enable_merging(new_block->host, new_block->max_length);
        }
    }

    new_ram_size = MAX(old_ram_size,
              (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS);
    if (new_ram_size > old_ram_size) {
        migration_bitmap_extend(old_ram_size, new_ram_size);
        dirty_memory_extend(old_ram_size, new_ram_size);
    }
    /* Keep the list sorted from biggest to smallest block.  Unlike QTAILQ,
     * QLIST (which has an RCU-friendly variant) does not have insertion at
     * tail, so save the last element in last_block.
     */
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        last_block = block;
        if (block->max_length < new_block->max_length) {
            break;
        }
    }
    if (block) {
        QLIST_INSERT_BEFORE_RCU(block, new_block, next);
    } else if (last_block) {
        QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
    } else { /* list is empty */
        QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
    }
    ram_list.mru_block = NULL;

    /* Write list before version */
    smp_wmb();
    ram_list.version++;
    qemu_mutex_unlock_ramlist();

    cpu_physical_memory_set_dirty_range(new_block->offset,
                                        new_block->used_length,
                                        DIRTY_CLIENTS_ALL);

    if (new_block->host) {
        qemu_ram_setup_dump(new_block->host, new_block->max_length);
        qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
        qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
        if (kvm_enabled()) {
            kvm_setup_guest_memory(new_block->host, new_block->max_length);
        }
    }
}
*qemu_ram_alloc_from_file(ram_addr_t size
, MemoryRegion
*mr
,
1668 bool share
, const char *mem_path
,
1671 RAMBlock
*new_block
;
1672 Error
*local_err
= NULL
;
1674 if (xen_enabled()) {
1675 error_setg(errp
, "-mem-path not supported with Xen");
1679 if (phys_mem_alloc
!= qemu_anon_ram_alloc
) {
1681 * file_ram_alloc() needs to allocate just like
1682 * phys_mem_alloc, but we haven't bothered to provide
1686 "-mem-path not supported with this accelerator");
1690 size
= HOST_PAGE_ALIGN(size
);
1691 new_block
= g_malloc0(sizeof(*new_block
));
1693 new_block
->used_length
= size
;
1694 new_block
->max_length
= size
;
1695 new_block
->flags
= share
? RAM_SHARED
: 0;
1696 new_block
->host
= file_ram_alloc(new_block
, size
,
1698 if (!new_block
->host
) {
1703 ram_block_add(new_block
, &local_err
);
1706 error_propagate(errp
, local_err
);
1714 RAMBlock
*qemu_ram_alloc_internal(ram_addr_t size
, ram_addr_t max_size
,
1715 void (*resized
)(const char*,
1718 void *host
, bool resizeable
,
1719 MemoryRegion
*mr
, Error
**errp
)
1721 RAMBlock
*new_block
;
1722 Error
*local_err
= NULL
;
1724 size
= HOST_PAGE_ALIGN(size
);
1725 max_size
= HOST_PAGE_ALIGN(max_size
);
1726 new_block
= g_malloc0(sizeof(*new_block
));
1728 new_block
->resized
= resized
;
1729 new_block
->used_length
= size
;
1730 new_block
->max_length
= max_size
;
1731 assert(max_size
>= size
);
1733 new_block
->host
= host
;
1735 new_block
->flags
|= RAM_PREALLOC
;
1738 new_block
->flags
|= RAM_RESIZEABLE
;
1740 ram_block_add(new_block
, &local_err
);
1743 error_propagate(errp
, local_err
);
1749 RAMBlock
*qemu_ram_alloc_from_ptr(ram_addr_t size
, void *host
,
1750 MemoryRegion
*mr
, Error
**errp
)
1752 return qemu_ram_alloc_internal(size
, size
, NULL
, host
, false, mr
, errp
);
1755 RAMBlock
*qemu_ram_alloc(ram_addr_t size
, MemoryRegion
*mr
, Error
**errp
)
1757 return qemu_ram_alloc_internal(size
, size
, NULL
, NULL
, false, mr
, errp
);
1760 RAMBlock
*qemu_ram_alloc_resizeable(ram_addr_t size
, ram_addr_t maxsz
,
1761 void (*resized
)(const char*,
1764 MemoryRegion
*mr
, Error
**errp
)
1766 return qemu_ram_alloc_internal(size
, maxsz
, resized
, NULL
, true, mr
, errp
);
static void reclaim_ramblock(RAMBlock *block)
{
    if (block->flags & RAM_PREALLOC) {
        ;
    } else if (xen_enabled()) {
        xen_invalidate_map_cache_entry(block->host);
#ifndef _WIN32
    } else if (block->fd >= 0) {
        qemu_ram_munmap(block->host, block->max_length);
        close(block->fd);
#endif
    } else {
        qemu_anon_ram_free(block->host, block->max_length);
    }
    g_free(block);
}
void qemu_ram_free(RAMBlock *block)
{
    if (!block) {
        return;
    }

    qemu_mutex_lock_ramlist();
    QLIST_REMOVE_RCU(block, next);
    ram_list.mru_block = NULL;
    /* Write list before version */
    smp_wmb();
    ram_list.version++;
    call_rcu(block, reclaim_ramblock, rcu);
    qemu_mutex_unlock_ramlist();
}
#ifndef _WIN32
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->max_length) {
            vaddr = ramblock_ptr(block, offset);
            if (block->flags & RAM_PREALLOC) {
                ;
            } else if (xen_enabled()) {
                abort();
            } else {
                flags = MAP_FIXED;
                if (block->fd >= 0) {
                    flags |= (block->flags & RAM_SHARED ?
                              MAP_SHARED : MAP_PRIVATE);
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, block->fd, offset);
                } else {
                    /*
                     * Remap needs to match alloc.  Accelerators that
                     * set phys_mem_alloc never remap.  If they did,
                     * we'd need a remap hook here.
                     */
                    assert(phys_mem_alloc == qemu_anon_ram_alloc);

                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                memory_try_enable_merging(vaddr, length);
                qemu_ram_setup_dump(vaddr, length);
            }
        }
    }
}
#endif /* !_WIN32 */
/* Return a host pointer to ram allocated with qemu_ram_alloc.
 * This should not be used for general purpose DMA.  Use address_space_map
 * or address_space_rw instead. For local memory (e.g. video ram) that the
 * device owns, use memory_region_get_ram_ptr.
 *
 * Called within RCU critical section.
 */
void *qemu_map_ram_ptr(RAMBlock *ram_block, ram_addr_t addr)
{
    RAMBlock *block = ram_block;

    if (block == NULL) {
        block = qemu_get_ram_block(addr);
        addr -= block->offset;
    }

    if (xen_enabled() && block->host == NULL) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map until the end of the page.
         */
        if (block->offset == 0) {
            return xen_map_cache(addr, 0, 0);
        }

        block->host = xen_map_cache(block->offset, block->max_length, 1);
    }
    return ramblock_ptr(block, addr);
}
/* Return a host pointer to guest's ram. Similar to qemu_map_ram_ptr
 * but takes a size argument.
 *
 * Called within RCU critical section.
 */
static void *qemu_ram_ptr_length(RAMBlock *ram_block, ram_addr_t addr,
                                 hwaddr *size)
{
    RAMBlock *block = ram_block;
    if (*size == 0) {
        return NULL;
    }

    if (block == NULL) {
        block = qemu_get_ram_block(addr);
        addr -= block->offset;
    }
    *size = MIN(*size, block->max_length - addr);

    if (xen_enabled() && block->host == NULL) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map the requested area.
         */
        if (block->offset == 0) {
            return xen_map_cache(addr, *size, 1);
        }

        block->host = xen_map_cache(block->offset, block->max_length, 1);
    }

    return ramblock_ptr(block, addr);
}
/*
 * Translates a host ptr back to a RAMBlock, a ram_addr and an offset
 * in that RAMBlock.
 *
 * ptr: Host pointer to look up
 * round_offset: If true round the result offset down to a page boundary
 * *ram_addr: set to result ram_addr
 * *offset: set to result offset within the RAMBlock
 *
 * Returns: RAMBlock (or NULL if not found)
 *
 * By the time this function returns, the returned pointer is not protected
 * by RCU anymore.  If the caller is not within an RCU critical section and
 * does not hold the iothread lock, it must have other means of protecting the
 * pointer, such as a reference to the region that includes the incoming
 * ram_addr_t.
 */
RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
                                   ram_addr_t *offset)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_enabled()) {
        ram_addr_t ram_addr;
        rcu_read_lock();
        ram_addr = xen_ram_addr_from_mapcache(ptr);
        block = qemu_get_ram_block(ram_addr);
        if (block) {
            *offset = ram_addr - block->offset;
        }
        rcu_read_unlock();
        return block;
    }

    rcu_read_lock();
    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && block->host && host - block->host < block->max_length) {
        goto found;
    }

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        /* This case happens when the block is not mapped. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->max_length) {
            goto found;
        }
    }

    rcu_read_unlock();
    return NULL;

found:
    *offset = (host - block->host);
    if (round_offset) {
        *offset &= TARGET_PAGE_MASK;
    }
    rcu_read_unlock();
    return block;
}
/*
 * Finds the named RAMBlock
 *
 * name: The name of RAMBlock to find
 *
 * Returns: RAMBlock (or NULL if not found)
 */
RAMBlock *qemu_ram_block_by_name(const char *name)
{
    RAMBlock *block;

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (!strcmp(name, block->idstr)) {
            return block;
        }
    }

    return NULL;
}
/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
ram_addr_t qemu_ram_addr_from_host(void *ptr)
{
    RAMBlock *block;
    ram_addr_t offset;

    block = qemu_ram_block_from_host(ptr, false, &offset);
    if (!block) {
        return RAM_ADDR_INVALID;
    }

    return block->offset + offset;
}
/* Called within RCU critical section.  */
static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
                               uint64_t val, unsigned size)
{
    if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
        tb_invalidate_phys_page_fast(ram_addr, size);
    }
    switch (size) {
    case 1:
        stb_p(qemu_map_ram_ptr(NULL, ram_addr), val);
        break;
    case 2:
        stw_p(qemu_map_ram_ptr(NULL, ram_addr), val);
        break;
    case 4:
        stl_p(qemu_map_ram_ptr(NULL, ram_addr), val);
        break;
    default:
        abort();
    }
    /* Set both VGA and migration bits for simplicity and to remove
     * the notdirty callback faster.
     */
    cpu_physical_memory_set_dirty_range(ram_addr, size,
                                        DIRTY_CLIENTS_NOCODE);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (!cpu_physical_memory_is_clean(ram_addr)) {
        tlb_set_dirty(current_cpu, current_cpu->mem_io_vaddr);
    }
}
static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
                                 unsigned size, bool is_write)
{
    return is_write;
}

static const MemoryRegionOps notdirty_mem_ops = {
    .write = notdirty_mem_write,
    .valid.accepts = notdirty_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
{
    CPUState *cpu = current_cpu;
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUArchState *env = cpu->env_ptr;
    target_ulong pc, cs_base;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    uint32_t cpu_flags;

    if (cpu->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, len)
            && (wp->flags & flags)) {
            if (flags == BP_MEM_READ) {
                wp->flags |= BP_WATCHPOINT_HIT_READ;
            } else {
                wp->flags |= BP_WATCHPOINT_HIT_WRITE;
            }
            wp->hitaddr = vaddr;
            wp->hitattrs = attrs;
            if (!cpu->watchpoint_hit) {
                if (wp->flags & BP_CPU &&
                    !cc->debug_check_watchpoint(cpu, wp)) {
                    wp->flags &= ~BP_WATCHPOINT_HIT;
                    continue;
                }
                cpu->watchpoint_hit = wp;
                tb_check_watchpoint(cpu);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    cpu->exception_index = EXCP_DEBUG;
                    cpu_loop_exit(cpu);
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
                    cpu_loop_exit_noexc(cpu);
                }
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   memory routines.  */
static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata,
                                  unsigned size, MemTxAttrs attrs)
{
    MemTxResult res;
    uint64_t data;
    int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
    AddressSpace *as = current_cpu->cpu_ases[asidx].as;

    check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ);
    switch (size) {
    case 1:
        data = address_space_ldub(as, addr, attrs, &res);
        break;
    case 2:
        data = address_space_lduw(as, addr, attrs, &res);
        break;
    case 4:
        data = address_space_ldl(as, addr, attrs, &res);
        break;
    default:
        abort();
    }
    *pdata = data;
    return res;
}
watch_mem_write(void *opaque
, hwaddr addr
,
2138 uint64_t val
, unsigned size
,
2142 int asidx
= cpu_asidx_from_attrs(current_cpu
, attrs
);
2143 AddressSpace
*as
= current_cpu
->cpu_ases
[asidx
].as
;
2145 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, size
, attrs
, BP_MEM_WRITE
);
2148 address_space_stb(as
, addr
, val
, attrs
, &res
);
2151 address_space_stw(as
, addr
, val
, attrs
, &res
);
2154 address_space_stl(as
, addr
, val
, attrs
, &res
);
2161 static const MemoryRegionOps watch_mem_ops
= {
2162 .read_with_attrs
= watch_mem_read
,
2163 .write_with_attrs
= watch_mem_write
,
2164 .endianness
= DEVICE_NATIVE_ENDIAN
,
static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
                                unsigned len, MemTxAttrs attrs)
{
    subpage_t *subpage = opaque;
    uint8_t buf[8];
    MemTxResult res;

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
           subpage, len, addr);
#endif
    res = address_space_read(subpage->as, addr + subpage->base,
                             attrs, buf, len);
    if (res) {
        return res;
    }
    switch (len) {
    case 1:
        *data = ldub_p(buf);
        return MEMTX_OK;
    case 2:
        *data = lduw_p(buf);
        return MEMTX_OK;
    case 4:
        *data = ldl_p(buf);
        return MEMTX_OK;
    case 8:
        *data = ldq_p(buf);
        return MEMTX_OK;
    default:
        abort();
    }
}
subpage_write(void *opaque
, hwaddr addr
,
2202 uint64_t value
, unsigned len
, MemTxAttrs attrs
)
2204 subpage_t
*subpage
= opaque
;
2207 #if defined(DEBUG_SUBPAGE)
2208 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
2209 " value %"PRIx64
"\n",
2210 __func__
, subpage
, len
, addr
, value
);
2228 return address_space_write(subpage
->as
, addr
+ subpage
->base
,
2232 static bool subpage_accepts(void *opaque
, hwaddr addr
,
2233 unsigned len
, bool is_write
)
2235 subpage_t
*subpage
= opaque
;
2236 #if defined(DEBUG_SUBPAGE)
2237 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx
"\n",
2238 __func__
, subpage
, is_write
? 'w' : 'r', len
, addr
);
2241 return address_space_access_valid(subpage
->as
, addr
+ subpage
->base
,
2245 static const MemoryRegionOps subpage_ops
= {
2246 .read_with_attrs
= subpage_read
,
2247 .write_with_attrs
= subpage_write
,
2248 .impl
.min_access_size
= 1,
2249 .impl
.max_access_size
= 8,
2250 .valid
.min_access_size
= 1,
2251 .valid
.max_access_size
= 8,
2252 .valid
.accepts
= subpage_accepts
,
2253 .endianness
= DEVICE_NATIVE_ENDIAN
,
static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE) {
        return -1;
    }
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
           __func__, mmio, start, end, idx, eidx, section);
#endif
    for (; idx <= eidx; idx++) {
        mmio->sub_section[idx] = section;
    }

    return 0;
}
static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
{
    subpage_t *mmio;

    mmio = g_malloc0(sizeof(subpage_t));

    mmio->as = as;
    mmio->base = base;
    memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
                          NULL, TARGET_PAGE_SIZE);
    mmio->iomem.subpage = true;
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
           mmio, base, TARGET_PAGE_SIZE);
#endif
    subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, PHYS_SECTION_UNASSIGNED);

    return mmio;
}
static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
                              MemoryRegion *mr)
{
    assert(as);
    MemoryRegionSection section = {
        .address_space = as,
        .mr = mr,
        .offset_within_address_space = 0,
        .offset_within_region = 0,
        .size = int128_2_64(),
    };

    return phys_section_add(map, &section);
}
MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index, MemTxAttrs attrs)
{
    int asidx = cpu_asidx_from_attrs(cpu, attrs);
    CPUAddressSpace *cpuas = &cpu->cpu_ases[asidx];
    AddressSpaceDispatch *d = atomic_rcu_read(&cpuas->memory_dispatch);
    MemoryRegionSection *sections = d->map.sections;

    return sections[index & ~TARGET_PAGE_MASK].mr;
}
static void io_mem_init(void)
{
    memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
    memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
                          NULL, UINT64_MAX);
    memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
                          NULL, UINT64_MAX);
    memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
                          NULL, UINT64_MAX);
}
static void mem_begin(MemoryListener *listener)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
    uint16_t n;

    n = dummy_section(&d->map, as, &io_mem_unassigned);
    assert(n == PHYS_SECTION_UNASSIGNED);
    n = dummy_section(&d->map, as, &io_mem_notdirty);
    assert(n == PHYS_SECTION_NOTDIRTY);
    n = dummy_section(&d->map, as, &io_mem_rom);
    assert(n == PHYS_SECTION_ROM);
    n = dummy_section(&d->map, as, &io_mem_watch);
    assert(n == PHYS_SECTION_WATCH);

    d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
    d->as = as;
    as->next_dispatch = d;
}
static void address_space_dispatch_free(AddressSpaceDispatch *d)
{
    phys_sections_free(&d->map);
    g_free(d);
}
static void mem_commit(MemoryListener *listener)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *cur = as->dispatch;
    AddressSpaceDispatch *next = as->next_dispatch;

    phys_page_compact_all(next, next->map.nodes_nb);

    atomic_rcu_set(&as->dispatch, next);
    if (cur) {
        call_rcu(cur, address_space_dispatch_free, rcu);
    }
}
static void tcg_commit(MemoryListener *listener)
{
    CPUAddressSpace *cpuas;
    AddressSpaceDispatch *d;

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener);
    cpu_reloading_memory_map();
    /* The CPU and TLB are protected by the iothread lock.
     * We reload the dispatch pointer now because cpu_reloading_memory_map()
     * may have split the RCU critical section.
     */
    d = atomic_rcu_read(&cpuas->as->dispatch);
    cpuas->memory_dispatch = d;
    tlb_flush(cpuas->cpu, 1);
}
void address_space_init_dispatch(AddressSpace *as)
{
    as->dispatch = NULL;
    as->dispatch_listener = (MemoryListener) {
        .begin = mem_begin,
        .commit = mem_commit,
        .region_add = mem_add,
        .region_nop = mem_add,
        .priority = 0,
    };
    memory_listener_register(&as->dispatch_listener, as);
}
void address_space_unregister(AddressSpace *as)
{
    memory_listener_unregister(&as->dispatch_listener);
}
void address_space_destroy_dispatch(AddressSpace *as)
{
    AddressSpaceDispatch *d = as->dispatch;

    atomic_rcu_set(&as->dispatch, NULL);
    if (d) {
        call_rcu(d, address_space_dispatch_free, rcu);
    }
}
static void memory_map_init(void)
{
    system_memory = g_malloc(sizeof(*system_memory));

    memory_region_init(system_memory, NULL, "system", UINT64_MAX);
    address_space_init(&address_space_memory, system_memory, "memory");

    system_io = g_malloc(sizeof(*system_io));
    memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
                          65536);
    address_space_init(&address_space_io, system_io, "I/O");
}
MemoryRegion *get_system_memory(void)
{
    return system_memory;
}

MemoryRegion *get_system_io(void)
{
    return system_io;
}

#endif /* !defined(CONFIG_USER_ONLY) */
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len) {
            l = len;
        }
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID)) {
            return -1;
        }
        if (is_write) {
            if (!(flags & PAGE_WRITE)) {
                return -1;
            }
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0))) {
                return -1;
            }
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ)) {
                return -1;
            }
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1))) {
                return -1;
            }
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

#else
static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
                                     hwaddr length)
{
    uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
    addr += memory_region_get_ram_addr(mr);

    /* No early return if dirty_log_mask is or becomes 0, because
     * cpu_physical_memory_set_dirty_range will still call
     * xen_modified_memory.
     */
    if (dirty_log_mask) {
        dirty_log_mask =
            cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
    }
    if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
        tb_invalidate_phys_range(addr, addr + length);
        dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
    }
    cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
}
static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
{
    unsigned access_size_max = mr->ops->valid.max_access_size;

    /* Regions are assumed to support 1-4 byte accesses unless
       otherwise specified.  */
    if (access_size_max == 0) {
        access_size_max = 4;
    }

    /* Bound the maximum access by the alignment of the address.  */
    if (!mr->ops->impl.unaligned) {
        unsigned align_size_max = addr & -addr;
        if (align_size_max != 0 && align_size_max < access_size_max) {
            access_size_max = align_size_max;
        }
    }

    /* Don't attempt accesses larger than the maximum.  */
    if (l > access_size_max) {
        l = access_size_max;
    }
    l = pow2floor(l);

    return l;
}
static bool prepare_mmio_access(MemoryRegion *mr)
{
    bool unlocked = !qemu_mutex_iothread_locked();
    bool release_lock = false;

    if (unlocked && mr->global_locking) {
        qemu_mutex_lock_iothread();
        unlocked = false;
        release_lock = true;
    }
    if (mr->flush_coalesced_mmio) {
        if (unlocked) {
            qemu_mutex_lock_iothread();
        }
        qemu_flush_coalesced_mmio_buffer();
        if (unlocked) {
            qemu_mutex_unlock_iothread();
        }
    }

    return release_lock;
}
/* Called within RCU critical section.  */
static MemTxResult address_space_write_continue(AddressSpace *as, hwaddr addr,
                                                MemTxAttrs attrs,
                                                const uint8_t *buf,
                                                int len, hwaddr addr1,
                                                hwaddr l, MemoryRegion *mr)
{
    uint8_t *ptr;
    uint64_t val;
    MemTxResult result = MEMTX_OK;
    bool release_lock = false;

    for (;;) {
        if (!memory_access_is_direct(mr, true)) {
            release_lock |= prepare_mmio_access(mr);
            l = memory_access_size(mr, l, addr1);
            /* XXX: could force current_cpu to NULL to avoid
               potential bugs */
            switch (l) {
            case 8:
                /* 64 bit write access */
                val = ldq_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 8,
                                                       attrs);
                break;
            case 4:
                /* 32 bit write access */
                val = ldl_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 4,
                                                       attrs);
                break;
            case 2:
                /* 16 bit write access */
                val = lduw_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 2,
                                                       attrs);
                break;
            case 1:
                /* 8 bit write access */
                val = ldub_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 1,
                                                       attrs);
                break;
            default:
                abort();
            }
        } else {
            /* RAM case */
            ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
            memcpy(ptr, buf, l);
            invalidate_and_set_dirty(mr, addr1, l);
        }

        if (release_lock) {
            qemu_mutex_unlock_iothread();
            release_lock = false;
        }

        len -= l;
        buf += l;
        addr += l;

        if (!len) {
            break;
        }

        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);
    }

    return result;
}
MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
                                const uint8_t *buf, int len)
{
    hwaddr l;
    hwaddr addr1;
    MemoryRegion *mr;
    MemTxResult result = MEMTX_OK;

    if (len > 0) {
        rcu_read_lock();
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);
        result = address_space_write_continue(as, addr, attrs, buf, len,
                                              addr1, l, mr);
        rcu_read_unlock();
    }

    return result;
}
/* Called within RCU critical section.  */
MemTxResult address_space_read_continue(AddressSpace *as, hwaddr addr,
                                        MemTxAttrs attrs, uint8_t *buf,
                                        int len, hwaddr addr1, hwaddr l,
                                        MemoryRegion *mr)
{
    uint8_t *ptr;
    uint64_t val;
    MemTxResult result = MEMTX_OK;
    bool release_lock = false;

    for (;;) {
        if (!memory_access_is_direct(mr, false)) {
            /* I/O case */
            release_lock |= prepare_mmio_access(mr);
            l = memory_access_size(mr, l, addr1);
            switch (l) {
            case 8:
                /* 64 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 8,
                                                      attrs);
                stq_p(buf, val);
                break;
            case 4:
                /* 32 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 4,
                                                      attrs);
                stl_p(buf, val);
                break;
            case 2:
                /* 16 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 2,
                                                      attrs);
                stw_p(buf, val);
                break;
            case 1:
                /* 8 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 1,
                                                      attrs);
                stb_p(buf, val);
                break;
            default:
                abort();
            }
        } else {
            /* RAM case */
            ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
            memcpy(buf, ptr, l);
        }

        if (release_lock) {
            qemu_mutex_unlock_iothread();
            release_lock = false;
        }

        len -= l;
        buf += l;
        addr += l;

        if (!len) {
            break;
        }

        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, false);
    }

    return result;
}
MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
                                    MemTxAttrs attrs, uint8_t *buf, int len)
{
    hwaddr l;
    hwaddr addr1;
    MemoryRegion *mr;
    MemTxResult result = MEMTX_OK;

    if (len > 0) {
        rcu_read_lock();
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, false);
        result = address_space_read_continue(as, addr, attrs, buf, len,
                                             addr1, l, mr);
        rcu_read_unlock();
    }

    return result;
}
MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
                             uint8_t *buf, int len, bool is_write)
{
    if (is_write) {
        return address_space_write(as, addr, attrs, (uint8_t *)buf, len);
    } else {
        return address_space_read(as, addr, attrs, (uint8_t *)buf, len);
    }
}
void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
                            int len, int is_write)
{
    address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
                     buf, len, is_write);
}
enum write_rom_type {
    WRITE_DATA,
    FLUSH_CACHE,
};

static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
    hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
{
    hwaddr l;
    uint8_t *ptr;
    hwaddr addr1;
    MemoryRegion *mr;

    rcu_read_lock();
    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);

        if (!(memory_region_is_ram(mr) ||
              memory_region_is_romd(mr))) {
            l = memory_access_size(mr, l, addr1);
        } else {
            /* ROM/RAM case */
            ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
            switch (type) {
            case WRITE_DATA:
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(mr, addr1, l);
                break;
            case FLUSH_CACHE:
                flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
                break;
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
    rcu_read_unlock();
}
/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
                                   const uint8_t *buf, int len)
{
    cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
}
void cpu_flush_icache_range(hwaddr start, int len)
{
    /*
     * This function should do the same thing as an icache flush that was
     * triggered from within the guest. For TCG we are always cache coherent,
     * so there is no need to flush anything. For KVM / Xen we need to flush
     * the host's instruction cache at least.
     */
    if (tcg_enabled()) {
        return;
    }

    cpu_physical_memory_write_rom_internal(&address_space_memory,
                                           start, NULL, len, FLUSH_CACHE);
}
typedef struct {
    MemoryRegion *mr;
    void *buffer;
    hwaddr addr;
    hwaddr len;
    bool in_use;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    QEMUBH *bh;
    QLIST_ENTRY(MapClient) link;
} MapClient;

QemuMutex map_client_list_lock;
static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);
static void cpu_unregister_map_client_do(MapClient *client)
{
    QLIST_REMOVE(client, link);
    g_free(client);
}
static void cpu_notify_map_clients_locked(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        qemu_bh_schedule(client->bh);
        cpu_unregister_map_client_do(client);
    }
}
void cpu_register_map_client(QEMUBH *bh)
{
    MapClient *client = g_malloc(sizeof(*client));

    qemu_mutex_lock(&map_client_list_lock);
    client->bh = bh;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    if (!atomic_read(&bounce.in_use)) {
        cpu_notify_map_clients_locked();
    }
    qemu_mutex_unlock(&map_client_list_lock);
}
void cpu_exec_init_all(void)
{
    qemu_mutex_init(&ram_list.mutex);
    io_mem_init();
    memory_map_init();
    qemu_mutex_init(&map_client_list_lock);
}
void cpu_unregister_map_client(QEMUBH *bh)
{
    MapClient *client;

    qemu_mutex_lock(&map_client_list_lock);
    QLIST_FOREACH(client, &map_client_list, link) {
        if (client->bh == bh) {
            cpu_unregister_map_client_do(client);
            break;
        }
    }
    qemu_mutex_unlock(&map_client_list_lock);
}
static void cpu_notify_map_clients(void)
{
    qemu_mutex_lock(&map_client_list_lock);
    cpu_notify_map_clients_locked();
    qemu_mutex_unlock(&map_client_list_lock);
}
bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
{
    MemoryRegion *mr;
    hwaddr l, xlat;

    rcu_read_lock();
    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (!memory_access_is_direct(mr, is_write)) {
            l = memory_access_size(mr, l, addr);
            if (!memory_region_access_valid(mr, xlat, l, is_write)) {
                rcu_read_unlock();
                return false;
            }
        }

        len -= l;
        addr += l;
    }
    rcu_read_unlock();
    return true;
}
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *address_space_map(AddressSpace *as,
                        hwaddr addr,
                        hwaddr *plen,
                        bool is_write)
{
    hwaddr len = *plen;
    hwaddr done = 0;
    hwaddr l, xlat, base;
    MemoryRegion *mr, *this_mr;
    void *ptr;

    if (len == 0) {
        return NULL;
    }

    l = len;
    rcu_read_lock();
    mr = address_space_translate(as, addr, &xlat, &l, is_write);

    if (!memory_access_is_direct(mr, is_write)) {
        if (atomic_xchg(&bounce.in_use, true)) {
            rcu_read_unlock();
            return NULL;
        }
        /* Avoid unbounded allocations */
        l = MIN(l, TARGET_PAGE_SIZE);
        bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
        bounce.addr = addr;
        bounce.len = l;

        memory_region_ref(mr);
        bounce.mr = mr;
        if (!is_write) {
            address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
                               bounce.buffer, l);
        }

        rcu_read_unlock();
        *plen = l;
        return bounce.buffer;
    }

    base = xlat;

    for (;;) {
        len -= l;
        addr += l;
        done += l;
        if (len == 0) {
            break;
        }

        l = len;
        this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (this_mr != mr || xlat != base + done) {
            break;
        }
    }

    memory_region_ref(mr);
    *plen = done;
    ptr = qemu_ram_ptr_length(mr->ram_block, base, plen);
    rcu_read_unlock();

    return ptr;
}
/* Unmaps a memory region previously mapped by address_space_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         int is_write, hwaddr access_len)
{
    if (buffer != bounce.buffer) {
        MemoryRegion *mr;
        ram_addr_t addr1;

        mr = memory_region_from_host(buffer, &addr1);
        assert(mr != NULL);
        if (is_write) {
            invalidate_and_set_dirty(mr, addr1, access_len);
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        memory_region_unref(mr);
        return;
    }
    if (is_write) {
        address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
                            bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    memory_region_unref(bounce.mr);
    atomic_mb_set(&bounce.in_use, false);
    cpu_notify_map_clients();
}
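/*
 * Example (illustrative): the canonical map/access/unmap sequence for a
 * one-direction transfer.  The length actually mapped may be smaller
 * than requested, so callers must loop or fall back; gpa and size stand
 * for a caller-provided guest address and length:
 *
 *     hwaddr plen = size;
 *     void *p = address_space_map(&address_space_memory, gpa, &plen, true);
 *     if (p) {
 *         memset(p, 0, plen);  // write at most plen bytes
 *         address_space_unmap(&address_space_memory, p, plen, true, plen);
 *     }
 */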
void *cpu_physical_memory_map(hwaddr addr,
                              hwaddr *plen,
                              int is_write)
{
    return address_space_map(&address_space_memory, addr, plen, is_write);
}

void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                               int is_write, hwaddr access_len)
{
    return address_space_unmap(&address_space_memory, buffer, len,
                               is_write, access_len);
}
/* warning: addr must be aligned */
static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
                                                  MemTxAttrs attrs,
                                                  MemTxResult *result,
                                                  enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, false);
    if (l < 4 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}
uint32_t address_space_ldl(AddressSpace *as, hwaddr addr,
                           MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_NATIVE_ENDIAN);
}

uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_LITTLE_ENDIAN);
}

uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_BIG_ENDIAN);
}
uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}
/* warning: addr must be aligned */
static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
                                                  MemTxAttrs attrs,
                                                  MemTxResult *result,
                                                  enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 8;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 false);
    if (l < 8 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap64(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap64(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}
uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
                           MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_NATIVE_ENDIAN);
}

uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_LITTLE_ENDIAN);
}

uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_BIG_ENDIAN);
}

uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}
uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
                            MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t val;
    MemTxResult r;

    r = address_space_rw(as, addr, attrs, &val, 1, 0);
    if (result) {
        *result = r;
    }
    return val;
}

uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}
/* warning: addr must be aligned */
static inline uint32_t address_space_lduw_internal(AddressSpace *as,
                                                   hwaddr addr,
                                                   MemTxAttrs attrs,
                                                   MemTxResult *result,
                                                   enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 false);
    if (l < 2 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}
uint32_t address_space_lduw(AddressSpace *as, hwaddr addr,
                            MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_NATIVE_ENDIAN);
}

uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
                               MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_LITTLE_ENDIAN);
}

uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
                               MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_BIG_ENDIAN);
}

uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}
/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
                                MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    uint8_t dirty_log_mask;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

        r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
    } else {
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        stl_p(ptr, val);

        dirty_log_mask = memory_region_get_dirty_log_mask(mr);
        dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
        cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
                                            4, dirty_log_mask);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}

void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_notdirty(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
/* warning: addr must be aligned */
static inline void address_space_stl_internal(AddressSpace *as,
                                              hwaddr addr, uint32_t val,
                                              MemTxAttrs attrs,
                                              MemTxResult *result,
                                              enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(mr, addr1, 4);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}
void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stl_internal(as, addr, val, attrs, result,
                               DEVICE_NATIVE_ENDIAN);
}

void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stl_internal(as, addr, val, attrs, result,
                               DEVICE_LITTLE_ENDIAN);
}

void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stl_internal(as, addr, val, attrs, result,
                               DEVICE_BIG_ENDIAN);
}

void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t v = val;
    MemTxResult r;

    r = address_space_rw(as, addr, attrs, &v, 1, 1);
    if (result) {
        *result = r;
    }
}

void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
/* warning: addr must be aligned */
static inline void address_space_stw_internal(AddressSpace *as,
                                              hwaddr addr, uint32_t val,
                                              MemTxAttrs attrs,
                                              MemTxResult *result,
                                              enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, true);
    if (l < 2 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(mr, addr1, 2);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}
void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_NATIVE_ENDIAN);
}

void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_LITTLE_ENDIAN);
}

void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_BIG_ENDIAN);
}

void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;

    val = tswap64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;

    val = cpu_to_le64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;

    val = cpu_to_be64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

    while (len > 0) {
        int asidx;
        MemTxAttrs attrs;

        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_attrs_debug(cpu, page, &attrs);
        asidx = cpu_asidx_from_attrs(cpu, attrs);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write) {
            cpu_physical_memory_write_rom(cpu->cpu_ases[asidx].as,
                                          phys_addr, buf, l);
        } else {
            address_space_rw(cpu->cpu_ases[asidx].as, phys_addr,
                             MEMTXATTRS_UNSPECIFIED,
                             buf, l, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
/*
 * Allows code that needs to deal with migration bitmaps etc to still be
 * built target independent.
 */
size_t qemu_target_page_bits(void)
{
    return TARGET_PAGE_BITS;
}
/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool target_words_bigendian(void);
bool target_words_bigendian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}
#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegion *mr;
    hwaddr l = 1;
    bool res;

    rcu_read_lock();
    mr = address_space_translate(&address_space_memory,
                                 phys_addr, &phys_addr, &l, false);

    res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
    rcu_read_unlock();
    return res;
}
int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
{
    RAMBlock *block;
    int ret = 0;

    rcu_read_lock();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        ret = func(block->idstr, block->host, block->offset,
                   block->used_length, opaque);
        if (ret) {
            break;
        }
    }
    rcu_read_unlock();
    return ret;
}