/*
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu-common.h"

#if !defined(CONFIG_USER_ONLY)
#include "hw/boards.h"
#endif
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include "qemu.h"
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#endif
#include "exec/cpu-all.h"
#include "qemu/rcu_queue.h"
#include "qemu/main-loop.h"
#include "translate-all.h"
#include "sysemu/replay.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"

#include "qemu/range.h"
#ifndef _WIN32
#include "qemu/mmap-alloc.h"
#endif
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* ram_list is read under rcu_read_lock()/rcu_read_unlock().  Writes
 * are protected by the ramlist lock.
 */
RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC   (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED     (1 << 1)

/* Only a portion of RAM (used_length) is actually used, and migrated.
 * This used_length size can change across reboots.
 */
#define RAM_RESIZEABLE (1 << 2)

#endif
struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
__thread CPUState *current_cpu;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;
#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables.  */
#define ADDR_SPACE_BITS 64

#define P_L2_BITS 9
#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)

typedef PhysPageEntry Node[P_L2_SIZE];
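
/* Editor's sketch (not part of the original source): with 4 KiB target
 * pages (TARGET_PAGE_BITS == 12) and P_L2_BITS == 9, the map needs
 * ((64 - 12 - 1) / 9) + 1 == 6 levels, each consuming 9 bits of the
 * page index.
 */
#if 0
_Static_assert(((64 - 12 - 1) / 9) + 1 == 6,
               "six radix levels cover a 64-bit space with 4 KiB pages");
#endif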

typedef struct PhysPageMap {
    struct rcu_head rcu;

    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    struct rcu_head rcu;

    MemoryRegionSection *mru_section;
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;

/**
 * CPUAddressSpace: all the information a CPU needs about an AddressSpace
 * @cpu: the CPU whose AddressSpace this is
 * @as: the AddressSpace itself
 * @memory_dispatch: its dispatch pointer (cached, RCU protected)
 * @tcg_as_listener: listener for tracking changes to the AddressSpace
 */
struct CPUAddressSpace {
    CPUState *cpu;
    AddressSpace *as;
    struct AddressSpaceDispatch *memory_dispatch;
    MemoryListener tcg_as_listener;
};

#endif
#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
    }
}

static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
{
    unsigned i;
    uint32_t ret;
    PhysPageEntry e;
    PhysPageEntry *p;

    ret = map->nodes_nb++;
    p = map->nodes[ret];
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);

    e.skip = leaf ? 0 : 1;
    e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
    for (i = 0; i < P_L2_SIZE; ++i) {
        memcpy(&p[i], &e, sizeof(e));
    }
    return ret;
}

static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map, level == 0);
    }
    p = map->nodes[lp->ptr];
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}
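
/* Editor's sketch (hypothetical values, not in the original source):
 * mapping a 2 MiB region starting at guest-physical 1 MiB with 4 KiB
 * pages stamps 512 consecutive leaves with one section index:
 */
#if 0
phys_page_set(d, 0x100000 >> TARGET_PAGE_BITS, 512, section_index);
#endif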

/* Compact a non leaf page entry. Simply detect that the entry has a single child,
 * and update our entry so we can skip it and go directly to the destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes, compacted);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}

static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    DECLARE_BITMAP(compacted, nodes_nb);

    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes, compacted);
    }
}

static inline bool section_covers_addr(const MemoryRegionSection *section,
                                       hwaddr addr)
{
    /* Memory topology clips a memory region to [0, 2^64); size.hi > 0 means
     * the section must cover the entire address space.
     */
    return section->size.hi ||
           range_covers_byte(section->offset_within_address_space,
                             section->size.lo, addr);
}
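
/* Editor's sketch (hypothetical numbers): a section at
 * offset_within_address_space == 0x3000 with size.lo == 0x2000 covers
 * bytes [0x3000, 0x5000), so it contains 0x4fff but not 0x5000:
 */
#if 0
assert(range_covers_byte(0x3000, 0x2000, 0x4fff));
assert(!range_covers_byte(0x3000, 0x2000, 0x5000));
#endif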

static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (section_covers_addr(&sections[lp.ptr], addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

/* Called from RCU critical section */
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section = atomic_read(&d->mru_section);
    subpage_t *subpage;
    bool update;

    if (section && section != &d->map.sections[PHYS_SECTION_UNASSIGNED] &&
        section_covers_addr(section, addr)) {
        update = false;
    } else {
        section = phys_page_find(d->phys_map, addr, d->map.nodes,
                                 d->map.sections);
        update = true;
    }
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    if (update) {
        atomic_set(&d->mru_section, section);
    }
    return section;
}

/* Called from RCU critical section */
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    MemoryRegion *mr;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    mr = section->mr;

    /* MMIO registers can be expected to perform full-width accesses based only
     * on their address, without considering adjacent registers that could
     * decode to completely different MemoryRegions.  When such registers
     * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
     * regions overlap wildly.  For this reason we cannot clamp the accesses
     * here.
     *
     * If the length is small (as is the case for address_space_ldl/stl),
     * everything works fine.  If the incoming length is large, however,
     * the caller really has to do the clamping through memory_access_size.
     */
    if (memory_region_is_ram(mr)) {
        diff = int128_sub(section->size, int128_make64(addr));
        *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    }
    return section;
}

/* Called from RCU critical section */
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;

    for (;;) {
        AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
        section = address_space_translate_internal(d, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        *plen = MIN(page, *plen);
    }

    *xlat = addr;
    return mr;
}
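
/* Editor's note (hypothetical IOTLB entry): with addr_mask == 0xfff and
 * translated_addr == 0x42000, the loop above rewrites guest address
 * 0x1234 to (0x42000 & ~0xfff) | (0x1234 & 0xfff) == 0x42234 and clamps
 * *plen to the 0xdcc bytes left in that 4 KiB translation window.
 */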

/* Called from RCU critical section */
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen)
{
    MemoryRegionSection *section;
    AddressSpaceDispatch *d = cpu->cpu_ases[asidx].memory_dispatch;

    section = address_space_translate_internal(d, addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu, 1);

    return 0;
}

static int cpu_common_pre_load(void *opaque)
{
    CPUState *cpu = opaque;

    cpu->exception_index = -1;

    return 0;
}

static bool cpu_common_exception_index_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return tcg_enabled() && cpu->exception_index != -1;
}

static const VMStateDescription vmstate_cpu_common_exception_index = {
    .name = "cpu_common/exception_index",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_exception_index_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(exception_index, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

static bool cpu_common_crash_occurred_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return cpu->crash_occurred;
}

static const VMStateDescription vmstate_cpu_common_crash_occurred = {
    .name = "cpu_common/crash_occurred",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_crash_occurred_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(crash_occurred, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_cpu_common_exception_index,
        &vmstate_cpu_common_crash_occurred,
        NULL
    }
};

#endif

CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}

#if !defined(CONFIG_USER_ONLY)
void cpu_address_space_init(CPUState *cpu, AddressSpace *as, int asidx)
{
    CPUAddressSpace *newas;

    /* Target code should have set num_ases before calling us */
    assert(asidx < cpu->num_ases);

    if (asidx == 0) {
        /* address space 0 gets the convenience alias */
        cpu->as = as;
    }

    /* KVM cannot currently support multiple address spaces. */
    assert(asidx == 0 || !kvm_enabled());

    if (!cpu->cpu_ases) {
        cpu->cpu_ases = g_new0(CPUAddressSpace, cpu->num_ases);
    }

    newas = &cpu->cpu_ases[asidx];
    newas->cpu = cpu;
    newas->as = as;
    if (tcg_enabled()) {
        newas->tcg_as_listener.commit = tcg_commit;
        memory_listener_register(&newas->tcg_as_listener, as);
    }
}

AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx)
{
    /* Return the AddressSpace corresponding to the specified index */
    return cpu->cpu_ases[asidx].as;
}
#endif

#ifndef CONFIG_USER_ONLY
static DECLARE_BITMAP(cpu_index_map, MAX_CPUMASK_BITS);

static int cpu_get_free_index(Error **errp)
{
    int cpu = find_first_zero_bit(cpu_index_map, MAX_CPUMASK_BITS);

    if (cpu >= MAX_CPUMASK_BITS) {
        error_setg(errp, "Trying to use more CPUs than max of %d",
                   MAX_CPUMASK_BITS);
        return -1;
    }

    bitmap_set(cpu_index_map, cpu, 1);
    return cpu;
}

void cpu_exec_exit(CPUState *cpu)
{
    if (cpu->cpu_index == -1) {
        /* cpu_index was never allocated by this @cpu or was already freed. */
        return;
    }

    bitmap_clear(cpu_index_map, cpu->cpu_index, 1);
    cpu->cpu_index = -1;
}
#else

static int cpu_get_free_index(Error **errp)
{
    CPUState *some_cpu;
    int cpu_index = 0;

    CPU_FOREACH(some_cpu) {
        cpu_index++;
    }
    return cpu_index;
}

void cpu_exec_exit(CPUState *cpu)
{
}
#endif

void cpu_exec_init(CPUState *cpu, Error **errp)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    int cpu_index;
    Error *local_err = NULL;

    cpu->as = NULL;
    cpu->num_ases = 0;

#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();

    /* This is a softmmu CPU object, so create a property for it
     * so users can wire up its memory. (This can't go in qom/cpu.c
     * because that file is compiled only once for both user-mode
     * and system builds.) The default if no link is set up is to use
     * the system address space.
     */
    object_property_add_link(OBJECT(cpu), "memory", TYPE_MEMORY_REGION,
                             (Object **)&cpu->memory,
                             qdev_prop_allow_set_link_before_realize,
                             OBJ_PROP_LINK_UNREF_ON_RELEASE,
                             &error_abort);
    cpu->memory = system_memory;
    object_ref(OBJECT(cpu->memory));
#endif

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    cpu_index = cpu->cpu_index = cpu_get_free_index(&local_err);
    if (local_err) {
        error_propagate(errp, local_err);
#if defined(CONFIG_USER_ONLY)
        cpu_list_unlock();
#endif
        return;
    }
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
    }
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}

#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    MemTxAttrs attrs;
    hwaddr phys = cpu_get_phys_page_attrs_debug(cpu, pc, &attrs);
    int asidx = cpu_asidx_from_attrs(cpu, attrs);

    if (phys != -1) {
        tb_invalidate_phys_addr(cpu->cpu_ases[asidx].as,
                                phys | (pc & ~TARGET_PAGE_MASK));
    }
}
#endif

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
}

int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    return -ENOSYS;
}

void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
}

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    CPUWatchpoint *wp;

    /* forbid ranges which are empty or run off the end of the address space */
    if (len == 0 || (addr + len - 1) < addr) {
        error_report("tried to set invalid watchpoint at %"
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len = len;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
    }

    tlb_flush_page(cpu, addr);

    if (watchpoint) {
        *watchpoint = wp;
    }
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (addr == wp->vaddr && len == wp->len
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);

    tlb_flush_page(cpu, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
        }
    }
}

/* Return true if this watchpoint address matches the specified
 * access (ie the address range covered by the watchpoint overlaps
 * partially or completely with the address range covered by the
 * access).
 */
static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
                                                  vaddr addr,
                                                  vaddr len)
{
    /* We know the lengths are non-zero, but a little caution is
     * required to avoid errors in the case where the range ends
     * exactly at the top of the address space and so addr + len
     * wraps round to zero.
     */
    vaddr wpend = wp->vaddr + wp->len - 1;
    vaddr addrend = addr + len - 1;

    return !(addr > wpend || wp->vaddr > addrend);
}
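
/* Editor's sketch (hypothetical addresses): a watchpoint covering
 * [0x1000, 0x1003] overlaps an access to [0x1002, 0x1005] but not one to
 * [0x1004, 0x1007]; comparing inclusive range ends avoids the wrap-around
 * that addr + len could suffer at the very top of the address space.
 */
#if 0
assert(!(0x1002 > 0x1003 || 0x1000 > 0x1005)); /* ranges overlap */
assert(0x1004 > 0x1003);                       /* ranges disjoint */
#endif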
#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
{
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
    }

    breakpoint_invalidate(cpu, pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
{
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
{
    QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);

    breakpoint_invalidate(cpu, breakpoint->pc);

    g_free(breakpoint);
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
{
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
        }
    }
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(cpu);
        }
    }
}

void cpu_abort(CPUState *cpu, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_separate()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
    replay_finish();
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

#if !defined(CONFIG_USER_ONLY)
/* Called from RCU critical section */
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && addr - block->offset < block->max_length) {
        return block;
    }
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->max_length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    /* It is safe to write mru_block outside the iothread lock.  This
     * is what happens:
     *
     *     mru_block = xxx
     *     rcu_read_unlock()
     *                                        xxx removed from list
     *                  rcu_read_lock()
     *                  read mru_block
     *                                        mru_block = NULL;
     *                                        call_rcu(reclaim_ramblock, xxx);
     *                  rcu_read_unlock()
     *
     * atomic_rcu_set is not needed here.  The block was already published
     * when it was placed into the list.  Here we're just making an extra
     * copy of the pointer.
     */
    ram_list.mru_block = block;
    return block;
}

static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
    CPUState *cpu;
    ram_addr_t end;
    uintptr_t start1;
    RAMBlock *block;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    rcu_read_lock();
    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
    CPU_FOREACH(cpu) {
        tlb_reset_dirty(cpu, start1, length);
    }
    rcu_read_unlock();
}

/* Note: start and end must be within the same ram block.  */
bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
                                              ram_addr_t length,
                                              unsigned client)
{
    DirtyMemoryBlocks *blocks;
    unsigned long end, page;
    bool dirty = false;

    if (length == 0) {
        return false;
    }

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    rcu_read_lock();

    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);

    while (page < end) {
        unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long num = MIN(end - page, DIRTY_MEMORY_BLOCK_SIZE - offset);

        dirty |= bitmap_test_and_clear_atomic(blocks->blocks[idx],
                                              offset, num);
        page += num;
    }

    rcu_read_unlock();

    if (dirty && tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
    }

    return dirty;
}

/* Called from RCU critical section */
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        AddressSpaceDispatch *d;

        d = atomic_rcu_read(&section->address_space->dispatch);
        iotlb = section - d->map.sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
                               qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this.  Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
{
    phys_mem_alloc = alloc;
}

static uint16_t phys_section_add(PhysPageMap *map,
                                 MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(map->sections_nb < TARGET_PAGE_SIZE);

    if (map->sections_nb == map->sections_nb_alloc) {
        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
        map->sections = g_renew(MemoryRegionSection, map->sections,
                                map->sections_nb_alloc);
    }
    map->sections[map->sections_nb] = *section;
    memory_region_ref(section->mr);
    return map->sections_nb++;
}
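
/* Editor's note: the assert in phys_section_add() is what lets
 * memory_region_section_get_iotlb() OR a section number into the low bits
 * of a page-aligned address; a section index never reaches
 * TARGET_PAGE_SIZE, so the two fields cannot collide.
 */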

static void phys_section_destroy(MemoryRegion *mr)
{
    bool have_sub_page = mr->subpage;

    memory_region_unref(mr);

    if (have_sub_page) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        object_unref(OBJECT(&subpage->iomem));
        g_free(subpage);
    }
}

static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->sections);
}

static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
                                                   d->map.nodes, d->map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.address_space = d->as;
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&d->map, &subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end,
                     phys_section_add(&d->map, section));
}

static void register_multipage(AddressSpaceDispatch *d,
                               MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(&d->map, section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,
                                                    TARGET_PAGE_BITS));

    assert(num_pages);
    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
}

static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = as->next_dispatch;
    MemoryRegionSection now = *section, remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
                        - now.offset_within_address_space;

        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(d, &now);
    } else {
        now.size = int128_zero();
    }
    while (int128_ne(remain.size, now.size)) {
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);
        now = remain;
        if (int128_lt(remain.size, page_size)) {
            register_subpage(d, &now);
        } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
            now.size = page_size;
            register_subpage(d, &now);
        } else {
            now.size = int128_and(now.size, int128_neg(page_size));
            register_multipage(d, &now);
        }
    }
}

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled()) {
        kvm_flush_coalesced_mmio_buffer();
    }
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}

#ifdef __linux__

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC       0x958458f6

static long gethugepagesize(int fd)
{
    struct statfs fs;
    int ret;

    do {
        ret = fstatfs(fd, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        return -1;
    }

    return fs.f_bsize;
}

static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path,
                            Error **errp)
{
    bool unlink_on_error = false;
    char *filename;
    char *sanitized_name;
    char *c;
    void *area;
    int fd = -1;
    int64_t hpagesize;

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        error_setg(errp,
                   "host lacks kvm mmu notifiers, -mem-path unsupported");
        return NULL;
    }

    for (;;) {
        fd = open(path, O_RDWR);
        if (fd >= 0) {
            /* @path names an existing file, use it */
            break;
        }
        if (errno == ENOENT) {
            /* @path names a file that doesn't exist, create it */
            fd = open(path, O_RDWR | O_CREAT | O_EXCL, 0644);
            if (fd >= 0) {
                unlink_on_error = true;
                break;
            }
        } else if (errno == EISDIR) {
            /* @path names a directory, create a file there */
            /* Make name safe to use with mkstemp by replacing '/' with '_'. */
            sanitized_name = g_strdup(memory_region_name(block->mr));
            for (c = sanitized_name; *c != '\0'; c++) {
                if (*c == '/') {
                    *c = '_';
                }
            }

            filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
                                       sanitized_name);
            g_free(sanitized_name);

            fd = mkstemp(filename);
            if (fd >= 0) {
                unlink(filename);
                g_free(filename);
                break;
            }
            g_free(filename);
        }
        if (errno != EEXIST && errno != EINTR) {
            error_setg_errno(errp, errno,
                             "can't open backing store %s for guest RAM",
                             path);
            goto error;
        }
        /*
         * Try again on EINTR and EEXIST.  The latter happens when
         * something else creates the file between our two open().
         */
    }

    hpagesize = gethugepagesize(fd);
    if (hpagesize < 0) {
        error_setg_errno(errp, errno, "can't get page size for %s",
                         path);
        goto error;
    }
    block->mr->align = hpagesize;

    if (memory < hpagesize) {
        error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
                   "or larger than page size 0x%" PRIx64,
                   memory, hpagesize);
        goto error;
    }

    memory = ROUND_UP(memory, hpagesize);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory)) {
        perror("ftruncate");
    }

    area = qemu_ram_mmap(fd, memory, hpagesize, block->flags & RAM_SHARED);
    if (area == MAP_FAILED) {
        error_setg_errno(errp, errno,
                         "unable to map backing store for guest RAM");
        goto error;
    }

    if (mem_prealloc) {
        os_mem_prealloc(fd, area, memory);
    }

    block->fd = fd;
    return area;

error:
    if (unlink_on_error) {
        unlink(path);
    }
    if (fd != -1) {
        close(fd);
    }
    return NULL;
}
#endif

/* Called with the ramlist lock held.  */
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    assert(size != 0); /* it would hand out same offset multiple times */

    if (QLIST_EMPTY_RCU(&ram_list.blocks)) {
        return 0;
    }

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->max_length;

        QLIST_FOREACH_RCU(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}

ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    rcu_read_lock();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        last = MAX(last, block->offset + block->max_length);
    }
    rcu_read_unlock();
    return last;
}
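
/* Editor's sketch for find_ram_offset() above (hypothetical blocks):
 * with blocks at [0, 0x8000) and [0x10000, 0x20000), a request for
 * 0x4000 bytes returns offset 0x8000, the start of the smallest gap
 * (best fit on mingap) that still satisfies the request.
 */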

static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
{
    int ret;

    /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
    if (!machine_dump_guest_core(current_machine)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                    "but dump_guest_core=off specified\n");
        }
    }
}

/* Called within an RCU critical section, or while the ramlist lock
 * is held.
 */
static RAMBlock *find_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
            return block;
        }
    }

    return NULL;
}

const char *qemu_ram_get_idstr(RAMBlock *rb)
{
    return rb->idstr;
}

/* Called with iothread lock held.  */
void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block, *block;

    rcu_read_lock();
    new_block = find_ram_block(addr);
    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev) {
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
    rcu_read_unlock();
}

/* Called with iothread lock held.  */
void qemu_ram_unset_idstr(ram_addr_t addr)
{
    RAMBlock *block;

    /* FIXME: arch_init.c assumes that this is not called throughout
     * migration.  Ignore the problem since hot-unplug during migration
     * does not work anyway.
     */

    rcu_read_lock();
    block = find_ram_block(addr);
    if (block) {
        memset(block->idstr, 0, sizeof(block->idstr));
    }
    rcu_read_unlock();
}

static int memory_try_enable_merging(void *addr, size_t len)
{
    if (!machine_mem_merge(current_machine)) {
        /* disabled by the user */
        return 0;
    }

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
}

/* Only legal before guest might have detected the memory size: e.g. on
 * incoming migration, or right after reset.
 *
 * As the memory core doesn't know how memory is accessed, it is up to the
 * resize callback to update device state and/or add assertions to detect
 * misuse, if necessary.
 */
int qemu_ram_resize(ram_addr_t base, ram_addr_t newsize, Error **errp)
{
    RAMBlock *block = find_ram_block(base);

    assert(block);

    newsize = HOST_PAGE_ALIGN(newsize);

    if (block->used_length == newsize) {
        return 0;
    }

    if (!(block->flags & RAM_RESIZEABLE)) {
        error_setg_errno(errp, EINVAL,
                         "Length mismatch: %s: 0x" RAM_ADDR_FMT
                         " in != 0x" RAM_ADDR_FMT, block->idstr,
                         newsize, block->used_length);
        return -EINVAL;
    }

    if (block->max_length < newsize) {
        error_setg_errno(errp, EINVAL,
                         "Length too large: %s: 0x" RAM_ADDR_FMT
                         " > 0x" RAM_ADDR_FMT, block->idstr,
                         newsize, block->max_length);
        return -EINVAL;
    }

    cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
    block->used_length = newsize;
    cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
                                        DIRTY_CLIENTS_ALL);
    memory_region_set_size(block->mr, newsize);
    if (block->resized) {
        block->resized(block->idstr, newsize, block->host);
    }
    return 0;
}
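
/* Editor's sketch (hypothetical caller, not part of this file): shrinking
 * a resizeable block after incoming migration negotiated a smaller size;
 * block_base_addr is a placeholder for the block's base ram_addr_t.
 */
#if 0
Error *err = NULL;
if (qemu_ram_resize(block_base_addr, 128 * 1024 * 1024, &err) < 0) {
    error_report_err(err);
}
#endif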

/* Called with ram_list.mutex held */
static void dirty_memory_extend(ram_addr_t old_ram_size,
                                ram_addr_t new_ram_size)
{
    ram_addr_t old_num_blocks = DIV_ROUND_UP(old_ram_size,
                                             DIRTY_MEMORY_BLOCK_SIZE);
    ram_addr_t new_num_blocks = DIV_ROUND_UP(new_ram_size,
                                             DIRTY_MEMORY_BLOCK_SIZE);
    int i;

    /* Only need to extend if block count increased */
    if (new_num_blocks <= old_num_blocks) {
        return;
    }

    for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
        DirtyMemoryBlocks *old_blocks;
        DirtyMemoryBlocks *new_blocks;
        int j;

        old_blocks = atomic_rcu_read(&ram_list.dirty_memory[i]);
        new_blocks = g_malloc(sizeof(*new_blocks) +
                              sizeof(new_blocks->blocks[0]) * new_num_blocks);

        if (old_num_blocks) {
            memcpy(new_blocks->blocks, old_blocks->blocks,
                   old_num_blocks * sizeof(old_blocks->blocks[0]));
        }

        for (j = old_num_blocks; j < new_num_blocks; j++) {
            new_blocks->blocks[j] = bitmap_new(DIRTY_MEMORY_BLOCK_SIZE);
        }

        atomic_rcu_set(&ram_list.dirty_memory[i], new_blocks);

        if (old_blocks) {
            g_free_rcu(old_blocks, rcu);
        }
    }
}

static void ram_block_add(RAMBlock *new_block, Error **errp)
{
    RAMBlock *block;
    RAMBlock *last_block = NULL;
    ram_addr_t old_ram_size, new_ram_size;
    Error *err = NULL;

    old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;

    qemu_mutex_lock_ramlist();
    new_block->offset = find_ram_offset(new_block->max_length);

    if (!new_block->host) {
        if (xen_enabled()) {
            xen_ram_alloc(new_block->offset, new_block->max_length,
                          new_block->mr, &err);
            if (err) {
                error_propagate(errp, err);
                qemu_mutex_unlock_ramlist();
                return;
            }
        } else {
            new_block->host = phys_mem_alloc(new_block->max_length,
                                             &new_block->mr->align);
            if (!new_block->host) {
                error_setg_errno(errp, errno,
                                 "cannot set up guest memory '%s'",
                                 memory_region_name(new_block->mr));
                qemu_mutex_unlock_ramlist();
                return;
            }
            memory_try_enable_merging(new_block->host, new_block->max_length);
        }
    }

    new_ram_size = MAX(old_ram_size,
              (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS);
    if (new_ram_size > old_ram_size) {
        migration_bitmap_extend(old_ram_size, new_ram_size);
        dirty_memory_extend(old_ram_size, new_ram_size);
    }
    /* Keep the list sorted from biggest to smallest block.  Unlike QTAILQ,
     * QLIST (which has an RCU-friendly variant) does not have insertion at
     * tail, so save the last element in last_block.
     */
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        last_block = block;
        if (block->max_length < new_block->max_length) {
            break;
        }
    }
    if (block) {
        QLIST_INSERT_BEFORE_RCU(block, new_block, next);
    } else if (last_block) {
        QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
    } else { /* list is empty */
        QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
    }
    ram_list.mru_block = NULL;

    /* Write list before version */
    smp_wmb();
    ram_list.version++;
    qemu_mutex_unlock_ramlist();

    cpu_physical_memory_set_dirty_range(new_block->offset,
                                        new_block->used_length,
                                        DIRTY_CLIENTS_ALL);

    if (new_block->host) {
        qemu_ram_setup_dump(new_block->host, new_block->max_length);
        qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
        qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
        if (kvm_enabled()) {
            kvm_setup_guest_memory(new_block->host, new_block->max_length);
        }
    }
}

#ifdef __linux__
RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
                                   bool share, const char *mem_path,
                                   Error **errp)
{
    RAMBlock *new_block;
    Error *local_err = NULL;

    if (xen_enabled()) {
        error_setg(errp, "-mem-path not supported with Xen");
        return NULL;
    }

    if (phys_mem_alloc != qemu_anon_ram_alloc) {
        /*
         * file_ram_alloc() needs to allocate just like
         * phys_mem_alloc, but we haven't bothered to provide
         * a hook there.
         */
        error_setg(errp,
                   "-mem-path not supported with this accelerator");
        return NULL;
    }

    size = HOST_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));
    new_block->mr = mr;
    new_block->used_length = size;
    new_block->max_length = size;
    new_block->flags = share ? RAM_SHARED : 0;
    new_block->host = file_ram_alloc(new_block, size,
                                     mem_path, errp);
    if (!new_block->host) {
        g_free(new_block);
        return NULL;
    }

    ram_block_add(new_block, &local_err);
    if (local_err) {
        g_free(new_block);
        error_propagate(errp, local_err);
        return NULL;
    }
    return new_block;
}
#endif

static
RAMBlock *qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
                                  void (*resized)(const char*,
                                                  uint64_t length,
                                                  void *host),
                                  void *host, bool resizeable,
                                  MemoryRegion *mr, Error **errp)
{
    RAMBlock *new_block;
    Error *local_err = NULL;

    size = HOST_PAGE_ALIGN(size);
    max_size = HOST_PAGE_ALIGN(max_size);
    new_block = g_malloc0(sizeof(*new_block));
    new_block->mr = mr;
    new_block->resized = resized;
    new_block->used_length = size;
    new_block->max_length = max_size;
    assert(max_size >= size);
    new_block->fd = -1;
    new_block->host = host;
    if (host) {
        new_block->flags |= RAM_PREALLOC;
    }
    if (resizeable) {
        new_block->flags |= RAM_RESIZEABLE;
    }
    ram_block_add(new_block, &local_err);
    if (local_err) {
        g_free(new_block);
        error_propagate(errp, local_err);
        return NULL;
    }
    return new_block;
}

RAMBlock *qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                  MemoryRegion *mr, Error **errp)
{
    return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
}

RAMBlock *qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
{
    return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
}

RAMBlock *qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
                                    void (*resized)(const char*,
                                                    uint64_t length,
                                                    void *host),
                                    MemoryRegion *mr, Error **errp)
{
    return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
}
*block
)
1781 if (block
->flags
& RAM_PREALLOC
) {
1783 } else if (xen_enabled()) {
1784 xen_invalidate_map_cache_entry(block
->host
);
1786 } else if (block
->fd
>= 0) {
1787 qemu_ram_munmap(block
->host
, block
->max_length
);
1791 qemu_anon_ram_free(block
->host
, block
->max_length
);
1796 void qemu_ram_free(RAMBlock
*block
)
1798 qemu_mutex_lock_ramlist();
1799 QLIST_REMOVE_RCU(block
, next
);
1800 ram_list
.mru_block
= NULL
;
1801 /* Write list before version */
1804 call_rcu(block
, reclaim_ramblock
, rcu
);
1805 qemu_mutex_unlock_ramlist();
1809 void qemu_ram_remap(ram_addr_t addr
, ram_addr_t length
)
1816 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1817 offset
= addr
- block
->offset
;
1818 if (offset
< block
->max_length
) {
1819 vaddr
= ramblock_ptr(block
, offset
);
1820 if (block
->flags
& RAM_PREALLOC
) {
1822 } else if (xen_enabled()) {
1826 if (block
->fd
>= 0) {
1827 flags
|= (block
->flags
& RAM_SHARED
?
1828 MAP_SHARED
: MAP_PRIVATE
);
1829 area
= mmap(vaddr
, length
, PROT_READ
| PROT_WRITE
,
1830 flags
, block
->fd
, offset
);
1833 * Remap needs to match alloc. Accelerators that
1834 * set phys_mem_alloc never remap. If they did,
1835 * we'd need a remap hook here.
1837 assert(phys_mem_alloc
== qemu_anon_ram_alloc
);
1839 flags
|= MAP_PRIVATE
| MAP_ANONYMOUS
;
1840 area
= mmap(vaddr
, length
, PROT_READ
| PROT_WRITE
,
1843 if (area
!= vaddr
) {
1844 fprintf(stderr
, "Could not remap addr: "
1845 RAM_ADDR_FMT
"@" RAM_ADDR_FMT
"\n",
1849 memory_try_enable_merging(vaddr
, length
);
1850 qemu_ram_setup_dump(vaddr
, length
);
1855 #endif /* !_WIN32 */

int qemu_get_ram_fd(ram_addr_t addr)
{
    RAMBlock *block;
    int fd;

    rcu_read_lock();
    block = qemu_get_ram_block(addr);
    fd = block->fd;
    rcu_read_unlock();
    return fd;
}

void qemu_set_ram_fd(ram_addr_t addr, int fd)
{
    RAMBlock *block;

    rcu_read_lock();
    block = qemu_get_ram_block(addr);
    block->fd = fd;
    rcu_read_unlock();
}

void *qemu_get_ram_block_host_ptr(ram_addr_t addr)
{
    RAMBlock *block;
    void *ptr;

    rcu_read_lock();
    block = qemu_get_ram_block(addr);
    ptr = ramblock_ptr(block, 0);
    rcu_read_unlock();
    return ptr;
}

/* Return a host pointer to ram allocated with qemu_ram_alloc.
 * This should not be used for general purpose DMA.  Use address_space_map
 * or address_space_rw instead. For local memory (e.g. video ram) that the
 * device owns, use memory_region_get_ram_ptr.
 *
 * Called within RCU critical section.
 */
void *qemu_get_ram_ptr(RAMBlock *ram_block, ram_addr_t addr)
{
    RAMBlock *block = ram_block;

    if (block == NULL) {
        block = qemu_get_ram_block(addr);
    }

    if (xen_enabled() && block->host == NULL) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map until the end of the page.
         */
        if (block->offset == 0) {
            return xen_map_cache(addr, 0, 0);
        }

        block->host = xen_map_cache(block->offset, block->max_length, 1);
    }
    return ramblock_ptr(block, addr - block->offset);
}

/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
 * but takes a size argument.
 *
 * Called within RCU critical section.
 */
static void *qemu_ram_ptr_length(RAMBlock *ram_block, ram_addr_t addr,
                                 hwaddr *size)
{
    RAMBlock *block = ram_block;
    ram_addr_t offset_inside_block;

    if (*size == 0) {
        return NULL;
    }

    if (block == NULL) {
        block = qemu_get_ram_block(addr);
    }
    offset_inside_block = addr - block->offset;
    *size = MIN(*size, block->max_length - offset_inside_block);

    if (xen_enabled() && block->host == NULL) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map the requested area.
         */
        if (block->offset == 0) {
            return xen_map_cache(addr, *size, 1);
        }

        block->host = xen_map_cache(block->offset, block->max_length, 1);
    }

    return ramblock_ptr(block, offset_inside_block);
}

/*
 * Translates a host ptr back to a RAMBlock, a ram_addr and an offset
 * in that RAMBlock.
 *
 * ptr: Host pointer to look up
 * round_offset: If true round the result offset down to a page boundary
 * *ram_addr: set to result ram_addr
 * *offset: set to result offset within the RAMBlock
 *
 * Returns: RAMBlock (or NULL if not found)
 *
 * By the time this function returns, the returned pointer is not protected
 * by RCU anymore.  If the caller is not within an RCU critical section and
 * does not hold the iothread lock, it must have other means of protecting the
 * pointer, such as a reference to the region that includes the incoming
 * ram_addr.
 */
RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
                                   ram_addr_t *ram_addr,
                                   ram_addr_t *offset)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_enabled()) {
        rcu_read_lock();
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
        block = qemu_get_ram_block(*ram_addr);
        if (block) {
            *offset = (host - block->host);
        }
        rcu_read_unlock();
        return block;
    }

    rcu_read_lock();
    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && block->host && host - block->host < block->max_length) {
        goto found;
    }

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        /* This case happens when the block is not mapped. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->max_length) {
            goto found;
        }
    }

    rcu_read_unlock();
    return NULL;

found:
    *offset = (host - block->host);
    if (round_offset) {
        *offset &= TARGET_PAGE_MASK;
    }
    *ram_addr = block->offset + *offset;
    rcu_read_unlock();
    return block;
}

/*
 * Finds the named RAMBlock
 *
 * name: The name of RAMBlock to find
 *
 * Returns: RAMBlock (or NULL if not found)
 */
RAMBlock *qemu_ram_block_by_name(const char *name)
{
    RAMBlock *block;

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (!strcmp(name, block->idstr)) {
            return block;
        }
    }

    return NULL;
}

/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    ram_addr_t offset; /* Not used */

    block = qemu_ram_block_from_host(ptr, false, ram_addr, &offset);

    if (!block) {
        return NULL;
    }

    return block->mr;
}

/* Called within RCU critical section.  */
static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
                               uint64_t val, unsigned size)
{
    if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
        tb_invalidate_phys_page_fast(ram_addr, size);
    }
    switch (size) {
    case 1:
        stb_p(qemu_get_ram_ptr(NULL, ram_addr), val);
        break;
    case 2:
        stw_p(qemu_get_ram_ptr(NULL, ram_addr), val);
        break;
    case 4:
        stl_p(qemu_get_ram_ptr(NULL, ram_addr), val);
        break;
    default:
        abort();
    }
    /* Set both VGA and migration bits for simplicity and to remove
     * the notdirty callback faster.
     */
    cpu_physical_memory_set_dirty_range(ram_addr, size,
                                        DIRTY_CLIENTS_NOCODE);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (!cpu_physical_memory_is_clean(ram_addr)) {
        tlb_set_dirty(current_cpu, current_cpu->mem_io_vaddr);
    }
}

static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
                                 unsigned size, bool is_write)
{
    return is_write;
}

static const MemoryRegionOps notdirty_mem_ops = {
    .write = notdirty_mem_write,
    .valid.accepts = notdirty_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
{
    CPUState *cpu = current_cpu;
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUArchState *env = cpu->env_ptr;
    target_ulong pc, cs_base;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (cpu->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, len)
            && (wp->flags & flags)) {
            if (flags == BP_MEM_READ) {
                wp->flags |= BP_WATCHPOINT_HIT_READ;
            } else {
                wp->flags |= BP_WATCHPOINT_HIT_WRITE;
            }
            wp->hitaddr = vaddr;
            wp->hitattrs = attrs;
            if (!cpu->watchpoint_hit) {
                if (wp->flags & BP_CPU &&
                    !cc->debug_check_watchpoint(cpu, wp)) {
                    wp->flags &= ~BP_WATCHPOINT_HIT;
                    continue;
                }
                cpu->watchpoint_hit = wp;
                tb_check_watchpoint(cpu);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    cpu->exception_index = EXCP_DEBUG;
                    cpu_loop_exit(cpu);
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
                    cpu_resume_from_signal(cpu, NULL);
                }
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}

/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   load/store functions.  */
static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata,
                                  unsigned size, MemTxAttrs attrs)
{
    MemTxResult res;
    uint64_t data;
    int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
    AddressSpace *as = current_cpu->cpu_ases[asidx].as;

    check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ);
    switch (size) {
    case 1:
        data = address_space_ldub(as, addr, attrs, &res);
        break;
    case 2:
        data = address_space_lduw(as, addr, attrs, &res);
        break;
    case 4:
        data = address_space_ldl(as, addr, attrs, &res);
        break;
    default:
        abort();
    }
    *pdata = data;
    return res;
}

static MemTxResult watch_mem_write(void *opaque, hwaddr addr,
                                   uint64_t val, unsigned size,
                                   MemTxAttrs attrs)
{
    MemTxResult res;
    int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
    AddressSpace *as = current_cpu->cpu_ases[asidx].as;

    check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE);
    switch (size) {
    case 1:
        address_space_stb(as, addr, val, attrs, &res);
        break;
    case 2:
        address_space_stw(as, addr, val, attrs, &res);
        break;
    case 4:
        address_space_stl(as, addr, val, attrs, &res);
        break;
    default:
        abort();
    }
    return res;
}

static const MemoryRegionOps watch_mem_ops = {
    .read_with_attrs = watch_mem_read,
    .write_with_attrs = watch_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
                                unsigned len, MemTxAttrs attrs)
{
    subpage_t *subpage = opaque;
    uint8_t buf[8];
    MemTxResult res;

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
           subpage, len, addr);
#endif
    res = address_space_read(subpage->as, addr + subpage->base,
                             attrs, buf, len);
    if (res) {
        return res;
    }
    switch (len) {
    case 1:
        *data = ldub_p(buf);
        return MEMTX_OK;
    case 2:
        *data = lduw_p(buf);
        return MEMTX_OK;
    case 4:
        *data = ldl_p(buf);
        return MEMTX_OK;
    case 8:
        *data = ldq_p(buf);
        return MEMTX_OK;
    default:
        abort();
    }
}

static MemTxResult subpage_write(void *opaque, hwaddr addr,
                                 uint64_t value, unsigned len, MemTxAttrs attrs)
{
    subpage_t *subpage = opaque;
    uint8_t buf[8];

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx
           " value %"PRIx64"\n",
           __func__, subpage, len, addr, value);
#endif
    switch (len) {
    case 1:
        stb_p(buf, value);
        break;
    case 2:
        stw_p(buf, value);
        break;
    case 4:
        stl_p(buf, value);
        break;
    case 8:
        stq_p(buf, value);
        break;
    default:
        abort();
    }
    return address_space_write(subpage->as, addr + subpage->base,
                               attrs, buf, len);
}

static bool subpage_accepts(void *opaque, hwaddr addr,
                            unsigned len, bool is_write)
{
    subpage_t *subpage = opaque;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
           __func__, subpage, is_write ? 'w' : 'r', len, addr);
#endif

    return address_space_access_valid(subpage->as, addr + subpage->base,
                                      len, is_write);
}

static const MemoryRegionOps subpage_ops = {
    .read_with_attrs = subpage_read,
    .write_with_attrs = subpage_write,
    .impl.min_access_size = 1,
    .impl.max_access_size = 8,
    .valid.min_access_size = 1,
    .valid.max_access_size = 8,
    .valid.accepts = subpage_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE) {
        return -1;
    }
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
           __func__, mmio, start, end, idx, eidx, section);
#endif
    for (; idx <= eidx; idx++) {
        mmio->sub_section[idx] = section;
    }

    return 0;
}
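
/* Editor's sketch: registering bytes [0x100, 0x2ff] of a page routes
 * sub_section[0x100] through sub_section[0x2ff] to the given section, so
 * a later access at page offset 0x180 resolves through sub_section[0x180].
 */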

static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
{
    subpage_t *mmio;

    mmio = g_malloc0(sizeof(subpage_t));

    mmio->as = as;
    mmio->base = base;
    memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
                          NULL, TARGET_PAGE_SIZE);
    mmio->iomem.subpage = true;
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
           mmio, base, TARGET_PAGE_SIZE);
#endif
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);

    return mmio;
}

static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
                              MemoryRegion *mr)
{
    assert(as);
    MemoryRegionSection section = {
        .address_space = as,
        .mr = mr,
        .offset_within_address_space = 0,
        .offset_within_region = 0,
        .size = int128_2_64(),
    };

    return phys_section_add(map, &section);
}

MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index, MemTxAttrs attrs)
{
    int asidx = cpu_asidx_from_attrs(cpu, attrs);
    CPUAddressSpace *cpuas = &cpu->cpu_ases[asidx];
    AddressSpaceDispatch *d = atomic_rcu_read(&cpuas->memory_dispatch);
    MemoryRegionSection *sections = d->map.sections;

    return sections[index & ~TARGET_PAGE_MASK].mr;
}

static void io_mem_init(void)
{
    memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
    memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
                          NULL, UINT64_MAX);
    memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
                          NULL, UINT64_MAX);
    memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
                          NULL, UINT64_MAX);
}

static void mem_begin(MemoryListener *listener)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
    uint16_t n;

    n = dummy_section(&d->map, as, &io_mem_unassigned);
    assert(n == PHYS_SECTION_UNASSIGNED);
    n = dummy_section(&d->map, as, &io_mem_notdirty);
    assert(n == PHYS_SECTION_NOTDIRTY);
    n = dummy_section(&d->map, as, &io_mem_rom);
    assert(n == PHYS_SECTION_ROM);
    n = dummy_section(&d->map, as, &io_mem_watch);
    assert(n == PHYS_SECTION_WATCH);

    d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
    d->as = as;
    as->next_dispatch = d;
}

static void address_space_dispatch_free(AddressSpaceDispatch *d)
{
    phys_sections_free(&d->map);
    g_free(d->map.nodes);
    g_free(d);
}

static void mem_commit(MemoryListener *listener)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *cur = as->dispatch;
    AddressSpaceDispatch *next = as->next_dispatch;

    phys_page_compact_all(next, next->map.nodes_nb);

    atomic_rcu_set(&as->dispatch, next);
    if (cur) {
        call_rcu(cur, address_space_dispatch_free, rcu);
    }
}
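
/* Editor's note: readers walk as->dispatch under rcu_read_lock(), so
 * mem_commit() publishes the new table with atomic_rcu_set() first and
 * frees the old one only after all pre-existing readers have left their
 * critical sections.
 */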

static void tcg_commit(MemoryListener *listener)
{
    CPUAddressSpace *cpuas;
    AddressSpaceDispatch *d;

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener);
    cpu_reloading_memory_map();
    /* The CPU and TLB are protected by the iothread lock.
     * We reload the dispatch pointer now because cpu_reloading_memory_map()
     * may have split the RCU critical section.
     */
    d = atomic_rcu_read(&cpuas->as->dispatch);
    cpuas->memory_dispatch = d;
    tlb_flush(cpuas->cpu, 1);
}

void address_space_init_dispatch(AddressSpace *as)
{
    as->dispatch = NULL;
    as->dispatch_listener = (MemoryListener) {
        .begin = mem_begin,
        .commit = mem_commit,
        .region_add = mem_add,
        .region_nop = mem_add,
        .priority = 0,
    };
    memory_listener_register(&as->dispatch_listener, as);
}

void address_space_unregister(AddressSpace *as)
{
    memory_listener_unregister(&as->dispatch_listener);
}

void address_space_destroy_dispatch(AddressSpace *as)
{
    AddressSpaceDispatch *d = as->dispatch;

    atomic_rcu_set(&as->dispatch, NULL);
    if (d) {
        call_rcu(d, address_space_dispatch_free, rcu);
    }
}

static void memory_map_init(void)
{
    system_memory = g_malloc(sizeof(*system_memory));

    memory_region_init(system_memory, NULL, "system", UINT64_MAX);
    address_space_init(&address_space_memory, system_memory, "memory");

    system_io = g_malloc(sizeof(*system_io));
    memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
                          65536);
    address_space_init(&address_space_io, system_io, "I/O");
}

MemoryRegion *get_system_memory(void)
{
    return system_memory;
}

MemoryRegion *get_system_io(void)
{
    return system_io;
}

#endif /* !defined(CONFIG_USER_ONLY) */
2486 #if defined(CONFIG_USER_ONLY)
2487 int cpu_memory_rw_debug(CPUState
*cpu
, target_ulong addr
,
2488 uint8_t *buf
, int len
, int is_write
)
2495 page
= addr
& TARGET_PAGE_MASK
;
2496 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
2499 flags
= page_get_flags(page
);
2500 if (!(flags
& PAGE_VALID
))
2503 if (!(flags
& PAGE_WRITE
))
2505 /* XXX: this code should not depend on lock_user */
2506 if (!(p
= lock_user(VERIFY_WRITE
, addr
, l
, 0)))
2509 unlock_user(p
, addr
, l
);
2511 if (!(flags
& PAGE_READ
))
2513 /* XXX: this code should not depend on lock_user */
2514 if (!(p
= lock_user(VERIFY_READ
, addr
, l
, 1)))
2517 unlock_user(p
, addr
, 0);
static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
                                     hwaddr length)
{
    uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
    /* No early return if dirty_log_mask is or becomes 0, because
     * cpu_physical_memory_set_dirty_range will still call
     * xen_modified_memory.
     */
    if (dirty_log_mask) {
        dirty_log_mask =
            cpu_physical_memory_range_includes_clean(addr, length,
                                                     dirty_log_mask);
    }
    if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
        tb_invalidate_phys_range(addr, addr + length);
        dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
    }
    cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
}
static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
{
    unsigned access_size_max = mr->ops->valid.max_access_size;

    /* Regions are assumed to support 1-4 byte accesses unless
       otherwise specified.  */
    if (access_size_max == 0) {
        access_size_max = 4;
    }

    /* Bound the maximum access by the alignment of the address.  */
    if (!mr->ops->impl.unaligned) {
        unsigned align_size_max = addr & -addr;
        if (align_size_max != 0 && align_size_max < access_size_max) {
            access_size_max = align_size_max;
        }
    }

    /* Don't attempt accesses larger than the maximum.  */
    if (l > access_size_max) {
        l = access_size_max;
    }
    l = pow2floor(l);

    return l;
}
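/*
 * Worked example: for a region that leaves valid.max_access_size at 0 (so 4
 * is assumed) and does not allow unaligned accesses, an 8-byte request at
 * addr 0x1006 is first capped at 4 and then by the address alignment
 * (0x1006 & -0x1006 == 2), so the transfer proceeds 2 bytes at a time.
 */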
static bool prepare_mmio_access(MemoryRegion *mr)
{
    bool unlocked = !qemu_mutex_iothread_locked();
    bool release_lock = false;

    if (unlocked && mr->global_locking) {
        qemu_mutex_lock_iothread();
        unlocked = false;
        release_lock = true;
    }
    if (mr->flush_coalesced_mmio) {
        if (unlocked) {
            qemu_mutex_lock_iothread();
        }
        qemu_flush_coalesced_mmio_buffer();
        if (unlocked) {
            qemu_mutex_unlock_iothread();
        }
    }

    return release_lock;
}
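/*
 * Callers follow the pattern used throughout this file:
 *
 *     release_lock |= prepare_mmio_access(mr);
 *     ... memory_region_dispatch_read()/..._write() ...
 *     if (release_lock) {
 *         qemu_mutex_unlock_iothread();
 *     }
 */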
/* Called within RCU critical section.  */
static MemTxResult address_space_write_continue(AddressSpace *as, hwaddr addr,
                                                MemTxAttrs attrs,
                                                const uint8_t *buf,
                                                int len, hwaddr addr1,
                                                hwaddr l, MemoryRegion *mr)
{
    uint8_t *ptr;
    uint64_t val;
    MemTxResult result = MEMTX_OK;
    bool release_lock = false;

    for (;;) {
        if (!memory_access_is_direct(mr, true)) {
            release_lock |= prepare_mmio_access(mr);
            l = memory_access_size(mr, l, addr1);
            /* XXX: could force current_cpu to NULL to avoid
               potential bugs */
            switch (l) {
            case 8:
                /* 64 bit write access */
                val = ldq_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 8,
                                                       attrs);
                break;
            case 4:
                /* 32 bit write access */
                val = ldl_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 4,
                                                       attrs);
                break;
            case 2:
                /* 16 bit write access */
                val = lduw_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 2,
                                                       attrs);
                break;
            case 1:
                /* 8 bit write access */
                val = ldub_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 1,
                                                       attrs);
                break;
            default:
                abort();
            }
        } else {
            addr1 += memory_region_get_ram_addr(mr);
            /* RAM case */
            ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
            memcpy(ptr, buf, l);
            invalidate_and_set_dirty(mr, addr1, l);
        }

        if (release_lock) {
            qemu_mutex_unlock_iothread();
            release_lock = false;
        }

        len -= l;
        buf += l;
        addr += l;

        if (!len) {
            break;
        }

        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);
    }

    return result;
}
MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
                                const uint8_t *buf, int len)
{
    hwaddr l;
    hwaddr addr1;
    MemoryRegion *mr;
    MemTxResult result = MEMTX_OK;

    if (len > 0) {
        rcu_read_lock();
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);
        result = address_space_write_continue(as, addr, attrs, buf, len,
                                              addr1, l, mr);
        rcu_read_unlock();
    }

    return result;
}
/* Called within RCU critical section.  */
MemTxResult address_space_read_continue(AddressSpace *as, hwaddr addr,
                                        MemTxAttrs attrs, uint8_t *buf,
                                        int len, hwaddr addr1, hwaddr l,
                                        MemoryRegion *mr)
{
    uint8_t *ptr;
    uint64_t val;
    MemTxResult result = MEMTX_OK;
    bool release_lock = false;

    for (;;) {
        if (!memory_access_is_direct(mr, false)) {
            /* I/O case */
            release_lock |= prepare_mmio_access(mr);
            l = memory_access_size(mr, l, addr1);
            switch (l) {
            case 8:
                /* 64 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 8,
                                                      attrs);
                stq_p(buf, val);
                break;
            case 4:
                /* 32 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 4,
                                                      attrs);
                stl_p(buf, val);
                break;
            case 2:
                /* 16 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 2,
                                                      attrs);
                stw_p(buf, val);
                break;
            case 1:
                /* 8 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 1,
                                                      attrs);
                stb_p(buf, val);
                break;
            default:
                abort();
            }
        } else {
            /* RAM case */
            ptr = qemu_get_ram_ptr(mr->ram_block,
                                   memory_region_get_ram_addr(mr) + addr1);
            memcpy(buf, ptr, l);
        }

        if (release_lock) {
            qemu_mutex_unlock_iothread();
            release_lock = false;
        }

        len -= l;
        buf += l;
        addr += l;

        if (!len) {
            break;
        }

        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, false);
    }

    return result;
}
MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
                                    MemTxAttrs attrs, uint8_t *buf, int len)
{
    hwaddr l;
    hwaddr addr1;
    MemoryRegion *mr;
    MemTxResult result = MEMTX_OK;

    if (len > 0) {
        rcu_read_lock();
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, false);
        result = address_space_read_continue(as, addr, attrs, buf, len,
                                             addr1, l, mr);
        rcu_read_unlock();
    }

    return result;
}
MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
                             uint8_t *buf, int len, bool is_write)
{
    if (is_write) {
        return address_space_write(as, addr, attrs, (uint8_t *)buf, len);
    } else {
        return address_space_read(as, addr, attrs, (uint8_t *)buf, len);
    }
}
void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
                            int len, int is_write)
{
    address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
                     buf, len, is_write);
}
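/*
 * Usage sketch: writing a small buffer into guest-physical memory. The
 * address and payload are hypothetical.
 */
#if 0
static void demo_poke_guest(void)
{
    uint8_t payload[4] = { 0xde, 0xad, 0xbe, 0xef };

    cpu_physical_memory_rw(0x1000, payload, sizeof(payload), 1);
}
#endif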
enum write_rom_type {
    WRITE_DATA,
    FLUSH_CACHE,
};

static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
    hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
{
    hwaddr l;
    uint8_t *ptr;
    hwaddr addr1;
    MemoryRegion *mr;

    rcu_read_lock();
    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);

        if (!(memory_region_is_ram(mr) ||
              memory_region_is_romd(mr))) {
            l = memory_access_size(mr, l, addr1);
        } else {
            addr1 += memory_region_get_ram_addr(mr);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
            switch (type) {
            case WRITE_DATA:
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(mr, addr1, l);
                break;
            case FLUSH_CACHE:
                flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
                break;
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
    rcu_read_unlock();
}
/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
                                   const uint8_t *buf, int len)
{
    cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
}
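/*
 * Usage sketch: a firmware loader would use cpu_physical_memory_write_rom()
 * so the copy also lands in regions that are read-only to the guest. The
 * address is hypothetical.
 */
#if 0
static void demo_load_firmware(const uint8_t *blob, int blob_len)
{
    cpu_physical_memory_write_rom(&address_space_memory, 0xfffc0000,
                                  blob, blob_len);
}
#endif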
void cpu_flush_icache_range(hwaddr start, int len)
{
    /*
     * This function should do the same thing as an icache flush that was
     * triggered from within the guest. For TCG we are always cache coherent,
     * so there is no need to flush anything. For KVM / Xen we need to flush
     * the host's instruction cache at least.
     */
    if (tcg_enabled()) {
        return;
    }

    cpu_physical_memory_write_rom_internal(&address_space_memory,
                                           start, NULL, len, FLUSH_CACHE);
}
typedef struct {
    MemoryRegion *mr;
    void *buffer;
    hwaddr addr;
    hwaddr len;
    bool in_use;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    QEMUBH *bh;
    QLIST_ENTRY(MapClient) link;
} MapClient;

QemuMutex map_client_list_lock;
static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);
static void cpu_unregister_map_client_do(MapClient *client)
{
    QLIST_REMOVE(client, link);
    g_free(client);
}
static void cpu_notify_map_clients_locked(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        qemu_bh_schedule(client->bh);
        cpu_unregister_map_client_do(client);
    }
}
void cpu_register_map_client(QEMUBH *bh)
{
    MapClient *client = g_malloc(sizeof(*client));

    qemu_mutex_lock(&map_client_list_lock);
    client->bh = bh;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    if (!atomic_read(&bounce.in_use)) {
        cpu_notify_map_clients_locked();
    }
    qemu_mutex_unlock(&map_client_list_lock);
}
void cpu_exec_init_all(void)
{
    qemu_mutex_init(&ram_list.mutex);
    io_mem_init();
    memory_map_init();
    qemu_mutex_init(&map_client_list_lock);
}
void cpu_unregister_map_client(QEMUBH *bh)
{
    MapClient *client;

    qemu_mutex_lock(&map_client_list_lock);
    QLIST_FOREACH(client, &map_client_list, link) {
        if (client->bh == bh) {
            cpu_unregister_map_client_do(client);
            break;
        }
    }
    qemu_mutex_unlock(&map_client_list_lock);
}
static void cpu_notify_map_clients(void)
{
    qemu_mutex_lock(&map_client_list_lock);
    cpu_notify_map_clients_locked();
    qemu_mutex_unlock(&map_client_list_lock);
}
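/*
 * Usage sketch: a caller whose address_space_map() attempt failed because
 * the bounce buffer was busy can register a bottom half that fires once the
 * buffer is released. The callback is hypothetical.
 */
#if 0
static void demo_retry_map(void *opaque)
{
    /* retry the previously failed address_space_map() here */
}

static void demo_wait_for_bounce(void *opaque)
{
    cpu_register_map_client(qemu_bh_new(demo_retry_map, opaque));
}
#endif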
bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
{
    MemoryRegion *mr;
    hwaddr l, xlat;

    rcu_read_lock();
    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (!memory_access_is_direct(mr, is_write)) {
            l = memory_access_size(mr, l, addr);
            if (!memory_region_access_valid(mr, xlat, l, is_write)) {
                rcu_read_unlock();
                return false;
            }
        }

        len -= l;
        addr += l;
    }
    rcu_read_unlock();
    return true;
}
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *address_space_map(AddressSpace *as,
                        hwaddr addr,
                        hwaddr *plen,
                        bool is_write)
{
    hwaddr len = *plen;
    hwaddr done = 0;
    hwaddr l, xlat, base;
    MemoryRegion *mr, *this_mr;
    ram_addr_t raddr;
    void *ptr;

    if (len == 0) {
        return NULL;
    }

    l = len;
    rcu_read_lock();
    mr = address_space_translate(as, addr, &xlat, &l, is_write);

    if (!memory_access_is_direct(mr, is_write)) {
        if (atomic_xchg(&bounce.in_use, true)) {
            rcu_read_unlock();
            return NULL;
        }
        /* Avoid unbounded allocations */
        l = MIN(l, TARGET_PAGE_SIZE);
        bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
        bounce.addr = addr;
        bounce.len = l;

        memory_region_ref(mr);
        bounce.mr = mr;
        if (!is_write) {
            address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
                               bounce.buffer, l);
        }

        rcu_read_unlock();
        *plen = l;
        return bounce.buffer;
    }

    base = xlat;
    raddr = memory_region_get_ram_addr(mr);

    for (;;) {
        len -= l;
        addr += l;
        done += l;
        if (len == 0) {
            break;
        }

        l = len;
        this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (this_mr != mr || xlat != base + done) {
            break;
        }
    }

    memory_region_ref(mr);
    rcu_read_unlock();
    *plen = done;
    ptr = qemu_ram_ptr_length(mr->ram_block, raddr + base, plen);

    return ptr;
}
/* Unmaps a memory region previously mapped by address_space_map().
 * Will also mark the memory as dirty if is_write == 1. access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         int is_write, hwaddr access_len)
{
    if (buffer != bounce.buffer) {
        MemoryRegion *mr;
        ram_addr_t addr1;

        mr = qemu_ram_addr_from_host(buffer, &addr1);
        assert(mr != NULL);
        if (is_write) {
            invalidate_and_set_dirty(mr, addr1, access_len);
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        memory_region_unref(mr);
        return;
    }
    if (is_write) {
        address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
                            bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    memory_region_unref(bounce.mr);
    atomic_mb_set(&bounce.in_use, false);
    cpu_notify_map_clients();
}
void *cpu_physical_memory_map(hwaddr addr,
                              hwaddr *plen,
                              int is_write)
{
    return address_space_map(&address_space_memory, addr, plen, is_write);
}

void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                               int is_write, hwaddr access_len)
{
    return address_space_unmap(&address_space_memory, buffer, len, is_write,
                               access_len);
}
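/*
 * Usage sketch: the usual map/modify/unmap cycle for zero-copy access to
 * guest memory. Note that the mapped length can come back smaller than
 * requested, so always use the returned *plen.
 */
#if 0
static void demo_clear_guest_page(hwaddr gpa)
{
    hwaddr plen = TARGET_PAGE_SIZE;
    void *host = cpu_physical_memory_map(gpa, &plen, 1);

    if (host) {
        memset(host, 0, plen);
        cpu_physical_memory_unmap(host, plen, 1, plen);
    }
}
#endif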
/* warning: addr must be aligned */
static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
                                                  MemTxAttrs attrs,
                                                  MemTxResult *result,
                                                  enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, false);
    if (l < 4 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(mr->ram_block,
                               (memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}
uint32_t address_space_ldl(AddressSpace *as, hwaddr addr,
                           MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_NATIVE_ENDIAN);
}

uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_LITTLE_ENDIAN);
}

uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_BIG_ENDIAN);
}

uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}
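/*
 * Usage sketch: reading a little-endian 32-bit word from a hypothetical
 * guest-physical address.
 */
#if 0
static uint32_t demo_read_desc_flags(void)
{
    return ldl_le_phys(&address_space_memory, 0x1000);
}
#endif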
/* warning: addr must be aligned */
static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
                                                  MemTxAttrs attrs,
                                                  MemTxResult *result,
                                                  enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 8;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 false);
    if (l < 8 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap64(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap64(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(mr->ram_block,
                               (memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}
uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
                           MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_NATIVE_ENDIAN);
}

uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_LITTLE_ENDIAN);
}

uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_BIG_ENDIAN);
}

uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}
uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
                            MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t val;
    MemTxResult r;

    r = address_space_rw(as, addr, attrs, &val, 1, 0);
    if (result) {
        *result = r;
    }
    return val;
}

uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}
/* warning: addr must be aligned */
static inline uint32_t address_space_lduw_internal(AddressSpace *as,
                                                   hwaddr addr,
                                                   MemTxAttrs attrs,
                                                   MemTxResult *result,
                                                   enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 false);
    if (l < 2 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(mr->ram_block,
                               (memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}
uint32_t address_space_lduw(AddressSpace *as, hwaddr addr,
                            MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_NATIVE_ENDIAN);
}

uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
                               MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_LITTLE_ENDIAN);
}

uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
                               MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_BIG_ENDIAN);
}

uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}
/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
                                MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    uint8_t dirty_log_mask;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

        r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
    } else {
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
        stl_p(ptr, val);

        dirty_log_mask = memory_region_get_dirty_log_mask(mr);
        dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
        cpu_physical_memory_set_dirty_range(addr1, 4, dirty_log_mask);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}

void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_notdirty(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
/* warning: addr must be aligned */
static inline void address_space_stl_internal(AddressSpace *as,
                                              hwaddr addr, uint32_t val,
                                              MemTxAttrs attrs,
                                              MemTxResult *result,
                                              enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(mr, addr1, 4);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}
void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stl_internal(as, addr, val, attrs, result,
                               DEVICE_NATIVE_ENDIAN);
}

void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stl_internal(as, addr, val, attrs, result,
                               DEVICE_LITTLE_ENDIAN);
}

void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stl_internal(as, addr, val, attrs, result,
                               DEVICE_BIG_ENDIAN);
}

void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t v = val;
    MemTxResult r;

    r = address_space_rw(as, addr, attrs, &v, 1, 1);
    if (result) {
        *result = r;
    }
}

void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
/* warning: addr must be aligned */
static inline void address_space_stw_internal(AddressSpace *as,
                                              hwaddr addr, uint32_t val,
                                              MemTxAttrs attrs,
                                              MemTxResult *result,
                                              enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, true);
    if (l < 2 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(mr, addr1, 2);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}
void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_NATIVE_ENDIAN);
}

void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_LITTLE_ENDIAN);
}

void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_BIG_ENDIAN);
}

void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;

    val = tswap64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;

    val = cpu_to_le64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;

    val = cpu_to_be64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
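/*
 * Usage sketch: storing a 64-bit big-endian value at a hypothetical
 * guest-physical address.
 */
#if 0
static void demo_write_counter(uint64_t count)
{
    stq_be_phys(&address_space_memory, 0x2000, count);
}
#endif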
/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;
    MemTxAttrs attrs;
    int asidx;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_attrs_debug(cpu, page, &attrs);
        asidx = cpu_asidx_from_attrs(cpu, attrs);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write) {
            cpu_physical_memory_write_rom(cpu->cpu_ases[asidx].as,
                                          phys_addr, buf, l);
        } else {
            address_space_rw(cpu->cpu_ases[asidx].as, phys_addr,
                             MEMTXATTRS_UNSPECIFIED,
                             buf, l, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
/*
 * Allows code that needs to deal with migration bitmaps etc to still be built
 * target independent.
 */
size_t qemu_target_page_bits(void)
{
    return TARGET_PAGE_BITS;
}
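/*
 * Usage sketch: target-independent code can derive the target page size from
 * the bit count.
 */
#if 0
static size_t demo_target_page_size(void)
{
    return (size_t)1 << qemu_target_page_bits();
}
#endif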
/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool target_words_bigendian(void);
bool target_words_bigendian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}
#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegion *mr;
    hwaddr l = 1;
    bool res;

    rcu_read_lock();
    mr = address_space_translate(&address_space_memory,
                                 phys_addr, &phys_addr, &l, false);

    res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
    rcu_read_unlock();
    return res;
}
int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
{
    RAMBlock *block;
    int ret = 0;

    rcu_read_lock();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        ret = func(block->idstr, block->host, block->offset,
                   block->used_length, opaque);
        if (ret) {
            break;
        }
    }
    rcu_read_unlock();
    return ret;
}