/*
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <sys/types.h>
#include "qemu-common.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/boards.h"
#endif
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#endif
#include "exec/cpu-all.h"
#include "qemu/rcu_queue.h"
#include "exec/cputlb.h"
#include "translate-all.h"
#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "qemu/range.h"

//#define DEBUG_SUBPAGE
#if !defined(CONFIG_USER_ONLY)
static bool in_migration;

/* ram_list is read under rcu_read_lock()/rcu_read_unlock().  Writes
 * are protected by the ramlist lock.
 */
RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC   (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED     (1 << 1)

/* Only a portion of RAM (used_length) is actually used, and migrated.
 * This used_length size can change across reboots.
 */
#define RAM_RESIZEABLE (1 << 2)

#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside cpu_exec() */
DEFINE_TLS(CPUState *, current_cpu);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */

#if !defined(CONFIG_USER_ONLY)
typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables.  */
#define ADDR_SPACE_BITS 64

#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
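/* Worked example (assuming the usual P_L2_BITS of 9 and a 4 KiB target page,
 * i.e. TARGET_PAGE_BITS of 12): (64 - 12 - 1) / 9 + 1 = 6 levels, each level
 * resolving another P_L2_BITS-sized slice of the physical address above the
 * page offset.
 */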
typedef PhysPageEntry Node[P_L2_SIZE];

typedef struct PhysPageMap {
    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    struct rcu_head rcu;

    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3
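/* These four section indices are fixed by construction: mem_begin() below
 * registers dummy sections in exactly this order and asserts that the
 * returned indices match, so iotlb values can refer to them directly.
 */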
static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;

#if !defined(CONFIG_USER_ONLY)
static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
    }
}
static uint32_t phys_map_node_alloc(PhysPageMap *map)
{
    unsigned i;
    uint32_t ret;

    ret = map->nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);
    for (i = 0; i < P_L2_SIZE; ++i) {
        map->nodes[ret][i].skip = 1;
        map->nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}
static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map);
        p = map->nodes[lp->ptr];
        for (i = 0; i < P_L2_SIZE; i++) {
            p[i].ptr = PHYS_SECTION_UNASSIGNED;
        p = map->nodes[lp->ptr];
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}
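/* The reserve/set split above is deliberate: phys_map_node_reserve() grows
 * map->nodes once, up front (3 nodes per level is an overestimate), so the
 * recursive phys_page_set_level() walk can take nodes from the pool without
 * having to grow the array mid-walk.
 */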
/* Compact a non leaf page entry. Simply detect that the entry has a single child,
 * and update our entry so we can skip it and go directly to the destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
    unsigned valid_ptr = P_L2_SIZE;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {

    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
        phys_page_compact(&p[i], nodes, compacted);

    /* We can only compress if there's only one child. */
    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule in the future.
         */
    lp->skip += p[valid_ptr].skip;
static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    DECLARE_BITMAP(compacted, nodes_nb);

    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes, compacted);
    }
}
static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
    hwaddr index = addr >> TARGET_PAGE_BITS;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];

    if (sections[lp.ptr].size.hi ||
        range_covers_byte(sections[lp.ptr].offset_within_address_space,
                          sections[lp.ptr].size.lo, addr)) {
        return &sections[lp.ptr];
    return &sections[PHYS_SECTION_UNASSIGNED];
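/* The walk above starts at the root entry and consumes lp.skip levels per
 * step: a compacted entry jumps several radix levels at once, so the index is
 * shifted by (i * P_L2_BITS) for the level actually reached.  A NIL pointer
 * anywhere on the path means the page was never mapped and the shared
 * "unassigned" section is returned.
 */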
bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}
/* Called from RCU critical section */
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
    MemoryRegionSection *section;

    section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
/* Called from RCU critical section */
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
    MemoryRegionSection *section;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    diff = int128_sub(section->mr->size, int128_make64(addr));
    *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
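/* On return *xlat is the offset of addr inside the target MemoryRegion and
 * *plen has been clamped so the access cannot run past the end of that
 * region; callers may clamp it further (e.g. for IOMMU or Xen mappings).
 */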
static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
    if (memory_region_is_ram(mr)) {
        return !(is_write && mr->readonly);
    if (memory_region_is_romd(mr)) {
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
    MemoryRegionSection *section;

        AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
        section = address_space_translate_internal(d, addr, &addr, plen, true);

        if (!mr->iommu_ops) {

        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        len = MIN(len, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;

        as = iotlb.target_as;

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        len = MIN(page, len);
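/* In the full function the IOMMU handling above runs in a loop: each hop
 * rewrites addr into the downstream address space (iotlb.target_as) and
 * shrinks len to the mask-aligned window that the IOMMU entry covers, until
 * a region without iommu_ops is reached.  Under Xen, direct RAM accesses are
 * additionally clamped to the current page, presumably because the Xen
 * mapcache works in page-sized chunks.
 */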
/* Called from RCU critical section */
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen)
    MemoryRegionSection *section;
    section = address_space_translate_internal(cpu->memory_dispatch,
                                               addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);

void cpu_exec_init_all(void)
#if !defined(CONFIG_USER_ONLY)
    qemu_mutex_init(&ram_list.mutex);
#if !defined(CONFIG_USER_ONLY)
static int cpu_common_post_load(void *opaque, int version_id)
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
static int cpu_common_pre_load(void *opaque)
    CPUState *cpu = opaque;

    cpu->exception_index = -1;

static bool cpu_common_exception_index_needed(void *opaque)
    CPUState *cpu = opaque;

    return tcg_enabled() && cpu->exception_index != -1;

static const VMStateDescription vmstate_cpu_common_exception_index = {
    .name = "cpu_common/exception_index",
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(exception_index, CPUState),
        VMSTATE_END_OF_LIST()

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .minimum_version_id = 1,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    .subsections = (VMStateSubsection[]) {
        .vmsd = &vmstate_cpu_common_exception_index,
        .needed = cpu_common_exception_index_needed,
CPUState *qemu_get_cpu(int index)
        if (cpu->cpu_index == index) {
#if !defined(CONFIG_USER_ONLY)
void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as)
    /* We only support one address space per cpu at the moment.  */
    assert(cpu->as == as);

    if (cpu->tcg_as_listener) {
        memory_listener_unregister(cpu->tcg_as_listener);
        cpu->tcg_as_listener = g_new0(MemoryListener, 1);
    cpu->tcg_as_listener->commit = tcg_commit;
    memory_listener_register(cpu->tcg_as_listener, as);
void cpu_exec_init(CPUArchState *env)
    CPUState *cpu = ENV_GET_CPU(env);
    CPUClass *cc = CPU_GET_CLASS(cpu);

#if defined(CONFIG_USER_ONLY)
    CPU_FOREACH(some_cpu) {
    cpu->cpu_index = cpu_index;
    QTAILQ_INIT(&cpu->breakpoints);
    QTAILQ_INIT(&cpu->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->as = &address_space_memory;
    cpu->thread_id = qemu_get_thread_id();
    cpu_reload_memory_map(cpu);
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
    assert(cc->vmsd == NULL);
    assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
    tb_invalidate_phys_page_range(pc, pc + 1, 0);

#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
    hwaddr phys = cpu_get_phys_page_debug(cpu, pc);
    tb_invalidate_phys_addr(cpu->as,
                            phys | (pc & ~TARGET_PAGE_MASK));
#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)

int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,

void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
    /* forbid ranges which are empty or run off the end of the address space */
    if (len == 0 || (addr + len - 1) < addr) {
        error_report("tried to set invalid watchpoint at %"
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);

    wp = g_malloc(sizeof(*wp));

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);

    tlb_flush_page(cpu, addr);
/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (addr == wp->vaddr && len == wp->len
            && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
    QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);

    tlb_flush_page(cpu, watchpoint->vaddr);

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
        }
    }
}
/* Return true if this watchpoint address matches the specified
 * access (ie the address range covered by the watchpoint overlaps
 * partially or completely with the address range covered by the
 * access).
 */
static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
                                                  vaddr addr, vaddr len)
    /* We know the lengths are non-zero, but a little caution is
     * required to avoid errors in the case where the range ends
     * exactly at the top of the address space and so addr + len
     * wraps round to zero.
     */
    vaddr wpend = wp->vaddr + wp->len - 1;
    vaddr addrend = addr + len - 1;

    return !(addr > wpend || wp->vaddr > addrend);
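/* Example: a 1-byte watchpoint at the very top of a 64-bit address space
 * (vaddr 0xffffffffffffffff, len 1) would make wp->vaddr + wp->len wrap to
 * zero; comparing the inclusive end points wpend/addrend instead keeps the
 * overlap test correct in that corner case.
 */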
/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
    bp = g_malloc(sizeof(*bp));

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);

    breakpoint_invalidate(cpu, pc);

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
    QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);

    breakpoint_invalidate(cpu, breakpoint->pc);

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
{
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
        }
    }
}
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        kvm_update_guest_debug(cpu, 0);
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            CPUArchState *env = cpu->env_ptr;
void cpu_abort(CPUState *cpu, const char *fmt, ...)
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
#if defined(CONFIG_USER_ONLY)
    struct sigaction act;
    sigfillset(&act.sa_mask);
    act.sa_handler = SIG_DFL;
    sigaction(SIGABRT, &act, NULL);
#if !defined(CONFIG_USER_ONLY)
/* Called from RCU critical section */
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && addr - block->offset < block->max_length) {
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->max_length) {
    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);

    /* It is safe to write mru_block outside the iothread lock. This
     *     xxx removed from list
     *     call_rcu(reclaim_ramblock, xxx);
     *
     * atomic_rcu_set is not needed here.  The block was already published
     * when it was placed into the list.  Here we're just making an extra
     * copy of the pointer.
     */
    ram_list.mru_block = block;
static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
    cpu_tlb_reset_dirty_all(start1, length);
/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t length,
                                     unsigned client)
    cpu_physical_memory_clear_dirty_range_type(start, length, client);

    tlb_reset_dirty_range_all(start, length);

static void cpu_physical_memory_set_dirty_tracking(bool enable)
{
    in_migration = enable;
}
/* Called from RCU critical section */
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
    if (memory_region_is_ram(section->mr)) {
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
            iotlb |= PHYS_SECTION_ROM;
        iotlb = section - section->address_space->dispatch->map.sections;
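/* Two encodings meet here: for RAM-backed sections the iotlb value is the
 * page-aligned ram_addr_t ORed with one of the small fixed section indices
 * (NOTDIRTY or ROM), while for MMIO it is simply the index of the section in
 * the dispatch map.  phys_section_add() asserts that sections_nb stays below
 * TARGET_PAGE_SIZE precisely so the two uses cannot collide.
 */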
    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;

#endif /* defined(CONFIG_USER_ONLY) */
#if !defined(CONFIG_USER_ONLY)

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
    qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this.  Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
{
    phys_mem_alloc = alloc;
}
static uint16_t phys_section_add(PhysPageMap *map,
                                 MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(map->sections_nb < TARGET_PAGE_SIZE);

    if (map->sections_nb == map->sections_nb_alloc) {
        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
        map->sections = g_renew(MemoryRegionSection, map->sections,
                                map->sections_nb_alloc);
    }
    map->sections[map->sections_nb] = *section;
    memory_region_ref(section->mr);
    return map->sections_nb++;
}
static void phys_section_destroy(MemoryRegion *mr)
    memory_region_unref(mr);

        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        object_unref(OBJECT(&subpage->iomem));

static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->sections);
}
static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
                                                   d->map.nodes, d->map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.address_space = d->as;
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&d->map, &subsection));
        subpage = container_of(existing->mr, subpage_t, iomem);
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end,
                     phys_section_add(&d->map, section));
static void register_multipage(AddressSpaceDispatch *d,
                               MemoryRegionSection *section)
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(&d->map, section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,
                                                    TARGET_PAGE_BITS));

    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = as->next_dispatch;
    MemoryRegionSection now = *section, remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
                        - now.offset_within_address_space;
        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(d, &now);
        now.size = int128_zero();
    while (int128_ne(remain.size, now.size)) {
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);
        if (int128_lt(remain.size, page_size)) {
            register_subpage(d, &now);
        } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
            now.size = page_size;
            register_subpage(d, &now);
            now.size = int128_and(now.size, int128_neg(page_size));
            register_multipage(d, &now);
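/* The listener callback above carves each incoming section into at most
 * three pieces: an unaligned head and tail that go through register_subpage()
 * (several sub-page sections can then share one physical page), and a
 * page-aligned middle that register_multipage() maps in one go.
 */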
void qemu_flush_coalesced_mmio_buffer(void)
        kvm_flush_coalesced_mmio_buffer();

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}
#include <sys/vfs.h>

#define HUGETLBFS_MAGIC       0x958458f6

static long gethugepagesize(const char *path, Error **errp)
    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

        error_setg_errno(errp, errno, "failed to get page size of file %s",
                         path);

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
static void *file_ram_alloc(RAMBlock *block,
    char *sanitized_name;
    Error *local_err = NULL;

    hpagesize = gethugepagesize(path, &local_err);
        error_propagate(errp, local_err);
    block->mr->align = hpagesize;

    if (memory < hpagesize) {
        error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
                   "or larger than huge page size 0x%" PRIx64,

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
                   "host lacks kvm mmu notifiers, -mem-path unsupported");

    /* Make name safe to use with mkstemp by replacing '/' with '_'. */
    sanitized_name = g_strdup(memory_region_name(block->mr));
    for (c = sanitized_name; *c != '\0'; c++) {

    filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
    g_free(sanitized_name);

    fd = mkstemp(filename);
        error_setg_errno(errp, errno,
                         "unable to create backing store for hugepages");

    memory = (memory + hpagesize - 1) & ~(hpagesize - 1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     */
    if (ftruncate(fd, memory)) {
        perror("ftruncate");

    area = mmap(0, memory, PROT_READ | PROT_WRITE,
                (block->flags & RAM_SHARED ? MAP_SHARED : MAP_PRIVATE),
    if (area == MAP_FAILED) {
        error_setg_errno(errp, errno,
                         "unable to map backing store for hugepages");

        os_mem_prealloc(fd, area, memory);

    error_report("%s", error_get_pretty(*errp));
/* Called with the ramlist lock held.  */
static ram_addr_t find_ram_offset(ram_addr_t size)
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    assert(size != 0); /* it would hand out same offset multiple times */

    if (QLIST_EMPTY_RCU(&ram_list.blocks)) {

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->max_length;

        QLIST_FOREACH_RCU(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);

        if (next - end >= size && next - end < mingap) {
            mingap = next - end;

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
ram_addr_t last_ram_offset(void)
    ram_addr_t last = 0;

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        last = MAX(last, block->offset + block->max_length);
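/* find_ram_offset() above is a best-fit allocator over ram_addr_t space: for
 * every block it computes "next", the lowest start offset of any block at or
 * beyond that block's end, treats [end, next) as a candidate hole, and keeps
 * the smallest hole that still fits the requested size.
 */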
static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
    /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
    if (!machine_dump_guest_core(current_machine)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
            perror("qemu_madvise");
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                    "but dump_guest_core=off specified\n");

/* Called within an RCU critical section, or while the ramlist lock
 * is held.
 */
static RAMBlock *find_ram_block(ram_addr_t addr)
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
/* Called with iothread lock held.  */
void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
    RAMBlock *new_block, *block;

    new_block = find_ram_block(addr);
    assert(!new_block->idstr[0]);

        char *id = qdev_get_dev_path(dev);
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",

/* Called with iothread lock held.  */
void qemu_ram_unset_idstr(ram_addr_t addr)
    /* FIXME: arch_init.c assumes that this is not called throughout
     * migration.  Ignore the problem since hot-unplug during migration
     * does not work anyway.
     */
    block = find_ram_block(addr);
        memset(block->idstr, 0, sizeof(block->idstr));
static int memory_try_enable_merging(void *addr, size_t len)
    if (!machine_mem_merge(current_machine)) {
        /* disabled by the user */

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
/* Only legal before guest might have detected the memory size: e.g. on
 * incoming migration, or right after reset.
 *
 * As memory core doesn't know how is memory accessed, it is up to
 * resize callback to update device state and/or add assertions to detect
 * misuse, if necessary.
 */
int qemu_ram_resize(ram_addr_t base, ram_addr_t newsize, Error **errp)
    RAMBlock *block = find_ram_block(base);

    newsize = TARGET_PAGE_ALIGN(newsize);

    if (block->used_length == newsize) {

    if (!(block->flags & RAM_RESIZEABLE)) {
        error_setg_errno(errp, EINVAL,
                         "Length mismatch: %s: 0x" RAM_ADDR_FMT
                         " in != 0x" RAM_ADDR_FMT, block->idstr,
                         newsize, block->used_length);

    if (block->max_length < newsize) {
        error_setg_errno(errp, EINVAL,
                         "Length too large: %s: 0x" RAM_ADDR_FMT
                         " > 0x" RAM_ADDR_FMT, block->idstr,
                         newsize, block->max_length);

    cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
    block->used_length = newsize;
    cpu_physical_memory_set_dirty_range(block->offset, block->used_length);
    memory_region_set_size(block->mr, newsize);
    if (block->resized) {
        block->resized(block->idstr, newsize, block->host);
static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp)
    RAMBlock *last_block = NULL;
    ram_addr_t old_ram_size, new_ram_size;

    old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;

    qemu_mutex_lock_ramlist();
    new_block->offset = find_ram_offset(new_block->max_length);

    if (!new_block->host) {
        if (xen_enabled()) {
            xen_ram_alloc(new_block->offset, new_block->max_length,
            new_block->host = phys_mem_alloc(new_block->max_length,
                                             &new_block->mr->align);
            if (!new_block->host) {
                error_setg_errno(errp, errno,
                                 "cannot set up guest memory '%s'",
                                 memory_region_name(new_block->mr));
                qemu_mutex_unlock_ramlist();
            memory_try_enable_merging(new_block->host, new_block->max_length);

    /* Keep the list sorted from biggest to smallest block.  Unlike QTAILQ,
     * QLIST (which has an RCU-friendly variant) does not have insertion at
     * tail, so save the last element in last_block.
     */
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (block->max_length < new_block->max_length) {
        QLIST_INSERT_BEFORE_RCU(block, new_block, next);
    } else if (last_block) {
        QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
    } else { /* list is empty */
        QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
    ram_list.mru_block = NULL;

    /* Write list before version */
    qemu_mutex_unlock_ramlist();

    new_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;

    if (new_ram_size > old_ram_size) {
        /* ram_list.dirty_memory[] is protected by the iothread lock.  */
        for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
            ram_list.dirty_memory[i] =
                bitmap_zero_extend(ram_list.dirty_memory[i],
                                   old_ram_size, new_ram_size);
    cpu_physical_memory_set_dirty_range(new_block->offset,
                                        new_block->used_length);

    if (new_block->host) {
        qemu_ram_setup_dump(new_block->host, new_block->max_length);
        qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
        qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
        if (kvm_enabled()) {
            kvm_setup_guest_memory(new_block->host, new_block->max_length);

    return new_block->offset;
ram_addr_t qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
                                    bool share, const char *mem_path,
    RAMBlock *new_block;
    Error *local_err = NULL;

    if (xen_enabled()) {
        error_setg(errp, "-mem-path not supported with Xen");

    if (phys_mem_alloc != qemu_anon_ram_alloc) {
        /*
         * file_ram_alloc() needs to allocate just like
         * phys_mem_alloc, but we haven't bothered to provide
         */
        error_setg(errp,
                   "-mem-path not supported with this accelerator");

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));
    new_block->used_length = size;
    new_block->max_length = size;
    new_block->flags = share ? RAM_SHARED : 0;
    new_block->host = file_ram_alloc(new_block, size,
    if (!new_block->host) {

    addr = ram_block_add(new_block, &local_err);
        error_propagate(errp, local_err);
ram_addr_t qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
                                   void (*resized)(const char*,
                                   void *host, bool resizeable,
                                   MemoryRegion *mr, Error **errp)
    RAMBlock *new_block;
    Error *local_err = NULL;

    size = TARGET_PAGE_ALIGN(size);
    max_size = TARGET_PAGE_ALIGN(max_size);
    new_block = g_malloc0(sizeof(*new_block));
    new_block->resized = resized;
    new_block->used_length = size;
    new_block->max_length = max_size;
    assert(max_size >= size);
    new_block->host = host;
        new_block->flags |= RAM_PREALLOC;
        new_block->flags |= RAM_RESIZEABLE;

    addr = ram_block_add(new_block, &local_err);
        error_propagate(errp, local_err);
ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr, Error **errp)
{
    return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
}

ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
{
    return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
}

ram_addr_t qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
                                     void (*resized)(const char*,
                                     MemoryRegion *mr, Error **errp)
{
    return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
}
void qemu_ram_free_from_ptr(ram_addr_t addr)
    qemu_mutex_lock_ramlist();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE_RCU(block, next);
            ram_list.mru_block = NULL;
            /* Write list before version */
            g_free_rcu(block, rcu);
    qemu_mutex_unlock_ramlist();

static void reclaim_ramblock(RAMBlock *block)
    if (block->flags & RAM_PREALLOC) {
    } else if (xen_enabled()) {
        xen_invalidate_map_cache_entry(block->host);
    } else if (block->fd >= 0) {
        munmap(block->host, block->max_length);
        qemu_anon_ram_free(block->host, block->max_length);
void qemu_ram_free(ram_addr_t addr)
    qemu_mutex_lock_ramlist();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE_RCU(block, next);
            ram_list.mru_block = NULL;
            /* Write list before version */
            call_rcu(block, reclaim_ramblock, rcu);
    qemu_mutex_unlock_ramlist();
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->max_length) {
            vaddr = ramblock_ptr(block, offset);
            if (block->flags & RAM_PREALLOC) {
            } else if (xen_enabled()) {
                if (block->fd >= 0) {
                    flags |= (block->flags & RAM_SHARED ?
                              MAP_SHARED : MAP_PRIVATE);
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, block->fd, offset);
                    /*
                     * Remap needs to match alloc.  Accelerators that
                     * set phys_mem_alloc never remap.  If they did,
                     * we'd need a remap hook here.
                     */
                    assert(phys_mem_alloc == qemu_anon_ram_alloc);

                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                memory_try_enable_merging(vaddr, length);
                qemu_ram_setup_dump(vaddr, length);

#endif /* !_WIN32 */
int qemu_get_ram_fd(ram_addr_t addr)
    block = qemu_get_ram_block(addr);

void *qemu_get_ram_block_host_ptr(ram_addr_t addr)
    block = qemu_get_ram_block(addr);
    ptr = ramblock_ptr(block, 0);
/* Return a host pointer to ram allocated with qemu_ram_alloc.
 * This should not be used for general purpose DMA.  Use address_space_map
 * or address_space_rw instead. For local memory (e.g. video ram) that the
 * device owns, use memory_region_get_ram_ptr.
 *
 * By the time this function returns, the returned pointer is not protected
 * by RCU anymore.  If the caller is not within an RCU critical section and
 * does not hold the iothread lock, it must have other means of protecting the
 * pointer, such as a reference to the region that includes the incoming
 * ram_addr_t.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
    block = qemu_get_ram_block(addr);

    if (xen_enabled() && block->host == NULL) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map until the end of the page.
         */
        if (block->offset == 0) {
            ptr = xen_map_cache(addr, 0, 0);

        block->host = xen_map_cache(block->offset, block->max_length, 1);
    ptr = ramblock_ptr(block, addr - block->offset);
/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
 * but takes a size argument.
 *
 * By the time this function returns, the returned pointer is not protected
 * by RCU anymore.  If the caller is not within an RCU critical section and
 * does not hold the iothread lock, it must have other means of protecting the
 * pointer, such as a reference to the region that includes the incoming
 * ram_addr_t.
 */
static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
    if (xen_enabled()) {
        return xen_map_cache(addr, *size, 1);

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->max_length) {
            if (addr - block->offset + *size > block->max_length)
                *size = block->max_length - addr + block->offset;
            ptr = ramblock_ptr(block, addr - block->offset);

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
/* Some of the softmmu routines need to translate from a host pointer
 * (typically a TLB entry) back to a ram offset.
 *
 * By the time this function returns, the returned pointer is not protected
 * by RCU anymore.  If the caller is not within an RCU critical section and
 * does not hold the iothread lock, it must have other means of protecting the
 * pointer, such as a reference to the region that includes the incoming
 * ram_addr_t.
 */
MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
    uint8_t *host = ptr;

    if (xen_enabled()) {
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
        mr = qemu_get_ram_block(*ram_addr)->mr;

    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && block->host && host - block->host < block->max_length) {

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        /* This case appears when the block is not mapped. */
        if (block->host == NULL) {

        if (host - block->host < block->max_length) {

    *ram_addr = block->offset + (host - block->host);
static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
                               uint64_t val, unsigned size)
    if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
        tb_invalidate_phys_page_fast(ram_addr, size);
        stb_p(qemu_get_ram_ptr(ram_addr), val);
        stw_p(qemu_get_ram_ptr(ram_addr), val);
        stl_p(qemu_get_ram_ptr(ram_addr), val);
    cpu_physical_memory_set_dirty_range_nocode(ram_addr, size);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (!cpu_physical_memory_is_clean(ram_addr)) {
        CPUArchState *env = current_cpu->env_ptr;
        tlb_set_dirty(env, current_cpu->mem_io_vaddr);

static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
                                 unsigned size, bool is_write)

static const MemoryRegionOps notdirty_mem_ops = {
    .write = notdirty_mem_write,
    .valid.accepts = notdirty_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
    CPUState *cpu = current_cpu;
    CPUArchState *env = cpu->env_ptr;
    target_ulong pc, cs_base;

    if (cpu->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);

    vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, len)
            && (wp->flags & flags)) {
            if (flags == BP_MEM_READ) {
                wp->flags |= BP_WATCHPOINT_HIT_READ;
                wp->flags |= BP_WATCHPOINT_HIT_WRITE;
            wp->hitaddr = vaddr;
            wp->hitattrs = attrs;
            if (!cpu->watchpoint_hit) {
                cpu->watchpoint_hit = wp;
                tb_check_watchpoint(cpu);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    cpu->exception_index = EXCP_DEBUG;
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
                    cpu_resume_from_signal(cpu, NULL);
            wp->flags &= ~BP_WATCHPOINT_HIT;
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   memory access routines.  */
static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata,
                                  unsigned size, MemTxAttrs attrs)
    check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ);
        data = address_space_ldub(&address_space_memory, addr, attrs, &res);
        data = address_space_lduw(&address_space_memory, addr, attrs, &res);
        data = address_space_ldl(&address_space_memory, addr, attrs, &res);

static MemTxResult watch_mem_write(void *opaque, hwaddr addr,
                                   uint64_t val, unsigned size,
                                   MemTxAttrs attrs)
    check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE);
        address_space_stb(&address_space_memory, addr, val, attrs, &res);
        address_space_stw(&address_space_memory, addr, val, attrs, &res);
        address_space_stl(&address_space_memory, addr, val, attrs, &res);

static const MemoryRegionOps watch_mem_ops = {
    .read_with_attrs = watch_mem_read,
    .write_with_attrs = watch_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
                                unsigned len, MemTxAttrs attrs)
    subpage_t *subpage = opaque;

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
           subpage, len, addr);
#endif
    res = address_space_read(subpage->as, addr + subpage->base,
        *data = ldub_p(buf);
        *data = lduw_p(buf);

static MemTxResult subpage_write(void *opaque, hwaddr addr,
                                 uint64_t value, unsigned len, MemTxAttrs attrs)
    subpage_t *subpage = opaque;

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx
           " value %"PRIx64"\n",
           __func__, subpage, len, addr, value);
#endif
    return address_space_write(subpage->as, addr + subpage->base,
static bool subpage_accepts(void *opaque, hwaddr addr,
                            unsigned len, bool is_write)
    subpage_t *subpage = opaque;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
           __func__, subpage, is_write ? 'w' : 'r', len, addr);
#endif
    return address_space_access_valid(subpage->as, addr + subpage->base,

static const MemoryRegionOps subpage_ops = {
    .read_with_attrs = subpage_read,
    .write_with_attrs = subpage_write,
    .impl.min_access_size = 1,
    .impl.max_access_size = 8,
    .valid.min_access_size = 1,
    .valid.max_access_size = 8,
    .valid.accepts = subpage_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section)
    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)

    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
           __func__, mmio, start, end, idx, eidx, section);
#endif
    for (; idx <= eidx; idx++) {
        mmio->sub_section[idx] = section;

static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
    mmio = g_malloc0(sizeof(subpage_t));

    memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
                          NULL, TARGET_PAGE_SIZE);
    mmio->iomem.subpage = true;
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
           mmio, base, TARGET_PAGE_SIZE);
#endif
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
                              MemoryRegion *mr)
    MemoryRegionSection section = {
        .address_space = as,
        .offset_within_address_space = 0,
        .offset_within_region = 0,
        .size = int128_2_64(),
    };

    return phys_section_add(map, &section);
MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index)
{
    AddressSpaceDispatch *d = atomic_rcu_read(&cpu->memory_dispatch);
    MemoryRegionSection *sections = d->map.sections;

    return sections[index & ~TARGET_PAGE_MASK].mr;
}
static void io_mem_init(void)
    memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
    memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
    memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
    memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
static void mem_begin(MemoryListener *listener)
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);

    n = dummy_section(&d->map, as, &io_mem_unassigned);
    assert(n == PHYS_SECTION_UNASSIGNED);
    n = dummy_section(&d->map, as, &io_mem_notdirty);
    assert(n == PHYS_SECTION_NOTDIRTY);
    n = dummy_section(&d->map, as, &io_mem_rom);
    assert(n == PHYS_SECTION_ROM);
    n = dummy_section(&d->map, as, &io_mem_watch);
    assert(n == PHYS_SECTION_WATCH);

    d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
    as->next_dispatch = d;
static void address_space_dispatch_free(AddressSpaceDispatch *d)
    phys_sections_free(&d->map);

static void mem_commit(MemoryListener *listener)
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *cur = as->dispatch;
    AddressSpaceDispatch *next = as->next_dispatch;

    phys_page_compact_all(next, next->map.nodes_nb);

    atomic_rcu_set(&as->dispatch, next);
        call_rcu(cur, address_space_dispatch_free, rcu);
static void tcg_commit(MemoryListener *listener)
    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
        /* FIXME: Disentangle the cpu.h circular files deps so we can
           directly get the right CPU from listener.  */
        if (cpu->tcg_as_listener != listener) {
        cpu_reload_memory_map(cpu);

static void core_log_global_start(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(true);
}

static void core_log_global_stop(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(false);
}

static MemoryListener core_memory_listener = {
    .log_global_start = core_log_global_start,
    .log_global_stop = core_log_global_stop,
};
void address_space_init_dispatch(AddressSpace *as)
    as->dispatch = NULL;
    as->dispatch_listener = (MemoryListener) {
        .commit = mem_commit,
        .region_add = mem_add,
        .region_nop = mem_add,
    };
    memory_listener_register(&as->dispatch_listener, as);

void address_space_unregister(AddressSpace *as)
{
    memory_listener_unregister(&as->dispatch_listener);
}

void address_space_destroy_dispatch(AddressSpace *as)
    AddressSpaceDispatch *d = as->dispatch;

    atomic_rcu_set(&as->dispatch, NULL);
        call_rcu(d, address_space_dispatch_free, rcu);
static void memory_map_init(void)
    system_memory = g_malloc(sizeof(*system_memory));

    memory_region_init(system_memory, NULL, "system", UINT64_MAX);
    address_space_init(&address_space_memory, system_memory, "memory");

    system_io = g_malloc(sizeof(*system_io));
    memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
    address_space_init(&address_space_io, system_io, "I/O");

    memory_listener_register(&core_memory_listener, &address_space_memory);

MemoryRegion *get_system_memory(void)
{
    return system_memory;
}

MemoryRegion *get_system_io(void)

#endif /* !defined(CONFIG_USER_ONLY) */
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            if (!(flags & PAGE_WRITE))
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
            unlock_user(p, addr, l);
            if (!(flags & PAGE_READ))
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
            unlock_user(p, addr, 0);
static void invalidate_and_set_dirty(hwaddr addr,
                                     hwaddr length)
    if (cpu_physical_memory_range_includes_clean(addr, length)) {
        tb_invalidate_phys_range(addr, addr + length, 0);
        cpu_physical_memory_set_dirty_range_nocode(addr, length);
    xen_modified_memory(addr, length);
static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
    unsigned access_size_max = mr->ops->valid.max_access_size;

    /* Regions are assumed to support 1-4 byte accesses unless
       otherwise specified.  */
    if (access_size_max == 0) {
        access_size_max = 4;

    /* Bound the maximum access by the alignment of the address.  */
    if (!mr->ops->impl.unaligned) {
        unsigned align_size_max = addr & -addr;
        if (align_size_max != 0 && align_size_max < access_size_max) {
            access_size_max = align_size_max;

    /* Don't attempt accesses larger than the maximum.  */
    if (l > access_size_max) {
        l = access_size_max;
    l = 1 << (qemu_fls(l) - 1);
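/* Worked example: a 6-byte access to a region whose valid.max_access_size is
 * 4, at an address aligned to 2 (addr & -addr == 2): the alignment bound cuts
 * access_size_max to 2 and the length is capped to 2; the final qemu_fls()
 * rounding likewise turns a leftover l of 3 into 2, so the dispatch loop only
 * ever issues 1, 2, 4 or 8 byte accesses.
 */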
MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
                             uint8_t *buf, int len, bool is_write)
    MemTxResult result = MEMTX_OK;

        mr = address_space_translate(as, addr, &addr1, &l, is_write);

            if (!memory_access_is_direct(mr, is_write)) {
                l = memory_access_size(mr, l, addr1);
                /* XXX: could force current_cpu to NULL to avoid
                   potential bugs */
                    /* 64 bit write access */
                    result |= memory_region_dispatch_write(mr, addr1, val, 8,
                    /* 32 bit write access */
                    result |= memory_region_dispatch_write(mr, addr1, val, 4,
                    /* 16 bit write access */
                    result |= memory_region_dispatch_write(mr, addr1, val, 2,
                    /* 8 bit write access */
                    result |= memory_region_dispatch_write(mr, addr1, val, 1,
                addr1 += memory_region_get_ram_addr(mr);
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(addr1, l);
            if (!memory_access_is_direct(mr, is_write)) {
                l = memory_access_size(mr, l, addr1);
                    /* 64 bit read access */
                    result |= memory_region_dispatch_read(mr, addr1, &val, 8,
                    /* 32 bit read access */
                    result |= memory_region_dispatch_read(mr, addr1, &val, 4,
                    /* 16 bit read access */
                    result |= memory_region_dispatch_read(mr, addr1, &val, 2,
                    /* 8 bit read access */
                    result |= memory_region_dispatch_read(mr, addr1, &val, 1,
                ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
                memcpy(buf, ptr, l);
MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
                                const uint8_t *buf, int len)
{
    return address_space_rw(as, addr, attrs, (uint8_t *)buf, len, true);
}

MemTxResult address_space_read(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
                               uint8_t *buf, int len)
{
    return address_space_rw(as, addr, attrs, buf, len, false);
}

void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
                            int len, int is_write)
{
    address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
                     buf, len, is_write);
}
enum write_rom_type {
    WRITE_DATA,
    FLUSH_CACHE,
};

static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
    hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
        mr = address_space_translate(as, addr, &addr1, &l, true);

        if (!(memory_region_is_ram(mr) ||
              memory_region_is_romd(mr))) {
            addr1 += memory_region_get_ram_addr(mr);
            ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(addr1, l);
                flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);

/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
                                   const uint8_t *buf, int len)
{
    cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
}
void cpu_flush_icache_range(hwaddr start, int len)
    /*
     * This function should do the same thing as an icache flush that was
     * triggered from within the guest. For TCG we are always cache coherent,
     * so there is no need to flush anything. For KVM / Xen we need to flush
     * the host's instruction cache at least.
     */
    if (tcg_enabled()) {

    cpu_physical_memory_write_rom_internal(&address_space_memory,
                                           start, NULL, len, FLUSH_CACHE);
static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
    MapClient *client = g_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    QLIST_INSERT_HEAD(&map_client_list, client, link);

static void cpu_unregister_map_client(void *_client)
    MapClient *client = (MapClient *)_client;

    QLIST_REMOVE(client, link);

static void cpu_notify_map_clients(void)
    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
{
    MemoryRegion *mr;
    hwaddr l, xlat;

    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (!memory_access_is_direct(mr, is_write)) {
            l = memory_access_size(mr, l, addr);
            if (!memory_region_access_valid(mr, xlat, l, is_write)) {
                return false;
            }
        }

        len -= l;
        addr += l;
    }
    return true;
}
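
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * a device model might probe a guest-supplied DMA window with
 * address_space_access_valid() before committing to use it.  The names
 * below are hypothetical.
 */
#if 0
static bool example_dma_window_ok(AddressSpace *as, hwaddr base, int size)
{
    /* Check that the whole window can be both read and written. */
    return address_space_access_valid(as, base, size, false) &&
           address_space_access_valid(as, base, size, true);
}
#endif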
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *address_space_map(AddressSpace *as,
                        hwaddr addr,
                        hwaddr *plen,
                        bool is_write)
{
    hwaddr len = *plen;
    hwaddr done = 0;
    hwaddr l, xlat, base;
    MemoryRegion *mr, *this_mr;
    ram_addr_t raddr;

    if (len == 0) {
        return NULL;
    }

    l = len;
    mr = address_space_translate(as, addr, &xlat, &l, is_write);
    if (!memory_access_is_direct(mr, is_write)) {
        if (bounce.buffer) {
            return NULL;
        }
        /* Avoid unbounded allocations */
        l = MIN(l, TARGET_PAGE_SIZE);
        bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
        bounce.addr = addr;
        bounce.len = l;

        memory_region_ref(mr);
        bounce.mr = mr;
        if (!is_write) {
            address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
                               bounce.buffer, l);
        }

        *plen = l;
        return bounce.buffer;
    }

    base = xlat;
    raddr = memory_region_get_ram_addr(mr);

    for (;;) {
        len -= l;
        addr += l;
        done += l;
        if (len == 0) {
            break;
        }

        l = len;
        this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (this_mr != mr || xlat != base + done) {
            break;
        }
    }

    memory_region_ref(mr);
    *plen = done;
    return qemu_ram_ptr_length(raddr + base, plen);
}
/* Unmaps a memory region previously mapped by address_space_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         int is_write, hwaddr access_len)
{
    if (buffer != bounce.buffer) {
        MemoryRegion *mr;
        ram_addr_t addr1;

        mr = qemu_ram_addr_from_host(buffer, &addr1);
        assert(mr != NULL);
        if (is_write) {
            invalidate_and_set_dirty(addr1, access_len);
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        memory_region_unref(mr);
        return;
    }
    if (is_write) {
        address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
                            bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    memory_region_unref(bounce.mr);
    cpu_notify_map_clients();
}

void *cpu_physical_memory_map(hwaddr addr,
                              hwaddr *plen,
                              int is_write)
{
    return address_space_map(&address_space_memory, addr, plen, is_write);
}

void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                               int is_write, hwaddr access_len)
{
    return address_space_unmap(&address_space_memory, buffer, len,
                               is_write, access_len);
}
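
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * the common map/copy/unmap pattern for direct access to guest memory.
 * address_space_map() may return NULL (e.g. when the single bounce buffer
 * is busy) or shorten *plen; callers must cope with both.  The function
 * name and parameters are hypothetical.
 */
#if 0
static bool example_fill_guest_buffer(AddressSpace *as, hwaddr gpa,
                                      const void *data, hwaddr size)
{
    hwaddr plen = size;
    void *host = address_space_map(as, gpa, &plen, true);

    if (!host) {
        return false;   /* retry later, see cpu_register_map_client() */
    }
    if (plen < size) {
        address_space_unmap(as, host, plen, 1, 0);
        return false;   /* window was truncated */
    }
    memcpy(host, data, size);
    address_space_unmap(as, host, plen, 1, size);
    return true;
}
#endif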
/* warning: addr must be aligned */
static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
                                                  MemTxAttrs attrs,
                                                  MemTxResult *result,
                                                  enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;

    mr = address_space_translate(as, addr, &addr1, &l, false);
    if (l < 4 || !memory_access_is_direct(mr, false)) {
        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    return val;
}
uint32_t address_space_ldl(AddressSpace *as, hwaddr addr,
                           MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_NATIVE_ENDIAN);
}

uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_LITTLE_ENDIAN);
}

uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_BIG_ENDIAN);
}

uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}
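
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * the _le/_be variants read in a fixed device endianness regardless of
 * the target's endianness, which is what device models normally want
 * when parsing in-memory descriptors.  The descriptor layout below is
 * hypothetical.
 */
#if 0
static void example_read_le_descriptor(AddressSpace *as, hwaddr desc)
{
    uint32_t flags = ldl_le_phys(as, desc);     /* always little endian */
    uint64_t buf   = ldq_le_phys(as, desc + 8); /* 64-bit little endian */

    (void)flags;
    (void)buf;
}
#endif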
/* warning: addr must be aligned */
static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
                                                  MemTxAttrs attrs,
                                                  MemTxResult *result,
                                                  enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 8;
    hwaddr addr1;
    MemTxResult r;

    mr = address_space_translate(as, addr, &addr1, &l, false);
    if (l < 8 || !memory_access_is_direct(mr, false)) {
        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap64(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap64(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    return val;
}
uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
                           MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_NATIVE_ENDIAN);
}

uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_LITTLE_ENDIAN);
}

uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_BIG_ENDIAN);
}

uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}
uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
                            MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t val;
    MemTxResult r;

    r = address_space_rw(as, addr, attrs, &val, 1, 0);
    if (result) {
        *result = r;
    }
    return val;
}

uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}
/* warning: addr must be aligned */
static inline uint32_t address_space_lduw_internal(AddressSpace *as,
                                                   hwaddr addr,
                                                   MemTxAttrs attrs,
                                                   MemTxResult *result,
                                                   enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;
    MemTxResult r;

    mr = address_space_translate(as, addr, &addr1, &l, false);
    if (l < 2 || !memory_access_is_direct(mr, false)) {
        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    return val;
}
uint32_t address_space_lduw(AddressSpace *as, hwaddr addr,
                            MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_NATIVE_ENDIAN);
}

uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
                               MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_LITTLE_ENDIAN);
}

uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
                               MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_BIG_ENDIAN);
}

uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}
/* warning: addr must be aligned. The RAM page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
                                MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;

    mr = address_space_translate(as, addr, &addr1, &l, true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
    } else {
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (cpu_physical_memory_is_clean(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_range_nocode(addr1, 4);
            }
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
}

void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_notdirty(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
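
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * a target's software page-table walker could use stl_phys_notdirty() to
 * update an accessed/dirty flag in a guest PTE without marking the RAM
 * page dirty for migration.  The PTE layout and flag value below are
 * hypothetical.
 */
#if 0
static void example_set_pte_accessed(CPUState *cs, hwaddr pte_addr)
{
    uint32_t pte = ldl_phys(cs->as, pte_addr);

    stl_phys_notdirty(cs->as, pte_addr, pte | 0x20 /* accessed */);
}
#endif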
/* warning: addr must be aligned */
static inline void address_space_stl_internal(AddressSpace *as,
                                              hwaddr addr, uint32_t val,
                                              MemTxAttrs attrs,
                                              MemTxResult *result,
                                              enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;

    mr = address_space_translate(as, addr, &addr1, &l, true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 4);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
}
void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stl_internal(as, addr, val, attrs, result,
                               DEVICE_NATIVE_ENDIAN);
}

void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stl_internal(as, addr, val, attrs, result,
                               DEVICE_LITTLE_ENDIAN);
}

void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stl_internal(as, addr, val, attrs, result,
                               DEVICE_BIG_ENDIAN);
}

void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t v = val;
    MemTxResult r;

    r = address_space_rw(as, addr, attrs, &v, 1, 1);
    if (result) {
        *result = r;
    }
}

void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
/* warning: addr must be aligned */
static inline void address_space_stw_internal(AddressSpace *as,
                                              hwaddr addr, uint32_t val,
                                              MemTxAttrs attrs,
                                              MemTxResult *result,
                                              enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;
    MemTxResult r;

    mr = address_space_translate(as, addr, &addr1, &l, true);
    if (l < 2 || !memory_access_is_direct(mr, true)) {
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 2);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
}
void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_NATIVE_ENDIAN);
}

void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_LITTLE_ENDIAN);
}

void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_BIG_ENDIAN);
}

void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;

    val = tswap64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;

    val = cpu_to_le64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;

    val = cpu_to_be64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(cpu, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1) {
            return -1;
        }
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len) {
            l = len;
        }
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write) {
            cpu_physical_memory_write_rom(cpu->as, phys_addr, buf, l);
        } else {
            address_space_rw(cpu->as, phys_addr, MEMTXATTRS_UNSPECIFIED,
                             buf, l, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
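
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * a debugger-style helper reading guest virtual memory through the CPU's
 * current MMU mappings, as a gdbstub or monitor command would.  The
 * helper name is hypothetical.
 */
#if 0
static bool example_peek_guest_vaddr(CPUState *cpu, target_ulong vaddr,
                                     uint32_t *out)
{
    uint8_t buf[4];

    if (cpu_memory_rw_debug(cpu, vaddr, buf, sizeof(buf), 0) < 0) {
        return false;           /* no mapping for this virtual address */
    }
    *out = ldl_p(buf);          /* interpret in target byte order */
    return true;
}
#endif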
/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool target_words_bigendian(void);
bool target_words_bigendian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}
#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegion *mr;
    hwaddr l = 1;

    mr = address_space_translate(&address_space_memory,
                                 phys_addr, &phys_addr, &l, false);

    return !(memory_region_is_ram(mr) ||
             memory_region_is_romd(mr));
}
void qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
{
    RAMBlock *block;

    rcu_read_lock();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        func(block->host, block->offset, block->used_length, opaque);