/*
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <sys/types.h>

#include "qemu-common.h"

#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#endif

#include "exec/cpu-all.h"

#include "exec/cputlb.h"
#include "translate-all.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"

#include "qemu/range.h"

//#define DEBUG_SUBPAGE
#if !defined(CONFIG_USER_ONLY)
static bool in_migration;

RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC   (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED     (1 << 1)

#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUState *, current_cpu);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;
#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables.  */
#define ADDR_SPACE_BITS 64

#define P_L2_BITS 9
#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
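/*
 * Worked example (added for clarity; not in the original source): with
 * ADDR_SPACE_BITS = 64, TARGET_PAGE_BITS = 12 (4 KiB target pages) and the
 * P_L2_BITS value above, this comes out to ((64 - 12 - 1) / 9) + 1 = 6
 * levels, each level being an array of P_L2_SIZE = 512 PhysPageEntry slots.
 */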
typedef PhysPageEntry Node[P_L2_SIZE];

typedef struct PhysPageMap {
    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3
static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;
#endif
#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
    }
}
static uint32_t phys_map_node_alloc(PhysPageMap *map)
{
    unsigned i;
    uint32_t ret;

    ret = map->nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);
    for (i = 0; i < P_L2_SIZE; ++i) {
        map->nodes[ret][i].skip = 1;
        map->nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}
static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map);
        p = map->nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < P_L2_SIZE; i++) {
                p[i].skip = 0;
                p[i].ptr = PHYS_SECTION_UNASSIGNED;
            }
        }
    } else {
        p = map->nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}
static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}
/* Compact a non leaf page entry. Simply detect that the entry has a single child,
 * and update our entry so we can skip it and go directly to the destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes, compacted);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}
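/*
 * Illustration (added; not part of the original file): if a node has 511
 * NIL entries and a single valid child that itself skips 2 levels, the
 * parent is rewritten to point straight at that child's target with
 * skip = 1 + 2 = 3, so phys_page_find() takes one step instead of walking
 * the intermediate nodes.
 */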
static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    DECLARE_BITMAP(compacted, nodes_nb);

    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes, compacted);
    }
}
static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (sections[lp.ptr].size.hi ||
        range_covers_byte(sections[lp.ptr].offset_within_address_space,
                          sections[lp.ptr].size.lo, addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}
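/*
 * Illustration (added; not part of the original file): the lookup starts at
 * i = P_L2_LEVELS and selects (index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)
 * at each step, where index = addr >> TARGET_PAGE_BITS.  A node with
 * skip > 1 stands for a chain of single-child nodes removed by
 * phys_page_compact(), which is why i is decremented by lp.skip rather than
 * by 1.
 */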
bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section;
    subpage_t *subpage;

    section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    return section;
}
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    diff = int128_sub(section->mr->size, int128_make64(addr));
    *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    return section;
}
static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
{
    if (memory_region_is_ram(mr)) {
        return !(is_write && mr->readonly);
    }
    if (memory_region_is_romd(mr)) {
        return !is_write;
    }

    return false;
}
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;
    hwaddr len = *plen;

    for (;;) {
        section = address_space_translate_internal(as->dispatch, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        len = MIN(len, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        len = MIN(page, len);
    }

    *plen = len;
    *xlat = addr;
    return mr;
}
MemoryRegionSection *
address_space_translate_for_iotlb(AddressSpace *as, hwaddr addr, hwaddr *xlat,
                                  hwaddr *plen)
{
    MemoryRegionSection *section;
    section = address_space_translate_internal(as->dispatch, addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif
void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    qemu_mutex_init(&ram_list.mutex);
    memory_map_init();
    io_mem_init();
#endif
}
419 #if !defined(CONFIG_USER_ONLY)
421 static int cpu_common_post_load(void *opaque
, int version_id
)
423 CPUState
*cpu
= opaque
;
425 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
426 version_id is increased. */
427 cpu
->interrupt_request
&= ~0x01;
433 static int cpu_common_pre_load(void *opaque
)
435 CPUState
*cpu
= opaque
;
437 cpu
->exception_index
= 0;
442 static bool cpu_common_exception_index_needed(void *opaque
)
444 CPUState
*cpu
= opaque
;
446 return cpu
->exception_index
!= 0;
449 static const VMStateDescription vmstate_cpu_common_exception_index
= {
450 .name
= "cpu_common/exception_index",
452 .minimum_version_id
= 1,
453 .fields
= (VMStateField
[]) {
454 VMSTATE_INT32(exception_index
, CPUState
),
455 VMSTATE_END_OF_LIST()
459 const VMStateDescription vmstate_cpu_common
= {
460 .name
= "cpu_common",
462 .minimum_version_id
= 1,
463 .pre_load
= cpu_common_pre_load
,
464 .post_load
= cpu_common_post_load
,
465 .fields
= (VMStateField
[]) {
466 VMSTATE_UINT32(halted
, CPUState
),
467 VMSTATE_UINT32(interrupt_request
, CPUState
),
468 VMSTATE_END_OF_LIST()
470 .subsections
= (VMStateSubsection
[]) {
472 .vmsd
= &vmstate_cpu_common_exception_index
,
473 .needed
= cpu_common_exception_index_needed
,
482 CPUState
*qemu_get_cpu(int index
)
487 if (cpu
->cpu_index
== index
) {
495 #if !defined(CONFIG_USER_ONLY)
496 void tcg_cpu_address_space_init(CPUState
*cpu
, AddressSpace
*as
)
498 /* We only support one address space per cpu at the moment. */
499 assert(cpu
->as
== as
);
501 if (cpu
->tcg_as_listener
) {
502 memory_listener_unregister(cpu
->tcg_as_listener
);
504 cpu
->tcg_as_listener
= g_new0(MemoryListener
, 1);
506 cpu
->tcg_as_listener
->commit
= tcg_commit
;
507 memory_listener_register(cpu
->tcg_as_listener
, as
);
511 void cpu_exec_init(CPUArchState
*env
)
513 CPUState
*cpu
= ENV_GET_CPU(env
);
514 CPUClass
*cc
= CPU_GET_CLASS(cpu
);
518 #ifdef TARGET_WORDS_BIGENDIAN
519 cpu
->bigendian
= true;
521 cpu
->bigendian
= false;
524 #if defined(CONFIG_USER_ONLY)
528 CPU_FOREACH(some_cpu
) {
531 cpu
->cpu_index
= cpu_index
;
533 QTAILQ_INIT(&cpu
->breakpoints
);
534 QTAILQ_INIT(&cpu
->watchpoints
);
535 #ifndef CONFIG_USER_ONLY
536 cpu
->as
= &address_space_memory
;
537 cpu
->thread_id
= qemu_get_thread_id();
539 QTAILQ_INSERT_TAIL(&cpus
, cpu
, node
);
540 #if defined(CONFIG_USER_ONLY)
543 if (qdev_get_vmsd(DEVICE(cpu
)) == NULL
) {
544 vmstate_register(NULL
, cpu_index
, &vmstate_cpu_common
, cpu
);
546 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
547 register_savevm(NULL
, "cpu", cpu_index
, CPU_SAVE_VERSION
,
548 cpu_save
, cpu_load
, env
);
549 assert(cc
->vmsd
== NULL
);
550 assert(qdev_get_vmsd(DEVICE(cpu
)) == NULL
);
552 if (cc
->vmsd
!= NULL
) {
553 vmstate_register(NULL
, cpu_index
, cc
->vmsd
, cpu
);
557 #if defined(TARGET_HAS_ICE)
558 #if defined(CONFIG_USER_ONLY)
559 static void breakpoint_invalidate(CPUState
*cpu
, target_ulong pc
)
561 tb_invalidate_phys_page_range(pc
, pc
+ 1, 0);
564 static void breakpoint_invalidate(CPUState
*cpu
, target_ulong pc
)
566 hwaddr phys
= cpu_get_phys_page_debug(cpu
, pc
);
568 tb_invalidate_phys_addr(cpu
->as
,
569 phys
| (pc
& ~TARGET_PAGE_MASK
));
573 #endif /* TARGET_HAS_ICE */
575 #if defined(CONFIG_USER_ONLY)
576 void cpu_watchpoint_remove_all(CPUState
*cpu
, int mask
)
581 int cpu_watchpoint_remove(CPUState
*cpu
, vaddr addr
, vaddr len
,
587 void cpu_watchpoint_remove_by_ref(CPUState
*cpu
, CPUWatchpoint
*watchpoint
)
591 int cpu_watchpoint_insert(CPUState
*cpu
, vaddr addr
, vaddr len
,
592 int flags
, CPUWatchpoint
**watchpoint
)
597 /* Add a watchpoint. */
598 int cpu_watchpoint_insert(CPUState
*cpu
, vaddr addr
, vaddr len
,
599 int flags
, CPUWatchpoint
**watchpoint
)
603 /* forbid ranges which are empty or run off the end of the address space */
604 if (len
== 0 || (addr
+ len
- 1) < addr
) {
605 error_report("tried to set invalid watchpoint at %"
606 VADDR_PRIx
", len=%" VADDR_PRIu
, addr
, len
);
609 wp
= g_malloc(sizeof(*wp
));
615 /* keep all GDB-injected watchpoints in front */
616 if (flags
& BP_GDB
) {
617 QTAILQ_INSERT_HEAD(&cpu
->watchpoints
, wp
, entry
);
619 QTAILQ_INSERT_TAIL(&cpu
->watchpoints
, wp
, entry
);
622 tlb_flush_page(cpu
, addr
);
629 /* Remove a specific watchpoint. */
630 int cpu_watchpoint_remove(CPUState
*cpu
, vaddr addr
, vaddr len
,
635 QTAILQ_FOREACH(wp
, &cpu
->watchpoints
, entry
) {
636 if (addr
== wp
->vaddr
&& len
== wp
->len
637 && flags
== (wp
->flags
& ~BP_WATCHPOINT_HIT
)) {
638 cpu_watchpoint_remove_by_ref(cpu
, wp
);
645 /* Remove a specific watchpoint by reference. */
646 void cpu_watchpoint_remove_by_ref(CPUState
*cpu
, CPUWatchpoint
*watchpoint
)
648 QTAILQ_REMOVE(&cpu
->watchpoints
, watchpoint
, entry
);
650 tlb_flush_page(cpu
, watchpoint
->vaddr
);
655 /* Remove all matching watchpoints. */
656 void cpu_watchpoint_remove_all(CPUState
*cpu
, int mask
)
658 CPUWatchpoint
*wp
, *next
;
660 QTAILQ_FOREACH_SAFE(wp
, &cpu
->watchpoints
, entry
, next
) {
661 if (wp
->flags
& mask
) {
662 cpu_watchpoint_remove_by_ref(cpu
, wp
);
/* Return true if this watchpoint address matches the specified
 * access (ie the address range covered by the watchpoint overlaps
 * partially or completely with the address range covered by the
 * access).
 */
static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
                                                  vaddr addr,
                                                  vaddr len)
{
    /* We know the lengths are non-zero, but a little caution is
     * required to avoid errors in the case where the range ends
     * exactly at the top of the address space and so addr + len
     * wraps round to zero.
     */
    vaddr wpend = wp->vaddr + wp->len - 1;
    vaddr addrend = addr + len - 1;

    return !(addr > wpend || wp->vaddr > addrend);
}
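/*
 * Worked example (added; not in the original file): a watchpoint at
 * wp->vaddr = 0x1000 with wp->len = 4 covers [0x1000, 0x1003].  An access
 * at addr = 0x1002 of len = 8 covers [0x1002, 0x1009]; neither range starts
 * past the other's end, so the function reports a match.  Using inclusive
 * end points (wpend, addrend) keeps the test correct even when a range ends
 * at the very top of the address space.
 */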
689 /* Add a breakpoint. */
690 int cpu_breakpoint_insert(CPUState
*cpu
, vaddr pc
, int flags
,
691 CPUBreakpoint
**breakpoint
)
693 #if defined(TARGET_HAS_ICE)
696 bp
= g_malloc(sizeof(*bp
));
701 /* keep all GDB-injected breakpoints in front */
702 if (flags
& BP_GDB
) {
703 QTAILQ_INSERT_HEAD(&cpu
->breakpoints
, bp
, entry
);
705 QTAILQ_INSERT_TAIL(&cpu
->breakpoints
, bp
, entry
);
708 breakpoint_invalidate(cpu
, pc
);
719 /* Remove a specific breakpoint. */
720 int cpu_breakpoint_remove(CPUState
*cpu
, vaddr pc
, int flags
)
722 #if defined(TARGET_HAS_ICE)
725 QTAILQ_FOREACH(bp
, &cpu
->breakpoints
, entry
) {
726 if (bp
->pc
== pc
&& bp
->flags
== flags
) {
727 cpu_breakpoint_remove_by_ref(cpu
, bp
);
737 /* Remove a specific breakpoint by reference. */
738 void cpu_breakpoint_remove_by_ref(CPUState
*cpu
, CPUBreakpoint
*breakpoint
)
740 #if defined(TARGET_HAS_ICE)
741 QTAILQ_REMOVE(&cpu
->breakpoints
, breakpoint
, entry
);
743 breakpoint_invalidate(cpu
, breakpoint
->pc
);
749 /* Remove all matching breakpoints. */
750 void cpu_breakpoint_remove_all(CPUState
*cpu
, int mask
)
752 #if defined(TARGET_HAS_ICE)
753 CPUBreakpoint
*bp
, *next
;
755 QTAILQ_FOREACH_SAFE(bp
, &cpu
->breakpoints
, entry
, next
) {
756 if (bp
->flags
& mask
) {
757 cpu_breakpoint_remove_by_ref(cpu
, bp
);
763 /* enable or disable single step mode. EXCP_DEBUG is returned by the
764 CPU loop after each instruction */
765 void cpu_single_step(CPUState
*cpu
, int enabled
)
767 #if defined(TARGET_HAS_ICE)
768 if (cpu
->singlestep_enabled
!= enabled
) {
769 cpu
->singlestep_enabled
= enabled
;
771 kvm_update_guest_debug(cpu
, 0);
773 /* must flush all the translated code to avoid inconsistencies */
774 /* XXX: only flush what is necessary */
775 CPUArchState
*env
= cpu
->env_ptr
;
782 void QEMU_NORETURN
cpu_abort(CPUState
*cpu
, const char *fmt
, ...)
789 fprintf(stderr
, "qemu: fatal: ");
790 vfprintf(stderr
, fmt
, ap
);
791 fprintf(stderr
, "\n");
792 cpu_dump_state(cpu
, stderr
, fprintf
, CPU_DUMP_FPU
| CPU_DUMP_CCOP
);
793 if (qemu_log_enabled()) {
794 qemu_log("qemu: fatal: ");
795 qemu_log_vprintf(fmt
, ap2
);
797 log_cpu_state(cpu
, CPU_DUMP_FPU
| CPU_DUMP_CCOP
);
803 #if defined(CONFIG_USER_ONLY)
805 struct sigaction act
;
806 sigfillset(&act
.sa_mask
);
807 act
.sa_handler
= SIG_DFL
;
808 sigaction(SIGABRT
, &act
, NULL
);
814 #if !defined(CONFIG_USER_ONLY)
815 static RAMBlock
*qemu_get_ram_block(ram_addr_t addr
)
819 /* The list is protected by the iothread lock here. */
820 block
= ram_list
.mru_block
;
821 if (block
&& addr
- block
->offset
< block
->length
) {
824 QTAILQ_FOREACH(block
, &ram_list
.blocks
, next
) {
825 if (addr
- block
->offset
< block
->length
) {
830 fprintf(stderr
, "Bad ram offset %" PRIx64
"\n", (uint64_t)addr
);
834 ram_list
.mru_block
= block
;
838 static void tlb_reset_dirty_range_all(ram_addr_t start
, ram_addr_t length
)
844 end
= TARGET_PAGE_ALIGN(start
+ length
);
845 start
&= TARGET_PAGE_MASK
;
847 block
= qemu_get_ram_block(start
);
848 assert(block
== qemu_get_ram_block(end
- 1));
849 start1
= (uintptr_t)block
->host
+ (start
- block
->offset
);
850 cpu_tlb_reset_dirty_all(start1
, length
);
853 /* Note: start and end must be within the same ram block. */
854 void cpu_physical_memory_reset_dirty(ram_addr_t start
, ram_addr_t length
,
859 cpu_physical_memory_clear_dirty_range(start
, length
, client
);
862 tlb_reset_dirty_range_all(start
, length
);
866 static void cpu_physical_memory_set_dirty_tracking(bool enable
)
868 in_migration
= enable
;
871 hwaddr
memory_region_section_get_iotlb(CPUState
*cpu
,
872 MemoryRegionSection
*section
,
874 hwaddr paddr
, hwaddr xlat
,
876 target_ulong
*address
)
881 if (memory_region_is_ram(section
->mr
)) {
883 iotlb
= (memory_region_get_ram_addr(section
->mr
) & TARGET_PAGE_MASK
)
885 if (!section
->readonly
) {
886 iotlb
|= PHYS_SECTION_NOTDIRTY
;
888 iotlb
|= PHYS_SECTION_ROM
;
891 iotlb
= section
- section
->address_space
->dispatch
->map
.sections
;
895 /* Make accesses to pages with watchpoints go via the
896 watchpoint trap routines. */
897 QTAILQ_FOREACH(wp
, &cpu
->watchpoints
, entry
) {
898 if (cpu_watchpoint_address_matches(wp
, vaddr
, TARGET_PAGE_SIZE
)) {
899 /* Avoid trapping reads of pages with a write breakpoint. */
900 if ((prot
& PAGE_WRITE
) || (wp
->flags
& BP_MEM_READ
)) {
901 iotlb
= PHYS_SECTION_WATCH
+ paddr
;
902 *address
|= TLB_MMIO
;
910 #endif /* defined(CONFIG_USER_ONLY) */
912 #if !defined(CONFIG_USER_ONLY)
914 static int subpage_register (subpage_t
*mmio
, uint32_t start
, uint32_t end
,
916 static subpage_t
*subpage_init(AddressSpace
*as
, hwaddr base
);
918 static void *(*phys_mem_alloc
)(size_t size
, uint64_t *align
) =
/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this.  Hopefully, we can
 * get rid of it eventually.
 */
926 void phys_mem_set_alloc(void *(*alloc
)(size_t, uint64_t *align
))
928 phys_mem_alloc
= alloc
;
static uint16_t phys_section_add(PhysPageMap *map,
                                 MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(map->sections_nb < TARGET_PAGE_SIZE);

    if (map->sections_nb == map->sections_nb_alloc) {
        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
        map->sections = g_renew(MemoryRegionSection, map->sections,
                                map->sections_nb_alloc);
    }
    map->sections[map->sections_nb] = *section;
    memory_region_ref(section->mr);
    return map->sections_nb++;
}
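/*
 * Illustration (added; not from the original source): an iotlb entry for a
 * RAM page is built in memory_region_section_get_iotlb() roughly as the
 * page-aligned ram address ORed with a small section number such as
 * PHYS_SECTION_NOTDIRTY in the low bits.  With 4 KiB pages the low 12 bits
 * are free for that purpose, which is why the assert above insists the
 * section count stays below TARGET_PAGE_SIZE.
 */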
950 static void phys_section_destroy(MemoryRegion
*mr
)
952 memory_region_unref(mr
);
955 subpage_t
*subpage
= container_of(mr
, subpage_t
, iomem
);
956 object_unref(OBJECT(&subpage
->iomem
));
961 static void phys_sections_free(PhysPageMap
*map
)
963 while (map
->sections_nb
> 0) {
964 MemoryRegionSection
*section
= &map
->sections
[--map
->sections_nb
];
965 phys_section_destroy(section
->mr
);
967 g_free(map
->sections
);
971 static void register_subpage(AddressSpaceDispatch
*d
, MemoryRegionSection
*section
)
974 hwaddr base
= section
->offset_within_address_space
976 MemoryRegionSection
*existing
= phys_page_find(d
->phys_map
, base
,
977 d
->map
.nodes
, d
->map
.sections
);
978 MemoryRegionSection subsection
= {
979 .offset_within_address_space
= base
,
980 .size
= int128_make64(TARGET_PAGE_SIZE
),
984 assert(existing
->mr
->subpage
|| existing
->mr
== &io_mem_unassigned
);
986 if (!(existing
->mr
->subpage
)) {
987 subpage
= subpage_init(d
->as
, base
);
988 subsection
.address_space
= d
->as
;
989 subsection
.mr
= &subpage
->iomem
;
990 phys_page_set(d
, base
>> TARGET_PAGE_BITS
, 1,
991 phys_section_add(&d
->map
, &subsection
));
993 subpage
= container_of(existing
->mr
, subpage_t
, iomem
);
995 start
= section
->offset_within_address_space
& ~TARGET_PAGE_MASK
;
996 end
= start
+ int128_get64(section
->size
) - 1;
997 subpage_register(subpage
, start
, end
,
998 phys_section_add(&d
->map
, section
));
1002 static void register_multipage(AddressSpaceDispatch
*d
,
1003 MemoryRegionSection
*section
)
1005 hwaddr start_addr
= section
->offset_within_address_space
;
1006 uint16_t section_index
= phys_section_add(&d
->map
, section
);
1007 uint64_t num_pages
= int128_get64(int128_rshift(section
->size
,
1011 phys_page_set(d
, start_addr
>> TARGET_PAGE_BITS
, num_pages
, section_index
);
static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = as->next_dispatch;
    MemoryRegionSection now = *section, remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
                        - now.offset_within_address_space;

        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(d, &now);
    } else {
        now.size = int128_zero();
    }
    while (int128_ne(remain.size, now.size)) {
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);
        now = remain;
        if (int128_lt(remain.size, page_size)) {
            register_subpage(d, &now);
        } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
            now.size = page_size;
            register_subpage(d, &now);
        } else {
            now.size = int128_and(now.size, int128_neg(page_size));
            register_multipage(d, &now);
        }
    }
}
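/*
 * Example of the splitting above (added for clarity; the numbers are
 * hypothetical): with 4 KiB target pages, a section starting at 0x1800 with
 * size 0x3000 is registered as three pieces: a head subpage [0x1800, 0x1fff],
 * two full pages [0x2000, 0x3fff] via register_multipage(), and a tail
 * subpage [0x4000, 0x47ff].
 */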
1047 void qemu_flush_coalesced_mmio_buffer(void)
1050 kvm_flush_coalesced_mmio_buffer();
1053 void qemu_mutex_lock_ramlist(void)
1055 qemu_mutex_lock(&ram_list
.mutex
);
1058 void qemu_mutex_unlock_ramlist(void)
1060 qemu_mutex_unlock(&ram_list
.mutex
);
1065 #include <sys/vfs.h>
1067 #define HUGETLBFS_MAGIC 0x958458f6
1069 static long gethugepagesize(const char *path
, Error
**errp
)
1075 ret
= statfs(path
, &fs
);
1076 } while (ret
!= 0 && errno
== EINTR
);
1079 error_setg_errno(errp
, errno
, "failed to get page size of file %s",
1084 if (fs
.f_type
!= HUGETLBFS_MAGIC
)
1085 fprintf(stderr
, "Warning: path not on HugeTLBFS: %s\n", path
);
1090 static void *file_ram_alloc(RAMBlock
*block
,
1096 char *sanitized_name
;
1098 void * volatile area
= NULL
;
1101 Error
*local_err
= NULL
;
1103 hpagesize
= gethugepagesize(path
, &local_err
);
1105 error_propagate(errp
, local_err
);
1108 block
->mr
->align
= hpagesize
;
1110 if (memory
< hpagesize
) {
1111 error_setg(errp
, "memory size 0x" RAM_ADDR_FMT
" must be equal to "
1112 "or larger than huge page size 0x%" PRIx64
,
1117 if (kvm_enabled() && !kvm_has_sync_mmu()) {
1119 "host lacks kvm mmu notifiers, -mem-path unsupported");
1123 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
1124 sanitized_name
= g_strdup(memory_region_name(block
->mr
));
1125 for (c
= sanitized_name
; *c
!= '\0'; c
++) {
1130 filename
= g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path
,
1132 g_free(sanitized_name
);
1134 fd
= mkstemp(filename
);
1136 error_setg_errno(errp
, errno
,
1137 "unable to create backing store for hugepages");
1144 memory
= (memory
+hpagesize
-1) & ~(hpagesize
-1);
1147 * ftruncate is not supported by hugetlbfs in older
1148 * hosts, so don't bother bailing out on errors.
1149 * If anything goes wrong with it under other filesystems,
1152 if (ftruncate(fd
, memory
)) {
1153 perror("ftruncate");
1156 area
= mmap(0, memory
, PROT_READ
| PROT_WRITE
,
1157 (block
->flags
& RAM_SHARED
? MAP_SHARED
: MAP_PRIVATE
),
1159 if (area
== MAP_FAILED
) {
1160 error_setg_errno(errp
, errno
,
1161 "unable to map backing store for hugepages");
1167 os_mem_prealloc(fd
, area
, memory
);
1175 error_report("%s\n", error_get_pretty(*errp
));
1182 static ram_addr_t
find_ram_offset(ram_addr_t size
)
1184 RAMBlock
*block
, *next_block
;
1185 ram_addr_t offset
= RAM_ADDR_MAX
, mingap
= RAM_ADDR_MAX
;
1187 assert(size
!= 0); /* it would hand out same offset multiple times */
1189 if (QTAILQ_EMPTY(&ram_list
.blocks
))
1192 QTAILQ_FOREACH(block
, &ram_list
.blocks
, next
) {
1193 ram_addr_t end
, next
= RAM_ADDR_MAX
;
1195 end
= block
->offset
+ block
->length
;
1197 QTAILQ_FOREACH(next_block
, &ram_list
.blocks
, next
) {
1198 if (next_block
->offset
>= end
) {
1199 next
= MIN(next
, next_block
->offset
);
1202 if (next
- end
>= size
&& next
- end
< mingap
) {
1204 mingap
= next
- end
;
1208 if (offset
== RAM_ADDR_MAX
) {
1209 fprintf(stderr
, "Failed to find gap of requested size: %" PRIu64
"\n",
1217 ram_addr_t
last_ram_offset(void)
1220 ram_addr_t last
= 0;
1222 QTAILQ_FOREACH(block
, &ram_list
.blocks
, next
)
1223 last
= MAX(last
, block
->offset
+ block
->length
);
1228 static void qemu_ram_setup_dump(void *addr
, ram_addr_t size
)
1232 /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
1233 if (!qemu_opt_get_bool(qemu_get_machine_opts(),
1234 "dump-guest-core", true)) {
1235 ret
= qemu_madvise(addr
, size
, QEMU_MADV_DONTDUMP
);
1237 perror("qemu_madvise");
1238 fprintf(stderr
, "madvise doesn't support MADV_DONTDUMP, "
1239 "but dump_guest_core=off specified\n");
1244 static RAMBlock
*find_ram_block(ram_addr_t addr
)
1248 QTAILQ_FOREACH(block
, &ram_list
.blocks
, next
) {
1249 if (block
->offset
== addr
) {
1257 void qemu_ram_set_idstr(ram_addr_t addr
, const char *name
, DeviceState
*dev
)
1259 RAMBlock
*new_block
= find_ram_block(addr
);
1263 assert(!new_block
->idstr
[0]);
1266 char *id
= qdev_get_dev_path(dev
);
1268 snprintf(new_block
->idstr
, sizeof(new_block
->idstr
), "%s/", id
);
1272 pstrcat(new_block
->idstr
, sizeof(new_block
->idstr
), name
);
1274 /* This assumes the iothread lock is taken here too. */
1275 qemu_mutex_lock_ramlist();
1276 QTAILQ_FOREACH(block
, &ram_list
.blocks
, next
) {
1277 if (block
!= new_block
&& !strcmp(block
->idstr
, new_block
->idstr
)) {
1278 fprintf(stderr
, "RAMBlock \"%s\" already registered, abort!\n",
1283 qemu_mutex_unlock_ramlist();
1286 void qemu_ram_unset_idstr(ram_addr_t addr
)
1288 RAMBlock
*block
= find_ram_block(addr
);
1291 memset(block
->idstr
, 0, sizeof(block
->idstr
));
1295 static int memory_try_enable_merging(void *addr
, size_t len
)
1297 if (!qemu_opt_get_bool(qemu_get_machine_opts(), "mem-merge", true)) {
1298 /* disabled by the user */
1302 return qemu_madvise(addr
, len
, QEMU_MADV_MERGEABLE
);
1305 static ram_addr_t
ram_block_add(RAMBlock
*new_block
, Error
**errp
)
1308 ram_addr_t old_ram_size
, new_ram_size
;
1310 old_ram_size
= last_ram_offset() >> TARGET_PAGE_BITS
;
1312 /* This assumes the iothread lock is taken here too. */
1313 qemu_mutex_lock_ramlist();
1314 new_block
->offset
= find_ram_offset(new_block
->length
);
1316 if (!new_block
->host
) {
1317 if (xen_enabled()) {
1318 xen_ram_alloc(new_block
->offset
, new_block
->length
, new_block
->mr
);
1320 new_block
->host
= phys_mem_alloc(new_block
->length
,
1321 &new_block
->mr
->align
);
1322 if (!new_block
->host
) {
1323 error_setg_errno(errp
, errno
,
1324 "cannot set up guest memory '%s'",
1325 memory_region_name(new_block
->mr
));
1326 qemu_mutex_unlock_ramlist();
1329 memory_try_enable_merging(new_block
->host
, new_block
->length
);
1333 /* Keep the list sorted from biggest to smallest block. */
1334 QTAILQ_FOREACH(block
, &ram_list
.blocks
, next
) {
1335 if (block
->length
< new_block
->length
) {
1340 QTAILQ_INSERT_BEFORE(block
, new_block
, next
);
1342 QTAILQ_INSERT_TAIL(&ram_list
.blocks
, new_block
, next
);
1344 ram_list
.mru_block
= NULL
;
1347 qemu_mutex_unlock_ramlist();
1349 new_ram_size
= last_ram_offset() >> TARGET_PAGE_BITS
;
1351 if (new_ram_size
> old_ram_size
) {
1353 for (i
= 0; i
< DIRTY_MEMORY_NUM
; i
++) {
1354 ram_list
.dirty_memory
[i
] =
1355 bitmap_zero_extend(ram_list
.dirty_memory
[i
],
1356 old_ram_size
, new_ram_size
);
1359 cpu_physical_memory_set_dirty_range(new_block
->offset
, new_block
->length
);
1361 qemu_ram_setup_dump(new_block
->host
, new_block
->length
);
1362 qemu_madvise(new_block
->host
, new_block
->length
, QEMU_MADV_HUGEPAGE
);
1363 qemu_madvise(new_block
->host
, new_block
->length
, QEMU_MADV_DONTFORK
);
1365 if (kvm_enabled()) {
1366 kvm_setup_guest_memory(new_block
->host
, new_block
->length
);
1369 return new_block
->offset
;
1373 ram_addr_t
qemu_ram_alloc_from_file(ram_addr_t size
, MemoryRegion
*mr
,
1374 bool share
, const char *mem_path
,
1377 RAMBlock
*new_block
;
1379 Error
*local_err
= NULL
;
1381 if (xen_enabled()) {
1382 error_setg(errp
, "-mem-path not supported with Xen");
1386 if (phys_mem_alloc
!= qemu_anon_ram_alloc
) {
1388 * file_ram_alloc() needs to allocate just like
1389 * phys_mem_alloc, but we haven't bothered to provide
1393 "-mem-path not supported with this accelerator");
1397 size
= TARGET_PAGE_ALIGN(size
);
1398 new_block
= g_malloc0(sizeof(*new_block
));
1400 new_block
->length
= size
;
1401 new_block
->flags
= share
? RAM_SHARED
: 0;
1402 new_block
->host
= file_ram_alloc(new_block
, size
,
1404 if (!new_block
->host
) {
1409 addr
= ram_block_add(new_block
, &local_err
);
1412 error_propagate(errp
, local_err
);
1419 ram_addr_t
qemu_ram_alloc_from_ptr(ram_addr_t size
, void *host
,
1420 MemoryRegion
*mr
, Error
**errp
)
1422 RAMBlock
*new_block
;
1424 Error
*local_err
= NULL
;
1426 size
= TARGET_PAGE_ALIGN(size
);
1427 new_block
= g_malloc0(sizeof(*new_block
));
1429 new_block
->length
= size
;
1431 new_block
->host
= host
;
1433 new_block
->flags
|= RAM_PREALLOC
;
1435 addr
= ram_block_add(new_block
, &local_err
);
1438 error_propagate(errp
, local_err
);
1444 ram_addr_t
qemu_ram_alloc(ram_addr_t size
, MemoryRegion
*mr
, Error
**errp
)
1446 return qemu_ram_alloc_from_ptr(size
, NULL
, mr
, errp
);
1449 void qemu_ram_free_from_ptr(ram_addr_t addr
)
1453 /* This assumes the iothread lock is taken here too. */
1454 qemu_mutex_lock_ramlist();
1455 QTAILQ_FOREACH(block
, &ram_list
.blocks
, next
) {
1456 if (addr
== block
->offset
) {
1457 QTAILQ_REMOVE(&ram_list
.blocks
, block
, next
);
1458 ram_list
.mru_block
= NULL
;
1464 qemu_mutex_unlock_ramlist();
1467 void qemu_ram_free(ram_addr_t addr
)
1471 /* This assumes the iothread lock is taken here too. */
1472 qemu_mutex_lock_ramlist();
1473 QTAILQ_FOREACH(block
, &ram_list
.blocks
, next
) {
1474 if (addr
== block
->offset
) {
1475 QTAILQ_REMOVE(&ram_list
.blocks
, block
, next
);
1476 ram_list
.mru_block
= NULL
;
1478 if (block
->flags
& RAM_PREALLOC
) {
1480 } else if (xen_enabled()) {
1481 xen_invalidate_map_cache_entry(block
->host
);
1483 } else if (block
->fd
>= 0) {
1484 munmap(block
->host
, block
->length
);
1488 qemu_anon_ram_free(block
->host
, block
->length
);
1494 qemu_mutex_unlock_ramlist();
1499 void qemu_ram_remap(ram_addr_t addr
, ram_addr_t length
)
1506 QTAILQ_FOREACH(block
, &ram_list
.blocks
, next
) {
1507 offset
= addr
- block
->offset
;
1508 if (offset
< block
->length
) {
1509 vaddr
= block
->host
+ offset
;
1510 if (block
->flags
& RAM_PREALLOC
) {
1512 } else if (xen_enabled()) {
1516 munmap(vaddr
, length
);
1517 if (block
->fd
>= 0) {
1518 flags
|= (block
->flags
& RAM_SHARED
?
1519 MAP_SHARED
: MAP_PRIVATE
);
1520 area
= mmap(vaddr
, length
, PROT_READ
| PROT_WRITE
,
1521 flags
, block
->fd
, offset
);
1524 * Remap needs to match alloc. Accelerators that
1525 * set phys_mem_alloc never remap. If they did,
1526 * we'd need a remap hook here.
1528 assert(phys_mem_alloc
== qemu_anon_ram_alloc
);
1530 flags
|= MAP_PRIVATE
| MAP_ANONYMOUS
;
1531 area
= mmap(vaddr
, length
, PROT_READ
| PROT_WRITE
,
1534 if (area
!= vaddr
) {
1535 fprintf(stderr
, "Could not remap addr: "
1536 RAM_ADDR_FMT
"@" RAM_ADDR_FMT
"\n",
1540 memory_try_enable_merging(vaddr
, length
);
1541 qemu_ram_setup_dump(vaddr
, length
);
1547 #endif /* !_WIN32 */
1549 int qemu_get_ram_fd(ram_addr_t addr
)
1551 RAMBlock
*block
= qemu_get_ram_block(addr
);
1556 void *qemu_get_ram_block_host_ptr(ram_addr_t addr
)
1558 RAMBlock
*block
= qemu_get_ram_block(addr
);
/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block = qemu_get_ram_block(addr);

    if (xen_enabled()) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map until the end of the page.
         */
        if (block->offset == 0) {
            return xen_map_cache(addr, 0, 0);
        } else if (block->host == NULL) {
            block->host =
                xen_map_cache(block->offset, block->length, 1);
        }
    }
    return block->host + (addr - block->offset);
}
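/*
 * Usage sketch (added; hypothetical device code, not part of this file):
 * a device model doing DMA should go through the address-space API rather
 * than qemu_get_ram_ptr(), e.g.
 *
 *     uint8_t buf[64];
 *     cpu_physical_memory_rw(guest_paddr, buf, sizeof(buf), 0);   // read
 *     cpu_physical_memory_rw(guest_paddr, buf, sizeof(buf), 1);   // write
 *
 * where guest_paddr is a hypothetical guest physical address; this routes
 * the access through MMIO handlers and dirty tracking as appropriate.
 */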
1590 /* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
1591 * but takes a size argument */
1592 static void *qemu_ram_ptr_length(ram_addr_t addr
, hwaddr
*size
)
1597 if (xen_enabled()) {
1598 return xen_map_cache(addr
, *size
, 1);
1602 QTAILQ_FOREACH(block
, &ram_list
.blocks
, next
) {
1603 if (addr
- block
->offset
< block
->length
) {
1604 if (addr
- block
->offset
+ *size
> block
->length
)
1605 *size
= block
->length
- addr
+ block
->offset
;
1606 return block
->host
+ (addr
- block
->offset
);
1610 fprintf(stderr
, "Bad ram offset %" PRIx64
"\n", (uint64_t)addr
);
1615 /* Some of the softmmu routines need to translate from a host pointer
1616 (typically a TLB entry) back to a ram offset. */
1617 MemoryRegion
*qemu_ram_addr_from_host(void *ptr
, ram_addr_t
*ram_addr
)
1620 uint8_t *host
= ptr
;
1622 if (xen_enabled()) {
1623 *ram_addr
= xen_ram_addr_from_mapcache(ptr
);
1624 return qemu_get_ram_block(*ram_addr
)->mr
;
1627 block
= ram_list
.mru_block
;
1628 if (block
&& block
->host
&& host
- block
->host
< block
->length
) {
1632 QTAILQ_FOREACH(block
, &ram_list
.blocks
, next
) {
        /* This case happens when the block is not mapped. */
        if (block->host == NULL) {
1637 if (host
- block
->host
< block
->length
) {
1645 *ram_addr
= block
->offset
+ (host
- block
->host
);
1649 static void notdirty_mem_write(void *opaque
, hwaddr ram_addr
,
1650 uint64_t val
, unsigned size
)
1652 if (!cpu_physical_memory_get_dirty_flag(ram_addr
, DIRTY_MEMORY_CODE
)) {
1653 tb_invalidate_phys_page_fast(ram_addr
, size
);
1657 stb_p(qemu_get_ram_ptr(ram_addr
), val
);
1660 stw_p(qemu_get_ram_ptr(ram_addr
), val
);
1663 stl_p(qemu_get_ram_ptr(ram_addr
), val
);
1668 cpu_physical_memory_set_dirty_range_nocode(ram_addr
, size
);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (!cpu_physical_memory_is_clean(ram_addr)) {
1672 CPUArchState
*env
= current_cpu
->env_ptr
;
1673 tlb_set_dirty(env
, current_cpu
->mem_io_vaddr
);
1677 static bool notdirty_mem_accepts(void *opaque
, hwaddr addr
,
1678 unsigned size
, bool is_write
)
1683 static const MemoryRegionOps notdirty_mem_ops
= {
1684 .write
= notdirty_mem_write
,
1685 .valid
.accepts
= notdirty_mem_accepts
,
1686 .endianness
= DEVICE_NATIVE_ENDIAN
,
1689 /* Generate a debug exception if a watchpoint has been hit. */
1690 static void check_watchpoint(int offset
, int len
, int flags
)
1692 CPUState
*cpu
= current_cpu
;
1693 CPUArchState
*env
= cpu
->env_ptr
;
1694 target_ulong pc
, cs_base
;
1699 if (cpu
->watchpoint_hit
) {
        /* We re-entered the check after replacing the TB.  Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
1703 cpu_interrupt(cpu
, CPU_INTERRUPT_DEBUG
);
1706 vaddr
= (cpu
->mem_io_vaddr
& TARGET_PAGE_MASK
) + offset
;
1707 QTAILQ_FOREACH(wp
, &cpu
->watchpoints
, entry
) {
1708 if (cpu_watchpoint_address_matches(wp
, vaddr
, len
)
1709 && (wp
->flags
& flags
)) {
1710 if (flags
== BP_MEM_READ
) {
1711 wp
->flags
|= BP_WATCHPOINT_HIT_READ
;
1713 wp
->flags
|= BP_WATCHPOINT_HIT_WRITE
;
1715 wp
->hitaddr
= vaddr
;
1716 if (!cpu
->watchpoint_hit
) {
1717 cpu
->watchpoint_hit
= wp
;
1718 tb_check_watchpoint(cpu
);
1719 if (wp
->flags
& BP_STOP_BEFORE_ACCESS
) {
1720 cpu
->exception_index
= EXCP_DEBUG
;
1723 cpu_get_tb_cpu_state(env
, &pc
, &cs_base
, &cpu_flags
);
1724 tb_gen_code(cpu
, pc
, cs_base
, cpu_flags
, 1);
1725 cpu_resume_from_signal(cpu
, NULL
);
1729 wp
->flags
&= ~BP_WATCHPOINT_HIT
;
1734 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
1735 so these check for a hit then pass through to the normal out-of-line
1737 static uint64_t watch_mem_read(void *opaque
, hwaddr addr
,
1740 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, size
, BP_MEM_READ
);
1742 case 1: return ldub_phys(&address_space_memory
, addr
);
1743 case 2: return lduw_phys(&address_space_memory
, addr
);
1744 case 4: return ldl_phys(&address_space_memory
, addr
);
1749 static void watch_mem_write(void *opaque
, hwaddr addr
,
1750 uint64_t val
, unsigned size
)
1752 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, size
, BP_MEM_WRITE
);
1755 stb_phys(&address_space_memory
, addr
, val
);
1758 stw_phys(&address_space_memory
, addr
, val
);
1761 stl_phys(&address_space_memory
, addr
, val
);
1767 static const MemoryRegionOps watch_mem_ops
= {
1768 .read
= watch_mem_read
,
1769 .write
= watch_mem_write
,
1770 .endianness
= DEVICE_NATIVE_ENDIAN
,
1773 static uint64_t subpage_read(void *opaque
, hwaddr addr
,
1776 subpage_t
*subpage
= opaque
;
1779 #if defined(DEBUG_SUBPAGE)
1780 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
"\n", __func__
,
1781 subpage
, len
, addr
);
1783 address_space_read(subpage
->as
, addr
+ subpage
->base
, buf
, len
);
1796 static void subpage_write(void *opaque
, hwaddr addr
,
1797 uint64_t value
, unsigned len
)
1799 subpage_t
*subpage
= opaque
;
1802 #if defined(DEBUG_SUBPAGE)
1803 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
1804 " value %"PRIx64
"\n",
1805 __func__
, subpage
, len
, addr
, value
);
1820 address_space_write(subpage
->as
, addr
+ subpage
->base
, buf
, len
);
1823 static bool subpage_accepts(void *opaque
, hwaddr addr
,
1824 unsigned len
, bool is_write
)
1826 subpage_t
*subpage
= opaque
;
1827 #if defined(DEBUG_SUBPAGE)
1828 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx
"\n",
1829 __func__
, subpage
, is_write
? 'w' : 'r', len
, addr
);
1832 return address_space_access_valid(subpage
->as
, addr
+ subpage
->base
,
1836 static const MemoryRegionOps subpage_ops
= {
1837 .read
= subpage_read
,
1838 .write
= subpage_write
,
1839 .valid
.accepts
= subpage_accepts
,
1840 .endianness
= DEVICE_NATIVE_ENDIAN
,
1843 static int subpage_register (subpage_t
*mmio
, uint32_t start
, uint32_t end
,
1848 if (start
>= TARGET_PAGE_SIZE
|| end
>= TARGET_PAGE_SIZE
)
1850 idx
= SUBPAGE_IDX(start
);
1851 eidx
= SUBPAGE_IDX(end
);
1852 #if defined(DEBUG_SUBPAGE)
1853 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
1854 __func__
, mmio
, start
, end
, idx
, eidx
, section
);
1856 for (; idx
<= eidx
; idx
++) {
1857 mmio
->sub_section
[idx
] = section
;
1863 static subpage_t
*subpage_init(AddressSpace
*as
, hwaddr base
)
1867 mmio
= g_malloc0(sizeof(subpage_t
));
1871 memory_region_init_io(&mmio
->iomem
, NULL
, &subpage_ops
, mmio
,
1872 NULL
, TARGET_PAGE_SIZE
);
1873 mmio
->iomem
.subpage
= true;
1874 #if defined(DEBUG_SUBPAGE)
1875 printf("%s: %p base " TARGET_FMT_plx
" len %08x\n", __func__
,
1876 mmio
, base
, TARGET_PAGE_SIZE
);
1878 subpage_register(mmio
, 0, TARGET_PAGE_SIZE
-1, PHYS_SECTION_UNASSIGNED
);
static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
                              MemoryRegion *mr)
{
    MemoryRegionSection section = {
        .address_space = as,
        .mr = mr,
        .offset_within_address_space = 0,
        .offset_within_region = 0,
        .size = int128_2_64(),
    };

    return phys_section_add(map, &section);
}
1898 MemoryRegion
*iotlb_to_region(AddressSpace
*as
, hwaddr index
)
1900 return as
->dispatch
->map
.sections
[index
& ~TARGET_PAGE_MASK
].mr
;
static void io_mem_init(void)
{
    memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
    memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
                          NULL, UINT64_MAX);
    memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
                          NULL, UINT64_MAX);
    memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
                          NULL, UINT64_MAX);
}
1914 static void mem_begin(MemoryListener
*listener
)
1916 AddressSpace
*as
= container_of(listener
, AddressSpace
, dispatch_listener
);
1917 AddressSpaceDispatch
*d
= g_new0(AddressSpaceDispatch
, 1);
1920 n
= dummy_section(&d
->map
, as
, &io_mem_unassigned
);
1921 assert(n
== PHYS_SECTION_UNASSIGNED
);
1922 n
= dummy_section(&d
->map
, as
, &io_mem_notdirty
);
1923 assert(n
== PHYS_SECTION_NOTDIRTY
);
1924 n
= dummy_section(&d
->map
, as
, &io_mem_rom
);
1925 assert(n
== PHYS_SECTION_ROM
);
1926 n
= dummy_section(&d
->map
, as
, &io_mem_watch
);
1927 assert(n
== PHYS_SECTION_WATCH
);
1929 d
->phys_map
= (PhysPageEntry
) { .ptr
= PHYS_MAP_NODE_NIL
, .skip
= 1 };
1931 as
->next_dispatch
= d
;
1934 static void mem_commit(MemoryListener
*listener
)
1936 AddressSpace
*as
= container_of(listener
, AddressSpace
, dispatch_listener
);
1937 AddressSpaceDispatch
*cur
= as
->dispatch
;
1938 AddressSpaceDispatch
*next
= as
->next_dispatch
;
1940 phys_page_compact_all(next
, next
->map
.nodes_nb
);
1942 as
->dispatch
= next
;
1945 phys_sections_free(&cur
->map
);
1950 static void tcg_commit(MemoryListener
*listener
)
1954 /* since each CPU stores ram addresses in its TLB cache, we must
1955 reset the modified entries */
1958 /* FIXME: Disentangle the cpu.h circular files deps so we can
1959 directly get the right CPU from listener. */
1960 if (cpu
->tcg_as_listener
!= listener
) {
1967 static void core_log_global_start(MemoryListener
*listener
)
1969 cpu_physical_memory_set_dirty_tracking(true);
1972 static void core_log_global_stop(MemoryListener
*listener
)
1974 cpu_physical_memory_set_dirty_tracking(false);
1977 static MemoryListener core_memory_listener
= {
1978 .log_global_start
= core_log_global_start
,
1979 .log_global_stop
= core_log_global_stop
,
1983 void address_space_init_dispatch(AddressSpace
*as
)
1985 as
->dispatch
= NULL
;
1986 as
->dispatch_listener
= (MemoryListener
) {
1988 .commit
= mem_commit
,
1989 .region_add
= mem_add
,
1990 .region_nop
= mem_add
,
1993 memory_listener_register(&as
->dispatch_listener
, as
);
1996 void address_space_destroy_dispatch(AddressSpace
*as
)
1998 AddressSpaceDispatch
*d
= as
->dispatch
;
2000 memory_listener_unregister(&as
->dispatch_listener
);
2002 as
->dispatch
= NULL
;
2005 static void memory_map_init(void)
2007 system_memory
= g_malloc(sizeof(*system_memory
));
2009 memory_region_init(system_memory
, NULL
, "system", UINT64_MAX
);
2010 address_space_init(&address_space_memory
, system_memory
, "memory");
2012 system_io
= g_malloc(sizeof(*system_io
));
2013 memory_region_init_io(system_io
, NULL
, &unassigned_io_ops
, NULL
, "io",
2015 address_space_init(&address_space_io
, system_io
, "I/O");
2017 memory_listener_register(&core_memory_listener
, &address_space_memory
);
2020 MemoryRegion
*get_system_memory(void)
2022 return system_memory
;
2025 MemoryRegion
*get_system_io(void)
2030 #endif /* !defined(CONFIG_USER_ONLY) */
2032 /* physical memory access (slow version, mainly for debug) */
2033 #if defined(CONFIG_USER_ONLY)
2034 int cpu_memory_rw_debug(CPUState
*cpu
, target_ulong addr
,
2035 uint8_t *buf
, int len
, int is_write
)
2042 page
= addr
& TARGET_PAGE_MASK
;
2043 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
2046 flags
= page_get_flags(page
);
2047 if (!(flags
& PAGE_VALID
))
2050 if (!(flags
& PAGE_WRITE
))
2052 /* XXX: this code should not depend on lock_user */
2053 if (!(p
= lock_user(VERIFY_WRITE
, addr
, l
, 0)))
2056 unlock_user(p
, addr
, l
);
2058 if (!(flags
& PAGE_READ
))
2060 /* XXX: this code should not depend on lock_user */
2061 if (!(p
= lock_user(VERIFY_READ
, addr
, l
, 1)))
2064 unlock_user(p
, addr
, 0);
2075 static void invalidate_and_set_dirty(hwaddr addr
,
2078 if (cpu_physical_memory_range_includes_clean(addr
, length
)) {
2079 tb_invalidate_phys_range(addr
, addr
+ length
, 0);
2080 cpu_physical_memory_set_dirty_range_nocode(addr
, length
);
2082 xen_modified_memory(addr
, length
);
static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
{
    unsigned access_size_max = mr->ops->valid.max_access_size;

    /* Regions are assumed to support 1-4 byte accesses unless
       otherwise specified.  */
    if (access_size_max == 0) {
        access_size_max = 4;
    }

    /* Bound the maximum access by the alignment of the address.  */
    if (!mr->ops->impl.unaligned) {
        unsigned align_size_max = addr & -addr;
        if (align_size_max != 0 && align_size_max < access_size_max) {
            access_size_max = align_size_max;
        }
    }

    /* Don't attempt accesses larger than the maximum.  */
    if (l > access_size_max) {
        l = access_size_max;
    }
    if (l & (l - 1)) {
        l = 1 << (qemu_fls(l) - 1);
    }

    return l;
}
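/*
 * Worked example (added; not in the original file): for a region whose ops
 * declare max_access_size = 4 and no unaligned support, a request of l = 8
 * at addr = 0x1002 is first capped to 4, then capped again to 2 by the
 * alignment term (0x1002 & -0x1002 == 2), so the 8-byte access ends up
 * being issued as a sequence of smaller device accesses.
 */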
2114 bool address_space_rw(AddressSpace
*as
, hwaddr addr
, uint8_t *buf
,
2115 int len
, bool is_write
)
2126 mr
= address_space_translate(as
, addr
, &addr1
, &l
, is_write
);
2129 if (!memory_access_is_direct(mr
, is_write
)) {
2130 l
= memory_access_size(mr
, l
, addr1
);
2131 /* XXX: could force current_cpu to NULL to avoid
2135 /* 64 bit write access */
2137 error
|= io_mem_write(mr
, addr1
, val
, 8);
2140 /* 32 bit write access */
2142 error
|= io_mem_write(mr
, addr1
, val
, 4);
2145 /* 16 bit write access */
2147 error
|= io_mem_write(mr
, addr1
, val
, 2);
2150 /* 8 bit write access */
2152 error
|= io_mem_write(mr
, addr1
, val
, 1);
2158 addr1
+= memory_region_get_ram_addr(mr
);
2160 ptr
= qemu_get_ram_ptr(addr1
);
2161 memcpy(ptr
, buf
, l
);
2162 invalidate_and_set_dirty(addr1
, l
);
2165 if (!memory_access_is_direct(mr
, is_write
)) {
2167 l
= memory_access_size(mr
, l
, addr1
);
2170 /* 64 bit read access */
2171 error
|= io_mem_read(mr
, addr1
, &val
, 8);
2175 /* 32 bit read access */
2176 error
|= io_mem_read(mr
, addr1
, &val
, 4);
2180 /* 16 bit read access */
2181 error
|= io_mem_read(mr
, addr1
, &val
, 2);
2185 /* 8 bit read access */
2186 error
|= io_mem_read(mr
, addr1
, &val
, 1);
2194 ptr
= qemu_get_ram_ptr(mr
->ram_addr
+ addr1
);
2195 memcpy(buf
, ptr
, l
);
2206 bool address_space_write(AddressSpace
*as
, hwaddr addr
,
2207 const uint8_t *buf
, int len
)
2209 return address_space_rw(as
, addr
, (uint8_t *)buf
, len
, true);
2212 bool address_space_read(AddressSpace
*as
, hwaddr addr
, uint8_t *buf
, int len
)
2214 return address_space_rw(as
, addr
, buf
, len
, false);
2218 void cpu_physical_memory_rw(hwaddr addr
, uint8_t *buf
,
2219 int len
, int is_write
)
2221 address_space_rw(&address_space_memory
, addr
, buf
, len
, is_write
);
2224 enum write_rom_type
{
2229 static inline void cpu_physical_memory_write_rom_internal(AddressSpace
*as
,
2230 hwaddr addr
, const uint8_t *buf
, int len
, enum write_rom_type type
)
2239 mr
= address_space_translate(as
, addr
, &addr1
, &l
, true);
2241 if (!(memory_region_is_ram(mr
) ||
2242 memory_region_is_romd(mr
))) {
2245 addr1
+= memory_region_get_ram_addr(mr
);
2247 ptr
= qemu_get_ram_ptr(addr1
);
2250 memcpy(ptr
, buf
, l
);
2251 invalidate_and_set_dirty(addr1
, l
);
2254 flush_icache_range((uintptr_t)ptr
, (uintptr_t)ptr
+ l
);
2264 /* used for ROM loading : can write in RAM and ROM */
2265 void cpu_physical_memory_write_rom(AddressSpace
*as
, hwaddr addr
,
2266 const uint8_t *buf
, int len
)
2268 cpu_physical_memory_write_rom_internal(as
, addr
, buf
, len
, WRITE_DATA
);
2271 void cpu_flush_icache_range(hwaddr start
, int len
)
2274 * This function should do the same thing as an icache flush that was
2275 * triggered from within the guest. For TCG we are always cache coherent,
2276 * so there is no need to flush anything. For KVM / Xen we need to flush
2277 * the host's instruction cache at least.
2279 if (tcg_enabled()) {
2283 cpu_physical_memory_write_rom_internal(&address_space_memory
,
2284 start
, NULL
, len
, FLUSH_CACHE
);
2294 static BounceBuffer bounce
;
2296 typedef struct MapClient
{
2298 void (*callback
)(void *opaque
);
2299 QLIST_ENTRY(MapClient
) link
;
2302 static QLIST_HEAD(map_client_list
, MapClient
) map_client_list
2303 = QLIST_HEAD_INITIALIZER(map_client_list
);
2305 void *cpu_register_map_client(void *opaque
, void (*callback
)(void *opaque
))
2307 MapClient
*client
= g_malloc(sizeof(*client
));
2309 client
->opaque
= opaque
;
2310 client
->callback
= callback
;
2311 QLIST_INSERT_HEAD(&map_client_list
, client
, link
);
2315 static void cpu_unregister_map_client(void *_client
)
2317 MapClient
*client
= (MapClient
*)_client
;
2319 QLIST_REMOVE(client
, link
);
2323 static void cpu_notify_map_clients(void)
2327 while (!QLIST_EMPTY(&map_client_list
)) {
2328 client
= QLIST_FIRST(&map_client_list
);
2329 client
->callback(client
->opaque
);
2330 cpu_unregister_map_client(client
);
2334 bool address_space_access_valid(AddressSpace
*as
, hwaddr addr
, int len
, bool is_write
)
2341 mr
= address_space_translate(as
, addr
, &xlat
, &l
, is_write
);
2342 if (!memory_access_is_direct(mr
, is_write
)) {
2343 l
= memory_access_size(mr
, l
, addr
);
2344 if (!memory_region_access_valid(mr
, xlat
, l
, is_write
)) {
2355 /* Map a physical memory region into a host virtual address.
2356 * May map a subset of the requested range, given by and returned in *plen.
2357 * May return NULL if resources needed to perform the mapping are exhausted.
2358 * Use only for reads OR writes - not for read-modify-write operations.
2359 * Use cpu_register_map_client() to know when retrying the map operation is
2360 * likely to succeed.
2362 void *address_space_map(AddressSpace
*as
,
2369 hwaddr l
, xlat
, base
;
2370 MemoryRegion
*mr
, *this_mr
;
2378 mr
= address_space_translate(as
, addr
, &xlat
, &l
, is_write
);
2379 if (!memory_access_is_direct(mr
, is_write
)) {
2380 if (bounce
.buffer
) {
2383 /* Avoid unbounded allocations */
2384 l
= MIN(l
, TARGET_PAGE_SIZE
);
2385 bounce
.buffer
= qemu_memalign(TARGET_PAGE_SIZE
, l
);
2389 memory_region_ref(mr
);
2392 address_space_read(as
, addr
, bounce
.buffer
, l
);
2396 return bounce
.buffer
;
2400 raddr
= memory_region_get_ram_addr(mr
);
2411 this_mr
= address_space_translate(as
, addr
, &xlat
, &l
, is_write
);
2412 if (this_mr
!= mr
|| xlat
!= base
+ done
) {
2417 memory_region_ref(mr
);
2419 return qemu_ram_ptr_length(raddr
+ base
, plen
);
/* Unmaps a memory region previously mapped by address_space_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         int is_write, hwaddr access_len)
{
    if (buffer != bounce.buffer) {
        MemoryRegion *mr;
        ram_addr_t addr1;

        mr = qemu_ram_addr_from_host(buffer, &addr1);
        assert(mr != NULL);
        if (is_write) {
            invalidate_and_set_dirty(addr1, access_len);
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        memory_region_unref(mr);
        return;
    }
    if (is_write) {
        address_space_write(as, bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    memory_region_unref(bounce.mr);
    cpu_notify_map_clients();
}
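/*
 * Usage sketch (added; illustrative only): the typical pattern for code that
 * wants a temporary host mapping of guest memory is
 *
 *     hwaddr plen = size;                                  // size: hypothetical
 *     void *p = address_space_map(as, gpa, &plen, true);   // gpa: hypothetical
 *     if (p) {
 *         ... access up to plen bytes at p ...
 *         address_space_unmap(as, p, plen, true, plen);
 *     } else {
 *         // resources (e.g. the single bounce buffer) are busy; register a
 *         // callback with cpu_register_map_client() and retry later
 *     }
 *
 * Note that plen may come back smaller than requested, so callers must be
 * prepared to loop.
 */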
2453 void *cpu_physical_memory_map(hwaddr addr
,
2457 return address_space_map(&address_space_memory
, addr
, plen
, is_write
);
2460 void cpu_physical_memory_unmap(void *buffer
, hwaddr len
,
2461 int is_write
, hwaddr access_len
)
2463 return address_space_unmap(&address_space_memory
, buffer
, len
, is_write
, access_len
);
2466 /* warning: addr must be aligned */
2467 static inline uint32_t ldl_phys_internal(AddressSpace
*as
, hwaddr addr
,
2468 enum device_endian endian
)
2476 mr
= address_space_translate(as
, addr
, &addr1
, &l
, false);
2477 if (l
< 4 || !memory_access_is_direct(mr
, false)) {
2479 io_mem_read(mr
, addr1
, &val
, 4);
2480 #if defined(TARGET_WORDS_BIGENDIAN)
2481 if (endian
== DEVICE_LITTLE_ENDIAN
) {
2485 if (endian
== DEVICE_BIG_ENDIAN
) {
2491 ptr
= qemu_get_ram_ptr((memory_region_get_ram_addr(mr
)
2495 case DEVICE_LITTLE_ENDIAN
:
2496 val
= ldl_le_p(ptr
);
2498 case DEVICE_BIG_ENDIAN
:
2499 val
= ldl_be_p(ptr
);
2509 uint32_t ldl_phys(AddressSpace
*as
, hwaddr addr
)
2511 return ldl_phys_internal(as
, addr
, DEVICE_NATIVE_ENDIAN
);
2514 uint32_t ldl_le_phys(AddressSpace
*as
, hwaddr addr
)
2516 return ldl_phys_internal(as
, addr
, DEVICE_LITTLE_ENDIAN
);
2519 uint32_t ldl_be_phys(AddressSpace
*as
, hwaddr addr
)
2521 return ldl_phys_internal(as
, addr
, DEVICE_BIG_ENDIAN
);
2524 /* warning: addr must be aligned */
2525 static inline uint64_t ldq_phys_internal(AddressSpace
*as
, hwaddr addr
,
2526 enum device_endian endian
)
2534 mr
= address_space_translate(as
, addr
, &addr1
, &l
,
2536 if (l
< 8 || !memory_access_is_direct(mr
, false)) {
2538 io_mem_read(mr
, addr1
, &val
, 8);
2539 #if defined(TARGET_WORDS_BIGENDIAN)
2540 if (endian
== DEVICE_LITTLE_ENDIAN
) {
2544 if (endian
== DEVICE_BIG_ENDIAN
) {
2550 ptr
= qemu_get_ram_ptr((memory_region_get_ram_addr(mr
)
2554 case DEVICE_LITTLE_ENDIAN
:
2555 val
= ldq_le_p(ptr
);
2557 case DEVICE_BIG_ENDIAN
:
2558 val
= ldq_be_p(ptr
);
2568 uint64_t ldq_phys(AddressSpace
*as
, hwaddr addr
)
2570 return ldq_phys_internal(as
, addr
, DEVICE_NATIVE_ENDIAN
);
2573 uint64_t ldq_le_phys(AddressSpace
*as
, hwaddr addr
)
2575 return ldq_phys_internal(as
, addr
, DEVICE_LITTLE_ENDIAN
);
2578 uint64_t ldq_be_phys(AddressSpace
*as
, hwaddr addr
)
2580 return ldq_phys_internal(as
, addr
, DEVICE_BIG_ENDIAN
);
2584 uint32_t ldub_phys(AddressSpace
*as
, hwaddr addr
)
2587 address_space_rw(as
, addr
, &val
, 1, 0);
2591 /* warning: addr must be aligned */
2592 static inline uint32_t lduw_phys_internal(AddressSpace
*as
, hwaddr addr
,
2593 enum device_endian endian
)
2601 mr
= address_space_translate(as
, addr
, &addr1
, &l
,
2603 if (l
< 2 || !memory_access_is_direct(mr
, false)) {
2605 io_mem_read(mr
, addr1
, &val
, 2);
2606 #if defined(TARGET_WORDS_BIGENDIAN)
2607 if (endian
== DEVICE_LITTLE_ENDIAN
) {
2611 if (endian
== DEVICE_BIG_ENDIAN
) {
2617 ptr
= qemu_get_ram_ptr((memory_region_get_ram_addr(mr
)
2621 case DEVICE_LITTLE_ENDIAN
:
2622 val
= lduw_le_p(ptr
);
2624 case DEVICE_BIG_ENDIAN
:
2625 val
= lduw_be_p(ptr
);
2635 uint32_t lduw_phys(AddressSpace
*as
, hwaddr addr
)
2637 return lduw_phys_internal(as
, addr
, DEVICE_NATIVE_ENDIAN
);
2640 uint32_t lduw_le_phys(AddressSpace
*as
, hwaddr addr
)
2642 return lduw_phys_internal(as
, addr
, DEVICE_LITTLE_ENDIAN
);
2645 uint32_t lduw_be_phys(AddressSpace
*as
, hwaddr addr
)
2647 return lduw_phys_internal(as
, addr
, DEVICE_BIG_ENDIAN
);
2650 /* warning: addr must be aligned. The ram page is not masked as dirty
2651 and the code inside is not invalidated. It is useful if the dirty
2652 bits are used to track modified PTEs */
2653 void stl_phys_notdirty(AddressSpace
*as
, hwaddr addr
, uint32_t val
)
2660 mr
= address_space_translate(as
, addr
, &addr1
, &l
,
2662 if (l
< 4 || !memory_access_is_direct(mr
, true)) {
2663 io_mem_write(mr
, addr1
, val
, 4);
2665 addr1
+= memory_region_get_ram_addr(mr
) & TARGET_PAGE_MASK
;
2666 ptr
= qemu_get_ram_ptr(addr1
);
2669 if (unlikely(in_migration
)) {
2670 if (cpu_physical_memory_is_clean(addr1
)) {
2671 /* invalidate code */
2672 tb_invalidate_phys_page_range(addr1
, addr1
+ 4, 0);
2674 cpu_physical_memory_set_dirty_range_nocode(addr1
, 4);
2680 /* warning: addr must be aligned */
2681 static inline void stl_phys_internal(AddressSpace
*as
,
2682 hwaddr addr
, uint32_t val
,
2683 enum device_endian endian
)
2690 mr
= address_space_translate(as
, addr
, &addr1
, &l
,
2692 if (l
< 4 || !memory_access_is_direct(mr
, true)) {
2693 #if defined(TARGET_WORDS_BIGENDIAN)
2694 if (endian
== DEVICE_LITTLE_ENDIAN
) {
2698 if (endian
== DEVICE_BIG_ENDIAN
) {
2702 io_mem_write(mr
, addr1
, val
, 4);
2705 addr1
+= memory_region_get_ram_addr(mr
) & TARGET_PAGE_MASK
;
2706 ptr
= qemu_get_ram_ptr(addr1
);
2708 case DEVICE_LITTLE_ENDIAN
:
2711 case DEVICE_BIG_ENDIAN
:
2718 invalidate_and_set_dirty(addr1
, 4);
2722 void stl_phys(AddressSpace
*as
, hwaddr addr
, uint32_t val
)
2724 stl_phys_internal(as
, addr
, val
, DEVICE_NATIVE_ENDIAN
);
2727 void stl_le_phys(AddressSpace
*as
, hwaddr addr
, uint32_t val
)
2729 stl_phys_internal(as
, addr
, val
, DEVICE_LITTLE_ENDIAN
);
2732 void stl_be_phys(AddressSpace
*as
, hwaddr addr
, uint32_t val
)
2734 stl_phys_internal(as
, addr
, val
, DEVICE_BIG_ENDIAN
);
2738 void stb_phys(AddressSpace
*as
, hwaddr addr
, uint32_t val
)
2741 address_space_rw(as
, addr
, &v
, 1, 1);
2744 /* warning: addr must be aligned */
2745 static inline void stw_phys_internal(AddressSpace
*as
,
2746 hwaddr addr
, uint32_t val
,
2747 enum device_endian endian
)
2754 mr
= address_space_translate(as
, addr
, &addr1
, &l
, true);
2755 if (l
< 2 || !memory_access_is_direct(mr
, true)) {
2756 #if defined(TARGET_WORDS_BIGENDIAN)
2757 if (endian
== DEVICE_LITTLE_ENDIAN
) {
2761 if (endian
== DEVICE_BIG_ENDIAN
) {
2765 io_mem_write(mr
, addr1
, val
, 2);
2768 addr1
+= memory_region_get_ram_addr(mr
) & TARGET_PAGE_MASK
;
2769 ptr
= qemu_get_ram_ptr(addr1
);
2771 case DEVICE_LITTLE_ENDIAN
:
2774 case DEVICE_BIG_ENDIAN
:
2781 invalidate_and_set_dirty(addr1
, 2);
2785 void stw_phys(AddressSpace
*as
, hwaddr addr
, uint32_t val
)
2787 stw_phys_internal(as
, addr
, val
, DEVICE_NATIVE_ENDIAN
);
2790 void stw_le_phys(AddressSpace
*as
, hwaddr addr
, uint32_t val
)
2792 stw_phys_internal(as
, addr
, val
, DEVICE_LITTLE_ENDIAN
);
2795 void stw_be_phys(AddressSpace
*as
, hwaddr addr
, uint32_t val
)
2797 stw_phys_internal(as
, addr
, val
, DEVICE_BIG_ENDIAN
);
2801 void stq_phys(AddressSpace
*as
, hwaddr addr
, uint64_t val
)
2804 address_space_rw(as
, addr
, (void *) &val
, 8, 1);
2807 void stq_le_phys(AddressSpace
*as
, hwaddr addr
, uint64_t val
)
2809 val
= cpu_to_le64(val
);
2810 address_space_rw(as
, addr
, (void *) &val
, 8, 1);
2813 void stq_be_phys(AddressSpace
*as
, hwaddr addr
, uint64_t val
)
2815 val
= cpu_to_be64(val
);
2816 address_space_rw(as
, addr
, (void *) &val
, 8, 1);
2819 /* virtual memory access for debug (includes writing to ROM) */
2820 int cpu_memory_rw_debug(CPUState
*cpu
, target_ulong addr
,
2821 uint8_t *buf
, int len
, int is_write
)
2828 page
= addr
& TARGET_PAGE_MASK
;
2829 phys_addr
= cpu_get_phys_page_debug(cpu
, page
);
2830 /* if no physical page mapped, return an error */
2831 if (phys_addr
== -1)
2833 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
2836 phys_addr
+= (addr
& ~TARGET_PAGE_MASK
);
2838 cpu_physical_memory_write_rom(cpu
->as
, phys_addr
, buf
, l
);
2840 address_space_rw(cpu
->as
, phys_addr
, buf
, l
, 0);
2851 * A helper function for the _utterly broken_ virtio device model to find out if
2852 * it's running on a big endian machine. Don't do this at home kids!
2854 bool target_words_bigendian(void);
2855 bool target_words_bigendian(void)
2857 #if defined(TARGET_WORDS_BIGENDIAN)
2864 #ifndef CONFIG_USER_ONLY
2865 bool cpu_physical_memory_is_io(hwaddr phys_addr
)
2870 mr
= address_space_translate(&address_space_memory
,
2871 phys_addr
, &phys_addr
, &l
, false);
2873 return !(memory_region_is_ram(mr
) ||
2874 memory_region_is_romd(mr
));
2877 void qemu_ram_foreach_block(RAMBlockIterFunc func
, void *opaque
)
2881 QTAILQ_FOREACH(block
, &ram_list
.blocks
, next
) {
2882 func(block
->host
, block
->offset
, block
->length
, opaque
);