/*
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <sys/types.h>

#include "qemu-common.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#endif
#include "exec/cpu-all.h"

#include "exec/cputlb.h"
#include "translate-all.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"

#include "qemu/range.h"

//#define DEBUG_SUBPAGE
#if !defined(CONFIG_USER_ONLY)
static bool in_migration;

RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC   (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED     (1 << 1)

#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside cpu_exec() */
DEFINE_TLS(CPUState *, current_cpu);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */

#if !defined(CONFIG_USER_ONLY)
typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables.  */
#define ADDR_SPACE_BITS 64

#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)

typedef PhysPageEntry Node[P_L2_SIZE];

typedef struct PhysPageMap {
    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb_alloc;
    MemoryRegionSection *sections;

struct AddressSpaceDispatch {
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    uint16_t sub_section[TARGET_PAGE_SIZE];

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;

#if !defined(CONFIG_USER_ONLY)
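/* Note on the helpers below: they maintain the multi-level radix table
 * (PhysPageMap) that maps guest physical page numbers to MemoryRegionSection
 * indices; nodes are allocated lazily and the node array is grown with
 * g_renew() as sections are registered.
 */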
static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);

static uint32_t phys_map_node_alloc(PhysPageMap *map)
    ret = map->nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);
    for (i = 0; i < P_L2_SIZE; ++i) {
        map->nodes[ret][i].skip = 1;
        map->nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;

static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map);
        p = map->nodes[lp->ptr];
        for (i = 0; i < P_L2_SIZE; i++) {
            p[i].ptr = PHYS_SECTION_UNASSIGNED;
    p = map->nodes[lp->ptr];
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
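/* Note: compaction runs after the table has been built and collapses chains
 * of single-child nodes by folding the child's skip count into the parent,
 * so lookups can jump several levels at once.
 */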
/* Compact a non leaf page entry. Simply detect that the entry has a single child,
 * and update our entry so we can skip it and go directly to the destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
    unsigned valid_ptr = P_L2_SIZE;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {

    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
        phys_page_compact(&p[i], nodes, compacted);

    /* We can only compress if there's only one child. */
    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule in the future.
         */
        lp->skip += p[valid_ptr].skip;

static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
    DECLARE_BITMAP(compacted, nodes_nb);

    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes, compacted);

static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
    hwaddr index = addr >> TARGET_PAGE_BITS;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];

    if (sections[lp.ptr].size.hi ||
        range_covers_byte(sections[lp.ptr].offset_within_address_space,
                          sections[lp.ptr].size.lo, addr)) {
        return &sections[lp.ptr];
    return &sections[PHYS_SECTION_UNASSIGNED];
bool memory_region_is_unassigned(MemoryRegion *mr)
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;

static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                         bool resolve_subpage)
    MemoryRegionSection *section;

    section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];

static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
    MemoryRegionSection *section;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    diff = int128_sub(section->mr->size, int128_make64(addr));
    *plen = int128_get64(int128_min(diff, int128_make64(*plen)));

static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
    if (memory_region_is_ram(mr)) {
        return !(is_write && mr->readonly);
    if (memory_region_is_romd(mr)) {
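/* Note: translation below first resolves the section for the address and
 * then, if the region has iommu_ops, applies the IOMMU mapping and its
 * permission bits before the access length is clamped to the page under Xen.
 */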
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
    MemoryRegionSection *section;

    section = address_space_translate_internal(as->dispatch, addr, &addr, plen, true);

    if (!mr->iommu_ops) {

    iotlb = mr->iommu_ops->translate(mr, addr, is_write);
    addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
            | (addr & iotlb.addr_mask));
    len = MIN(len, (addr | iotlb.addr_mask) - addr + 1);
    if (!(iotlb.perm & (1 << is_write))) {
        mr = &io_mem_unassigned;

    as = iotlb.target_as;

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        len = MIN(page, len);

MemoryRegionSection *
address_space_translate_for_iotlb(AddressSpace *as, hwaddr addr, hwaddr *xlat,
    MemoryRegionSection *section;
    section = address_space_translate_internal(as->dispatch, addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);

void cpu_exec_init_all(void)
#if !defined(CONFIG_USER_ONLY)
    qemu_mutex_init(&ram_list.mutex);
#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;

static int cpu_common_pre_load(void *opaque)
    CPUState *cpu = opaque;

    cpu->exception_index = 0;

static bool cpu_common_exception_index_needed(void *opaque)
    CPUState *cpu = opaque;

    return cpu->exception_index != 0;

static const VMStateDescription vmstate_cpu_common_exception_index = {
    .name = "cpu_common/exception_index",
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(exception_index, CPUState),
        VMSTATE_END_OF_LIST()

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .minimum_version_id = 1,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    .subsections = (VMStateSubsection[]) {
        .vmsd = &vmstate_cpu_common_exception_index,
        .needed = cpu_common_exception_index_needed,

CPUState *qemu_get_cpu(int index)
        if (cpu->cpu_index == index) {

#if !defined(CONFIG_USER_ONLY)
void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as)
    /* We only support one address space per cpu at the moment.  */
    assert(cpu->as == as);

    if (cpu->tcg_as_listener) {
        memory_listener_unregister(cpu->tcg_as_listener);
    cpu->tcg_as_listener = g_new0(MemoryListener, 1);

    cpu->tcg_as_listener->commit = tcg_commit;
    memory_listener_register(cpu->tcg_as_listener, as);
void cpu_exec_init(CPUArchState *env)
    CPUState *cpu = ENV_GET_CPU(env);
    CPUClass *cc = CPU_GET_CLASS(cpu);

#ifdef TARGET_WORDS_BIGENDIAN
    cpu->bigendian = true;
    cpu->bigendian = false;

#if defined(CONFIG_USER_ONLY)
    CPU_FOREACH(some_cpu) {
    cpu->cpu_index = cpu_index;
    QTAILQ_INIT(&cpu->breakpoints);
    QTAILQ_INIT(&cpu->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->as = &address_space_memory;
    cpu->thread_id = qemu_get_thread_id();
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
    assert(cc->vmsd == NULL);
    assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
    tb_invalidate_phys_page_range(pc, pc + 1, 0);

static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
    hwaddr phys = cpu_get_phys_page_debug(cpu, pc);
    tb_invalidate_phys_addr(cpu->as,
                            phys | (pc & ~TARGET_PAGE_MASK));
#endif /* TARGET_HAS_ICE */
#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)

int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,

void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)

/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
    /* forbid ranges which are empty or run off the end of the address space */
    if (len == 0 || (addr + len - 1) < addr) {
        error_report("tried to set invalid watchpoint at %"
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
    wp = g_malloc(sizeof(*wp));

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);

    tlb_flush_page(cpu, addr);

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (addr == wp->vaddr && len == wp->len
            && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(cpu, wp);

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
    QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);

    tlb_flush_page(cpu, watchpoint->vaddr);

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(cpu, wp);

/* Return true if this watchpoint address matches the specified
 * access (ie the address range covered by the watchpoint overlaps
 * partially or completely with the address range covered by the
 * access).
 */
static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
    /* We know the lengths are non-zero, but a little caution is
     * required to avoid errors in the case where the range ends
     * exactly at the top of the address space and so addr + len
     * wraps round to zero.
     */
    vaddr wpend = wp->vaddr + wp->len - 1;
    vaddr addrend = addr + len - 1;

    return !(addr > wpend || wp->vaddr > addrend);
/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
#if defined(TARGET_HAS_ICE)
    bp = g_malloc(sizeof(*bp));

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);

    breakpoint_invalidate(cpu, pc);

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(cpu, bp);

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);

    breakpoint_invalidate(cpu, breakpoint->pc);

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(cpu, bp);

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
#if defined(TARGET_HAS_ICE)
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        kvm_update_guest_debug(cpu, 0);
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
        CPUArchState *env = cpu->env_ptr;
void QEMU_NORETURN cpu_abort(CPUState *cpu, const char *fmt, ...)
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
#if defined(CONFIG_USER_ONLY)
    struct sigaction act;
    sigfillset(&act.sa_mask);
    act.sa_handler = SIG_DFL;
    sigaction(SIGABRT, &act, NULL);

#if !defined(CONFIG_USER_ONLY)
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
    /* The list is protected by the iothread lock here.  */
    block = ram_list.mru_block;
    if (block && addr - block->offset < block->length) {
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);

    ram_list.mru_block = block;

static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)block->host + (start - block->offset);
    cpu_tlb_reset_dirty_all(start1, length);

/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t length,
    cpu_physical_memory_clear_dirty_range(start, length, client);

        tlb_reset_dirty_range_all(start, length);

static void cpu_physical_memory_set_dirty_tracking(bool enable)
    in_migration = enable;
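/* Note: for RAM-backed sections the iotlb value encodes the ram address ORed
 * with a special section index (NOTDIRTY or ROM); for MMIO it is the index of
 * the section itself, and watchpointed pages are redirected to the
 * PHYS_SECTION_WATCH slot with TLB_MMIO set on the TLB entry.
 */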
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       hwaddr paddr, hwaddr xlat,
                                       target_ulong *address)
    if (memory_region_is_ram(section->mr)) {
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
            iotlb |= PHYS_SECTION_ROM;
        iotlb = section - section->address_space->dispatch->map.sections;

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;

#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size) = qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this. Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t))
    phys_mem_alloc = alloc;
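/* Note: sections stored in the map are reference-counted copies;
 * phys_section_add() grows the array geometrically and returns the index
 * that the page table (and iotlb encoding above) stores.
 */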
static uint16_t phys_section_add(PhysPageMap *map,
                                 MemoryRegionSection *section)
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(map->sections_nb < TARGET_PAGE_SIZE);

    if (map->sections_nb == map->sections_nb_alloc) {
        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
        map->sections = g_renew(MemoryRegionSection, map->sections,
                                map->sections_nb_alloc);
    map->sections[map->sections_nb] = *section;
    memory_region_ref(section->mr);
    return map->sections_nb++;

static void phys_section_destroy(MemoryRegion *mr)
    memory_region_unref(mr);

        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        object_unref(OBJECT(&subpage->iomem));

static void phys_sections_free(PhysPageMap *map)
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    g_free(map->sections);

static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
    hwaddr base = section->offset_within_address_space
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
                                                   d->map.nodes, d->map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.address_space = d->as;
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&d->map, &subsection));
        subpage = container_of(existing->mr, subpage_t, iomem);
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end,
                     phys_section_add(&d->map, section));

static void register_multipage(AddressSpaceDispatch *d,
                               MemoryRegionSection *section)
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(&d->map, section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,

    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
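/* Note: mem_add() splits an incoming MemoryRegionSection into a subpage for
 * any unaligned head or tail and registers whole-page runs via
 * register_multipage().
 */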
static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = as->next_dispatch;
    MemoryRegionSection now = *section, remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
                        - now.offset_within_address_space;
        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(d, &now);
        now.size = int128_zero();
    while (int128_ne(remain.size, now.size)) {
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);
        if (int128_lt(remain.size, page_size)) {
            register_subpage(d, &now);
        } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
            now.size = page_size;
            register_subpage(d, &now);
            now.size = int128_and(now.size, int128_neg(page_size));
            register_multipage(d, &now);

void qemu_flush_coalesced_mmio_buffer(void)
        kvm_flush_coalesced_mmio_buffer();

void qemu_mutex_lock_ramlist(void)
    qemu_mutex_lock(&ram_list.mutex);

void qemu_mutex_unlock_ramlist(void)
    qemu_mutex_unlock(&ram_list.mutex);

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC       0x958458f6

static long gethugepagesize(const char *path, Error **errp)
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);
        error_setg_errno(errp, errno, "failed to get page size of file %s",
    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
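/* Note: file_ram_alloc() backs a RAMBlock with a file (typically on
 * hugetlbfs): it creates a temporary file under the -mem-path directory,
 * ftruncates it to the hugepage-rounded size and mmaps it, shared or private
 * depending on RAM_SHARED.
 */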
static void *file_ram_alloc(RAMBlock *block,
    char *sanitized_name;
    void * volatile area = NULL;
    Error *local_err = NULL;

    hpagesize = gethugepagesize(path, &local_err);
        error_propagate(errp, local_err);

    if (memory < hpagesize) {
        error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
                   "or larger than huge page size 0x%" PRIx64,

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
                   "host lacks kvm mmu notifiers, -mem-path unsupported");

    /* Make name safe to use with mkstemp by replacing '/' with '_'. */
    sanitized_name = g_strdup(memory_region_name(block->mr));
    for (c = sanitized_name; *c != '\0'; c++) {
    filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
    g_free(sanitized_name);

    fd = mkstemp(filename);
        error_setg_errno(errp, errno,
                         "unable to create backing store for hugepages");
    memory = (memory+hpagesize-1) & ~(hpagesize-1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory)) {
        perror("ftruncate");

    area = mmap(0, memory, PROT_READ | PROT_WRITE,
                (block->flags & RAM_SHARED ? MAP_SHARED : MAP_PRIVATE),
    if (area == MAP_FAILED) {
        error_setg_errno(errp, errno,
                         "unable to map backing store for hugepages");
        os_mem_prealloc(fd, area, memory);

    error_report("%s\n", error_get_pretty(*errp));

static ram_addr_t find_ram_offset(ram_addr_t size)
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    assert(size != 0); /* it would hand out same offset multiple times */

    if (QTAILQ_EMPTY(&ram_list.blocks))

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->length;

        QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
        if (next - end >= size && next - end < mingap) {
            mingap = next - end;
    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",

ram_addr_t last_ram_offset(void)
    ram_addr_t last = 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next)
        last = MAX(last, block->offset + block->length);

static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
    /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
    if (!qemu_opt_get_bool(qemu_get_machine_opts(),
                           "dump-guest-core", true)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
            perror("qemu_madvise");
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                            "but dump_guest_core=off specified\n");

static RAMBlock *find_ram_block(ram_addr_t addr)
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->offset == addr) {

void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
    RAMBlock *new_block = find_ram_block(addr);

    assert(!new_block->idstr[0]);
        char *id = qdev_get_dev_path(dev);
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
    qemu_mutex_unlock_ramlist();

void qemu_ram_unset_idstr(ram_addr_t addr)
    RAMBlock *block = find_ram_block(addr);

        memset(block->idstr, 0, sizeof(block->idstr));

static int memory_try_enable_merging(void *addr, size_t len)
    if (!qemu_opt_get_bool(qemu_get_machine_opts(), "mem-merge", true)) {
        /* disabled by the user */

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
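/* Note: ram_block_add() assigns the block an offset in the ram_addr_t space,
 * allocates host memory unless it was preallocated or Xen-managed, keeps the
 * block list sorted from biggest to smallest, and extends the dirty bitmaps
 * when the ram_addr_t space grows.
 */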
static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp)
    ram_addr_t old_ram_size, new_ram_size;

    old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    new_block->offset = find_ram_offset(new_block->length);

    if (!new_block->host) {
        if (xen_enabled()) {
            xen_ram_alloc(new_block->offset, new_block->length, new_block->mr);
            new_block->host = phys_mem_alloc(new_block->length);
            if (!new_block->host) {
                error_setg_errno(errp, errno,
                                 "cannot set up guest memory '%s'",
                                 memory_region_name(new_block->mr));
                qemu_mutex_unlock_ramlist();
            memory_try_enable_merging(new_block->host, new_block->length);

    /* Keep the list sorted from biggest to smallest block.  */
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->length < new_block->length) {
        QTAILQ_INSERT_BEFORE(block, new_block, next);
        QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
    ram_list.mru_block = NULL;

    qemu_mutex_unlock_ramlist();

    new_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;

    if (new_ram_size > old_ram_size) {
        for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
            ram_list.dirty_memory[i] =
                bitmap_zero_extend(ram_list.dirty_memory[i],
                                   old_ram_size, new_ram_size);
    cpu_physical_memory_set_dirty_range(new_block->offset, new_block->length);

    qemu_ram_setup_dump(new_block->host, new_block->length);
    qemu_madvise(new_block->host, new_block->length, QEMU_MADV_HUGEPAGE);
    qemu_madvise(new_block->host, new_block->length, QEMU_MADV_DONTFORK);

    if (kvm_enabled()) {
        kvm_setup_guest_memory(new_block->host, new_block->length);

    return new_block->offset;

ram_addr_t qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
                                    bool share, const char *mem_path,
    RAMBlock *new_block;
    Error *local_err = NULL;

    if (xen_enabled()) {
        error_setg(errp, "-mem-path not supported with Xen");
    if (phys_mem_alloc != qemu_anon_ram_alloc) {
        /*
         * file_ram_alloc() needs to allocate just like
         * phys_mem_alloc, but we haven't bothered to provide
         */
                   "-mem-path not supported with this accelerator");
    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));
    new_block->length = size;
    new_block->flags = share ? RAM_SHARED : 0;
    new_block->host = file_ram_alloc(new_block, size,
    if (!new_block->host) {
    addr = ram_block_add(new_block, &local_err);
        error_propagate(errp, local_err);

ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr, Error **errp)
    RAMBlock *new_block;
    Error *local_err = NULL;

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));
    new_block->length = size;
    new_block->host = host;
        new_block->flags |= RAM_PREALLOC;
    addr = ram_block_add(new_block, &local_err);
        error_propagate(errp, local_err);

ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
    return qemu_ram_alloc_from_ptr(size, NULL, mr, errp);

void qemu_ram_free_from_ptr(ram_addr_t addr)
    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
    qemu_mutex_unlock_ramlist();

void qemu_ram_free(ram_addr_t addr)
    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            if (block->flags & RAM_PREALLOC) {
            } else if (xen_enabled()) {
                xen_invalidate_map_cache_entry(block->host);
            } else if (block->fd >= 0) {
                munmap(block->host, block->length);
                qemu_anon_ram_free(block->host, block->length);
    qemu_mutex_unlock_ramlist();
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->length) {
            vaddr = block->host + offset;
            if (block->flags & RAM_PREALLOC) {
            } else if (xen_enabled()) {
                munmap(vaddr, length);
                if (block->fd >= 0) {
                    flags |= (block->flags & RAM_SHARED ?
                              MAP_SHARED : MAP_PRIVATE);
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, block->fd, offset);
                    /*
                     * Remap needs to match alloc.  Accelerators that
                     * set phys_mem_alloc never remap.  If they did,
                     * we'd need a remap hook here.
                     */
                    assert(phys_mem_alloc == qemu_anon_ram_alloc);

                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                memory_try_enable_merging(vaddr, length);
                qemu_ram_setup_dump(vaddr, length);
#endif /* !_WIN32 */

int qemu_get_ram_fd(ram_addr_t addr)
    RAMBlock *block = qemu_get_ram_block(addr);

void *qemu_get_ram_block_host_ptr(ram_addr_t addr)
    RAMBlock *block = qemu_get_ram_block(addr);

/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
    RAMBlock *block = qemu_get_ram_block(addr);

    if (xen_enabled()) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map until the end of the page.
         */
        if (block->offset == 0) {
            return xen_map_cache(addr, 0, 0);
        } else if (block->host == NULL) {
                xen_map_cache(block->offset, block->length, 1);
    return block->host + (addr - block->offset);

/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
 * but takes a size argument */
static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
    if (xen_enabled()) {
        return xen_map_cache(addr, *size, 1);
        QTAILQ_FOREACH(block, &ram_list.blocks, next) {
            if (addr - block->offset < block->length) {
                if (addr - block->offset + *size > block->length)
                    *size = block->length - addr + block->offset;
                return block->host + (addr - block->offset);

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
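/* Note: the reverse lookup below (host pointer -> ram_addr_t) checks the MRU
 * block first and then scans the block list, skipping blocks that have no
 * host mapping.
 */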
/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
    uint8_t *host = ptr;

    if (xen_enabled()) {
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
        return qemu_get_ram_block(*ram_addr)->mr;
    block = ram_list.mru_block;
    if (block && block->host && host - block->host < block->length) {
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        /* This case appears when the block is not mapped. */
        if (block->host == NULL) {
        if (host - block->host < block->length) {

    *ram_addr = block->offset + (host - block->host);

static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
                               uint64_t val, unsigned size)
    if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
        tb_invalidate_phys_page_fast(ram_addr, size);
        stb_p(qemu_get_ram_ptr(ram_addr), val);
        stw_p(qemu_get_ram_ptr(ram_addr), val);
        stl_p(qemu_get_ram_ptr(ram_addr), val);
    cpu_physical_memory_set_dirty_range_nocode(ram_addr, size);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (!cpu_physical_memory_is_clean(ram_addr)) {
        CPUArchState *env = current_cpu->env_ptr;
        tlb_set_dirty(env, current_cpu->mem_io_vaddr);

static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
                                 unsigned size, bool is_write)

static const MemoryRegionOps notdirty_mem_ops = {
    .write = notdirty_mem_write,
    .valid.accepts = notdirty_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
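/* Note: watchpointed pages are mapped through io_mem_watch, so every guest
 * access funnels into check_watchpoint(), which either raises EXCP_DEBUG or
 * regenerates the current TB so execution can stop on the exact instruction.
 */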
/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len, int flags)
    CPUState *cpu = current_cpu;
    CPUArchState *env = cpu->env_ptr;
    target_ulong pc, cs_base;

    if (cpu->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
    vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, len)
            && (wp->flags & flags)) {
            if (flags == BP_MEM_READ) {
                wp->flags |= BP_WATCHPOINT_HIT_READ;
                wp->flags |= BP_WATCHPOINT_HIT_WRITE;
            wp->hitaddr = vaddr;
            if (!cpu->watchpoint_hit) {
                cpu->watchpoint_hit = wp;
                tb_check_watchpoint(cpu);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    cpu->exception_index = EXCP_DEBUG;
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
                    cpu_resume_from_signal(cpu, NULL);
            wp->flags &= ~BP_WATCHPOINT_HIT;

/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint64_t watch_mem_read(void *opaque, hwaddr addr,
    check_watchpoint(addr & ~TARGET_PAGE_MASK, size, BP_MEM_READ);
    case 1: return ldub_phys(&address_space_memory, addr);
    case 2: return lduw_phys(&address_space_memory, addr);
    case 4: return ldl_phys(&address_space_memory, addr);

static void watch_mem_write(void *opaque, hwaddr addr,
                            uint64_t val, unsigned size)
    check_watchpoint(addr & ~TARGET_PAGE_MASK, size, BP_MEM_WRITE);
        stb_phys(&address_space_memory, addr, val);
        stw_phys(&address_space_memory, addr, val);
        stl_phys(&address_space_memory, addr, val);

static const MemoryRegionOps watch_mem_ops = {
    .read = watch_mem_read,
    .write = watch_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,

static uint64_t subpage_read(void *opaque, hwaddr addr,
    subpage_t *subpage = opaque;

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
           subpage, len, addr);
    address_space_read(subpage->as, addr + subpage->base, buf, len);

static void subpage_write(void *opaque, hwaddr addr,
                          uint64_t value, unsigned len)
    subpage_t *subpage = opaque;

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx
           " value %"PRIx64"\n",
           __func__, subpage, len, addr, value);
    address_space_write(subpage->as, addr + subpage->base, buf, len);

static bool subpage_accepts(void *opaque, hwaddr addr,
                            unsigned len, bool is_write)
    subpage_t *subpage = opaque;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
           __func__, subpage, is_write ? 'w' : 'r', len, addr);
    return address_space_access_valid(subpage->as, addr + subpage->base,

static const MemoryRegionOps subpage_ops = {
    .read = subpage_read,
    .write = subpage_write,
    .valid.accepts = subpage_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
           __func__, mmio, start, end, idx, eidx, section);
    for (; idx <= eidx; idx++) {
        mmio->sub_section[idx] = section;

static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
    mmio = g_malloc0(sizeof(subpage_t));

    memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
                          NULL, TARGET_PAGE_SIZE);
    mmio->iomem.subpage = true;
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
           mmio, base, TARGET_PAGE_SIZE);
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);

static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
    MemoryRegionSection section = {
        .address_space = as,
        .offset_within_address_space = 0,
        .offset_within_region = 0,
        .size = int128_2_64(),

    return phys_section_add(map, &section);

MemoryRegion *iotlb_to_region(AddressSpace *as, hwaddr index)
    return as->dispatch->map.sections[index & ~TARGET_PAGE_MASK].mr;

static void io_mem_init(void)
    memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
    memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
    memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
    memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
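/* Note: every memory topology change builds a fresh AddressSpaceDispatch in
 * mem_begin()/mem_add() and swaps it in at mem_commit(); the four dummy
 * sections must keep the fixed PHYS_SECTION_* indices asserted below.
 */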
static void mem_begin(MemoryListener *listener)
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);

    n = dummy_section(&d->map, as, &io_mem_unassigned);
    assert(n == PHYS_SECTION_UNASSIGNED);
    n = dummy_section(&d->map, as, &io_mem_notdirty);
    assert(n == PHYS_SECTION_NOTDIRTY);
    n = dummy_section(&d->map, as, &io_mem_rom);
    assert(n == PHYS_SECTION_ROM);
    n = dummy_section(&d->map, as, &io_mem_watch);
    assert(n == PHYS_SECTION_WATCH);

    d->phys_map  = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
    as->next_dispatch = d;

static void mem_commit(MemoryListener *listener)
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *cur = as->dispatch;
    AddressSpaceDispatch *next = as->next_dispatch;

    phys_page_compact_all(next, next->map.nodes_nb);

    as->dispatch = next;
        phys_sections_free(&cur->map);

static void tcg_commit(MemoryListener *listener)
    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
        /* FIXME: Disentangle the cpu.h circular files deps so we can
           directly get the right CPU from listener.  */
        if (cpu->tcg_as_listener != listener) {

static void core_log_global_start(MemoryListener *listener)
    cpu_physical_memory_set_dirty_tracking(true);

static void core_log_global_stop(MemoryListener *listener)
    cpu_physical_memory_set_dirty_tracking(false);

static MemoryListener core_memory_listener = {
    .log_global_start = core_log_global_start,
    .log_global_stop = core_log_global_stop,

void address_space_init_dispatch(AddressSpace *as)
    as->dispatch = NULL;
    as->dispatch_listener = (MemoryListener) {
        .commit = mem_commit,
        .region_add = mem_add,
        .region_nop = mem_add,
    memory_listener_register(&as->dispatch_listener, as);

void address_space_destroy_dispatch(AddressSpace *as)
    AddressSpaceDispatch *d = as->dispatch;

    memory_listener_unregister(&as->dispatch_listener);
    as->dispatch = NULL;

static void memory_map_init(void)
    system_memory = g_malloc(sizeof(*system_memory));

    memory_region_init(system_memory, NULL, "system", UINT64_MAX);
    address_space_init(&address_space_memory, system_memory, "memory");

    system_io = g_malloc(sizeof(*system_io));
    memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
    address_space_init(&address_space_io, system_io, "I/O");

    memory_listener_register(&core_memory_listener, &address_space_memory);

MemoryRegion *get_system_memory(void)
    return system_memory;

MemoryRegion *get_system_io(void)

#endif /* !defined(CONFIG_USER_ONLY) */

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            if (!(flags & PAGE_WRITE))
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
            unlock_user(p, addr, l);
            if (!(flags & PAGE_READ))
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
            unlock_user(p, addr, 0);

static void invalidate_and_set_dirty(hwaddr addr,
    if (cpu_physical_memory_range_includes_clean(addr, length)) {
        tb_invalidate_phys_range(addr, addr + length, 0);
        cpu_physical_memory_set_dirty_range_nocode(addr, length);
    xen_modified_memory(addr, length);
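/* Note: memory_access_size() clamps an access to what the region's
 * ops->valid.max_access_size, the alignment of the address, and a
 * power-of-two rounding allow.
 */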
static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
    unsigned access_size_max = mr->ops->valid.max_access_size;

    /* Regions are assumed to support 1-4 byte accesses unless
       otherwise specified.  */
    if (access_size_max == 0) {
        access_size_max = 4;

    /* Bound the maximum access by the alignment of the address.  */
    if (!mr->ops->impl.unaligned) {
        unsigned align_size_max = addr & -addr;
        if (align_size_max != 0 && align_size_max < access_size_max) {
            access_size_max = align_size_max;

    /* Don't attempt accesses larger than the maximum.  */
    if (l > access_size_max) {
        l = access_size_max;
    l = 1 << (qemu_fls(l) - 1);
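/* Note: address_space_rw() loops over the buffer, translating each chunk;
 * MMIO goes through io_mem_read/io_mem_write at the access size picked above,
 * while RAM is memcpy'd directly and marked dirty on writes.
 */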
bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
                      int len, bool is_write)
        mr = address_space_translate(as, addr, &addr1, &l, is_write);

            if (!memory_access_is_direct(mr, is_write)) {
                l = memory_access_size(mr, l, addr1);
                /* XXX: could force current_cpu to NULL to avoid
                   potential bugs */
                    /* 64 bit write access */
                    error |= io_mem_write(mr, addr1, val, 8);
                    /* 32 bit write access */
                    error |= io_mem_write(mr, addr1, val, 4);
                    /* 16 bit write access */
                    error |= io_mem_write(mr, addr1, val, 2);
                    /* 8 bit write access */
                    error |= io_mem_write(mr, addr1, val, 1);
                addr1 += memory_region_get_ram_addr(mr);
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(addr1, l);
            if (!memory_access_is_direct(mr, is_write)) {
                l = memory_access_size(mr, l, addr1);
                    /* 64 bit read access */
                    error |= io_mem_read(mr, addr1, &val, 8);
                    /* 32 bit read access */
                    error |= io_mem_read(mr, addr1, &val, 4);
                    /* 16 bit read access */
                    error |= io_mem_read(mr, addr1, &val, 2);
                    /* 8 bit read access */
                    error |= io_mem_read(mr, addr1, &val, 1);
                ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
                memcpy(buf, ptr, l);

bool address_space_write(AddressSpace *as, hwaddr addr,
                         const uint8_t *buf, int len)
    return address_space_rw(as, addr, (uint8_t *)buf, len, true);

bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
    return address_space_rw(as, addr, buf, len, false);

void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
                            int len, int is_write)
    address_space_rw(&address_space_memory, addr, buf, len, is_write);
enum write_rom_type {

static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
    hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
        mr = address_space_translate(as, addr, &addr1, &l, true);

        if (!(memory_region_is_ram(mr) ||
              memory_region_is_romd(mr))) {
            addr1 += memory_region_get_ram_addr(mr);
            ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(addr1, l);
                flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);

/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
                                   const uint8_t *buf, int len)
    cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);

void cpu_flush_icache_range(hwaddr start, int len)
    /*
     * This function should do the same thing as an icache flush that was
     * triggered from within the guest. For TCG we are always cache coherent,
     * so there is no need to flush anything. For KVM / Xen we need to flush
     * the host's instruction cache at least.
     */
    if (tcg_enabled()) {

    cpu_physical_memory_write_rom_internal(&address_space_memory,
                                           start, NULL, len, FLUSH_CACHE);

static BounceBuffer bounce;

typedef struct MapClient {
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
    MapClient *client = g_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    QLIST_INSERT_HEAD(&map_client_list, client, link);

static void cpu_unregister_map_client(void *_client)
    MapClient *client = (MapClient *)_client;

    QLIST_REMOVE(client, link);

static void cpu_notify_map_clients(void)
    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);

bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
        mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (!memory_access_is_direct(mr, is_write)) {
            l = memory_access_size(mr, l, addr);
            if (!memory_region_access_valid(mr, xlat, l, is_write)) {
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *address_space_map(AddressSpace *as,
    hwaddr l, xlat, base;
    MemoryRegion *mr, *this_mr;

    mr = address_space_translate(as, addr, &xlat, &l, is_write);
    if (!memory_access_is_direct(mr, is_write)) {
        if (bounce.buffer) {
        /* Avoid unbounded allocations */
        l = MIN(l, TARGET_PAGE_SIZE);
        bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
        memory_region_ref(mr);
            address_space_read(as, addr, bounce.buffer, l);
        return bounce.buffer;

    raddr = memory_region_get_ram_addr(mr);
        this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (this_mr != mr || xlat != base + done) {
    memory_region_ref(mr);
    return qemu_ram_ptr_length(raddr + base, plen);

/* Unmaps a memory region previously mapped by address_space_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         int is_write, hwaddr access_len)
    if (buffer != bounce.buffer) {
        mr = qemu_ram_addr_from_host(buffer, &addr1);
            invalidate_and_set_dirty(addr1, access_len);
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        memory_region_unref(mr);

        address_space_write(as, bounce.addr, bounce.buffer, access_len);
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    memory_region_unref(bounce.mr);
    cpu_notify_map_clients();

void *cpu_physical_memory_map(hwaddr addr,
    return address_space_map(&address_space_memory, addr, plen, is_write);

void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                               int is_write, hwaddr access_len)
    return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
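/* The ld*_phys/st*_phys helpers below follow one pattern: translate the
 * address, go through io_mem_read/io_mem_write with explicit byte swapping
 * for MMIO, or access the RAM pointer directly with the endian-specific
 * ld/st macro.
 */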
/* warning: addr must be aligned */
static inline uint32_t ldl_phys_internal(AddressSpace *as, hwaddr addr,
                                         enum device_endian endian)
    mr = address_space_translate(as, addr, &addr1, &l, false);
    if (l < 4 || !memory_access_is_direct(mr, false)) {
        io_mem_read(mr, addr1, &val, 4);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
        if (endian == DEVICE_BIG_ENDIAN) {
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);

uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
    return ldl_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);

uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
    return ldl_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);

uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
    return ldl_phys_internal(as, addr, DEVICE_BIG_ENDIAN);

/* warning: addr must be aligned */
static inline uint64_t ldq_phys_internal(AddressSpace *as, hwaddr addr,
                                         enum device_endian endian)
    mr = address_space_translate(as, addr, &addr1, &l,
    if (l < 8 || !memory_access_is_direct(mr, false)) {
        io_mem_read(mr, addr1, &val, 8);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
        if (endian == DEVICE_BIG_ENDIAN) {
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);

uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
    return ldq_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);

uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
    return ldq_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);

uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
    return ldq_phys_internal(as, addr, DEVICE_BIG_ENDIAN);

uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
    address_space_rw(as, addr, &val, 1, 0);
2589 static inline uint32_t lduw_phys_internal(AddressSpace
*as
, hwaddr addr
,
2590 enum device_endian endian
)
2598 mr
= address_space_translate(as
, addr
, &addr1
, &l
,
2600 if (l
< 2 || !memory_access_is_direct(mr
, false)) {
2602 io_mem_read(mr
, addr1
, &val
, 2);
2603 #if defined(TARGET_WORDS_BIGENDIAN)
2604 if (endian
== DEVICE_LITTLE_ENDIAN
) {
2608 if (endian
== DEVICE_BIG_ENDIAN
) {
2614 ptr
= qemu_get_ram_ptr((memory_region_get_ram_addr(mr
)
2618 case DEVICE_LITTLE_ENDIAN
:
2619 val
= lduw_le_p(ptr
);
2621 case DEVICE_BIG_ENDIAN
:
2622 val
= lduw_be_p(ptr
);
2632 uint32_t lduw_phys(AddressSpace
*as
, hwaddr addr
)
2634 return lduw_phys_internal(as
, addr
, DEVICE_NATIVE_ENDIAN
);
2637 uint32_t lduw_le_phys(AddressSpace
*as
, hwaddr addr
)
2639 return lduw_phys_internal(as
, addr
, DEVICE_LITTLE_ENDIAN
);
2642 uint32_t lduw_be_phys(AddressSpace
*as
, hwaddr addr
)
2644 return lduw_phys_internal(as
, addr
, DEVICE_BIG_ENDIAN
);
2647 /* warning: addr must be aligned. The ram page is not masked as dirty
2648 and the code inside is not invalidated. It is useful if the dirty
2649 bits are used to track modified PTEs */
2650 void stl_phys_notdirty(AddressSpace
*as
, hwaddr addr
, uint32_t val
)
2657 mr
= address_space_translate(as
, addr
, &addr1
, &l
,
2659 if (l
< 4 || !memory_access_is_direct(mr
, true)) {
2660 io_mem_write(mr
, addr1
, val
, 4);
2662 addr1
+= memory_region_get_ram_addr(mr
) & TARGET_PAGE_MASK
;
2663 ptr
= qemu_get_ram_ptr(addr1
);
2666 if (unlikely(in_migration
)) {
2667 if (cpu_physical_memory_is_clean(addr1
)) {
2668 /* invalidate code */
2669 tb_invalidate_phys_page_range(addr1
, addr1
+ 4, 0);
2671 cpu_physical_memory_set_dirty_range_nocode(addr1
, 4);
/* warning: addr must be aligned */
static inline void stl_phys_internal(AddressSpace *as,
                                     hwaddr addr, uint32_t val,
                                     enum device_endian endian)
    mr = address_space_translate(as, addr, &addr1, &l,
    if (l < 4 || !memory_access_is_direct(mr, true)) {
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
        if (endian == DEVICE_BIG_ENDIAN) {
        io_mem_write(mr, addr1, val, 4);
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        case DEVICE_LITTLE_ENDIAN:
        case DEVICE_BIG_ENDIAN:
        invalidate_and_set_dirty(addr1, 4);

void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
    stl_phys_internal(as, addr, val, DEVICE_NATIVE_ENDIAN);

void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
    stl_phys_internal(as, addr, val, DEVICE_LITTLE_ENDIAN);

void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
    stl_phys_internal(as, addr, val, DEVICE_BIG_ENDIAN);

void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
    address_space_rw(as, addr, &v, 1, 1);

/* warning: addr must be aligned */
static inline void stw_phys_internal(AddressSpace *as,
                                     hwaddr addr, uint32_t val,
                                     enum device_endian endian)
    mr = address_space_translate(as, addr, &addr1, &l, true);
    if (l < 2 || !memory_access_is_direct(mr, true)) {
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
        if (endian == DEVICE_BIG_ENDIAN) {
        io_mem_write(mr, addr1, val, 2);
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        case DEVICE_LITTLE_ENDIAN:
        case DEVICE_BIG_ENDIAN:
        invalidate_and_set_dirty(addr1, 2);

void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
    stw_phys_internal(as, addr, val, DEVICE_NATIVE_ENDIAN);

void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
    stw_phys_internal(as, addr, val, DEVICE_LITTLE_ENDIAN);

void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
    stw_phys_internal(as, addr, val, DEVICE_BIG_ENDIAN);
void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
    address_space_rw(as, addr, (void *) &val, 8, 1);

void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
    val = cpu_to_le64(val);
    address_space_rw(as, addr, (void *) &val, 8, 1);

void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
    val = cpu_to_be64(val);
    address_space_rw(as, addr, (void *) &val, 8, 1);

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(cpu, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
        l = (page + TARGET_PAGE_SIZE) - addr;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
            cpu_physical_memory_write_rom(cpu->as, phys_addr, buf, l);
            address_space_rw(cpu->as, phys_addr, buf, l, 0);

/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool target_words_bigendian(void);
bool target_words_bigendian(void)
#if defined(TARGET_WORDS_BIGENDIAN)

#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
    mr = address_space_translate(&address_space_memory,
                                 phys_addr, &phys_addr, &l, false);

    return !(memory_region_is_ram(mr) ||
             memory_region_is_romd(mr));

void qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        func(block->host, block->offset, block->length, opaque);