/*
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <sys/types.h>

#include "qemu-common.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include "qemu.h"
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#endif
#include "exec/cpu-all.h"

#include "exec/cputlb.h"
#include "translate-all.h"

#include "exec/memory-internal.h"

//#define DEBUG_SUBPAGE
#if !defined(CONFIG_USER_ONLY)
static int in_migration;

RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

#endif

/* current CPU in the current thread. It is only valid inside
   cpu_exec. */
DEFINE_TLS(CPUState *, current_cpu);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)
typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    uint16_t is_leaf : 1;
    /* index into phys_sections (is_leaf) or phys_map_nodes (!is_leaf) */
    uint16_t ptr : 15;
};

typedef PhysPageEntry Node[L2_SIZE];

struct AddressSpaceDispatch {
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    Node *nodes;
    MemoryRegionSection *sections;
    AddressSpace *as;
};
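
/*
 * Illustrative sketch (not part of the original file; L2_BITS value
 * assumed): each level of the map above consumes L2_BITS bits of the
 * physical page index, so the slot at a given level is
 *
 *     unsigned slot = (index >> (level * L2_BITS)) & (L2_SIZE - 1);
 *
 * e.g. with L2_BITS == 10 (L2_SIZE == 1024) and two levels, page index
 * 0x12345 walks slot 0x48 at level 1 and slot 0x345 at level 0.
 */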
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)

typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3
typedef struct PhysPageMap {
    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

static PhysPageMap *prev_map;
static PhysPageMap next_map;

#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)

static void io_mem_init(void);
static void memory_map_init(void);
static void *qemu_safe_ram_ptr(ram_addr_t addr);

static MemoryRegion io_mem_watch;
#endif
#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(unsigned nodes)
{
    if (next_map.nodes_nb + nodes > next_map.nodes_nb_alloc) {
        next_map.nodes_nb_alloc = MAX(next_map.nodes_nb_alloc * 2,
                                      16);
        next_map.nodes_nb_alloc = MAX(next_map.nodes_nb_alloc,
                                      next_map.nodes_nb + nodes);
        next_map.nodes = g_renew(Node, next_map.nodes,
                                 next_map.nodes_nb_alloc);
    }
}
static uint16_t phys_map_node_alloc(void)
{
    unsigned i;
    uint16_t ret;

    ret = next_map.nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != next_map.nodes_nb_alloc);
    for (i = 0; i < L2_SIZE; ++i) {
        next_map.nodes[ret][i].is_leaf = 0;
        next_map.nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}
static void phys_page_set_level(PhysPageEntry *lp, hwaddr *index,
                                hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    hwaddr step = (hwaddr)1 << (level * L2_BITS);

    if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc();
        p = next_map.nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < L2_SIZE; i++) {
                p[i].is_leaf = 1;
                p[i].ptr = PHYS_SECTION_UNASSIGNED;
            }
        }
    } else {
        p = next_map.nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];

    while (*nb && lp < &p[L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->is_leaf = true;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}
static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(3 * P_L2_LEVELS);

    phys_page_set_level(&d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}
static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr index,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    int i;

    for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }
    return &sections[lp.ptr];
}
bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section;
    subpage_t *subpage;

    section = phys_page_find(d->phys_map, addr >> TARGET_PAGE_BITS,
                             d->nodes, d->sections);
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    return section;
}
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    diff = int128_sub(section->mr->size, int128_make64(addr));
    *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    return section;
}
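
/*
 * Worked example (illustrative; values assumed): if a section covers
 * [0x10000, 0x20000) in the address space and begins at offset 0x4000
 * within its MemoryRegion, translating addr = 0x12340 gives
 *
 *     addr  -= 0x10000;          // 0x2340, offset within the section
 *     *xlat  = 0x2340 + 0x4000;  // 0x6340, offset within the region
 *
 * and *plen is clipped so the access never runs past the region's size.
 */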
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;
    hwaddr len = *plen;

    for (;;) {
        section = address_space_translate_internal(as->dispatch, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        len = MIN(len, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    *plen = len;
    *xlat = addr;
    return mr;
}
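
/*
 * Illustrative sketch of the IOTLB math above (values assumed): if the
 * IOMMU maps a 4K page (addr_mask = 0xfff) and returns
 * translated_addr = 0xabcde000 for input addr = 0x5123, then
 *
 *     addr = (0xabcde000 & ~0xfff) | (0x5123 & 0xfff) = 0xabcde123
 *
 * i.e. the page frame comes from the IOMMU and the page offset is kept
 * from the original address; len is clipped to the end of that page.
 */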
MemoryRegionSection *
address_space_translate_for_iotlb(AddressSpace *as, hwaddr addr, hwaddr *xlat,
                                  hwaddr *plen)
{
    MemoryRegionSection *section;
    section = address_space_translate_internal(as->dispatch, addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif
void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    qemu_mutex_init(&ram_list.mutex);
    memory_map_init();
    io_mem_init();
#endif
}
#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu->env_ptr, 1);

    return 0;
}

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
#endif
CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu = first_cpu;

    while (cpu) {
        if (cpu->cpu_index == index) {
            break;
        }
        cpu = cpu->next_cpu;
    }

    return cpu;
}

void qemu_for_each_cpu(void (*func)(CPUState *cpu, void *data), void *data)
{
    CPUState *cpu;

    for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
        func(cpu, data);
    }
}
void cpu_exec_init(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUState **pcpu;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    cpu->next_cpu = NULL;
    pcpu = &first_cpu;
    cpu_index = 0;
    while (*pcpu != NULL) {
        pcpu = &(*pcpu)->next_cpu;
        cpu_index++;
    }
    cpu->cpu_index = cpu_index;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();
#endif
    *pcpu = cpu;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
    assert(cc->vmsd == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}
#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc) |
            (pc & ~TARGET_PAGE_MASK));
}
#endif
#endif /* TARGET_HAS_ICE */
#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
}

int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len & (len - 1)) || (addr & ~len_mask) ||
            len == 0 || len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}
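
/*
 * Usage sketch (illustrative; not part of the original file): a debugger
 * front end watching 4 bytes at 'addr' for writes might do
 *
 *     CPUWatchpoint *wp;
 *     if (cpu_watchpoint_insert(env, addr, 4, BP_MEM_WRITE | BP_GDB, &wp)) {
 *         // invalid length/alignment, see the sanity checks above
 *     }
 *     ...
 *     cpu_watchpoint_remove_by_ref(env, wp);
 *
 * The length must be a power of two no larger than TARGET_PAGE_SIZE and
 * the address must be aligned to it.
 */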
/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}
/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}
/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif
/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}
/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}
/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    g_free(breakpoint);
#endif
}
/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
#if defined(TARGET_HAS_ICE)
    CPUArchState *env = cpu->env_ptr;

    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(env, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}
void cpu_abort(CPUArchState *env, const char *fmt, ...)
{
    CPUState *cpu = ENV_GET_CPU(env);
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}
CPUArchState *cpu_copy(CPUArchState *env)
{
    CPUArchState *new_env = cpu_init(env->cpu_model_str);
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    memcpy(new_env, env, sizeof(CPUArchState));

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}
#if !defined(CONFIG_USER_ONLY)
static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
                                      uintptr_t length)
{
    uintptr_t start1;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (uintptr_t)qemu_safe_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
            != (end - 1) - start) {
        abort();
    }
    cpu_tlb_reset_dirty_all(start1, length);
}
/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    uintptr_t length;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    if (tcg_enabled()) {
        tlb_reset_dirty_range_all(start, end, length);
    }
}
static int cpu_physical_memory_set_dirty_tracking(int enable)
{
    in_migration = enable;
    return 0;
}
hwaddr memory_region_section_get_iotlb(CPUArchState *env,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        iotlb = section - address_space_memory.dispatch->sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* defined(CONFIG_USER_ONLY) */
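
/*
 * Illustrative note on the encoding above (not part of the original
 * file): for RAM the iotlb value is a page-aligned ram_addr with a small
 * section number ORed into the low bits, e.g.
 *
 *     iotlb = (ram_addr & TARGET_PAGE_MASK) + xlat;  // page-aligned
 *     iotlb |= PHYS_SECTION_NOTDIRTY;                // fits below page size
 *
 * For MMIO the value is instead the section index plus the page offset,
 * which is why iotlb_to_region() below can mask with ~TARGET_PAGE_MASK
 * to recover the section.
 */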
#if !defined(CONFIG_USER_ONLY)

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static uint16_t phys_section_add(MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(next_map.sections_nb < TARGET_PAGE_SIZE);

    if (next_map.sections_nb == next_map.sections_nb_alloc) {
        next_map.sections_nb_alloc = MAX(next_map.sections_nb_alloc * 2,
                                         16);
        next_map.sections = g_renew(MemoryRegionSection, next_map.sections,
                                    next_map.sections_nb_alloc);
    }
    next_map.sections[next_map.sections_nb] = *section;
    memory_region_ref(section->mr);
    return next_map.sections_nb++;
}
static void phys_section_destroy(MemoryRegion *mr)
{
    memory_region_unref(mr);

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        memory_region_destroy(&subpage->iomem);
        g_free(subpage);
    }
}
static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->sections);
    g_free(map->nodes);
    g_free(map);
}
static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base >> TARGET_PAGE_BITS,
                                                   next_map.nodes, next_map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end, phys_section_add(section));
}
static void register_multipage(AddressSpaceDispatch *d,
                               MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,
                                                    TARGET_PAGE_BITS));

    assert(num_pages);
    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
}
static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = as->next_dispatch;
    MemoryRegionSection now = *section, remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
                       - now.offset_within_address_space;

        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(d, &now);
    } else {
        now.size = int128_zero();
    }
    while (int128_ne(remain.size, now.size)) {
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);
        now = remain;
        if (int128_lt(remain.size, page_size)) {
            register_subpage(d, &now);
        } else if (remain.offset_within_region & ~TARGET_PAGE_MASK) {
            now.size = page_size;
            register_subpage(d, &now);
        } else {
            now.size = int128_and(now.size, int128_neg(page_size));
            register_multipage(d, &now);
        }
    }
}
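
/*
 * Illustrative example of the splitting above (addresses assumed): with
 * 4K pages, a section covering [0x0800, 0x3000) is registered as
 *
 *     [0x0800, 0x1000)  subpage   (unaligned head)
 *     [0x1000, 0x3000)  multipage (whole pages)
 *
 * and a section ending mid-page would likewise get a subpage tail.
 */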
void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled()) {
        kvm_flush_coalesced_mmio_buffer();
    }
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}
#if defined(__linux__) && !defined(TARGET_S390X)

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC       0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}
static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    char *sanitized_name;
    char *c;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    /* Make name safe to use with mkstemp by replacing '/' with '_'. */
    sanitized_name = g_strdup(block->mr->name);
    for (c = sanitized_name; *c != '\0'; c++) {
        if (*c == '/')
            *c = '_';
    }

    filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
                               sanitized_name);
    g_free(sanitized_name);

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        g_free(filename);
        return NULL;
    }
    unlink(filename);
    g_free(filename);

    memory = (memory+hpagesize-1) & ~(hpagesize-1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return NULL;
    }
    block->fd = fd;
    return area;
}
#endif
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    assert(size != 0); /* it would hand out same offset multiple times */

    if (QTAILQ_EMPTY(&ram_list.blocks))
        return 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->length;

        QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}
last_ram_offset(void)
1033 ram_addr_t last
= 0;
1035 QTAILQ_FOREACH(block
, &ram_list
.blocks
, next
)
1036 last
= MAX(last
, block
->offset
+ block
->length
);
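
/*
 * Illustrative example for find_ram_offset() above (block layout
 * assumed): with existing blocks at [0x0, 0x8000) and [0x10000, 0x14000),
 * a request for size 0x4000 scans the gap after each block; the gap
 * [0x8000, 0x10000) is large enough and is the smallest gap that fits,
 * so offset 0x8000 is returned.  Preferring the smallest fitting gap
 * keeps the ram address space from fragmenting.
 */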
static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
{
    int ret;

    /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
    if (!qemu_opt_get_bool(qemu_get_machine_opts(),
                           "dump-guest-core", true)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                            "but dump_guest_core=off specified\n");
        }
    }
}
void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block, *block;

    new_block = NULL;
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
            new_block = block;
            break;
        }
    }
    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev) {
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
    qemu_mutex_unlock_ramlist();
}
static int memory_try_enable_merging(void *addr, size_t len)
{
    if (!qemu_opt_get_bool(qemu_get_machine_opts(), "mem-merge", true)) {
        /* disabled by the user */
        return 0;
    }

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
}
ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr)
{
    RAMBlock *block, *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    new_block->mr = mr;
    new_block->offset = find_ram_offset(size);
    if (host) {
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
    } else {
        if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
            new_block->host = file_ram_alloc(new_block, size, mem_path);
            if (!new_block->host) {
                new_block->host = qemu_anon_ram_alloc(size);
                memory_try_enable_merging(new_block->host, size);
            }
#else
            fprintf(stderr, "-mem-path option unsupported\n");
            exit(1);
#endif
        } else {
            if (xen_enabled()) {
                xen_ram_alloc(new_block->offset, size, mr);
            } else if (kvm_enabled()) {
                /* some s390/kvm configurations have special constraints */
                new_block->host = kvm_ram_alloc(size);
            } else {
                new_block->host = qemu_anon_ram_alloc(size);
            }
            memory_try_enable_merging(new_block->host, size);
        }
    }
    new_block->length = size;

    /* Keep the list sorted from biggest to smallest block.  */
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->length < new_block->length) {
            break;
        }
    }
    if (block) {
        QTAILQ_INSERT_BEFORE(block, new_block, next);
    } else {
        QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
    }
    ram_list.mru_block = NULL;

    qemu_mutex_unlock_ramlist();

    ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
                                    last_ram_offset() >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0, size >> TARGET_PAGE_BITS);
    cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);

    qemu_ram_setup_dump(new_block->host, size);
    qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}
ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
{
    return qemu_ram_alloc_from_ptr(size, NULL, mr);
}
)
1183 /* This assumes the iothread lock is taken here too. */
1184 qemu_mutex_lock_ramlist();
1185 QTAILQ_FOREACH(block
, &ram_list
.blocks
, next
) {
1186 if (addr
== block
->offset
) {
1187 QTAILQ_REMOVE(&ram_list
.blocks
, block
, next
);
1188 ram_list
.mru_block
= NULL
;
1194 qemu_mutex_unlock_ramlist();
void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
                if (block->fd) {
                    munmap(block->host, block->length);
                    close(block->fd);
                } else {
                    qemu_anon_ram_free(block->host, block->length);
                }
#else
                abort();
#endif
            } else {
                if (xen_enabled()) {
                    xen_invalidate_map_cache_entry(block->host);
                } else {
                    qemu_anon_ram_free(block->host, block->length);
                }
            }
            g_free(block);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}
#ifndef _WIN32
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->length) {
            vaddr = block->host + offset;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else {
                flags = MAP_FIXED;
                munmap(vaddr, length);
                if (mem_path) {
#if defined(__linux__) && !defined(TARGET_S390X)
                    if (block->fd) {
#ifdef MAP_POPULATE
                        flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
                            MAP_PRIVATE;
#else
                        flags |= MAP_PRIVATE;
#endif
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, block->fd, offset);
                    } else {
                        flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, -1, 0);
                    }
#else
                    abort();
#endif
                } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                    flags |= MAP_SHARED | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
                                flags, -1, 0);
#else
                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
#endif
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                memory_try_enable_merging(vaddr, length);
                qemu_ram_setup_dump(vaddr, length);
            }
            return;
        }
    }
}
#endif /* !_WIN32 */
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    /* The list is protected by the iothread lock here.  */
    block = ram_list.mru_block;
    if (block && addr - block->offset < block->length) {
        goto found;
    }
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    ram_list.mru_block = block;
    return block;
}
/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block = qemu_get_ram_block(addr);

    if (xen_enabled()) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map until the end of the page.
         */
        if (block->offset == 0) {
            return xen_map_cache(addr, 0, 0);
        } else if (block->host == NULL) {
            block->host =
                xen_map_cache(block->offset, block->length, 1);
        }
    }
    return block->host + (addr - block->offset);
}
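
/*
 * Usage sketch (illustrative; 'vram_offset' is an assumed name): a
 * display device that owns its video RAM block may legitimately do
 *
 *     uint8_t *vram = qemu_get_ram_ptr(vram_offset);
 *     memcpy(surface, vram + y * stride, width);
 *
 * but DMA-style accesses to arbitrary guest addresses must instead go
 * through cpu_physical_memory_map()/cpu_physical_memory_rw(), which
 * handle bounce buffers and region boundaries.
 */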
/* Return a host pointer to ram allocated with qemu_ram_alloc.  Same as
 * qemu_get_ram_ptr but do not touch ram_list.mru_block.
 *
 * ??? Is this still necessary?
 */
static void *qemu_safe_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    /* The list is protected by the iothread lock here.  */
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            if (xen_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                    block->host =
                        xen_map_cache(block->offset, block->length, 1);
                }
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}
/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
 * but takes a size argument */
static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
{
    if (*size == 0) {
        return NULL;
    }
    if (xen_enabled()) {
        return xen_map_cache(addr, *size, 1);
    } else {
        RAMBlock *block;

        QTAILQ_FOREACH(block, &ram_list.blocks, next) {
            if (addr - block->offset < block->length) {
                if (addr - block->offset + *size > block->length)
                    *size = block->length - addr + block->offset;
                return block->host + (addr - block->offset);
            }
        }

        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
        abort();
    }
}
/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_enabled()) {
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
        return qemu_get_ram_block(*ram_addr)->mr;
    }

    block = ram_list.mru_block;
    if (block && block->host && host - block->host < block->length) {
        goto found;
    }

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        /* This case appears when the block is not mapped. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->length) {
            goto found;
        }
    }

    return NULL;

found:
    *ram_addr = block->offset + (host - block->host);
    return block->mr;
}
static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
                               uint64_t val, unsigned size)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
        tb_invalidate_phys_page_fast(ram_addr, size);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    }
    switch (size) {
    case 1:
        stb_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 2:
        stw_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 4:
        stl_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    default:
        abort();
    }
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff) {
        CPUArchState *env = current_cpu->env_ptr;
        tlb_set_dirty(env, env->mem_io_vaddr);
    }
}

static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
                                 unsigned size, bool is_write)
{
    return is_write;
}

static const MemoryRegionOps notdirty_mem_ops = {
    .write = notdirty_mem_write,
    .valid.accepts = notdirty_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
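
/*
 * Illustrative walk-through of the notdirty path above (not part of the
 * original file): a write to a page that contains translated code first
 * lands here, because its TLB entry points at io_mem_notdirty rather than
 * plain RAM.  The overlapping TBs are invalidated, the store is replayed,
 * and then:
 *
 *     dirty_flags |= 0xff & ~CODE_DIRTY_FLAG;  // mark migration/VGA dirty
 *     if (dirty_flags == 0xff)                 // code flushed:
 *         tlb_set_dirty(env, ...);             // map the page direct again
 *
 * so only the first write after a TB is generated pays the slow path.
 */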
/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUArchState *env = current_cpu->env_ptr;
    target_ulong pc, cs_base;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(ENV_GET_CPU(env), CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb_check_watchpoint(env);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                    cpu_loop_exit(env);
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                    cpu_resume_from_signal(env, NULL);
                }
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint64_t watch_mem_read(void *opaque, hwaddr addr,
                               unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
    switch (size) {
    case 1: return ldub_phys(addr);
    case 2: return lduw_phys(addr);
    case 4: return ldl_phys(addr);
    default: abort();
    }
}

static void watch_mem_write(void *opaque, hwaddr addr,
                            uint64_t val, unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
    switch (size) {
    case 1:
        stb_phys(addr, val);
        break;
    case 2:
        stw_phys(addr, val);
        break;
    case 4:
        stl_phys(addr, val);
        break;
    default: abort();
    }
}

static const MemoryRegionOps watch_mem_ops = {
    .read = watch_mem_read,
    .write = watch_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
static uint64_t subpage_read(void *opaque, hwaddr addr,
                             unsigned len)
{
    subpage_t *subpage = opaque;
    uint8_t buf[4];

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx "\n", __func__,
           subpage, len, addr);
#endif
    address_space_read(subpage->as, addr + subpage->base, buf, len);
    switch (len) {
    case 1:
        return ldub_p(buf);
    case 2:
        return lduw_p(buf);
    case 4:
        return ldl_p(buf);
    default:
        abort();
    }
}

static void subpage_write(void *opaque, hwaddr addr,
                          uint64_t value, unsigned len)
{
    subpage_t *subpage = opaque;
    uint8_t buf[4];

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx
           " value %"PRIx64"\n",
           __func__, subpage, len, addr, value);
#endif
    switch (len) {
    case 1:
        stb_p(buf, value);
        break;
    case 2:
        stw_p(buf, value);
        break;
    case 4:
        stl_p(buf, value);
        break;
    default:
        abort();
    }
    address_space_write(subpage->as, addr + subpage->base, buf, len);
}
static bool subpage_accepts(void *opaque, hwaddr addr,
                            unsigned size, bool is_write)
{
    subpage_t *subpage = opaque;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p %c len %d addr " TARGET_FMT_plx "\n",
           __func__, subpage, is_write ? 'w' : 'r', size, addr);
#endif

    return address_space_access_valid(subpage->as, addr + subpage->base,
                                      size, is_write);
}

static const MemoryRegionOps subpage_ops = {
    .read = subpage_read,
    .write = subpage_write,
    .valid.accepts = subpage_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
           __func__, mmio, start, end, idx, eidx, section);
#endif
    for (; idx <= eidx; idx++) {
        mmio->sub_section[idx] = section;
    }

    return 0;
}
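
/*
 * Illustrative example (layout assumed): a 0x100-byte register bank
 * mapped at guest address 0x10000040 shares its 4K page with neighbours,
 * so the page is covered by a subpage_t and the bank occupies
 * sub_section[0x40..0x13f]:
 *
 *     subpage_register(subpage, 0x40, 0x13f, phys_section_add(section));
 *
 * Accesses to the page are then re-dispatched byte-range by byte-range
 * through subpage_read()/subpage_write() above.
 */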
static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
{
    subpage_t *mmio;

    mmio = g_malloc0(sizeof(subpage_t));

    mmio->as = as;
    mmio->base = base;
    memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
                          "subpage", TARGET_PAGE_SIZE);
    mmio->iomem.subpage = true;
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
           mmio, base, TARGET_PAGE_SIZE);
#endif
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);

    return mmio;
}
static uint16_t dummy_section(MemoryRegion *mr)
{
    MemoryRegionSection section = {
        .mr = mr,
        .offset_within_address_space = 0,
        .offset_within_region = 0,
        .size = int128_2_64(),
    };

    return phys_section_add(&section);
}

MemoryRegion *iotlb_to_region(hwaddr index)
{
    return address_space_memory.dispatch->sections[index & ~TARGET_PAGE_MASK].mr;
}
static void io_mem_init(void)
{
    memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, "rom", UINT64_MAX);
    memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
                          "unassigned", UINT64_MAX);
    memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
                          "notdirty", UINT64_MAX);
    memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
                          "watch", UINT64_MAX);
}
static void mem_begin(MemoryListener *listener)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = g_new(AddressSpaceDispatch, 1);

    d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };
    d->as = as;
    as->next_dispatch = d;
}
static void mem_commit(MemoryListener *listener)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *cur = as->dispatch;
    AddressSpaceDispatch *next = as->next_dispatch;

    next->nodes = next_map.nodes;
    next->sections = next_map.sections;

    as->dispatch = next;
    g_free(cur);
}
static void core_begin(MemoryListener *listener)
{
    uint16_t n;

    prev_map = g_new(PhysPageMap, 1);
    *prev_map = next_map;

    memset(&next_map, 0, sizeof(next_map));
    n = dummy_section(&io_mem_unassigned);
    assert(n == PHYS_SECTION_UNASSIGNED);
    n = dummy_section(&io_mem_notdirty);
    assert(n == PHYS_SECTION_NOTDIRTY);
    n = dummy_section(&io_mem_rom);
    assert(n == PHYS_SECTION_ROM);
    n = dummy_section(&io_mem_watch);
    assert(n == PHYS_SECTION_WATCH);
}
/* This listener's commit runs after the other AddressSpaceDispatch listeners'.
 * All AddressSpaceDispatch instances have switched to the next map.
 */
static void core_commit(MemoryListener *listener)
{
    phys_sections_free(prev_map);
}
static void tcg_commit(MemoryListener *listener)
{
    CPUState *cpu;

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
        CPUArchState *env = cpu->env_ptr;

        tlb_flush(env, 1);
    }
}
static void core_log_global_start(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(1);
}

static void core_log_global_stop(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(0);
}

static MemoryListener core_memory_listener = {
    .begin = core_begin,
    .commit = core_commit,
    .log_global_start = core_log_global_start,
    .log_global_stop = core_log_global_stop,
    .priority = 1,
};

static MemoryListener tcg_memory_listener = {
    .commit = tcg_commit,
};
void address_space_init_dispatch(AddressSpace *as)
{
    as->dispatch = NULL;
    as->dispatch_listener = (MemoryListener) {
        .begin = mem_begin,
        .commit = mem_commit,
        .region_add = mem_add,
        .region_nop = mem_add,
        .priority = 0,
    };
    memory_listener_register(&as->dispatch_listener, as);
}

void address_space_destroy_dispatch(AddressSpace *as)
{
    AddressSpaceDispatch *d = as->dispatch;

    memory_listener_unregister(&as->dispatch_listener);
    g_free(d);
    as->dispatch = NULL;
}
static void memory_map_init(void)
{
    system_memory = g_malloc(sizeof(*system_memory));
    memory_region_init(system_memory, NULL, "system", INT64_MAX);
    address_space_init(&address_space_memory, system_memory, "memory");

    system_io = g_malloc(sizeof(*system_io));
    memory_region_init(system_io, NULL, "io", 65536);
    address_space_init(&address_space_io, system_io, "I/O");

    memory_listener_register(&core_memory_listener, &address_space_memory);
    memory_listener_register(&tcg_memory_listener, &address_space_memory);
}

MemoryRegion *get_system_memory(void)
{
    return system_memory;
}

MemoryRegion *get_system_io(void)
{
    return system_io;
}

#endif /* !defined(CONFIG_USER_ONLY) */
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

#else
static void invalidate_and_set_dirty(hwaddr addr,
                                     hwaddr length)
{
    if (!cpu_physical_memory_is_dirty(addr)) {
        /* invalidate code */
        tb_invalidate_phys_page_range(addr, addr + length, 0);
        /* set dirty bit */
        cpu_physical_memory_set_dirty_flags(addr, (0xff & ~CODE_DIRTY_FLAG));
    }
    xen_modified_memory(addr, length);
}
static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
{
    if (memory_region_is_ram(mr)) {
        return !(is_write && mr->readonly);
    }
    if (memory_region_is_romd(mr)) {
        return !is_write;
    }

    return false;
}
static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
{
    unsigned access_size_max = mr->ops->valid.max_access_size;

    /* Regions are assumed to support 1-4 byte accesses unless
       otherwise specified.  */
    if (access_size_max == 0) {
        access_size_max = 4;
    }

    /* Bound the maximum access by the alignment of the address.  */
    if (!mr->ops->impl.unaligned) {
        unsigned align_size_max = addr & -addr;
        if (align_size_max != 0 && align_size_max < access_size_max) {
            access_size_max = align_size_max;
        }
    }

    /* Don't attempt accesses larger than the maximum.  */
    if (l > access_size_max) {
        l = access_size_max;
    }

    return l;
}
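
/*
 * Worked example (values assumed): for a region whose ops declare
 * valid.max_access_size = 4 and no unaligned support, an 8-byte access
 * at addr = 0x1006 is bounded first by the alignment (0x1006 & -0x1006
 * == 2, the lowest set bit) and then by the maximum, so l becomes 2 and
 * the caller splits the access into several smaller ones.
 */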
bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
                      int len, bool is_write)
{
    hwaddr l;
    uint8_t *ptr;
    uint64_t val;
    hwaddr addr1;
    MemoryRegion *mr;
    bool error = false;

    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, is_write);

        if (is_write) {
            if (!memory_access_is_direct(mr, is_write)) {
                l = memory_access_size(mr, l, addr1);
                /* XXX: could force current_cpu to NULL to avoid
                   potential bugs */
                if (l == 8) {
                    /* 64 bit write access */
                    val = ldq_p(buf);
                    error |= io_mem_write(mr, addr1, val, 8);
                } else if (l == 4) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    error |= io_mem_write(mr, addr1, val, 4);
                } else if (l == 2) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    error |= io_mem_write(mr, addr1, val, 2);
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    error |= io_mem_write(mr, addr1, val, 1);
                }
            } else {
                addr1 += memory_region_get_ram_addr(mr);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(addr1, l);
            }
        } else {
            if (!memory_access_is_direct(mr, is_write)) {
                /* I/O case */
                l = memory_access_size(mr, l, addr1);
                if (l == 8) {
                    /* 64 bit read access */
                    error |= io_mem_read(mr, addr1, &val, 8);
                    stq_p(buf, val);
                } else if (l == 4) {
                    /* 32 bit read access */
                    error |= io_mem_read(mr, addr1, &val, 4);
                    stl_p(buf, val);
                } else if (l == 2) {
                    /* 16 bit read access */
                    error |= io_mem_read(mr, addr1, &val, 2);
                    stw_p(buf, val);
                } else {
                    /* 8 bit read access */
                    error |= io_mem_read(mr, addr1, &val, 1);
                    stb_p(buf, val);
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }

    return error;
}
bool address_space_write(AddressSpace *as, hwaddr addr,
                         const uint8_t *buf, int len)
{
    return address_space_rw(as, addr, (uint8_t *)buf, len, true);
}

bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
{
    return address_space_rw(as, addr, buf, len, false);
}

void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
                            int len, int is_write)
{
    address_space_rw(&address_space_memory, addr, buf, len, is_write);
}
/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(hwaddr addr,
                                   const uint8_t *buf, int len)
{
    hwaddr l;
    uint8_t *ptr;
    hwaddr addr1;
    MemoryRegion *mr;

    while (len > 0) {
        l = len;
        mr = address_space_translate(&address_space_memory,
                                     addr, &addr1, &l, true);

        if (!(memory_region_is_ram(mr) ||
              memory_region_is_romd(mr))) {
            /* do nothing */
        } else {
            addr1 += memory_region_get_ram_addr(mr);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
            invalidate_and_set_dirty(addr1, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
typedef struct {
    MemoryRegion *mr;
    void *buffer;
    hwaddr addr;
    hwaddr len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);
void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = g_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

static void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    QLIST_REMOVE(client, link);
    g_free(client);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
    }
}
*as
, hwaddr addr
, int len
, bool is_write
)
2121 mr
= address_space_translate(as
, addr
, &xlat
, &l
, is_write
);
2122 if (!memory_access_is_direct(mr
, is_write
)) {
2123 l
= memory_access_size(mr
, l
, addr
);
2124 if (!memory_region_access_valid(mr
, xlat
, l
, is_write
)) {
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *address_space_map(AddressSpace *as,
                        hwaddr addr,
                        hwaddr *plen,
                        bool is_write)
{
    hwaddr len = *plen;
    hwaddr done = 0;
    hwaddr l, xlat, base;
    MemoryRegion *mr, *this_mr;
    ram_addr_t raddr;

    if (len == 0) {
        return NULL;
    }

    l = len;
    mr = address_space_translate(as, addr, &xlat, &l, is_write);
    if (!memory_access_is_direct(mr, is_write)) {
        if (bounce.buffer) {
            return NULL;
        }
        bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
        bounce.addr = addr;
        bounce.len = l;

        memory_region_ref(mr);
        bounce.mr = mr;
        if (!is_write) {
            address_space_read(as, addr, bounce.buffer, l);
        }

        *plen = l;
        return bounce.buffer;
    }

    base = xlat;
    raddr = memory_region_get_ram_addr(mr);

    for (;;) {
        len -= l;
        addr += l;
        done += l;
        if (len == 0) {
            break;
        }

        l = len;
        this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (this_mr != mr || xlat != base + done) {
            break;
        }
    }

    memory_region_ref(mr);
    *plen = done;
    return qemu_ram_ptr_length(raddr + base, plen);
}
/* Unmaps a memory region previously mapped by address_space_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         int is_write, hwaddr access_len)
{
    if (buffer != bounce.buffer) {
        MemoryRegion *mr;
        ram_addr_t addr1;

        mr = qemu_ram_addr_from_host(buffer, &addr1);
        assert(mr != NULL);
        if (is_write) {
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                invalidate_and_set_dirty(addr1, l);
                addr1 += l;
                access_len -= l;
            }
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        memory_region_unref(mr);
        return;
    }
    if (is_write) {
        address_space_write(as, bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    memory_region_unref(bounce.mr);
    cpu_notify_map_clients();
}
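
/*
 * Usage sketch (illustrative; 'gpa', 'size', 'fill' and 'retry_fn' are
 * hypothetical names): a device doing DMA typically brackets its access
 * with map/unmap and falls back on the bounce machinery above:
 *
 *     hwaddr plen = size;
 *     void *p = address_space_map(as, gpa, &plen, true);
 *     if (!p) {
 *         // resources exhausted (e.g. the bounce buffer is busy):
 *         // register a callback and retry when notified
 *         cpu_register_map_client(opaque, retry_fn);
 *     } else {
 *         fill(p, plen);                           // plen may be < size
 *         address_space_unmap(as, p, plen, true, plen);
 *     }
 */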
void *cpu_physical_memory_map(hwaddr addr,
                              hwaddr *plen,
                              int is_write)
{
    return address_space_map(&address_space_memory, addr, plen, is_write);
}

void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                               int is_write, hwaddr access_len)
{
    return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
}
/* warning: addr must be aligned */
static inline uint32_t ldl_phys_internal(hwaddr addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;

    mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                 false);
    if (l < 4 || !memory_access_is_direct(mr, false)) {
        /* I/O case */
        io_mem_read(mr, addr1, &val, 4);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t ldl_phys(hwaddr addr)
{
    return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t ldl_le_phys(hwaddr addr)
{
    return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t ldl_be_phys(hwaddr addr)
{
    return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
}
/* warning: addr must be aligned */
static inline uint64_t ldq_phys_internal(hwaddr addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 8;
    hwaddr addr1;

    mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                 false);
    if (l < 8 || !memory_access_is_direct(mr, false)) {
        /* I/O case */
        io_mem_read(mr, addr1, &val, 8);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap64(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap64(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
    }
    return val;
}

uint64_t ldq_phys(hwaddr addr)
{
    return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint64_t ldq_le_phys(hwaddr addr)
{
    return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint64_t ldq_be_phys(hwaddr addr)
{
    return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
}
/* XXX: optimize */
uint32_t ldub_phys(hwaddr addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}
/* warning: addr must be aligned */
static inline uint32_t lduw_phys_internal(hwaddr addr,
                                          enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;

    mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                 false);
    if (l < 2 || !memory_access_is_direct(mr, false)) {
        /* I/O case */
        io_mem_read(mr, addr1, &val, 2);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t lduw_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t lduw_le_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t lduw_be_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
}
/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(hwaddr addr, uint32_t val)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;

    mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        io_mem_write(mr, addr1, val, 4);
    } else {
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}
/* warning: addr must be aligned */
static inline void stl_phys_internal(hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;

    mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        io_mem_write(mr, addr1, val, 4);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 4);
    }
}

void stl_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stl_le_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stl_be_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}
/* XXX: optimize */
void stb_phys(hwaddr addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}
/* warning: addr must be aligned */
static inline void stw_phys_internal(hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;

    mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                 true);
    if (l < 2 || !memory_access_is_direct(mr, true)) {
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        io_mem_write(mr, addr1, val, 2);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 2);
    }
}

void stw_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stw_le_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stw_be_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}
/* XXX: optimize */
void stq_phys(hwaddr addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_le_phys(hwaddr addr, uint64_t val)
{
    val = cpu_to_le64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_be_phys(hwaddr addr, uint64_t val)
{
    val = cpu_to_be64(val);
    cpu_physical_memory_write(addr, &val, 8);
}
/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
#endif
#if !defined(CONFIG_USER_ONLY)

/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool virtio_is_big_endian(void);
bool virtio_is_big_endian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}

#endif
#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegion *mr;
    hwaddr l = 1;

    mr = address_space_translate(&address_space_memory,
                                 phys_addr, &phys_addr, &l, false);

    return !(memory_region_is_ram(mr) ||
             memory_region_is_romd(mr));
}

void qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
{
    RAMBlock *block;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        func(block->host, block->offset, block->length, opaque);
    }
}
#endif