4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
23 #include <sys/types.h>
27 #include "qemu-common.h"
32 #include "qemu/osdep.h"
33 #include "sysemu/kvm.h"
34 #include "sysemu/sysemu.h"
35 #include "hw/xen/xen.h"
36 #include "qemu/timer.h"
37 #include "qemu/config-file.h"
38 #include "exec/memory.h"
39 #include "sysemu/dma.h"
40 #include "exec/address-spaces.h"
41 #if defined(CONFIG_USER_ONLY)
43 #else /* !CONFIG_USER_ONLY */
44 #include "sysemu/xen-mapcache.h"
47 #include "exec/cpu-all.h"
49 #include "exec/cputlb.h"
50 #include "translate-all.h"
52 #include "exec/memory-internal.h"
54 #include "qemu/range.h"
56 //#define DEBUG_SUBPAGE
58 #if !defined(CONFIG_USER_ONLY)
59 static int in_migration;
61 RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };
63 static MemoryRegion *system_memory;
64 static MemoryRegion *system_io;
66 AddressSpace address_space_io;
67 AddressSpace address_space_memory;
69 MemoryRegion io_mem_rom, io_mem_notdirty;
70 static MemoryRegion io_mem_unassigned;
74 struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
75 /* current CPU in the current thread. It is only valid inside
77 DEFINE_TLS(CPUState *, current_cpu);
78 /* 0 = Do not count executed instructions.
79 1 = Precise instruction counting.
80 2 = Adaptive rate instruction counting. */
83 #if !defined(CONFIG_USER_ONLY)
85 typedef struct PhysPageEntry PhysPageEntry;
87 struct PhysPageEntry {
88 /* How many bits to skip to the next level (in units of L2_SIZE). 0 for a leaf. */
90 /* index into phys_sections (!skip) or phys_map_nodes (skip) */
94 #define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)
96 /* Size of the L2 (and L3, etc) page tables. */
97 #define ADDR_SPACE_BITS 64
100 #define P_L2_SIZE (1 << P_L2_BITS)
102 #define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
104 typedef PhysPageEntry Node[P_L2_SIZE];
106 typedef struct PhysPageMap {
107 unsigned sections_nb;
108 unsigned sections_nb_alloc;
110 unsigned nodes_nb_alloc;
112 MemoryRegionSection *sections;
115 struct AddressSpaceDispatch {
116 /* This is a multi-level map on the physical address space.
117 * The bottom level has pointers to MemoryRegionSections.
119 PhysPageEntry phys_map;
124 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
125 typedef struct subpage_t {
129 uint16_t sub_section[TARGET_PAGE_SIZE];
132 #define PHYS_SECTION_UNASSIGNED 0
133 #define PHYS_SECTION_NOTDIRTY 1
134 #define PHYS_SECTION_ROM 2
135 #define PHYS_SECTION_WATCH 3
137 static void io_mem_init(void);
138 static void memory_map_init(void);
140 static MemoryRegion io_mem_watch;
143 #if !defined(CONFIG_USER_ONLY)
145 static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
147 if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
148 map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
149 map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
150 map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
154 static uint32_t phys_map_node_alloc(PhysPageMap *map)
159 ret = map->nodes_nb++;
160 assert(ret != PHYS_MAP_NODE_NIL);
161 assert(ret != map->nodes_nb_alloc);
162 for (i = 0; i < P_L2_SIZE; ++i) {
163 map->nodes[ret][i].skip = 1;
164 map->nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
169 static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
170 hwaddr *index, hwaddr *nb, uint16_t leaf,
175 hwaddr step = (hwaddr)1 << (level * P_L2_BITS);
177 if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
178 lp->ptr = phys_map_node_alloc(map);
179 p = map->nodes[lp->ptr];
181 for (i = 0; i < P_L2_SIZE; i++) {
183 p[i].ptr = PHYS_SECTION_UNASSIGNED;
187 p = map->nodes[lp->ptr];
189 lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];
191 while (*nb && lp < &p[P_L2_SIZE]) {
192 if ((*index & (step - 1)) == 0 && *nb >= step) {
198 phys_page_set_level(map, lp, index, nb, leaf, level - 1);
204 static void phys_page_set(AddressSpaceDispatch *d,
205 hwaddr index, hwaddr nb,
208 /* Wildly overreserve - it doesn't matter much. */
209 phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);
211 phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
214 /* Compact a non-leaf page entry. Simply detect that the entry has a single child,
215 * and update our entry so we can skip it and go directly to the destination.
217 static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
219 unsigned valid_ptr = P_L2_SIZE;
224 if (lp->ptr == PHYS_MAP_NODE_NIL) {
229 for (i = 0; i < P_L2_SIZE; i++) {
230 if (p[i].ptr == PHYS_MAP_NODE_NIL) {
237 phys_page_compact(&p[i], nodes, compacted);
241 /* We can only compress if there's only one child. */
246 assert(valid_ptr < P_L2_SIZE);
248 /* Don't compress if it won't fit in the # of bits we have. */
249 if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
253 lp->ptr = p[valid_ptr].ptr;
254 if (!p[valid_ptr].skip) {
255 /* If our only child is a leaf, make this a leaf. */
256 /* By design, we should have made this node a leaf to begin with so we
257 * should never reach here.
258 * But since it's so simple to handle this, let's do it just in case we
263 lp->skip += p[valid_ptr].skip;
267 static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
269 DECLARE_BITMAP(compacted, nodes_nb);
271 if (d->phys_map.skip) {
272 phys_page_compact(&d->phys_map, d->map.nodes, compacted);
276 static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
277 Node *nodes, MemoryRegionSection *sections)
280 hwaddr index = addr >> TARGET_PAGE_BITS;
283 for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
284 if (lp.ptr == PHYS_MAP_NODE_NIL) {
285 return &sections[PHYS_SECTION_UNASSIGNED];
288 lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
291 if (sections[lp.ptr].size.hi ||
292 range_covers_byte(sections[lp.ptr].offset_within_address_space,
293 sections[lp.ptr].size.lo, addr)) {
294 return &sections[lp.ptr];
296 return &sections[PHYS_SECTION_UNASSIGNED];
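/*
 * Illustrative sketch (editor's addition, not in the original source): how an
 * address indexes the multi-level map that phys_page_find() walks.  The
 * concrete numbers assume P_L2_BITS is 9 (its definition is elided above),
 * so each level holds P_L2_SIZE == 512 entries:
 *
 *     hwaddr page  = addr >> TARGET_PAGE_BITS;
 *     unsigned lvl = P_L2_LEVELS - 1;                           // top level
 *     unsigned idx = (page >> (lvl * P_L2_BITS)) & (P_L2_SIZE - 1);
 *
 * A PhysPageEntry whose skip is greater than 1 stands for a chain of
 * single-child nodes removed by phys_page_compact(), which is why the loop in
 * phys_page_find() subtracts lp.skip from the level counter instead of 1.
 */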
300 bool memory_region_is_unassigned(MemoryRegion *mr)
302 return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
303 && mr != &io_mem_watch;
306 static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
308 bool resolve_subpage)
310 MemoryRegionSection *section;
313 section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
314 if (resolve_subpage && section->mr->subpage) {
315 subpage = container_of(section->mr, subpage_t, iomem);
316 section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
321 static MemoryRegionSection *
322 address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
323 hwaddr *plen, bool resolve_subpage)
325 MemoryRegionSection *section;
328 section = address_space_lookup_region(d, addr, resolve_subpage);
329 /* Compute offset within MemoryRegionSection */
330 addr -= section->offset_within_address_space;
332 /* Compute offset within MemoryRegion */
333 *xlat = addr + section->offset_within_region;
335 diff = int128_sub(section->mr->size, int128_make64(addr));
336 *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
340 MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
341 hwaddr *xlat, hwaddr *plen,
345 MemoryRegionSection *section;
350 section = address_space_translate_internal(as->dispatch, addr, &addr, plen, true);
353 if (!mr->iommu_ops) {
357 iotlb = mr->iommu_ops->translate(mr, addr);
358 addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
359 | (addr & iotlb.addr_mask));
360 len = MIN(len, (addr | iotlb.addr_mask) - addr + 1);
361 if (!(iotlb.perm & (1 << is_write))) {
362 mr = &io_mem_unassigned;
366 as = iotlb.target_as;
374 MemoryRegionSection *
375 address_space_translate_for_iotlb(AddressSpace *as, hwaddr addr, hwaddr *xlat,
378 MemoryRegionSection *section;
379 section = address_space_translate_internal(as->dispatch, addr, xlat, plen, false);
381 assert(!section->mr->iommu_ops);
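/*
 * Illustrative sketch (editor's addition): a typical caller of
 * address_space_translate(), mirroring the pattern used by address_space_rw()
 * further below.  "as", "addr", "buf", "len" and "is_write" are assumed to be
 * in scope; the trailing argument is the is_write flag, as in the calls
 * elsewhere in this file.
 *
 *     hwaddr xlat, l = len;
 *     MemoryRegion *mr = address_space_translate(as, addr, &xlat, &l, is_write);
 *     if (memory_access_is_direct(mr, is_write)) {
 *         memcpy(qemu_get_ram_ptr(memory_region_get_ram_addr(mr) + xlat), buf, l);
 *     } else {
 *         io_mem_write(mr, xlat, ldl_p(buf), 4);   // MMIO path, size-limited
 *     }
 */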
386 void cpu_exec_init_all(void)
388 #if !defined(CONFIG_USER_ONLY)
389 qemu_mutex_init(&ram_list.mutex);
395 #if !defined(CONFIG_USER_ONLY)
397 static int cpu_common_post_load(void *opaque, int version_id)
399 CPUState *cpu = opaque;
401 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
402 version_id is increased. */
403 cpu->interrupt_request &= ~0x01;
404 tlb_flush(cpu->env_ptr, 1);
409 const VMStateDescription vmstate_cpu_common = {
410 .name = "cpu_common",
412 .minimum_version_id = 1,
413 .minimum_version_id_old = 1,
414 .post_load = cpu_common_post_load,
415 .fields = (VMStateField[]) {
416 VMSTATE_UINT32(halted, CPUState),
417 VMSTATE_UINT32(interrupt_request, CPUState),
418 VMSTATE_END_OF_LIST()
424 CPUState *qemu_get_cpu(int index)
429 if (cpu->cpu_index == index) {
437 void cpu_exec_init(CPUArchState *env)
439 CPUState *cpu = ENV_GET_CPU(env);
440 CPUClass *cc = CPU_GET_CLASS(cpu);
444 #if defined(CONFIG_USER_ONLY)
448 CPU_FOREACH(some_cpu) {
451 cpu->cpu_index = cpu_index;
453 QTAILQ_INIT(&env->breakpoints);
454 QTAILQ_INIT(&env->watchpoints);
455 #ifndef CONFIG_USER_ONLY
456 cpu->thread_id = qemu_get_thread_id();
458 QTAILQ_INSERT_TAIL(&cpus, cpu, node);
459 #if defined(CONFIG_USER_ONLY)
462 if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
463 vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
465 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
466 register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
467 cpu_save, cpu_load, env);
468 assert(cc->vmsd == NULL);
469 assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
471 if (cc->vmsd != NULL) {
472 vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
476 #if defined(TARGET_HAS_ICE)
477 #if defined(CONFIG_USER_ONLY)
478 static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
480 tb_invalidate_phys_page_range(pc, pc + 1, 0);
483 static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
485 hwaddr phys = cpu_get_phys_page_debug(cpu, pc);
487 tb_invalidate_phys_addr(phys | (pc & ~TARGET_PAGE_MASK));
491 #endif /* TARGET_HAS_ICE */
493 #if defined(CONFIG_USER_ONLY)
494 void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
499 int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
500 int flags, CPUWatchpoint **watchpoint)
505 /* Add a watchpoint. */
506 int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
507 int flags, CPUWatchpoint **watchpoint)
509 target_ulong len_mask = ~(len - 1);
512 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
513 if ((len & (len - 1)) || (addr & ~len_mask) ||
514 len == 0 || len > TARGET_PAGE_SIZE) {
515 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
516 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
519 wp = g_malloc(sizeof(*wp));
522 wp->len_mask = len_mask;
525 /* keep all GDB-injected watchpoints in front */
527 QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
529 QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
531 tlb_flush_page(env, addr);
538 /* Remove a specific watchpoint. */
539 int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
542 target_ulong len_mask = ~(len - 1);
545 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
546 if (addr == wp->vaddr && len_mask == wp->len_mask
547 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
548 cpu_watchpoint_remove_by_ref(env, wp);
555 /* Remove a specific watchpoint by reference. */
556 void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
558 QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
560 tlb_flush_page(env, watchpoint->vaddr);
565 /* Remove all matching watchpoints. */
566 void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
568 CPUWatchpoint *wp, *next;
570 QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
571 if (wp->flags & mask)
572 cpu_watchpoint_remove_by_ref(env, wp);
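/*
 * Illustrative sketch (editor's addition): because len_mask is ~(len - 1),
 * only power-of-2, naturally aligned ranges are representable.  A 4-byte
 * write watchpoint injected on behalf of a GDB client would be installed as:
 *
 *     CPUWatchpoint *wp;
 *     cpu_watchpoint_insert(env, addr, 4, BP_MEM_WRITE | BP_GDB, &wp);
 *
 * and check_watchpoint() later matches a faulting vaddr with
 * (vaddr & wp->len_mask) == wp->vaddr.
 */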
577 /* Add a breakpoint. */
578 int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
579 CPUBreakpoint **breakpoint)
581 #if defined(TARGET_HAS_ICE)
584 bp = g_malloc(sizeof(*bp));
589 /* keep all GDB-injected breakpoints in front */
590 if (flags & BP_GDB) {
591 QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
593 QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
596 breakpoint_invalidate(ENV_GET_CPU(env), pc);
607 /* Remove a specific breakpoint. */
608 int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
610 #if defined(TARGET_HAS_ICE)
613 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
614 if (bp->pc == pc && bp->flags == flags) {
615 cpu_breakpoint_remove_by_ref(env, bp);
625 /* Remove a specific breakpoint by reference. */
626 void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
628 #if defined(TARGET_HAS_ICE)
629 QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
631 breakpoint_invalidate(ENV_GET_CPU(env), breakpoint->pc);
637 /* Remove all matching breakpoints. */
638 void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
640 #if defined(TARGET_HAS_ICE)
641 CPUBreakpoint *bp, *next;
643 QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
644 if (bp->flags & mask)
645 cpu_breakpoint_remove_by_ref(env, bp);
650 /* enable or disable single step mode. EXCP_DEBUG is returned by the
651 CPU loop after each instruction */
652 void cpu_single_step(CPUState *cpu, int enabled)
654 #if defined(TARGET_HAS_ICE)
655 if (cpu->singlestep_enabled != enabled) {
656 cpu->singlestep_enabled = enabled;
658 kvm_update_guest_debug(cpu, 0);
660 /* must flush all the translated code to avoid inconsistencies */
661 /* XXX: only flush what is necessary */
662 CPUArchState *env = cpu->env_ptr;
669 void cpu_abort(CPUArchState *env, const char *fmt, ...)
671 CPUState *cpu = ENV_GET_CPU(env);
677 fprintf(stderr, "qemu: fatal: ");
678 vfprintf(stderr, fmt, ap);
679 fprintf(stderr, "\n");
680 cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
681 if (qemu_log_enabled()) {
682 qemu_log("qemu: fatal: ");
683 qemu_log_vprintf(fmt, ap2);
685 log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
691 #if defined(CONFIG_USER_ONLY)
693 struct sigaction act;
694 sigfillset(&act.sa_mask);
695 act.sa_handler = SIG_DFL;
696 sigaction(SIGABRT, &act, NULL);
702 #if !defined(CONFIG_USER_ONLY)
703 static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
707 /* The list is protected by the iothread lock here. */
708 block = ram_list.mru_block;
709 if (block && addr - block->offset < block->length) {
712 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
713 if (addr - block->offset < block->length) {
718 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
722 ram_list.mru_block = block;
726 static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
732 block = qemu_get_ram_block(start);
733 assert(block == qemu_get_ram_block(end - 1));
734 start1 = (uintptr_t)block->host + (start - block->offset);
735 cpu_tlb_reset_dirty_all(start1, length);
738 /* Note: start and end must be within the same ram block. */
739 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
744 start &= TARGET_PAGE_MASK;
745 end = TARGET_PAGE_ALIGN(end);
747 length = end - start;
750 cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
753 tlb_reset_dirty_range_all(start, end, length);
757 static int cpu_physical_memory_set_dirty_tracking(int enable)
760 in_migration = enable;
764 hwaddr memory_region_section_get_iotlb(CPUArchState *env,
765 MemoryRegionSection *section,
767 hwaddr paddr, hwaddr xlat,
769 target_ulong *address)
774 if (memory_region_is_ram(section->mr)) {
776 iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
778 if (!section->readonly) {
779 iotlb |= PHYS_SECTION_NOTDIRTY;
781 iotlb |= PHYS_SECTION_ROM;
784 iotlb = section - address_space_memory.dispatch->map.sections;
788 /* Make accesses to pages with watchpoints go via the
789 watchpoint trap routines. */
790 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
791 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
792 /* Avoid trapping reads of pages with a write breakpoint. */
793 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
794 iotlb = PHYS_SECTION_WATCH + paddr;
795 *address |= TLB_MMIO;
803 #endif /* defined(CONFIG_USER_ONLY) */
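/*
 * Illustrative sketch (editor's addition): for RAM the iotlb value built above
 * is a page-aligned ram_addr_t with the section number OR-ed into the low
 * bits, so a later TLB fill can recover both pieces:
 *
 *     hwaddr iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
 *                    | PHYS_SECTION_NOTDIRTY;
 *     unsigned sec = iotlb & ~TARGET_PAGE_MASK;   // what iotlb_to_region() extracts
 *     ram_addr_t ram = iotlb & TARGET_PAGE_MASK;
 *
 * This is also why phys_section_add() below asserts sections_nb stays under
 * TARGET_PAGE_SIZE.
 */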
805 #if !defined(CONFIG_USER_ONLY)
807 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
809 static subpage_t *subpage_init(AddressSpace *as, hwaddr base);
811 static void *(*phys_mem_alloc)(size_t size) = qemu_anon_ram_alloc;
814 * Set a custom physical guest memory allocator.
815 * Accelerators with unusual needs may need this. Hopefully, we can
816 * get rid of it eventually.
818 void phys_mem_set_alloc(void *(*alloc)(size_t))
820 phys_mem_alloc = alloc;
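/*
 * Illustrative sketch (editor's addition): an accelerator with special memory
 * requirements would install its allocator once at startup, before any RAM
 * block is created.  "my_alloc" and "my_accel_alloc_aligned" are hypothetical
 * names; the hook must return page-aligned memory just like
 * qemu_anon_ram_alloc().
 *
 *     static void *my_alloc(size_t size)
 *     {
 *         return my_accel_alloc_aligned(size);   // hypothetical helper
 *     }
 *
 *     phys_mem_set_alloc(my_alloc);
 */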
823 static uint16_t phys_section_add(PhysPageMap *map,
824 MemoryRegionSection *section)
826 /* The physical section number is ORed with a page-aligned
827 * pointer to produce the iotlb entries. Thus it should
828 * never overflow into the page-aligned value.
830 assert(map->sections_nb < TARGET_PAGE_SIZE);
832 if (map->sections_nb == map->sections_nb_alloc) {
833 map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
834 map->sections = g_renew(MemoryRegionSection, map->sections,
835 map->sections_nb_alloc);
837 map->sections[map->sections_nb] = *section;
838 memory_region_ref(section->mr);
839 return map->sections_nb++;
842 static void phys_section_destroy(MemoryRegion *mr)
844 memory_region_unref(mr);
847 subpage_t *subpage = container_of(mr, subpage_t, iomem);
848 memory_region_destroy(&subpage->iomem);
853 static void phys_sections_free(PhysPageMap *map)
855 while (map->sections_nb > 0) {
856 MemoryRegionSection *section = &map->sections[--map->sections_nb];
857 phys_section_destroy(section->mr);
859 g_free(map->sections);
863 static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
866 hwaddr base = section->offset_within_address_space
868 MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
869 d->map.nodes, d->map.sections);
870 MemoryRegionSection subsection = {
871 .offset_within_address_space = base,
872 .size = int128_make64(TARGET_PAGE_SIZE),
876 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
878 if (!(existing->mr->subpage)) {
879 subpage = subpage_init(d->as, base);
880 subsection.mr = &subpage->iomem;
881 phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
882 phys_section_add(&d->map, &subsection));
884 subpage = container_of(existing->mr, subpage_t, iomem);
886 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
887 end = start + int128_get64(section->size) - 1;
888 subpage_register(subpage, start, end,
889 phys_section_add(&d->map, section));
893 static void register_multipage(AddressSpaceDispatch *d,
894 MemoryRegionSection *section)
896 hwaddr start_addr = section->offset_within_address_space;
897 uint16_t section_index = phys_section_add(&d->map, section);
898 uint64_t num_pages = int128_get64(int128_rshift(section->size,
902 phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
905 static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
907 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
908 AddressSpaceDispatch *d = as->next_dispatch;
909 MemoryRegionSection now = *section, remain = *section;
910 Int128 page_size = int128_make64(TARGET_PAGE_SIZE);
912 if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
913 uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
914 - now.offset_within_address_space;
916 now.size = int128_min(int128_make64(left), now.size);
917 register_subpage(d, &now);
919 now.size = int128_zero();
921 while (int128_ne(remain.size, now.size)) {
922 remain.size = int128_sub(remain.size, now.size);
923 remain.offset_within_address_space += int128_get64(now.size);
924 remain.offset_within_region += int128_get64(now.size);
926 if (int128_lt(remain.size, page_size)) {
927 register_subpage(d, &now);
928 } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
929 now.size = page_size;
930 register_subpage(d, &now);
932 now.size = int128_and(now.size, int128_neg(page_size));
933 register_multipage(d, &now);
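/*
 * Illustrative sketch (editor's addition): how mem_add() carves up a section
 * that is not page aligned.  Assuming a 4 KiB TARGET_PAGE_SIZE for the sake
 * of the numbers, a section covering [0x1800, 0x5800) is registered in three
 * pieces:
 *
 *     [0x1800, 0x2000)  head  -> register_subpage()
 *     [0x2000, 0x5000)  body  -> register_multipage()
 *     [0x5000, 0x5800)  tail  -> register_subpage()
 *
 * The real page size comes from TARGET_PAGE_BITS; only fully aligned,
 * page-multiple middles take the multipage fast path.
 */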
938 void qemu_flush_coalesced_mmio_buffer(void)
941 kvm_flush_coalesced_mmio_buffer();
944 void qemu_mutex_lock_ramlist(void)
946 qemu_mutex_lock(&ram_list.mutex);
949 void qemu_mutex_unlock_ramlist(void)
951 qemu_mutex_unlock(&ram_list.mutex);
958 #define HUGETLBFS_MAGIC 0x958458f6
960 static long gethugepagesize(const char *path)
966 ret = statfs(path, &fs);
967 } while (ret != 0 && errno == EINTR);
974 if (fs.f_type != HUGETLBFS_MAGIC)
975 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
980 static sigjmp_buf sigjump;
982 static void sigbus_handler(int signal)
984 siglongjmp(sigjump, 1);
987 static void *file_ram_alloc(RAMBlock *block,
992 char *sanitized_name;
996 unsigned long hpagesize;
998 hpagesize = gethugepagesize(path);
1003 if (memory < hpagesize) {
1007 if (kvm_enabled() && !kvm_has_sync_mmu()) {
1008 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
1012 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
1013 sanitized_name = g_strdup(block->mr->name);
1014 for (c = sanitized_name; *c != '\0'; c++) {
1019 filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
1021 g_free(sanitized_name);
1023 fd = mkstemp(filename);
1025 perror("unable to create backing store for hugepages");
1032 memory = (memory + hpagesize - 1) & ~(hpagesize - 1);
1035 * ftruncate is not supported by hugetlbfs in older
1036 * hosts, so don't bother bailing out on errors.
1037 * If anything goes wrong with it under other filesystems,
1040 if (ftruncate(fd, memory))
1041 perror("ftruncate");
1043 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
1044 if (area == MAP_FAILED) {
1045 perror("file_ram_alloc: can't mmap RAM pages");
1052 struct sigaction act, oldact;
1053 sigset_t set, oldset;
1055 memset(&act, 0, sizeof(act));
1056 act.sa_handler = &sigbus_handler;
1059 ret = sigaction(SIGBUS, &act, &oldact);
1061 perror("file_ram_alloc: failed to install signal handler");
1065 /* unblock SIGBUS */
1067 sigaddset(&set, SIGBUS);
1068 pthread_sigmask(SIG_UNBLOCK, &set, &oldset);
1070 if (sigsetjmp(sigjump, 1)) {
1071 fprintf(stderr, "file_ram_alloc: failed to preallocate pages\n");
1075 /* MAP_POPULATE silently ignores failures */
1076 for (i = 0; i < (memory / hpagesize) - 1; i++) {
1077 memset(area + (hpagesize * i), 0, 1);
1080 ret = sigaction(SIGBUS, &oldact, NULL);
1082 perror("file_ram_alloc: failed to reinstall signal handler");
1086 pthread_sigmask(SIG_SETMASK, &oldset, NULL);
1093 static void *file_ram_alloc(RAMBlock *block,
1097 fprintf(stderr, "-mem-path not supported on this host\n");
1102 static ram_addr_t find_ram_offset(ram_addr_t size)
1104 RAMBlock *block, *next_block;
1105 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
1107 assert(size != 0); /* it would hand out same offset multiple times */
1109 if (QTAILQ_EMPTY(&ram_list.blocks))
1112 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1113 ram_addr_t end, next = RAM_ADDR_MAX;
1115 end = block->offset + block->length;
1117 QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
1118 if (next_block->offset >= end) {
1119 next = MIN(next, next_block->offset);
1122 if (next - end >= size && next - end < mingap) {
1124 mingap = next - end;
1128 if (offset == RAM_ADDR_MAX) {
1129 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
1137 ram_addr_t last_ram_offset(void)
1140 ram_addr_t last = 0;
1142 QTAILQ_FOREACH(block, &ram_list.blocks, next)
1143 last = MAX(last, block->offset + block->length);
1148 static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1152 /* Use MADV_DONTDUMP if the user doesn't want the guest memory in the core dump */
1153 if (!qemu_opt_get_bool(qemu_get_machine_opts(),
1154 "dump-guest-core", true)) {
1155 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1157 perror("qemu_madvise");
1158 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1159 "but dump_guest_core=off specified\n");
1164 void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
1166 RAMBlock *new_block, *block;
1169 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1170 if (block->offset == addr) {
1176 assert(!new_block->idstr[0]);
1179 char *id = qdev_get_dev_path(dev);
1181 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
1185 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1187 /* This assumes the iothread lock is taken here too. */
1188 qemu_mutex_lock_ramlist();
1189 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1190 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
1191 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1196 qemu_mutex_unlock_ramlist();
1199 static int memory_try_enable_merging(void *addr, size_t len)
1201 if (!qemu_opt_get_bool(qemu_get_machine_opts(), "mem-merge", true)) {
1202 /* disabled by the user */
1206 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1209 ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
1212 RAMBlock *block, *new_block;
1214 size = TARGET_PAGE_ALIGN(size);
1215 new_block = g_malloc0(sizeof(*new_block));
1218 /* This assumes the iothread lock is taken here too. */
1219 qemu_mutex_lock_ramlist();
1221 new_block->offset = find_ram_offset(size);
1223 new_block->host = host;
1224 new_block->flags |= RAM_PREALLOC_MASK;
1225 } else if (xen_enabled()) {
1227 fprintf(stderr, "-mem-path not supported with Xen\n");
1230 xen_ram_alloc(new_block->offset, size, mr);
1233 if (phys_mem_alloc != qemu_anon_ram_alloc) {
1235 * file_ram_alloc() needs to allocate just like
1236 * phys_mem_alloc, but we haven't bothered to provide
1240 "-mem-path not supported with this accelerator\n");
1243 new_block->host = file_ram_alloc(new_block, size, mem_path);
1245 if (!new_block->host) {
1246 new_block->host = phys_mem_alloc(size);
1247 if (!new_block->host) {
1248 fprintf(stderr, "Cannot set up guest memory '%s': %s\n",
1249 new_block->mr->name, strerror(errno));
1252 memory_try_enable_merging(new_block->host, size);
1255 new_block->length = size;
1257 /* Keep the list sorted from biggest to smallest block. */
1258 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1259 if (block->length < new_block->length) {
1264 QTAILQ_INSERT_BEFORE(block, new_block, next);
1266 QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
1268 ram_list.mru_block = NULL;
1271 qemu_mutex_unlock_ramlist();
1273 ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
1274 last_ram_offset() >> TARGET_PAGE_BITS);
1275 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
1276 0, size >> TARGET_PAGE_BITS);
1277 cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);
1279 qemu_ram_setup_dump(new_block->host, size);
1280 qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);
1281 qemu_madvise(new_block->host, size, QEMU_MADV_DONTFORK);
1284 kvm_setup_guest_memory(new_block->host, size);
1286 return new_block->offset;
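/*
 * Illustrative sketch (editor's addition): board code normally creates guest
 * RAM through the MemoryRegion API rather than calling these helpers
 * directly, but the underlying flow is ("mr" and "size" assumed in scope;
 * memory_region_init_ram() in memory.c is the usual entry point that ends up
 * here):
 *
 *     ram_addr_t offset = qemu_ram_alloc(size, mr);   // reserve a ram_addr_t range
 *     void *host = qemu_get_ram_ptr(offset);          // host view of that block
 */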
1289 ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
1291 return qemu_ram_alloc_from_ptr(size, NULL, mr);
1294 void qemu_ram_free_from_ptr(ram_addr_t addr)
1298 /* This assumes the iothread lock is taken here too. */
1299 qemu_mutex_lock_ramlist();
1300 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1301 if (addr == block->offset) {
1302 QTAILQ_REMOVE(&ram_list.blocks, block, next);
1303 ram_list.mru_block = NULL;
1309 qemu_mutex_unlock_ramlist();
1312 void qemu_ram_free(ram_addr_t addr)
1316 /* This assumes the iothread lock is taken here too. */
1317 qemu_mutex_lock_ramlist();
1318 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1319 if (addr == block->offset) {
1320 QTAILQ_REMOVE(&ram_list.blocks, block, next);
1321 ram_list.mru_block = NULL;
1323 if (block->flags & RAM_PREALLOC_MASK) {
1325 } else if (xen_enabled()) {
1326 xen_invalidate_map_cache_entry(block->host);
1328 } else if (block->fd >= 0) {
1329 munmap(block->host, block->length);
1333 qemu_anon_ram_free(block->host, block->length);
1339 qemu_mutex_unlock_ramlist();
1344 void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1351 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1352 offset = addr - block->offset;
1353 if (offset < block->length) {
1354 vaddr = block->host + offset;
1355 if (block->flags & RAM_PREALLOC_MASK) {
1357 } else if (xen_enabled()) {
1361 munmap(vaddr, length);
1362 if (block->fd >= 0) {
1364 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
1367 flags |= MAP_PRIVATE;
1369 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1370 flags, block->fd, offset);
1373 * Remap needs to match alloc. Accelerators that
1374 * set phys_mem_alloc never remap. If they did,
1375 * we'd need a remap hook here.
1377 assert(phys_mem_alloc == qemu_anon_ram_alloc);
1379 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1380 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1383 if (area != vaddr) {
1384 fprintf(stderr, "Could not remap addr: "
1385 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
1389 memory_try_enable_merging(vaddr, length);
1390 qemu_ram_setup_dump(vaddr, length);
1396 #endif /* !_WIN32 */
1398 /* Return a host pointer to ram allocated with qemu_ram_alloc.
1399 With the exception of the softmmu code in this file, this should
1400 only be used for local memory (e.g. video ram) that the device owns,
1401 and knows it isn't going to access beyond the end of the block.
1403 It should not be used for general purpose DMA.
1404 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
1406 void *qemu_get_ram_ptr(ram_addr_t addr)
1408 RAMBlock *block = qemu_get_ram_block(addr);
1410 if (xen_enabled()) {
1411 /* We need to check if the requested address is in the RAM
1412 * because we don't want to map the entire memory in QEMU.
1413 * In that case just map until the end of the page.
1415 if (block->offset == 0) {
1416 return xen_map_cache(addr, 0, 0);
1417 } else if (block->host == NULL) {
1419 xen_map_cache(block->offset, block->length, 1);
1422 return block->host + (addr - block->offset);
1425 /* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
1426 * but takes a size argument */
1427 static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
1432 if (xen_enabled()) {
1433 return xen_map_cache(addr, *size, 1);
1437 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1438 if (addr - block->offset < block->length) {
1439 if (addr - block->offset + *size > block->length)
1440 *size = block->length - addr + block->offset;
1441 return block->host + (addr - block->offset);
1445 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1450 /* Some of the softmmu routines need to translate from a host pointer
1451 (typically a TLB entry) back to a ram offset. */
1452 MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
1455 uint8_t *host = ptr;
1457 if (xen_enabled()) {
1458 *ram_addr = xen_ram_addr_from_mapcache(ptr);
1459 return qemu_get_ram_block(*ram_addr)->mr;
1462 block = ram_list.mru_block;
1463 if (block && block->host && host - block->host < block->length) {
1467 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1468 /* This case happens when the block is not mapped. */
1469 if (block->host == NULL) {
1472 if (host - block->host < block->length) {
1480 *ram_addr = block->offset + (host - block->host);
1484 static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
1485 uint64_t val, unsigned size)
1488 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
1489 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
1490 tb_invalidate_phys_page_fast(ram_addr, size);
1491 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
1495 stb_p(qemu_get_ram_ptr(ram_addr), val);
1498 stw_p(qemu_get_ram_ptr(ram_addr), val);
1501 stl_p(qemu_get_ram_ptr(ram_addr), val);
1506 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
1507 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
1508 /* we remove the notdirty callback only if the code has been
1510 if (dirty_flags == 0xff) {
1511 CPUArchState *env = current_cpu->env_ptr;
1512 tlb_set_dirty(env, env->mem_io_vaddr);
1516 static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
1517 unsigned size, bool is_write)
1522 static const MemoryRegionOps notdirty_mem_ops = {
1523 .write = notdirty_mem_write,
1524 .valid.accepts = notdirty_mem_accepts,
1525 .endianness = DEVICE_NATIVE_ENDIAN,
1528 /* Generate a debug exception if a watchpoint has been hit. */
1529 static void check_watchpoint(int offset, int len_mask, int flags)
1531 CPUArchState *env = current_cpu->env_ptr;
1532 target_ulong pc, cs_base;
1537 if (env->watchpoint_hit) {
1538 /* We re-entered the check after replacing the TB. Now raise
1539 * the debug interrupt so that it will trigger after the
1540 * current instruction. */
1541 cpu_interrupt(ENV_GET_CPU(env), CPU_INTERRUPT_DEBUG);
1544 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
1545 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1546 if ((vaddr == (wp->vaddr & len_mask) ||
1547 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
1548 wp->flags |= BP_WATCHPOINT_HIT;
1549 if (!env->watchpoint_hit) {
1550 env->watchpoint_hit = wp;
1551 tb_check_watchpoint(env);
1552 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
1553 env->exception_index = EXCP_DEBUG;
1556 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
1557 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
1558 cpu_resume_from_signal(env, NULL);
1562 wp->flags &= ~BP_WATCHPOINT_HIT;
1567 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
1568 so these check for a hit then pass through to the normal out-of-line
1570 static uint64_t watch_mem_read(void *opaque, hwaddr addr,
1573 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
1575 case 1: return ldub_phys(addr);
1576 case 2: return lduw_phys(addr);
1577 case 4: return ldl_phys(addr);
1582 static void watch_mem_write(void *opaque, hwaddr addr,
1583 uint64_t val, unsigned size)
1585 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
1588 stb_phys(addr, val);
1591 stw_phys(addr, val);
1594 stl_phys(addr, val);
1600 static const MemoryRegionOps watch_mem_ops = {
1601 .read = watch_mem_read,
1602 .write = watch_mem_write,
1603 .endianness = DEVICE_NATIVE_ENDIAN,
1606 static uint64_t subpage_read(void *opaque, hwaddr addr,
1609 subpage_t *subpage = opaque;
1612 #if defined(DEBUG_SUBPAGE)
1613 printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
1614 subpage, len, addr);
1616 address_space_read(subpage->as, addr + subpage->base, buf, len);
1629 static void subpage_write(void *opaque, hwaddr addr,
1630 uint64_t value, unsigned len)
1632 subpage_t *subpage = opaque;
1635 #if defined(DEBUG_SUBPAGE)
1636 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
1637 " value %"PRIx64 "\n",
1638 __func__, subpage, len, addr, value);
1653 address_space_write(subpage->as, addr + subpage->base, buf, len);
1656 static bool subpage_accepts(void *opaque, hwaddr addr,
1657 unsigned len, bool is_write)
1659 subpage_t *subpage = opaque;
1660 #if defined(DEBUG_SUBPAGE)
1661 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
1662 __func__, subpage, is_write ? 'w' : 'r', len, addr);
1665 return address_space_access_valid(subpage->as, addr + subpage->base,
1669 static const MemoryRegionOps subpage_ops = {
1670 .read = subpage_read,
1671 .write = subpage_write,
1672 .valid.accepts = subpage_accepts,
1673 .endianness = DEVICE_NATIVE_ENDIAN,
1676 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
1681 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
1683 idx = SUBPAGE_IDX(start);
1684 eidx = SUBPAGE_IDX(end);
1685 #if defined(DEBUG_SUBPAGE)
1686 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
1687 __func__, mmio, start, end, idx, eidx, section);
1689 for (; idx <= eidx; idx++) {
1690 mmio->sub_section[idx] = section;
1696 static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
1700 mmio = g_malloc0(sizeof(subpage_t));
1704 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
1705 "subpage", TARGET_PAGE_SIZE);
1706 mmio->iomem.subpage = true;
1707 #if defined(DEBUG_SUBPAGE)
1708 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
1709 mmio, base, TARGET_PAGE_SIZE);
1711 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
1716 static uint16_t dummy_section(PhysPageMap *map, MemoryRegion *mr)
1718 MemoryRegionSection section = {
1720 .offset_within_address_space = 0,
1721 .offset_within_region = 0,
1722 .size = int128_2_64(),
1725 return phys_section_add(map, &section);
1728 MemoryRegion *iotlb_to_region(hwaddr index)
1730 return address_space_memory.dispatch->map.sections[
1731 index & ~TARGET_PAGE_MASK].mr;
1734 static void io_mem_init(void)
1736 memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, "rom", UINT64_MAX);
1737 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
1738 "unassigned", UINT64_MAX);
1739 memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
1740 "notdirty", UINT64_MAX);
1741 memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
1742 "watch", UINT64_MAX);
);
1745 static void mem_begin(MemoryListener
*listener
)
1747 AddressSpace
*as
= container_of(listener
, AddressSpace
, dispatch_listener
);
1748 AddressSpaceDispatch
*d
= g_new0(AddressSpaceDispatch
, 1);
1751 n
= dummy_section(&d
->map
, &io_mem_unassigned
);
1752 assert(n
== PHYS_SECTION_UNASSIGNED
);
1753 n
= dummy_section(&d
->map
, &io_mem_notdirty
);
1754 assert(n
== PHYS_SECTION_NOTDIRTY
);
1755 n
= dummy_section(&d
->map
, &io_mem_rom
);
1756 assert(n
== PHYS_SECTION_ROM
);
1757 n
= dummy_section(&d
->map
, &io_mem_watch
);
1758 assert(n
== PHYS_SECTION_WATCH
);
1760 d
->phys_map
= (PhysPageEntry
) { .ptr
= PHYS_MAP_NODE_NIL
, .skip
= 1 };
1762 as
->next_dispatch
= d
;
1765 static void mem_commit(MemoryListener
*listener
)
1767 AddressSpace
*as
= container_of(listener
, AddressSpace
, dispatch_listener
);
1768 AddressSpaceDispatch
*cur
= as
->dispatch
;
1769 AddressSpaceDispatch
*next
= as
->next_dispatch
;
1771 phys_page_compact_all(next
, next
->map
.nodes_nb
);
1773 as
->dispatch
= next
;
1776 phys_sections_free(&cur
->map
);
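/*
 * Illustrative sketch (editor's addition): the memory core drives the
 * callbacks above roughly as follows whenever the topology of an AddressSpace
 * changes (names as registered in address_space_init_dispatch() below):
 *
 *     mem_begin(listener);             // start a fresh AddressSpaceDispatch
 *     mem_add(listener, &section0);    // one call per flat-view section
 *     mem_add(listener, &section1);
 *     mem_commit(listener);            // compact, then publish next_dispatch
 *
 * Readers keep using as->dispatch; only mem_commit() swaps in the new map and
 * frees the sections of the previous one.
 */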
1781 static void tcg_commit(MemoryListener *listener)
1785 /* since each CPU stores ram addresses in its TLB cache, we must
1786 reset the modified entries */
1789 CPUArchState *env = cpu->env_ptr;
1795 static void core_log_global_start(MemoryListener *listener)
1797 cpu_physical_memory_set_dirty_tracking(1);
1800 static void core_log_global_stop(MemoryListener *listener)
1802 cpu_physical_memory_set_dirty_tracking(0);
1805 static MemoryListener core_memory_listener = {
1806 .log_global_start = core_log_global_start,
1807 .log_global_stop = core_log_global_stop,
1811 static MemoryListener tcg_memory_listener = {
1812 .commit = tcg_commit,
1815 void address_space_init_dispatch(AddressSpace *as)
1817 as->dispatch = NULL;
1818 as->dispatch_listener = (MemoryListener) {
1820 .commit = mem_commit,
1821 .region_add = mem_add,
1822 .region_nop = mem_add,
1825 memory_listener_register(&as->dispatch_listener, as);
1828 void address_space_destroy_dispatch(AddressSpace *as)
1830 AddressSpaceDispatch *d = as->dispatch;
1832 memory_listener_unregister(&as->dispatch_listener);
1834 as->dispatch = NULL;
1837 static void memory_map_init(void)
1839 system_memory = g_malloc(sizeof(*system_memory));
1841 memory_region_init(system_memory, NULL, "system", UINT64_MAX);
1842 address_space_init(&address_space_memory, system_memory, "memory");
1844 system_io = g_malloc(sizeof(*system_io));
1845 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
1847 address_space_init(&address_space_io, system_io, "I/O");
1849 memory_listener_register(&core_memory_listener, &address_space_memory);
1850 if (tcg_enabled()) {
1851 memory_listener_register(&tcg_memory_listener, &address_space_memory);
1855 MemoryRegion *get_system_memory(void)
1857 return system_memory;
1860 MemoryRegion *get_system_io(void)
1865 #endif /* !defined(CONFIG_USER_ONLY) */
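/*
 * Illustrative sketch (editor's addition): board code composes the guest's
 * memory map on top of the regions created in memory_map_init().
 * memory_region_add_subregion() and memory_region_init_ram() are assumed from
 * the memory API (memory.h/memory.c), not defined in this file:
 *
 *     MemoryRegion *sysmem = get_system_memory();
 *     MemoryRegion *ram = g_new(MemoryRegion, 1);
 *     memory_region_init_ram(ram, NULL, "pc.ram", ram_size);
 *     memory_region_add_subregion(sysmem, 0, ram);
 *
 * The listeners registered above then rebuild the dispatch tables for
 * address_space_memory.
 */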
1867 /* physical memory access (slow version, mainly for debug) */
1868 #if defined(CONFIG_USER_ONLY)
1869 int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
1870 uint8_t *buf, int len, int is_write)
1877 page = addr & TARGET_PAGE_MASK;
1878 l = (page + TARGET_PAGE_SIZE) - addr;
1881 flags = page_get_flags(page);
1882 if (!(flags & PAGE_VALID))
1885 if (!(flags & PAGE_WRITE))
1887 /* XXX: this code should not depend on lock_user */
1888 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
1891 unlock_user(p, addr, l);
1893 if (!(flags & PAGE_READ))
1895 /* XXX: this code should not depend on lock_user */
1896 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
1899 unlock_user(p, addr, 0);
1910 static void invalidate_and_set_dirty(hwaddr addr,
1913 if (!cpu_physical_memory_is_dirty(addr)) {
1914 /* invalidate code */
1915 tb_invalidate_phys_page_range(addr, addr + length, 0);
1917 cpu_physical_memory_set_dirty_flags(addr, (0xff & ~CODE_DIRTY_FLAG));
1919 xen_modified_memory(addr, length);
1922 static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
1924 if (memory_region_is_ram(mr)) {
1925 return !(is_write && mr->readonly);
1927 if (memory_region_is_romd(mr)) {
1934 static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
1936 unsigned access_size_max = mr->ops->valid.max_access_size;
1938 /* Regions are assumed to support 1-4 byte accesses unless
1939 otherwise specified. */
1940 if (access_size_max == 0) {
1941 access_size_max = 4;
1944 /* Bound the maximum access by the alignment of the address. */
1945 if (!mr->ops->impl.unaligned) {
1946 unsigned align_size_max = addr & -addr;
1947 if (align_size_max != 0 && align_size_max < access_size_max) {
1948 access_size_max = align_size_max;
1952 /* Don't attempt accesses larger than the maximum. */
1953 if (l > access_size_max) {
1954 l = access_size_max;
1957 l = 1 << (qemu_fls(l) - 1);
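/*
 * Illustrative sketch (editor's addition): a worked example of the size
 * clamping above.  For an 8-byte request at addr 0x1002 to a region whose ops
 * declare max_access_size == 4 and no unaligned support:
 *
 *     addr & -addr          -> 0x2   (alignment of the address)
 *     access_size_max       -> 2     (alignment is smaller than the declared 4)
 *     l = min(8, 2)         -> 2
 *     1 << (qemu_fls(2) - 1)-> 2     (round down to a power of two)
 *
 * so address_space_rw() issues the 8-byte access as a sequence of smaller
 * accesses.
 */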
1963 bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
1964 int len, bool is_write)
1975 mr = address_space_translate(as, addr, &addr1, &l, is_write);
1978 if (!memory_access_is_direct(mr, is_write)) {
1979 l = memory_access_size(mr, l, addr1);
1980 /* XXX: could force current_cpu to NULL to avoid
1984 /* 64 bit write access */
1986 error |= io_mem_write(mr, addr1, val, 8);
1989 /* 32 bit write access */
1991 error |= io_mem_write(mr, addr1, val, 4);
1994 /* 16 bit write access */
1996 error |= io_mem_write(mr, addr1, val, 2);
1999 /* 8 bit write access */
2001 error |= io_mem_write(mr, addr1, val, 1);
2007 addr1 += memory_region_get_ram_addr(mr);
2009 ptr = qemu_get_ram_ptr(addr1);
2010 memcpy(ptr, buf, l);
2011 invalidate_and_set_dirty(addr1, l);
2014 if (!memory_access_is_direct(mr, is_write)) {
2016 l = memory_access_size(mr, l, addr1);
2019 /* 64 bit read access */
2020 error |= io_mem_read(mr, addr1, &val, 8);
2024 /* 32 bit read access */
2025 error |= io_mem_read(mr, addr1, &val, 4);
2029 /* 16 bit read access */
2030 error |= io_mem_read(mr, addr1, &val, 2);
2034 /* 8 bit read access */
2035 error |= io_mem_read(mr, addr1, &val, 1);
2043 ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
2044 memcpy(buf, ptr, l);
2055 bool address_space_write(AddressSpace *as, hwaddr addr,
2056 const uint8_t *buf, int len)
2058 return address_space_rw(as, addr, (uint8_t *)buf, len, true);
2061 bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
2063 return address_space_rw(as, addr, buf, len, false);
2067 void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
2068 int len, int is_write)
2070 address_space_rw(&address_space_memory, addr, buf, len, is_write);
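/*
 * Illustrative sketch (editor's addition): device code that has only a guest
 * physical address uses the wrappers above, e.g. to copy a descriptor out of
 * guest memory and write it back ("desc_pa" is assumed in scope, "struct
 * my_desc" is hypothetical):
 *
 *     struct my_desc desc;
 *     cpu_physical_memory_rw(desc_pa, (uint8_t *)&desc, sizeof(desc), 0);  // read
 *     cpu_physical_memory_rw(desc_pa, (uint8_t *)&desc, sizeof(desc), 1);  // write
 *
 * Both directions funnel into address_space_rw(&address_space_memory, ...).
 */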
2073 /* used for ROM loading: can write in RAM and ROM */
2074 void cpu_physical_memory_write_rom(hwaddr addr,
2075 const uint8_t *buf, int len)
2084 mr = address_space_translate(&address_space_memory,
2085 addr, &addr1, &l, true);
2087 if (!(memory_region_is_ram(mr) ||
2088 memory_region_is_romd(mr))) {
2091 addr1 += memory_region_get_ram_addr(mr);
2093 ptr = qemu_get_ram_ptr(addr1);
2094 memcpy(ptr, buf, l);
2095 invalidate_and_set_dirty(addr1, l);
2110 static BounceBuffer bounce;
2112 typedef struct MapClient {
2114 void (*callback)(void *opaque);
2115 QLIST_ENTRY(MapClient) link;
2118 static QLIST_HEAD(map_client_list, MapClient) map_client_list
2119 = QLIST_HEAD_INITIALIZER(map_client_list);
2121 void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
2123 MapClient *client = g_malloc(sizeof(*client));
2125 client->opaque = opaque;
2126 client->callback = callback;
2127 QLIST_INSERT_HEAD(&map_client_list, client, link);
2131 static void cpu_unregister_map_client(void *_client)
2133 MapClient *client = (MapClient *)_client;
2135 QLIST_REMOVE(client, link);
2139 static void cpu_notify_map_clients(void)
2143 while (!QLIST_EMPTY(&map_client_list)) {
2144 client = QLIST_FIRST(&map_client_list);
2145 client->callback(client->opaque);
2146 cpu_unregister_map_client(client);
2150 bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2157 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2158 if (!memory_access_is_direct(mr, is_write)) {
2159 l = memory_access_size(mr, l, addr);
2160 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
2171 /* Map a physical memory region into a host virtual address.
2172 * May map a subset of the requested range, given by and returned in *plen.
2173 * May return NULL if resources needed to perform the mapping are exhausted.
2174 * Use only for reads OR writes - not for read-modify-write operations.
2175 * Use cpu_register_map_client() to know when retrying the map operation is
2176 * likely to succeed.
2178 void *address_space_map(AddressSpace *as,
2185 hwaddr l, xlat, base;
2186 MemoryRegion *mr, *this_mr;
2194 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2195 if (!memory_access_is_direct(mr, is_write)) {
2196 if (bounce.buffer) {
2199 /* Avoid unbounded allocations */
2200 l = MIN(l, TARGET_PAGE_SIZE);
2201 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
2205 memory_region_ref(mr);
2208 address_space_read(as, addr, bounce.buffer, l);
2212 return bounce.buffer;
2216 raddr = memory_region_get_ram_addr(mr);
2227 this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
2228 if (this_mr != mr || xlat != base + done) {
2233 memory_region_ref(mr);
2235 return qemu_ram_ptr_length(raddr + base, plen);
2238 /* Unmaps a memory region previously mapped by address_space_map().
2239 * Will also mark the memory as dirty if is_write == 1. access_len gives
2240 * the amount of memory that was actually read or written by the caller.
2242 void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2243 int is_write, hwaddr access_len)
2245 if (buffer != bounce.buffer) {
2249 mr = qemu_ram_addr_from_host(buffer, &addr1);
2252 while (access_len) {
2254 l = TARGET_PAGE_SIZE;
2257 invalidate_and_set_dirty(addr1, l);
2262 if (xen_enabled()) {
2263 xen_invalidate_map_cache_entry(buffer);
2265 memory_region_unref(mr);
2269 address_space_write(as, bounce.addr, bounce.buffer, access_len);
2271 qemu_vfree(bounce.buffer);
2272 bounce.buffer = NULL;
2273 memory_region_unref(bounce.mr);
2274 cpu_notify_map_clients();
2277 void *cpu_physical_memory_map(hwaddr addr,
2281 return address_space_map(&address_space_memory, addr, plen, is_write);
2284 void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2285 int is_write, hwaddr access_len)
2287 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
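/*
 * Illustrative sketch (editor's addition): the zero-copy DMA pattern built on
 * the two functions above.  When the target is not directly accessible RAM,
 * the map falls back to the single bounce buffer, so callers must tolerate a
 * shorter mapping and a NULL return ("as", "addr" and "len" assumed in scope;
 * fill_buffer() is a hypothetical producer):
 *
 *     hwaddr plen = len;
 *     void *p = address_space_map(as, addr, &plen, true);
 *     if (!p) {
 *         // resources exhausted: retry via cpu_register_map_client()
 *     } else {
 *         fill_buffer(p, plen);
 *         address_space_unmap(as, p, plen, true, plen);
 *     }
 */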
2290 /* warning: addr must be aligned */
2291 static inline uint32_t ldl_phys_internal(hwaddr addr,
2292 enum device_endian endian)
2300 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2302 if (l < 4 || !memory_access_is_direct(mr, false)) {
2304 io_mem_read(mr, addr1, &val, 4);
2305 #if defined(TARGET_WORDS_BIGENDIAN)
2306 if (endian == DEVICE_LITTLE_ENDIAN) {
2310 if (endian == DEVICE_BIG_ENDIAN) {
2316 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
2320 case DEVICE_LITTLE_ENDIAN:
2321 val = ldl_le_p(ptr);
2323 case DEVICE_BIG_ENDIAN:
2324 val = ldl_be_p(ptr);
2334 uint32_t ldl_phys(hwaddr addr)
2336 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2339 uint32_t ldl_le_phys(hwaddr addr)
2341 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2344 uint32_t ldl_be_phys(hwaddr addr)
2346 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
2349 /* warning: addr must be aligned */
2350 static inline uint64_t ldq_phys_internal(hwaddr addr,
2351 enum device_endian endian)
2359 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2361 if (l < 8 || !memory_access_is_direct(mr, false)) {
2363 io_mem_read(mr, addr1, &val, 8);
2364 #if defined(TARGET_WORDS_BIGENDIAN)
2365 if (endian == DEVICE_LITTLE_ENDIAN) {
2369 if (endian == DEVICE_BIG_ENDIAN) {
2375 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
2379 case DEVICE_LITTLE_ENDIAN:
2380 val = ldq_le_p(ptr);
2382 case DEVICE_BIG_ENDIAN:
2383 val = ldq_be_p(ptr);
2393 uint64_t ldq_phys(hwaddr addr)
2395 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2398 uint64_t ldq_le_phys(hwaddr addr)
2400 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2403 uint64_t ldq_be_phys(hwaddr addr)
2405 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
2409 uint32_t ldub_phys(hwaddr addr)
2412 cpu_physical_memory_read(addr, &val, 1);
2416 /* warning: addr must be aligned */
2417 static inline uint32_t lduw_phys_internal(hwaddr addr,
2418 enum device_endian endian)
2426 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2428 if (l < 2 || !memory_access_is_direct(mr, false)) {
2430 io_mem_read(mr, addr1, &val, 2);
2431 #if defined(TARGET_WORDS_BIGENDIAN)
2432 if (endian == DEVICE_LITTLE_ENDIAN) {
2436 if (endian == DEVICE_BIG_ENDIAN) {
2442 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
2446 case DEVICE_LITTLE_ENDIAN:
2447 val = lduw_le_p(ptr);
2449 case DEVICE_BIG_ENDIAN:
2450 val = lduw_be_p(ptr);
2460 uint32_t lduw_phys(hwaddr addr)
2462 return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2465 uint32_t lduw_le_phys(hwaddr addr)
2467 return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2470 uint32_t lduw_be_phys(hwaddr addr)
2472 return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
2475 /* warning: addr must be aligned. The ram page is not marked as dirty
2476 and the code inside is not invalidated. It is useful if the dirty
2477 bits are used to track modified PTEs */
2478 void stl_phys_notdirty(hwaddr addr, uint32_t val)
2485 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2487 if (l < 4 || !memory_access_is_direct(mr, true)) {
2488 io_mem_write(mr, addr1, val, 4);
2490 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
2491 ptr = qemu_get_ram_ptr(addr1);
2494 if (unlikely(in_migration)) {
2495 if (!cpu_physical_memory_is_dirty(addr1)) {
2496 /* invalidate code */
2497 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2499 cpu_physical_memory_set_dirty_flags(
2500 addr1, (0xff & ~CODE_DIRTY_FLAG));
2506 /* warning: addr must be aligned */
2507 static inline void stl_phys_internal(hwaddr addr, uint32_t val,
2508 enum device_endian endian)
2515 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2517 if (l < 4 || !memory_access_is_direct(mr, true)) {
2518 #if defined(TARGET_WORDS_BIGENDIAN)
2519 if (endian == DEVICE_LITTLE_ENDIAN) {
2523 if (endian == DEVICE_BIG_ENDIAN) {
2527 io_mem_write(mr, addr1, val, 4);
2530 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
2531 ptr = qemu_get_ram_ptr(addr1);
2533 case DEVICE_LITTLE_ENDIAN:
2536 case DEVICE_BIG_ENDIAN:
2543 invalidate_and_set_dirty(addr1, 4);
2547 void stl_phys(hwaddr addr, uint32_t val)
2549 stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
2552 void stl_le_phys(hwaddr addr, uint32_t val)
2554 stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
2557 void stl_be_phys(hwaddr addr, uint32_t val)
2559 stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
2563 void stb_phys(hwaddr addr, uint32_t val)
2566 cpu_physical_memory_write(addr, &v, 1);
2569 /* warning: addr must be aligned */
2570 static inline void stw_phys_internal(hwaddr addr, uint32_t val,
2571 enum device_endian endian)
2578 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2580 if (l < 2 || !memory_access_is_direct(mr, true)) {
2581 #if defined(TARGET_WORDS_BIGENDIAN)
2582 if (endian == DEVICE_LITTLE_ENDIAN) {
2586 if (endian == DEVICE_BIG_ENDIAN) {
2590 io_mem_write(mr, addr1, val, 2);
2593 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
2594 ptr = qemu_get_ram_ptr(addr1);
2596 case DEVICE_LITTLE_ENDIAN:
2599 case DEVICE_BIG_ENDIAN:
2606 invalidate_and_set_dirty(addr1, 2);
2610 void stw_phys(hwaddr addr, uint32_t val)
2612 stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
2615 void stw_le_phys(hwaddr addr, uint32_t val)
2617 stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
2620 void stw_be_phys(hwaddr addr, uint32_t val)
2622 stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
2626 void stq_phys(hwaddr addr, uint64_t val)
2629 cpu_physical_memory_write(addr, &val, 8);
2632 void stq_le_phys(hwaddr addr, uint64_t val)
2634 val = cpu_to_le64(val);
2635 cpu_physical_memory_write(addr, &val, 8);
2638 void stq_be_phys(hwaddr addr, uint64_t val)
2640 val = cpu_to_be64(val);
2641 cpu_physical_memory_write(addr, &val, 8);
2644 /* virtual memory access for debug (includes writing to ROM) */
2645 int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
2646 uint8_t *buf, int len, int is_write)
2653 page = addr & TARGET_PAGE_MASK;
2654 phys_addr = cpu_get_phys_page_debug(cpu, page);
2655 /* if no physical page mapped, return an error */
2656 if (phys_addr == -1)
2658 l = (page + TARGET_PAGE_SIZE) - addr;
2661 phys_addr += (addr & ~TARGET_PAGE_MASK);
2663 cpu_physical_memory_write_rom(phys_addr, buf, l);
2665 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
2674 #if !defined(CONFIG_USER_ONLY)
2677 * A helper function for the _utterly broken_ virtio device model to find out if
2678 * it's running on a big endian machine. Don't do this at home kids!
2680 bool virtio_is_big_endian(void);
2681 bool virtio_is_big_endian(void)
2683 #if defined(TARGET_WORDS_BIGENDIAN)
2692 #ifndef CONFIG_USER_ONLY
2693 bool cpu_physical_memory_is_io(hwaddr phys_addr)
2698 mr = address_space_translate(&address_space_memory,
2699 phys_addr, &phys_addr, &l, false);
2701 return !(memory_region_is_ram(mr) ||
2702 memory_region_is_romd(mr));
2705 void qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
2709 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
2710 func(block->host, block->offset, block->length, opaque);