 * Copyright (c) 2003 Fabrice Bellard
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
#include <sys/types.h>
#include "qemu-common.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "exec/cpu-all.h"
#include "exec/cputlb.h"
#include "translate-all.h"
#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "qemu/range.h"

//#define DEBUG_SUBPAGE
#if !defined(CONFIG_USER_ONLY)
static bool in_migration;

RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC   (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED     (1 << 1)
struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
DEFINE_TLS(CPUState *, current_cpu);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables.  */
#define ADDR_SPACE_BITS 64

#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
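/* The definitions above describe a radix tree over physical addresses:
 * ADDR_SPACE_BITS is the width of the address space, each level resolves
 * P_L2_BITS bits (P_L2_SIZE entries per node), and P_L2_LEVELS is the
 * resulting depth above TARGET_PAGE_BITS.  A PhysPageEntry with skip == 0
 * is a leaf holding a section index; otherwise ptr names the next node and
 * skip says how many levels a lookup may jump at once. */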
typedef PhysPageEntry Node[P_L2_SIZE];

typedef struct PhysPageMap {
    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb_alloc;
    MemoryRegionSection *sections;

struct AddressSpaceDispatch {
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
    PhysPageEntry phys_map;

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)

typedef struct subpage_t {
    uint16_t sub_section[TARGET_PAGE_SIZE];

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3
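/* These fixed section numbers must match the order in which the dummy
 * sections are registered in mem_begin() below (unassigned, notdirty, rom,
 * watch); mem_begin() asserts that they line up. */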
static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;

#if !defined(CONFIG_USER_ONLY)
static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);

static uint32_t phys_map_node_alloc(PhysPageMap *map)
    ret = map->nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);
    for (i = 0; i < P_L2_SIZE; ++i) {
        map->nodes[ret][i].skip = 1;
        map->nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map);
        p = map->nodes[lp->ptr];
        for (i = 0; i < P_L2_SIZE; i++) {
            p[i].ptr = PHYS_SECTION_UNASSIGNED;
        p = map->nodes[lp->ptr];
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
/* Compact a non leaf page entry. Simply detect that the entry has a single child,
 * and update our entry so we can skip it and go directly to the destination.
static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
    unsigned valid_ptr = P_L2_SIZE;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
        phys_page_compact(&p[i], nodes, compacted);

    /* We can only compress if there's only one child. */
    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
        lp->skip += p[valid_ptr].skip;
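/* After compaction, a parent whose only child was another interior node
 * absorbs the child's 'skip' count, so later lookups can jump several
 * radix-tree levels in one step; the check above bounds how much skip can
 * accumulate in a single PhysPageEntry. */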
static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
    DECLARE_BITMAP(compacted, nodes_nb);

    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes, compacted);

static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
    hwaddr index = addr >> TARGET_PAGE_BITS;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];

    if (sections[lp.ptr].size.hi ||
        range_covers_byte(sections[lp.ptr].offset_within_address_space,
                          sections[lp.ptr].size.lo, addr)) {
        return &sections[lp.ptr];
    return &sections[PHYS_SECTION_UNASSIGNED];
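/* phys_page_find() walks the radix tree from the top level down, consuming
 * lp.skip levels per step thanks to the compaction above, and falls back to
 * the PHYS_SECTION_UNASSIGNED dummy section when it hits an empty entry or
 * when the final section does not actually cover 'addr'. */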
bool memory_region_is_unassigned(MemoryRegion *mr)
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        bool resolve_subpage)
    MemoryRegionSection *section;

    section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];

static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
    MemoryRegionSection *section;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    diff = int128_sub(section->mr->size, int128_make64(addr));
    *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
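/* On return from address_space_translate_internal(), *xlat is the offset of
 * 'addr' inside the returned section's MemoryRegion, and *plen has been
 * clipped so that the access does not run past the end of that region. */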
static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
    if (memory_region_is_ram(mr)) {
        return !(is_write && mr->readonly);
    if (memory_region_is_romd(mr)) {

MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
    MemoryRegionSection *section;

        section = address_space_translate_internal(as->dispatch, addr, &addr, plen, true);

        if (!mr->iommu_ops) {

        iotlb = mr->iommu_ops->translate(mr, addr);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        len = MIN(len, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;

        as = iotlb.target_as;

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        len = MIN(page, len);
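/* address_space_translate() repeats the lookup when the resolved region is
 * an IOMMU: the IOTLB entry rewrites the address, clips the length, and may
 * redirect the access to another address space (iotlb.target_as), or to
 * io_mem_unassigned when the required permission bit is missing. */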
MemoryRegionSection *
address_space_translate_for_iotlb(AddressSpace *as, hwaddr addr, hwaddr *xlat,
    MemoryRegionSection *section;
    section = address_space_translate_internal(as->dispatch, addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);

void cpu_exec_init_all(void)
#if !defined(CONFIG_USER_ONLY)
    qemu_mutex_init(&ram_list.mutex);
#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .minimum_version_id = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()

CPUState *qemu_get_cpu(int index)
        if (cpu->cpu_index == index) {
#if !defined(CONFIG_USER_ONLY)
void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as)
    /* We only support one address space per cpu at the moment.  */
    assert(cpu->as == as);

    if (cpu->tcg_as_listener) {
        memory_listener_unregister(cpu->tcg_as_listener);
    cpu->tcg_as_listener = g_new0(MemoryListener, 1);
    cpu->tcg_as_listener->commit = tcg_commit;
    memory_listener_register(cpu->tcg_as_listener, as);
void cpu_exec_init(CPUArchState *env)
    CPUState *cpu = ENV_GET_CPU(env);
    CPUClass *cc = CPU_GET_CLASS(cpu);
#if defined(CONFIG_USER_ONLY)
    CPU_FOREACH(some_cpu) {
    cpu->cpu_index = cpu_index;
    QTAILQ_INIT(&cpu->breakpoints);
    QTAILQ_INIT(&cpu->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->as = &address_space_memory;
    cpu->thread_id = qemu_get_thread_id();
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
    assert(cc->vmsd == NULL);
    assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
    hwaddr phys = cpu_get_phys_page_debug(cpu, pc);
        tb_invalidate_phys_addr(cpu->as,
                                phys | (pc & ~TARGET_PAGE_MASK));
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)

/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
    vaddr len_mask = ~(len - 1);

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len & (len - 1)) || (addr & ~len_mask) ||
        len == 0 || len > TARGET_PAGE_SIZE) {
        error_report("tried to set invalid watchpoint at %"
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
    wp = g_malloc(sizeof(*wp));

    wp->len_mask = len_mask;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);

    tlb_flush_page(cpu, addr);
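/* Because only power-of-two, naturally aligned lengths are accepted above,
 * a watchpoint can later be matched with a simple mask test,
 * (vaddr & wp->len_mask) == wp->vaddr, as done in check_watchpoint() below. */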
/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
    vaddr len_mask = ~(len - 1);

    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
            && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(cpu, wp);

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
    QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);

    tlb_flush_page(cpu, watchpoint->vaddr);

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
#if defined(TARGET_HAS_ICE)
    bp = g_malloc(sizeof(*bp));

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);

    breakpoint_invalidate(cpu, pc);

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(cpu, bp);

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);

    breakpoint_invalidate(cpu, breakpoint->pc);

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
#if defined(TARGET_HAS_ICE)
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
            kvm_update_guest_debug(cpu, 0);
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            CPUArchState *env = cpu->env_ptr;

void cpu_abort(CPUState *cpu, const char *fmt, ...)
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
#if defined(CONFIG_USER_ONLY)
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
#if !defined(CONFIG_USER_ONLY)
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
    /* The list is protected by the iothread lock here.  */
    block = ram_list.mru_block;
    if (block && addr - block->offset < block->length) {
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);

    ram_list.mru_block = block;

static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)block->host + (start - block->offset);
    cpu_tlb_reset_dirty_all(start1, length);

/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t length,
    cpu_physical_memory_clear_dirty_range(start, length, client);
        tlb_reset_dirty_range_all(start, length);

static void cpu_physical_memory_set_dirty_tracking(bool enable)
    in_migration = enable;
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       hwaddr paddr, hwaddr xlat,
                                       target_ulong *address)
    if (memory_region_is_ram(section->mr)) {
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
            iotlb |= PHYS_SECTION_ROM;
        iotlb = section - section->address_space->dispatch->map.sections;

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
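/* The iotlb value built here encodes either a page-aligned ram address ORed
 * with a small PHYS_SECTION_* index (for RAM, so the slow path can reach the
 * dirty-tracking or ROM handler) or a plain section index for MMIO; pages
 * with watchpoints are forced through the PHYS_SECTION_WATCH handler via
 * TLB_MMIO. */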
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size) = qemu_anon_ram_alloc;

 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this.  Hopefully, we can
 * get rid of it eventually.
void phys_mem_set_alloc(void *(*alloc)(size_t))
    phys_mem_alloc = alloc;
static uint16_t phys_section_add(PhysPageMap *map,
                                 MemoryRegionSection *section)
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
    assert(map->sections_nb < TARGET_PAGE_SIZE);

    if (map->sections_nb == map->sections_nb_alloc) {
        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
        map->sections = g_renew(MemoryRegionSection, map->sections,
                                map->sections_nb_alloc);
    map->sections[map->sections_nb] = *section;
    memory_region_ref(section->mr);
    return map->sections_nb++;

static void phys_section_destroy(MemoryRegion *mr)
    memory_region_unref(mr);
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        memory_region_destroy(&subpage->iomem);

static void phys_sections_free(PhysPageMap *map)
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    g_free(map->sections);
static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
    hwaddr base = section->offset_within_address_space
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
                                                   d->map.nodes, d->map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.address_space = d->as;
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&d->map, &subsection));
        subpage = container_of(existing->mr, subpage_t, iomem);
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end,
                     phys_section_add(&d->map, section));

static void register_multipage(AddressSpaceDispatch *d,
                               MemoryRegionSection *section)
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(&d->map, section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,

    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = as->next_dispatch;
    MemoryRegionSection now = *section, remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
            - now.offset_within_address_space;
        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(d, &now);
        now.size = int128_zero();
    while (int128_ne(remain.size, now.size)) {
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);
        if (int128_lt(remain.size, page_size)) {
            register_subpage(d, &now);
        } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
            now.size = page_size;
            register_subpage(d, &now);
            now.size = int128_and(now.size, int128_neg(page_size));
            register_multipage(d, &now);
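/* mem_add() carves each incoming section into at most three pieces: an
 * unaligned head and tail that are registered page-by-page as subpages, and
 * a page-aligned middle that is registered in one go as a multipage run. */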
void qemu_flush_coalesced_mmio_buffer(void)
        kvm_flush_coalesced_mmio_buffer();

void qemu_mutex_lock_ramlist(void)
    qemu_mutex_lock(&ram_list.mutex);

void qemu_mutex_unlock_ramlist(void)
    qemu_mutex_unlock(&ram_list.mutex);

#define HUGETLBFS_MAGIC       0x958458f6

static long gethugepagesize(const char *path)
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
static void *file_ram_alloc(RAMBlock *block,
    char *sanitized_name;
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);

    if (memory < hpagesize) {
    if (kvm_enabled() && !kvm_has_sync_mmu()) {
                   "host lacks kvm mmu notifiers, -mem-path unsupported");

    /* Make name safe to use with mkstemp by replacing '/' with '_'. */
    sanitized_name = g_strdup(block->mr->name);
    for (c = sanitized_name; *c != '\0'; c++) {
    filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
    g_free(sanitized_name);

    fd = mkstemp(filename);
        error_setg_errno(errp, errno,
                         "unable to create backing store for hugepages");

    memory = (memory+hpagesize-1) & ~(hpagesize-1);

     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
    if (ftruncate(fd, memory)) {
        perror("ftruncate");

    area = mmap(0, memory, PROT_READ | PROT_WRITE,
                (block->flags & RAM_SHARED ? MAP_SHARED : MAP_PRIVATE),
    if (area == MAP_FAILED) {
        error_setg_errno(errp, errno,
                         "unable to map backing store for hugepages");
        os_mem_prealloc(fd, area, memory);
static ram_addr_t find_ram_offset(ram_addr_t size)
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    assert(size != 0); /* it would hand out same offset multiple times */

    if (QTAILQ_EMPTY(&ram_list.blocks))

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->length;

        QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
        if (next - end >= size && next - end < mingap) {
            mingap = next - end;

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",

ram_addr_t last_ram_offset(void)
    ram_addr_t last = 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next)
        last = MAX(last, block->offset + block->length);

static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
    /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
    if (!qemu_opt_get_bool(qemu_get_machine_opts(),
                           "dump-guest-core", true)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
            perror("qemu_madvise");
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                            "but dump_guest_core=off specified\n");

static RAMBlock *find_ram_block(ram_addr_t addr)
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->offset == addr) {

void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
    RAMBlock *new_block = find_ram_block(addr);

    assert(!new_block->idstr[0]);

        char *id = qdev_get_dev_path(dev);
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
    qemu_mutex_unlock_ramlist();

void qemu_ram_unset_idstr(ram_addr_t addr)
    RAMBlock *block = find_ram_block(addr);

    memset(block->idstr, 0, sizeof(block->idstr));

static int memory_try_enable_merging(void *addr, size_t len)
    if (!qemu_opt_get_bool(qemu_get_machine_opts(), "mem-merge", true)) {
        /* disabled by the user */
    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);

static ram_addr_t ram_block_add(RAMBlock *new_block)
    ram_addr_t old_ram_size, new_ram_size;

    old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    new_block->offset = find_ram_offset(new_block->length);

    if (!new_block->host) {
        if (xen_enabled()) {
            xen_ram_alloc(new_block->offset, new_block->length, new_block->mr);
            new_block->host = phys_mem_alloc(new_block->length);
            if (!new_block->host) {
                fprintf(stderr, "Cannot set up guest memory '%s': %s\n",
                        new_block->mr->name, strerror(errno));
            memory_try_enable_merging(new_block->host, new_block->length);

    /* Keep the list sorted from biggest to smallest block.  */
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->length < new_block->length) {
        QTAILQ_INSERT_BEFORE(block, new_block, next);
        QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
    ram_list.mru_block = NULL;

    qemu_mutex_unlock_ramlist();

    new_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;

    if (new_ram_size > old_ram_size) {
        for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
            ram_list.dirty_memory[i] =
                bitmap_zero_extend(ram_list.dirty_memory[i],
                                   old_ram_size, new_ram_size);
    cpu_physical_memory_set_dirty_range(new_block->offset, new_block->length);

    qemu_ram_setup_dump(new_block->host, new_block->length);
    qemu_madvise(new_block->host, new_block->length, QEMU_MADV_HUGEPAGE);
    qemu_madvise(new_block->host, new_block->length, QEMU_MADV_DONTFORK);

    if (kvm_enabled()) {
        kvm_setup_guest_memory(new_block->host, new_block->length);

    return new_block->offset;
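/* ram_block_add() keeps ram_list.blocks sorted from largest to smallest
 * block, invalidates the MRU cache, and grows the per-client dirty bitmaps
 * whenever the new block extends the end of RAM, marking the freshly added
 * range dirty. */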
ram_addr_t qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
                                    bool share, const char *mem_path,
    RAMBlock *new_block;

    if (xen_enabled()) {
        error_setg(errp, "-mem-path not supported with Xen");

    if (phys_mem_alloc != qemu_anon_ram_alloc) {
         * file_ram_alloc() needs to allocate just like
         * phys_mem_alloc, but we haven't bothered to provide
                   "-mem-path not supported with this accelerator");

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));
    new_block->length = size;
    new_block->flags = share ? RAM_SHARED : 0;
    new_block->host = file_ram_alloc(new_block, size,
    if (!new_block->host) {

    return ram_block_add(new_block);
ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
    RAMBlock *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));
    new_block->length = size;
    new_block->host = host;
        new_block->flags |= RAM_PREALLOC;

    return ram_block_add(new_block);

ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
    return qemu_ram_alloc_from_ptr(size, NULL, mr);
void qemu_ram_free_from_ptr(ram_addr_t addr)
    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
    qemu_mutex_unlock_ramlist();
void qemu_ram_free(ram_addr_t addr)
    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            if (block->flags & RAM_PREALLOC) {
            } else if (xen_enabled()) {
                xen_invalidate_map_cache_entry(block->host);
            } else if (block->fd >= 0) {
                munmap(block->host, block->length);
                qemu_anon_ram_free(block->host, block->length);
    qemu_mutex_unlock_ramlist();
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->length) {
            vaddr = block->host + offset;
            if (block->flags & RAM_PREALLOC) {
            } else if (xen_enabled()) {
                munmap(vaddr, length);
                if (block->fd >= 0) {
                    flags |= (block->flags & RAM_SHARED ?
                              MAP_SHARED : MAP_PRIVATE);
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, block->fd, offset);
                    /*
                     * Remap needs to match alloc.  Accelerators that
                     * set phys_mem_alloc never remap.  If they did,
                     * we'd need a remap hook here.
                    assert(phys_mem_alloc == qemu_anon_ram_alloc);

                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                memory_try_enable_merging(vaddr, length);
                qemu_ram_setup_dump(vaddr, length);

#endif /* !_WIN32 */
int qemu_get_ram_fd(ram_addr_t addr)
    RAMBlock *block = qemu_get_ram_block(addr);

void *qemu_get_ram_block_host_ptr(ram_addr_t addr)
    RAMBlock *block = qemu_get_ram_block(addr);
/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
void *qemu_get_ram_ptr(ram_addr_t addr)
    RAMBlock *block = qemu_get_ram_block(addr);

    if (xen_enabled()) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map until the end of the page.
        if (block->offset == 0) {
            return xen_map_cache(addr, 0, 0);
        } else if (block->host == NULL) {
                xen_map_cache(block->offset, block->length, 1);

    return block->host + (addr - block->offset);
/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
 * but takes a size argument */
static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
    if (xen_enabled()) {
        return xen_map_cache(addr, *size, 1);

        QTAILQ_FOREACH(block, &ram_list.blocks, next) {
            if (addr - block->offset < block->length) {
                if (addr - block->offset + *size > block->length)
                    *size = block->length - addr + block->offset;
                return block->host + (addr - block->offset);

        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
    uint8_t *host = ptr;

    if (xen_enabled()) {
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
        return qemu_get_ram_block(*ram_addr)->mr;

    block = ram_list.mru_block;
    if (block && block->host && host - block->host < block->length) {

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        /* This case happens when the block is not mapped. */
        if (block->host == NULL) {
        if (host - block->host < block->length) {

    *ram_addr = block->offset + (host - block->host);
static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
                               uint64_t val, unsigned size)
    if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
        tb_invalidate_phys_page_fast(ram_addr, size);
        stb_p(qemu_get_ram_ptr(ram_addr), val);
        stw_p(qemu_get_ram_ptr(ram_addr), val);
        stl_p(qemu_get_ram_ptr(ram_addr), val);
    cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_MIGRATION);
    cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_VGA);
    /* we remove the notdirty callback only if the code has been
    if (!cpu_physical_memory_is_clean(ram_addr)) {
        CPUArchState *env = current_cpu->env_ptr;
        tlb_set_dirty(env, current_cpu->mem_io_vaddr);
static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
                                 unsigned size, bool is_write)

static const MemoryRegionOps notdirty_mem_ops = {
    .write = notdirty_mem_write,
    .valid.accepts = notdirty_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
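/* notdirty_mem_ops backs the PHYS_SECTION_NOTDIRTY iotlb entries: writes to
 * clean RAM pages are routed here so that any translated code covering the
 * page can be invalidated and the dirty bitmaps updated before the page is
 * switched back to a direct RAM mapping. */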
/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len_mask, int flags)
    CPUState *cpu = current_cpu;
    CPUArchState *env = cpu->env_ptr;
    target_ulong pc, cs_base;

    if (cpu->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);

    vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!cpu->watchpoint_hit) {
                cpu->watchpoint_hit = wp;
                tb_check_watchpoint(cpu);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    cpu->exception_index = EXCP_DEBUG;
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
                    cpu_resume_from_signal(cpu, NULL);
                wp->flags &= ~BP_WATCHPOINT_HIT;
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
static uint64_t watch_mem_read(void *opaque, hwaddr addr,
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
    case 1: return ldub_phys(&address_space_memory, addr);
    case 2: return lduw_phys(&address_space_memory, addr);
    case 4: return ldl_phys(&address_space_memory, addr);

static void watch_mem_write(void *opaque, hwaddr addr,
                            uint64_t val, unsigned size)
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
        stb_phys(&address_space_memory, addr, val);
        stw_phys(&address_space_memory, addr, val);
        stl_phys(&address_space_memory, addr, val);

static const MemoryRegionOps watch_mem_ops = {
    .read = watch_mem_read,
    .write = watch_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
static uint64_t subpage_read(void *opaque, hwaddr addr,
    subpage_t *subpage = opaque;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
           subpage, len, addr);
    address_space_read(subpage->as, addr + subpage->base, buf, len);

static void subpage_write(void *opaque, hwaddr addr,
                          uint64_t value, unsigned len)
    subpage_t *subpage = opaque;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx
           " value %"PRIx64"\n",
           __func__, subpage, len, addr, value);
    address_space_write(subpage->as, addr + subpage->base, buf, len);

static bool subpage_accepts(void *opaque, hwaddr addr,
                            unsigned len, bool is_write)
    subpage_t *subpage = opaque;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
           __func__, subpage, is_write ? 'w' : 'r', len, addr);
    return address_space_access_valid(subpage->as, addr + subpage->base,

static const MemoryRegionOps subpage_ops = {
    .read = subpage_read,
    .write = subpage_write,
    .valid.accepts = subpage_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
           __func__, mmio, start, end, idx, eidx, section);
    for (; idx <= eidx; idx++) {
        mmio->sub_section[idx] = section;

static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
    mmio = g_malloc0(sizeof(subpage_t));

    memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
                          "subpage", TARGET_PAGE_SIZE);
    mmio->iomem.subpage = true;
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
           mmio, base, TARGET_PAGE_SIZE);
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
    MemoryRegionSection section = {
        .address_space = as,
        .offset_within_address_space = 0,
        .offset_within_region = 0,
        .size = int128_2_64(),

    return phys_section_add(map, &section);

MemoryRegion *iotlb_to_region(AddressSpace *as, hwaddr index)
    return as->dispatch->map.sections[index & ~TARGET_PAGE_MASK].mr;
static void io_mem_init(void)
    memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, "rom", UINT64_MAX);
    memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
                          "unassigned", UINT64_MAX);
    memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
                          "notdirty", UINT64_MAX);
    memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
                          "watch", UINT64_MAX);
static void mem_begin(MemoryListener *listener)
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);

    n = dummy_section(&d->map, as, &io_mem_unassigned);
    assert(n == PHYS_SECTION_UNASSIGNED);
    n = dummy_section(&d->map, as, &io_mem_notdirty);
    assert(n == PHYS_SECTION_NOTDIRTY);
    n = dummy_section(&d->map, as, &io_mem_rom);
    assert(n == PHYS_SECTION_ROM);
    n = dummy_section(&d->map, as, &io_mem_watch);
    assert(n == PHYS_SECTION_WATCH);

    d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };

    as->next_dispatch = d;
static void mem_commit(MemoryListener *listener)
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *cur = as->dispatch;
    AddressSpaceDispatch *next = as->next_dispatch;

    phys_page_compact_all(next, next->map.nodes_nb);

    as->dispatch = next;
        phys_sections_free(&cur->map);
static void tcg_commit(MemoryListener *listener)
    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
        /* FIXME: Disentangle the cpu.h circular files deps so we can
           directly get the right CPU from listener.  */
        if (cpu->tcg_as_listener != listener) {

static void core_log_global_start(MemoryListener *listener)
    cpu_physical_memory_set_dirty_tracking(true);

static void core_log_global_stop(MemoryListener *listener)
    cpu_physical_memory_set_dirty_tracking(false);

static MemoryListener core_memory_listener = {
    .log_global_start = core_log_global_start,
    .log_global_stop = core_log_global_stop,
void address_space_init_dispatch(AddressSpace *as)
    as->dispatch = NULL;
    as->dispatch_listener = (MemoryListener) {
        .commit = mem_commit,
        .region_add = mem_add,
        .region_nop = mem_add,
    memory_listener_register(&as->dispatch_listener, as);

void address_space_destroy_dispatch(AddressSpace *as)
    AddressSpaceDispatch *d = as->dispatch;

    memory_listener_unregister(&as->dispatch_listener);

    as->dispatch = NULL;
static void memory_map_init(void)
    system_memory = g_malloc(sizeof(*system_memory));

    memory_region_init(system_memory, NULL, "system", UINT64_MAX);
    address_space_init(&address_space_memory, system_memory, "memory");

    system_io = g_malloc(sizeof(*system_io));
    memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
    address_space_init(&address_space_io, system_io, "I/O");

    memory_listener_register(&core_memory_listener, &address_space_memory);

MemoryRegion *get_system_memory(void)
    return system_memory;

MemoryRegion *get_system_io(void)

#endif /* !defined(CONFIG_USER_ONLY) */
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            if (!(flags & PAGE_WRITE))
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
            unlock_user(p, addr, l);
            if (!(flags & PAGE_READ))
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
            unlock_user(p, addr, 0);
static void invalidate_and_set_dirty(hwaddr addr,
    if (cpu_physical_memory_is_clean(addr)) {
        /* invalidate code */
        tb_invalidate_phys_page_range(addr, addr + length, 0);
        cpu_physical_memory_set_dirty_flag(addr, DIRTY_MEMORY_VGA);
        cpu_physical_memory_set_dirty_flag(addr, DIRTY_MEMORY_MIGRATION);
    xen_modified_memory(addr, length);
static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
    unsigned access_size_max = mr->ops->valid.max_access_size;

    /* Regions are assumed to support 1-4 byte accesses unless
       otherwise specified.  */
    if (access_size_max == 0) {
        access_size_max = 4;

    /* Bound the maximum access by the alignment of the address.  */
    if (!mr->ops->impl.unaligned) {
        unsigned align_size_max = addr & -addr;
        if (align_size_max != 0 && align_size_max < access_size_max) {
            access_size_max = align_size_max;

    /* Don't attempt accesses larger than the maximum.  */
    if (l > access_size_max) {
        l = access_size_max;
        l = 1 << (qemu_fls(l) - 1);
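/* Example of the clamping above: for an MMIO region with
 * valid.max_access_size == 4, an 8-byte access at address 0x1002 is first
 * limited to 2 bytes by the address alignment (0x1002 & -0x1002 == 2) and
 * then rounded down to a power of two, so the slow path below issues it as
 * several smaller accesses. */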
bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
                      int len, bool is_write)
        mr = address_space_translate(as, addr, &addr1, &l, is_write);

            if (!memory_access_is_direct(mr, is_write)) {
                l = memory_access_size(mr, l, addr1);
                /* XXX: could force current_cpu to NULL to avoid
                    /* 64 bit write access */
                    error |= io_mem_write(mr, addr1, val, 8);
                    /* 32 bit write access */
                    error |= io_mem_write(mr, addr1, val, 4);
                    /* 16 bit write access */
                    error |= io_mem_write(mr, addr1, val, 2);
                    /* 8 bit write access */
                    error |= io_mem_write(mr, addr1, val, 1);
                addr1 += memory_region_get_ram_addr(mr);
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(addr1, l);
            if (!memory_access_is_direct(mr, is_write)) {
                l = memory_access_size(mr, l, addr1);
                    /* 64 bit read access */
                    error |= io_mem_read(mr, addr1, &val, 8);
                    /* 32 bit read access */
                    error |= io_mem_read(mr, addr1, &val, 4);
                    /* 16 bit read access */
                    error |= io_mem_read(mr, addr1, &val, 2);
                    /* 8 bit read access */
                    error |= io_mem_read(mr, addr1, &val, 1);
                ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
                memcpy(buf, ptr, l);
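/* address_space_rw() walks the buffer in chunks: each iteration translates
 * the current address, then either goes through io_mem_read()/io_mem_write()
 * with an access size clamped by memory_access_size() (MMIO case), or does a
 * plain memcpy() against the RAM host pointer, marking written ranges dirty
 * via invalidate_and_set_dirty(). */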
bool address_space_write(AddressSpace *as, hwaddr addr,
                         const uint8_t *buf, int len)
    return address_space_rw(as, addr, (uint8_t *)buf, len, true);

bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
    return address_space_rw(as, addr, buf, len, false);
void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
                            int len, int is_write)
    address_space_rw(&address_space_memory, addr, buf, len, is_write);
enum write_rom_type {

static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
    hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
        mr = address_space_translate(as, addr, &addr1, &l, true);

        if (!(memory_region_is_ram(mr) ||
              memory_region_is_romd(mr))) {
            addr1 += memory_region_get_ram_addr(mr);
            ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(addr1, l);
                flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);

/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
                                   const uint8_t *buf, int len)
    cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);

void cpu_flush_icache_range(hwaddr start, int len)
     * This function should do the same thing as an icache flush that was
     * triggered from within the guest. For TCG we are always cache coherent,
     * so there is no need to flush anything. For KVM / Xen we need to flush
     * the host's instruction cache at least.
    if (tcg_enabled()) {
    cpu_physical_memory_write_rom_internal(&address_space_memory,
                                           start, NULL, len, FLUSH_CACHE);
static BounceBuffer bounce;

typedef struct MapClient {
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
    MapClient *client = g_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    QLIST_INSERT_HEAD(&map_client_list, client, link);

static void cpu_unregister_map_client(void *_client)
    MapClient *client = (MapClient *)_client;

    QLIST_REMOVE(client, link);

static void cpu_notify_map_clients(void)
    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
        mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (!memory_access_is_direct(mr, is_write)) {
            l = memory_access_size(mr, l, addr);
            if (!memory_region_access_valid(mr, xlat, l, is_write)) {
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
void *address_space_map(AddressSpace *as,
    hwaddr l, xlat, base;
    MemoryRegion *mr, *this_mr;

    mr = address_space_translate(as, addr, &xlat, &l, is_write);
    if (!memory_access_is_direct(mr, is_write)) {
        if (bounce.buffer) {
        /* Avoid unbounded allocations */
        l = MIN(l, TARGET_PAGE_SIZE);
        bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);

        memory_region_ref(mr);
            address_space_read(as, addr, bounce.buffer, l);
        return bounce.buffer;

    raddr = memory_region_get_ram_addr(mr);
        this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (this_mr != mr || xlat != base + done) {

    memory_region_ref(mr);
    return qemu_ram_ptr_length(raddr + base, plen);
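/* When the target is not directly accessible RAM, address_space_map() falls
 * back to the single global bounce buffer: the mapping is limited to one
 * page, only one such mapping can exist at a time, and callers that lose the
 * race are notified to retry through cpu_notify_map_clients(), which is
 * invoked from address_space_unmap() below. */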
/* Unmaps a memory region previously mapped by address_space_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         int is_write, hwaddr access_len)
    if (buffer != bounce.buffer) {
        mr = qemu_ram_addr_from_host(buffer, &addr1);
            while (access_len) {
                l = TARGET_PAGE_SIZE;
                invalidate_and_set_dirty(addr1, l);
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        memory_region_unref(mr);

        address_space_write(as, bounce.addr, bounce.buffer, access_len);
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    memory_region_unref(bounce.mr);
    cpu_notify_map_clients();
void *cpu_physical_memory_map(hwaddr addr,
    return address_space_map(&address_space_memory, addr, plen, is_write);

void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                               int is_write, hwaddr access_len)
    return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
/* warning: addr must be aligned */
static inline uint32_t ldl_phys_internal(AddressSpace *as, hwaddr addr,
                                         enum device_endian endian)
    mr = address_space_translate(as, addr, &addr1, &l, false);
    if (l < 4 || !memory_access_is_direct(mr, false)) {
        io_mem_read(mr, addr1, &val, 4);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
        if (endian == DEVICE_BIG_ENDIAN) {
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);

uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
    return ldl_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);

uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
    return ldl_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);

uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
    return ldl_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
/* warning: addr must be aligned */
static inline uint64_t ldq_phys_internal(AddressSpace *as, hwaddr addr,
                                         enum device_endian endian)
    mr = address_space_translate(as, addr, &addr1, &l,
    if (l < 8 || !memory_access_is_direct(mr, false)) {
        io_mem_read(mr, addr1, &val, 8);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
        if (endian == DEVICE_BIG_ENDIAN) {
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);

uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
    return ldq_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);

uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
    return ldq_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);

uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
    return ldq_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
    address_space_rw(as, addr, &val, 1, 0);
/* warning: addr must be aligned */
static inline uint32_t lduw_phys_internal(AddressSpace *as, hwaddr addr,
                                          enum device_endian endian)
    mr = address_space_translate(as, addr, &addr1, &l,
    if (l < 2 || !memory_access_is_direct(mr, false)) {
        io_mem_read(mr, addr1, &val, 2);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
        if (endian == DEVICE_BIG_ENDIAN) {
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);

uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
    return lduw_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);

uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
    return lduw_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);

uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
    return lduw_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
    mr = address_space_translate(as, addr, &addr1, &l,
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        io_mem_write(mr, addr1, val, 4);
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);

        if (unlikely(in_migration)) {
            if (cpu_physical_memory_is_clean(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                cpu_physical_memory_set_dirty_flag(addr1,
                                                   DIRTY_MEMORY_MIGRATION);
                cpu_physical_memory_set_dirty_flag(addr1, DIRTY_MEMORY_VGA);
/* warning: addr must be aligned */
static inline void stl_phys_internal(AddressSpace *as,
                                     hwaddr addr, uint32_t val,
                                     enum device_endian endian)
    mr = address_space_translate(as, addr, &addr1, &l,
    if (l < 4 || !memory_access_is_direct(mr, true)) {
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
        if (endian == DEVICE_BIG_ENDIAN) {
        io_mem_write(mr, addr1, val, 4);
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        case DEVICE_LITTLE_ENDIAN:
        case DEVICE_BIG_ENDIAN:
        invalidate_and_set_dirty(addr1, 4);

void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
    stl_phys_internal(as, addr, val, DEVICE_NATIVE_ENDIAN);

void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
    stl_phys_internal(as, addr, val, DEVICE_LITTLE_ENDIAN);

void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
    stl_phys_internal(as, addr, val, DEVICE_BIG_ENDIAN);
void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
    address_space_rw(as, addr, &v, 1, 1);
/* warning: addr must be aligned */
static inline void stw_phys_internal(AddressSpace *as,
                                     hwaddr addr, uint32_t val,
                                     enum device_endian endian)
    mr = address_space_translate(as, addr, &addr1, &l, true);
    if (l < 2 || !memory_access_is_direct(mr, true)) {
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
        if (endian == DEVICE_BIG_ENDIAN) {
        io_mem_write(mr, addr1, val, 2);
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        case DEVICE_LITTLE_ENDIAN:
        case DEVICE_BIG_ENDIAN:
        invalidate_and_set_dirty(addr1, 2);

void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
    stw_phys_internal(as, addr, val, DEVICE_NATIVE_ENDIAN);

void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
    stw_phys_internal(as, addr, val, DEVICE_LITTLE_ENDIAN);

void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
    stw_phys_internal(as, addr, val, DEVICE_BIG_ENDIAN);
void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
    address_space_rw(as, addr, (void *) &val, 8, 1);

void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
    val = cpu_to_le64(val);
    address_space_rw(as, addr, (void *) &val, 8, 1);

void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
    val = cpu_to_be64(val);
    address_space_rw(as, addr, (void *) &val, 8, 1);
/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(cpu, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
        l = (page + TARGET_PAGE_SIZE) - addr;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
            cpu_physical_memory_write_rom(cpu->as, phys_addr, buf, l);
            address_space_rw(cpu->as, phys_addr, buf, l, 0);
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
bool target_words_bigendian(void);
bool target_words_bigendian(void)
#if defined(TARGET_WORDS_BIGENDIAN)

#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
    mr = address_space_translate(&address_space_memory,
                                 phys_addr, &phys_addr, &l, false);

    return !(memory_region_is_ram(mr) ||
             memory_region_is_romd(mr));

void qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        func(block->host, block->offset, block->length, opaque);