/*
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <sys/types.h>

#include "qemu-common.h"

#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "exec/cpu-all.h"

#include "exec/cputlb.h"
#include "translate-all.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "qemu/cache-utils.h"

#include "qemu/range.h"

//#define DEBUG_SUBPAGE
#if !defined(CONFIG_USER_ONLY)
static bool in_migration;

RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUState *, current_cpu);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting. */
#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables. */
#define ADDR_SPACE_BITS 64

#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)

typedef PhysPageEntry Node[P_L2_SIZE];
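
/*
 * Illustrative arithmetic (not part of the original file): with the usual
 * TARGET_PAGE_BITS of 12 and assuming P_L2_BITS is 9 (its #define is not
 * shown here), P_L2_SIZE is 512 entries per node and
 * P_L2_LEVELS = ((64 - 12 - 1) / 9) + 1 = 6, so a 64-bit physical address
 * is resolved nine bits at a time through at most six Node levels before
 * reaching a MemoryRegionSection index in the leaf.
 */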
typedef struct PhysPageMap {
    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb_alloc;
    MemoryRegionSection *sections;

struct AddressSpaceDispatch {
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    uint16_t sub_section[TARGET_PAGE_SIZE];

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;
#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);

static uint32_t phys_map_node_alloc(PhysPageMap *map)
    ret = map->nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);
    for (i = 0; i < P_L2_SIZE; ++i) {
        map->nodes[ret][i].skip = 1;
        map->nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;

static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map);
        p = map->nodes[lp->ptr];
        for (i = 0; i < P_L2_SIZE; i++) {
            p[i].ptr = PHYS_SECTION_UNASSIGNED;
        p = map->nodes[lp->ptr];
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
/* Compact a non leaf page entry. Simply detect that the entry has a single child,
 * and update our entry so we can skip it and go directly to the destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
    unsigned valid_ptr = P_L2_SIZE;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {

    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
        phys_page_compact(&p[i], nodes, compacted);

    /* We can only compress if there's only one child. */
    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip += p[valid_ptr].skip;

static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
    DECLARE_BITMAP(compacted, nodes_nb);

    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes, compacted);
static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
    hwaddr index = addr >> TARGET_PAGE_BITS;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];

    if (sections[lp.ptr].size.hi ||
        range_covers_byte(sections[lp.ptr].offset_within_address_space,
                          sections[lp.ptr].size.lo, addr)) {
        return &sections[lp.ptr];
    return &sections[PHYS_SECTION_UNASSIGNED];

bool memory_region_is_unassigned(MemoryRegion *mr)
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        bool resolve_subpage)
    MemoryRegionSection *section;

    section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];

static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
    MemoryRegionSection *section;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    diff = int128_sub(section->mr->size, int128_make64(addr));
    *plen = int128_get64(int128_min(diff, int128_make64(*plen)));

static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
    if (memory_region_is_ram(mr)) {
        return !(is_write && mr->readonly);
    if (memory_region_is_romd(mr)) {
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
    MemoryRegionSection *section;

        section = address_space_translate_internal(as->dispatch, addr, &addr, plen, true);

        if (!mr->iommu_ops) {

        iotlb = mr->iommu_ops->translate(mr, addr);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        len = MIN(len, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;

        as = iotlb.target_as;

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        len = MIN(page, len);

MemoryRegionSection *
address_space_translate_for_iotlb(AddressSpace *as, hwaddr addr, hwaddr *xlat,
    MemoryRegionSection *section;
    section = address_space_translate_internal(as->dispatch, addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
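
/*
 * Illustrative sketch (not part of the original file): a typical caller
 * resolves an address with address_space_translate() and then picks the
 * MMIO or RAM path, the same way address_space_rw() does further down.
 * The helper name is invented for the example.
 */
static inline uint32_t example_read_long(AddressSpace *as, hwaddr addr)
{
    hwaddr l = 4, addr1;
    MemoryRegion *mr = address_space_translate(as, addr, &addr1, &l, false);
    uint64_t val = 0;

    if (l < 4 || !memory_access_is_direct(mr, false)) {
        io_mem_read(mr, addr1, &val, 4);    /* MMIO path */
    } else {
        /* RAM path: convert to a ram_addr_t and read through the host pointer */
        val = ldl_p(qemu_get_ram_ptr(memory_region_get_ram_addr(mr) + addr1));
    }
    return val;
}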
void cpu_exec_init_all(void)
#if !defined(CONFIG_USER_ONLY)
    qemu_mutex_init(&ram_list.mutex);

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()

CPUState *qemu_get_cpu(int index)
        if (cpu->cpu_index == index) {

#if !defined(CONFIG_USER_ONLY)
void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as)
    /* We only support one address space per cpu at the moment. */
    assert(cpu->as == as);

    if (cpu->tcg_as_listener) {
        memory_listener_unregister(cpu->tcg_as_listener);
        cpu->tcg_as_listener = g_new0(MemoryListener, 1);
    cpu->tcg_as_listener->commit = tcg_commit;
    memory_listener_register(cpu->tcg_as_listener, as);

void cpu_exec_init(CPUArchState *env)
    CPUState *cpu = ENV_GET_CPU(env);
    CPUClass *cc = CPU_GET_CLASS(cpu);

#ifdef TARGET_WORDS_BIGENDIAN
    cpu->bigendian = true;
    cpu->bigendian = false;

#if defined(CONFIG_USER_ONLY)
    CPU_FOREACH(some_cpu) {
    cpu->cpu_index = cpu_index;
    QTAILQ_INIT(&cpu->breakpoints);
    QTAILQ_INIT(&cpu->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->as = &address_space_memory;
    cpu->thread_id = qemu_get_thread_id();
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
    assert(cc->vmsd == NULL);
    assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
    tb_invalidate_phys_page_range(pc, pc + 1, 0);

static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
    hwaddr phys = cpu_get_phys_page_debug(cpu, pc);
        tb_invalidate_phys_addr(cpu->as,
                                phys | (pc & ~TARGET_PAGE_MASK));
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)

/* Add a watchpoint. */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
    vaddr len_mask = ~(len - 1);

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len & (len - 1)) || (addr & ~len_mask) ||
            len == 0 || len > TARGET_PAGE_SIZE) {
        error_report("tried to set invalid watchpoint at %"
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);

    wp = g_malloc(sizeof(*wp));

    wp->len_mask = len_mask;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);

    tlb_flush_page(cpu, addr);
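
/*
 * Illustrative sketch (not part of the original file): a gdbstub-style
 * client watching a 4-byte, naturally aligned guest address would call the
 * function above like this.  The wrapper name is invented for the example.
 */
static inline int example_watch_writes(CPUState *cpu, vaddr addr)
{
    CPUWatchpoint *wp;

    /* len must be a power of two and addr aligned to it, per the checks above */
    return cpu_watchpoint_insert(cpu, addr, 4, BP_MEM_WRITE | BP_GDB, &wp);
}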
/* Remove a specific watchpoint. */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
    vaddr len_mask = ~(len - 1);

    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(cpu, wp);

/* Remove a specific watchpoint by reference. */
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
    QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);

    tlb_flush_page(cpu, watchpoint->vaddr);

/* Remove all matching watchpoints. */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(cpu, wp);

/* Add a breakpoint. */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
#if defined(TARGET_HAS_ICE)
    bp = g_malloc(sizeof(*bp));

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);

    breakpoint_invalidate(cpu, pc);

/* Remove a specific breakpoint. */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(cpu, bp);

/* Remove a specific breakpoint by reference. */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);

    breakpoint_invalidate(cpu, breakpoint->pc);

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
#if defined(TARGET_HAS_ICE)
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
            kvm_update_guest_debug(cpu, 0);
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            CPUArchState *env = cpu->env_ptr;

void QEMU_NORETURN cpu_abort(CPUState *cpu, const char *fmt, ...)
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);

#if defined(CONFIG_USER_ONLY)
    struct sigaction act;
    sigfillset(&act.sa_mask);
    act.sa_handler = SIG_DFL;
    sigaction(SIGABRT, &act, NULL);
#if !defined(CONFIG_USER_ONLY)
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
    /* The list is protected by the iothread lock here. */
    block = ram_list.mru_block;
    if (block && addr - block->offset < block->length) {
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);

    ram_list.mru_block = block;

static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)block->host + (start - block->offset);
    cpu_tlb_reset_dirty_all(start1, length);

/* Note: start and end must be within the same ram block. */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t length,
    cpu_physical_memory_clear_dirty_range(start, length, client);

        tlb_reset_dirty_range_all(start, length);

static void cpu_physical_memory_set_dirty_tracking(bool enable)
    in_migration = enable;
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       hwaddr paddr, hwaddr xlat,
                                       target_ulong *address)
    if (memory_region_is_ram(section->mr)) {
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
            iotlb |= PHYS_SECTION_ROM;
        iotlb = section - section->address_space->dispatch->map.sections;

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines. */
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;

#endif /* defined(CONFIG_USER_ONLY) */
#if !defined(CONFIG_USER_ONLY)

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size) = qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this.  Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t))
    phys_mem_alloc = alloc;
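
/*
 * Illustrative sketch (not part of the original file): an accelerator that
 * has to place guest RAM itself would install its allocator once at init
 * time.  Both function names here are invented for the example.
 */
static void *example_accel_ram_alloc(size_t size)
{
    /* e.g. carve the block out of a pre-registered pool instead of mmap() */
    return qemu_anon_ram_alloc(size);
}

static void example_accel_init(void)
{
    phys_mem_set_alloc(example_accel_ram_alloc);
}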
static uint16_t phys_section_add(PhysPageMap *map,
                                 MemoryRegionSection *section)
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(map->sections_nb < TARGET_PAGE_SIZE);

    if (map->sections_nb == map->sections_nb_alloc) {
        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
        map->sections = g_renew(MemoryRegionSection, map->sections,
                                map->sections_nb_alloc);
    map->sections[map->sections_nb] = *section;
    memory_region_ref(section->mr);
    return map->sections_nb++;

static void phys_section_destroy(MemoryRegion *mr)
    memory_region_unref(mr);

        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        memory_region_destroy(&subpage->iomem);

static void phys_sections_free(PhysPageMap *map)
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    g_free(map->sections);
static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
    hwaddr base = section->offset_within_address_space
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
                                                   d->map.nodes, d->map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.address_space = d->as;
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&d->map, &subsection));
        subpage = container_of(existing->mr, subpage_t, iomem);
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end,
                     phys_section_add(&d->map, section));

static void register_multipage(AddressSpaceDispatch *d,
                               MemoryRegionSection *section)
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(&d->map, section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,

    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = as->next_dispatch;
    MemoryRegionSection now = *section, remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
                        - now.offset_within_address_space;

        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(d, &now);
        now.size = int128_zero();
    while (int128_ne(remain.size, now.size)) {
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);

        if (int128_lt(remain.size, page_size)) {
            register_subpage(d, &now);
        } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
            now.size = page_size;
            register_subpage(d, &now);
            now.size = int128_and(now.size, int128_neg(page_size));
            register_multipage(d, &now);
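
/*
 * Illustrative worked example (not part of the original file): with 4 KiB
 * target pages, a section spanning [0x1800, 0x4400) is added as a subpage
 * for [0x1800, 0x2000), one multipage run for [0x2000, 0x4000), and a
 * trailing subpage for [0x4000, 0x4400), matching the three branches above.
 */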
void qemu_flush_coalesced_mmio_buffer(void)
        kvm_flush_coalesced_mmio_buffer();

void qemu_mutex_lock_ramlist(void)
    qemu_mutex_lock(&ram_list.mutex);

void qemu_mutex_unlock_ramlist(void)
    qemu_mutex_unlock(&ram_list.mutex);

#define HUGETLBFS_MAGIC       0x958458f6

static long gethugepagesize(const char *path)
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

static sigjmp_buf sigjump;

static void sigbus_handler(int signal)
    siglongjmp(sigjump, 1);

static void *file_ram_alloc(RAMBlock *block,
    char *sanitized_name;
    void * volatile area;
    uintptr_t hpagesize;

    hpagesize = gethugepagesize(path);

    if (memory < hpagesize) {

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");

    /* Make name safe to use with mkstemp by replacing '/' with '_'. */
    sanitized_name = g_strdup(block->mr->name);
    for (c = sanitized_name; *c != '\0'; c++) {

    filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
    g_free(sanitized_name);

    fd = mkstemp(filename);
        perror("unable to create backing store for hugepages");

    memory = (memory + hpagesize - 1) & ~(hpagesize - 1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");

        struct sigaction act, oldact;
        sigset_t set, oldset;

        memset(&act, 0, sizeof(act));
        act.sa_handler = &sigbus_handler;

        ret = sigaction(SIGBUS, &act, &oldact);
            perror("file_ram_alloc: failed to install signal handler");

        /* unblock SIGBUS */
        sigaddset(&set, SIGBUS);
        pthread_sigmask(SIG_UNBLOCK, &set, &oldset);

        if (sigsetjmp(sigjump, 1)) {
            fprintf(stderr, "file_ram_alloc: failed to preallocate pages\n");

        /* MAP_POPULATE silently ignores failures */
        for (i = 0; i < (memory / hpagesize); i++) {
            memset(area + (hpagesize * i), 0, 1);

        ret = sigaction(SIGBUS, &oldact, NULL);
            perror("file_ram_alloc: failed to reinstall signal handler");

        pthread_sigmask(SIG_SETMASK, &oldset, NULL);

static void *file_ram_alloc(RAMBlock *block,
    fprintf(stderr, "-mem-path not supported on this host\n");
static ram_addr_t find_ram_offset(ram_addr_t size)
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    assert(size != 0); /* it would hand out same offset multiple times */

    if (QTAILQ_EMPTY(&ram_list.blocks))

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->length;

        QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
        if (next - end >= size && next - end < mingap) {
            mingap = next - end;

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",

ram_addr_t last_ram_offset(void)
    ram_addr_t last = 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next)
        last = MAX(last, block->offset + block->length);

static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
    /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
    if (!qemu_opt_get_bool(qemu_get_machine_opts(),
                           "dump-guest-core", true)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
            perror("qemu_madvise");
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                    "but dump_guest_core=off specified\n");
void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
    RAMBlock *new_block, *block;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->offset == addr) {

    assert(!new_block->idstr[0]);

        char *id = qdev_get_dev_path(dev);
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    /* This assumes the iothread lock is taken here too. */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
    qemu_mutex_unlock_ramlist();

static int memory_try_enable_merging(void *addr, size_t len)
    if (!qemu_opt_get_bool(qemu_get_machine_opts(), "mem-merge", true)) {
        /* disabled by the user */

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
    RAMBlock *block, *new_block;
    ram_addr_t old_ram_size, new_ram_size;

    old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));

    /* This assumes the iothread lock is taken here too. */
    qemu_mutex_lock_ramlist();
    new_block->offset = find_ram_offset(size);
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
    } else if (xen_enabled()) {
            fprintf(stderr, "-mem-path not supported with Xen\n");
        xen_ram_alloc(new_block->offset, size, mr);
            if (phys_mem_alloc != qemu_anon_ram_alloc) {
                /*
                 * file_ram_alloc() needs to allocate just like
                 * phys_mem_alloc, but we haven't bothered to provide
                 * a hook there.
                 */
                        "-mem-path not supported with this accelerator\n");
            new_block->host = file_ram_alloc(new_block, size, mem_path);
        if (!new_block->host) {
            new_block->host = phys_mem_alloc(size);
            if (!new_block->host) {
                fprintf(stderr, "Cannot set up guest memory '%s': %s\n",
                        new_block->mr->name, strerror(errno));
            memory_try_enable_merging(new_block->host, size);
    new_block->length = size;

    /* Keep the list sorted from biggest to smallest block. */
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->length < new_block->length) {
        QTAILQ_INSERT_BEFORE(block, new_block, next);
        QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
    ram_list.mru_block = NULL;

    qemu_mutex_unlock_ramlist();

    new_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;

    if (new_ram_size > old_ram_size) {
        for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
            ram_list.dirty_memory[i] =
                bitmap_zero_extend(ram_list.dirty_memory[i],
                                   old_ram_size, new_ram_size);
    cpu_physical_memory_set_dirty_range(new_block->offset, size);

    qemu_ram_setup_dump(new_block->host, size);
    qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);
    qemu_madvise(new_block->host, size, QEMU_MADV_DONTFORK);

        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;

ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
    return qemu_ram_alloc_from_ptr(size, NULL, mr);
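
/*
 * Illustrative sketch (not part of the original file): roughly what a RAM
 * MemoryRegion setup boils down to - allocate a block, then obtain the host
 * pointer for device-side initialisation.  The helper name is invented for
 * the example.
 */
static void example_init_vram(MemoryRegion *mr, ram_addr_t size)
{
    ram_addr_t offset = qemu_ram_alloc(size, mr);

    memset(qemu_get_ram_ptr(offset), 0, size);
}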
void qemu_ram_free_from_ptr(ram_addr_t addr)
    /* This assumes the iothread lock is taken here too. */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
    qemu_mutex_unlock_ramlist();

void qemu_ram_free(ram_addr_t addr)
    /* This assumes the iothread lock is taken here too. */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            if (block->flags & RAM_PREALLOC_MASK) {
            } else if (xen_enabled()) {
                xen_invalidate_map_cache_entry(block->host);
            } else if (block->fd >= 0) {
                munmap(block->host, block->length);
                qemu_anon_ram_free(block->host, block->length);
    qemu_mutex_unlock_ramlist();

void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->length) {
            vaddr = block->host + offset;
            if (block->flags & RAM_PREALLOC_MASK) {
            } else if (xen_enabled()) {
                munmap(vaddr, length);
                if (block->fd >= 0) {
                    flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
                    flags |= MAP_PRIVATE;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, block->fd, offset);
                    /*
                     * Remap needs to match alloc.  Accelerators that
                     * set phys_mem_alloc never remap.  If they did,
                     * we'd need a remap hook here.
                     */
                    assert(phys_mem_alloc == qemu_anon_ram_alloc);

                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                memory_try_enable_merging(vaddr, length);
                qemu_ram_setup_dump(vaddr, length);

#endif /* !_WIN32 */
/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
    RAMBlock *block = qemu_get_ram_block(addr);

    if (xen_enabled()) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map until the end of the page.
         */
        if (block->offset == 0) {
            return xen_map_cache(addr, 0, 0);
        } else if (block->host == NULL) {
                xen_map_cache(block->offset, block->length, 1);
    return block->host + (addr - block->offset);

/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
 * but takes a size argument */
static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
    if (xen_enabled()) {
        return xen_map_cache(addr, *size, 1);

        QTAILQ_FOREACH(block, &ram_list.blocks, next) {
            if (addr - block->offset < block->length) {
                if (addr - block->offset + *size > block->length)
                    *size = block->length - addr + block->offset;
                return block->host + (addr - block->offset);

        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);

/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset. */
MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
    uint8_t *host = ptr;

    if (xen_enabled()) {
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
        return qemu_get_ram_block(*ram_addr)->mr;

    block = ram_list.mru_block;
    if (block && block->host && host - block->host < block->length) {

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        /* This case appears when the block is not mapped. */
        if (block->host == NULL) {
        if (host - block->host < block->length) {

    *ram_addr = block->offset + (host - block->host);
static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
                               uint64_t val, unsigned size)
    if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
        tb_invalidate_phys_page_fast(ram_addr, size);
        stb_p(qemu_get_ram_ptr(ram_addr), val);
        stw_p(qemu_get_ram_ptr(ram_addr), val);
        stl_p(qemu_get_ram_ptr(ram_addr), val);
    cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_MIGRATION);
    cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_VGA);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (!cpu_physical_memory_is_clean(ram_addr)) {
        CPUArchState *env = current_cpu->env_ptr;
        tlb_set_dirty(env, current_cpu->mem_io_vaddr);

static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
                                 unsigned size, bool is_write)

static const MemoryRegionOps notdirty_mem_ops = {
    .write = notdirty_mem_write,
    .valid.accepts = notdirty_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
/* Generate a debug exception if a watchpoint has been hit. */
static void check_watchpoint(int offset, int len_mask, int flags)
    CPUState *cpu = current_cpu;
    CPUArchState *env = cpu->env_ptr;
    target_ulong pc, cs_base;

    if (cpu->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);

    vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!cpu->watchpoint_hit) {
                cpu->watchpoint_hit = wp;
                tb_check_watchpoint(cpu);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    cpu->exception_index = EXCP_DEBUG;
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
                    cpu_resume_from_signal(cpu, NULL);
            wp->flags &= ~BP_WATCHPOINT_HIT;

/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   load/store functions. */
static uint64_t watch_mem_read(void *opaque, hwaddr addr,
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
    case 1: return ldub_phys(&address_space_memory, addr);
    case 2: return lduw_phys(&address_space_memory, addr);
    case 4: return ldl_phys(&address_space_memory, addr);

static void watch_mem_write(void *opaque, hwaddr addr,
                            uint64_t val, unsigned size)
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
        stb_phys(&address_space_memory, addr, val);
        stw_phys(&address_space_memory, addr, val);
        stl_phys(&address_space_memory, addr, val);

static const MemoryRegionOps watch_mem_ops = {
    .read = watch_mem_read,
    .write = watch_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
static uint64_t subpage_read(void *opaque, hwaddr addr,
    subpage_t *subpage = opaque;

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
           subpage, len, addr);
    address_space_read(subpage->as, addr + subpage->base, buf, len);

static void subpage_write(void *opaque, hwaddr addr,
                          uint64_t value, unsigned len)
    subpage_t *subpage = opaque;

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx
           " value %"PRIx64"\n",
           __func__, subpage, len, addr, value);
    address_space_write(subpage->as, addr + subpage->base, buf, len);

static bool subpage_accepts(void *opaque, hwaddr addr,
                            unsigned len, bool is_write)
    subpage_t *subpage = opaque;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
           __func__, subpage, is_write ? 'w' : 'r', len, addr);
    return address_space_access_valid(subpage->as, addr + subpage->base,

static const MemoryRegionOps subpage_ops = {
    .read = subpage_read,
    .write = subpage_write,
    .valid.accepts = subpage_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)

    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
           __func__, mmio, start, end, idx, eidx, section);
    for (; idx <= eidx; idx++) {
        mmio->sub_section[idx] = section;

static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
    mmio = g_malloc0(sizeof(subpage_t));

    memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
                          "subpage", TARGET_PAGE_SIZE);
    mmio->iomem.subpage = true;
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
           mmio, base, TARGET_PAGE_SIZE);
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);

static uint16_t dummy_section(PhysPageMap *map, MemoryRegion *mr)
    MemoryRegionSection section = {
        .address_space = &address_space_memory,
        .offset_within_address_space = 0,
        .offset_within_region = 0,
        .size = int128_2_64(),

    return phys_section_add(map, &section);

MemoryRegion *iotlb_to_region(AddressSpace *as, hwaddr index)
    return as->dispatch->map.sections[index & ~TARGET_PAGE_MASK].mr;
static void io_mem_init(void)
    memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, "rom", UINT64_MAX);
    memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
                          "unassigned", UINT64_MAX);
    memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
                          "notdirty", UINT64_MAX);
    memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
                          "watch", UINT64_MAX);

static void mem_begin(MemoryListener *listener)
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);

    n = dummy_section(&d->map, &io_mem_unassigned);
    assert(n == PHYS_SECTION_UNASSIGNED);
    n = dummy_section(&d->map, &io_mem_notdirty);
    assert(n == PHYS_SECTION_NOTDIRTY);
    n = dummy_section(&d->map, &io_mem_rom);
    assert(n == PHYS_SECTION_ROM);
    n = dummy_section(&d->map, &io_mem_watch);
    assert(n == PHYS_SECTION_WATCH);

    d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };

    as->next_dispatch = d;

static void mem_commit(MemoryListener *listener)
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *cur = as->dispatch;
    AddressSpaceDispatch *next = as->next_dispatch;

    phys_page_compact_all(next, next->map.nodes_nb);

    as->dispatch = next;

        phys_sections_free(&cur->map);
static void tcg_commit(MemoryListener *listener)
    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
        /* FIXME: Disentangle the cpu.h circular files deps so we can
           directly get the right CPU from listener. */
        if (cpu->tcg_as_listener != listener) {

static void core_log_global_start(MemoryListener *listener)
    cpu_physical_memory_set_dirty_tracking(true);

static void core_log_global_stop(MemoryListener *listener)
    cpu_physical_memory_set_dirty_tracking(false);

static MemoryListener core_memory_listener = {
    .log_global_start = core_log_global_start,
    .log_global_stop = core_log_global_stop,

void address_space_init_dispatch(AddressSpace *as)
    as->dispatch = NULL;
    as->dispatch_listener = (MemoryListener) {
        .commit = mem_commit,
        .region_add = mem_add,
        .region_nop = mem_add,
    memory_listener_register(&as->dispatch_listener, as);

void address_space_destroy_dispatch(AddressSpace *as)
    AddressSpaceDispatch *d = as->dispatch;

    memory_listener_unregister(&as->dispatch_listener);
    as->dispatch = NULL;

static void memory_map_init(void)
    system_memory = g_malloc(sizeof(*system_memory));

    memory_region_init(system_memory, NULL, "system", UINT64_MAX);
    address_space_init(&address_space_memory, system_memory, "memory");

    system_io = g_malloc(sizeof(*system_io));
    memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
    address_space_init(&address_space_io, system_io, "I/O");

    memory_listener_register(&core_memory_listener, &address_space_memory);

MemoryRegion *get_system_memory(void)
    return system_memory;

MemoryRegion *get_system_io(void)

#endif /* !defined(CONFIG_USER_ONLY) */
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            if (!(flags & PAGE_WRITE))
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
            unlock_user(p, addr, l);
            if (!(flags & PAGE_READ))
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
            unlock_user(p, addr, 0);

static void invalidate_and_set_dirty(hwaddr addr,
    if (cpu_physical_memory_is_clean(addr)) {
        /* invalidate code */
        tb_invalidate_phys_page_range(addr, addr + length, 0);
        cpu_physical_memory_set_dirty_flag(addr, DIRTY_MEMORY_VGA);
        cpu_physical_memory_set_dirty_flag(addr, DIRTY_MEMORY_MIGRATION);
    xen_modified_memory(addr, length);
static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
    unsigned access_size_max = mr->ops->valid.max_access_size;

    /* Regions are assumed to support 1-4 byte accesses unless
       otherwise specified. */
    if (access_size_max == 0) {
        access_size_max = 4;

    /* Bound the maximum access by the alignment of the address. */
    if (!mr->ops->impl.unaligned) {
        unsigned align_size_max = addr & -addr;
        if (align_size_max != 0 && align_size_max < access_size_max) {
            access_size_max = align_size_max;

    /* Don't attempt accesses larger than the maximum. */
    if (l > access_size_max) {
        l = access_size_max;
        l = 1 << (qemu_fls(l) - 1);
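
/*
 * Illustrative worked example (not part of the original file): for a region
 * that advertises max_access_size == 8 but no unaligned support, a 6-byte
 * request at an address ending in ...2 is first bounded by the alignment
 * (addr & -addr == 2) and then rounded down to a power of two, so a 2-byte
 * transaction is issued and the caller loops for the remainder.
 */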
bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
                      int len, bool is_write)
        mr = address_space_translate(as, addr, &addr1, &l, is_write);

            if (!memory_access_is_direct(mr, is_write)) {
                l = memory_access_size(mr, l, addr1);
                /* XXX: could force current_cpu to NULL to avoid
                   potential bugs */
                    /* 64 bit write access */
                    error |= io_mem_write(mr, addr1, val, 8);
                    /* 32 bit write access */
                    error |= io_mem_write(mr, addr1, val, 4);
                    /* 16 bit write access */
                    error |= io_mem_write(mr, addr1, val, 2);
                    /* 8 bit write access */
                    error |= io_mem_write(mr, addr1, val, 1);
                addr1 += memory_region_get_ram_addr(mr);
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(addr1, l);
            if (!memory_access_is_direct(mr, is_write)) {
                l = memory_access_size(mr, l, addr1);
                    /* 64 bit read access */
                    error |= io_mem_read(mr, addr1, &val, 8);
                    /* 32 bit read access */
                    error |= io_mem_read(mr, addr1, &val, 4);
                    /* 16 bit read access */
                    error |= io_mem_read(mr, addr1, &val, 2);
                    /* 8 bit read access */
                    error |= io_mem_read(mr, addr1, &val, 1);
                ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
                memcpy(buf, ptr, l);

bool address_space_write(AddressSpace *as, hwaddr addr,
                         const uint8_t *buf, int len)
    return address_space_rw(as, addr, (uint8_t *)buf, len, true);

bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
    return address_space_rw(as, addr, buf, len, false);

void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
                            int len, int is_write)
    address_space_rw(&address_space_memory, addr, buf, len, is_write);
{
2117 static inline void cpu_physical_memory_write_rom_internal(AddressSpace
*as
,
2118 hwaddr addr
, const uint8_t *buf
, int len
, enum write_rom_type type
)
2127 mr
= address_space_translate(as
, addr
, &addr1
, &l
, true);
2129 if (!(memory_region_is_ram(mr
) ||
2130 memory_region_is_romd(mr
))) {
2133 addr1
+= memory_region_get_ram_addr(mr
);
2135 ptr
= qemu_get_ram_ptr(addr1
);
2138 memcpy(ptr
, buf
, l
);
2139 invalidate_and_set_dirty(addr1
, l
);
2142 flush_icache_range((uintptr_t)ptr
, (uintptr_t)ptr
+ l
);
2152 /* used for ROM loading : can write in RAM and ROM */
2153 void cpu_physical_memory_write_rom(AddressSpace
*as
, hwaddr addr
,
2154 const uint8_t *buf
, int len
)
2156 cpu_physical_memory_write_rom_internal(as
, addr
, buf
, len
, WRITE_DATA
);
2159 void cpu_flush_icache_range(hwaddr start
, int len
)
2162 * This function should do the same thing as an icache flush that was
2163 * triggered from within the guest. For TCG we are always cache coherent,
2164 * so there is no need to flush anything. For KVM / Xen we need to flush
2165 * the host's instruction cache at least.
2167 if (tcg_enabled()) {
2171 cpu_physical_memory_write_rom_internal(&address_space_memory
,
2172 start
, NULL
, len
, FLUSH_CACHE
);
2182 static BounceBuffer bounce
;
2184 typedef struct MapClient
{
2186 void (*callback
)(void *opaque
);
2187 QLIST_ENTRY(MapClient
) link
;
2190 static QLIST_HEAD(map_client_list
, MapClient
) map_client_list
2191 = QLIST_HEAD_INITIALIZER(map_client_list
);
2193 void *cpu_register_map_client(void *opaque
, void (*callback
)(void *opaque
))
2195 MapClient
*client
= g_malloc(sizeof(*client
));
2197 client
->opaque
= opaque
;
2198 client
->callback
= callback
;
2199 QLIST_INSERT_HEAD(&map_client_list
, client
, link
);
2203 static void cpu_unregister_map_client(void *_client
)
2205 MapClient
*client
= (MapClient
*)_client
;
2207 QLIST_REMOVE(client
, link
);
2211 static void cpu_notify_map_clients(void)
2215 while (!QLIST_EMPTY(&map_client_list
)) {
2216 client
= QLIST_FIRST(&map_client_list
);
2217 client
->callback(client
->opaque
);
2218 cpu_unregister_map_client(client
);
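
/*
 * Illustrative sketch (not part of the original file): a DMA helper that
 * finds the bounce buffer busy can register a callback and retry once the
 * current user unmaps.  Both function names are invented for the example.
 */
static void example_dma_retry(void *opaque)
{
    /* re-issue the failed address_space_map() attempt recorded in opaque */
}

static void example_dma_defer(void *opaque)
{
    cpu_register_map_client(opaque, example_dma_retry);
}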
bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
        mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (!memory_access_is_direct(mr, is_write)) {
            l = memory_access_size(mr, l, addr);
            if (!memory_region_access_valid(mr, xlat, l, is_write)) {

/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *address_space_map(AddressSpace *as,
    hwaddr l, xlat, base;
    MemoryRegion *mr, *this_mr;

    mr = address_space_translate(as, addr, &xlat, &l, is_write);
    if (!memory_access_is_direct(mr, is_write)) {
        if (bounce.buffer) {
        /* Avoid unbounded allocations */
        l = MIN(l, TARGET_PAGE_SIZE);
        bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);

        memory_region_ref(mr);
            address_space_read(as, addr, bounce.buffer, l);

        return bounce.buffer;

    raddr = memory_region_get_ram_addr(mr);
        this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (this_mr != mr || xlat != base + done) {

    memory_region_ref(mr);
    return qemu_ram_ptr_length(raddr + base, plen);

/* Unmaps a memory region previously mapped by address_space_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         int is_write, hwaddr access_len)
    if (buffer != bounce.buffer) {
        mr = qemu_ram_addr_from_host(buffer, &addr1);
            while (access_len) {
                l = TARGET_PAGE_SIZE;
                invalidate_and_set_dirty(addr1, l);
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        memory_region_unref(mr);
        address_space_write(as, bounce.addr, bounce.buffer, access_len);
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    memory_region_unref(bounce.mr);
    cpu_notify_map_clients();

void *cpu_physical_memory_map(hwaddr addr,
    return address_space_map(&address_space_memory, addr, plen, is_write);

void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                               int is_write, hwaddr access_len)
    return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
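
/*
 * Illustrative sketch (not part of the original file): the canonical
 * map -> access -> unmap pattern built on the two wrappers above.  The
 * helper name is invented for the example; note that *plen may come back
 * shorter than requested.
 */
static void example_copy_from_guest(hwaddr addr, void *dest, hwaddr len)
{
    hwaddr plen = len;
    void *p = cpu_physical_memory_map(addr, &plen, 0 /* is_write */);

    if (p) {
        memcpy(dest, p, plen);
        cpu_physical_memory_unmap(p, plen, 0, plen);
    }
}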
/* warning: addr must be aligned */
static inline uint32_t ldl_phys_internal(AddressSpace *as, hwaddr addr,
                                         enum device_endian endian)
    mr = address_space_translate(as, addr, &addr1, &l, false);
    if (l < 4 || !memory_access_is_direct(mr, false)) {
        io_mem_read(mr, addr1, &val, 4);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
        if (endian == DEVICE_BIG_ENDIAN) {
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);

uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
    return ldl_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);

uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
    return ldl_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);

uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
    return ldl_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
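
/*
 * Illustrative sketch (not part of the original file): a device model that
 * keeps a little-endian descriptor in guest memory uses the fixed-endian
 * helper so the result is host-order regardless of TARGET_WORDS_BIGENDIAN.
 * The descriptor layout is invented for the example.
 */
static inline uint32_t example_read_desc_flags(AddressSpace *as, hwaddr desc)
{
    return ldl_le_phys(as, desc + 4);   /* 32-bit LE field at offset 4 */
}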
/* warning: addr must be aligned */
static inline uint64_t ldq_phys_internal(AddressSpace *as, hwaddr addr,
                                         enum device_endian endian)
    mr = address_space_translate(as, addr, &addr1, &l,
    if (l < 8 || !memory_access_is_direct(mr, false)) {
        io_mem_read(mr, addr1, &val, 8);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
        if (endian == DEVICE_BIG_ENDIAN) {
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);

uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
    return ldq_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);

uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
    return ldq_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);

uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
    return ldq_phys_internal(as, addr, DEVICE_BIG_ENDIAN);

uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
    address_space_rw(as, addr, &val, 1, 0);

/* warning: addr must be aligned */
static inline uint32_t lduw_phys_internal(AddressSpace *as, hwaddr addr,
                                          enum device_endian endian)
    mr = address_space_translate(as, addr, &addr1, &l,
    if (l < 2 || !memory_access_is_direct(mr, false)) {
        io_mem_read(mr, addr1, &val, 2);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
        if (endian == DEVICE_BIG_ENDIAN) {
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);

uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
    return lduw_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);

uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
    return lduw_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);

uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
    return lduw_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
    mr = address_space_translate(as, addr, &addr1, &l,
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        io_mem_write(mr, addr1, val, 4);
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);

        if (unlikely(in_migration)) {
            if (cpu_physical_memory_is_clean(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                cpu_physical_memory_set_dirty_flag(addr1,
                                                   DIRTY_MEMORY_MIGRATION);
                cpu_physical_memory_set_dirty_flag(addr1, DIRTY_MEMORY_VGA);

/* warning: addr must be aligned */
static inline void stl_phys_internal(AddressSpace *as,
                                     hwaddr addr, uint32_t val,
                                     enum device_endian endian)
    mr = address_space_translate(as, addr, &addr1, &l,
    if (l < 4 || !memory_access_is_direct(mr, true)) {
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
        if (endian == DEVICE_BIG_ENDIAN) {
        io_mem_write(mr, addr1, val, 4);
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        case DEVICE_LITTLE_ENDIAN:
        case DEVICE_BIG_ENDIAN:
        invalidate_and_set_dirty(addr1, 4);

void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
    stl_phys_internal(as, addr, val, DEVICE_NATIVE_ENDIAN);

void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
    stl_phys_internal(as, addr, val, DEVICE_LITTLE_ENDIAN);

void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
    stl_phys_internal(as, addr, val, DEVICE_BIG_ENDIAN);

void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
    address_space_rw(as, addr, &v, 1, 1);
/* warning: addr must be aligned */
static inline void stw_phys_internal(AddressSpace *as,
                                     hwaddr addr, uint32_t val,
                                     enum device_endian endian)
    mr = address_space_translate(as, addr, &addr1, &l, true);
    if (l < 2 || !memory_access_is_direct(mr, true)) {
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
        if (endian == DEVICE_BIG_ENDIAN) {
        io_mem_write(mr, addr1, val, 2);
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        case DEVICE_LITTLE_ENDIAN:
        case DEVICE_BIG_ENDIAN:
        invalidate_and_set_dirty(addr1, 2);

void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
    stw_phys_internal(as, addr, val, DEVICE_NATIVE_ENDIAN);

void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
    stw_phys_internal(as, addr, val, DEVICE_LITTLE_ENDIAN);

void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
    stw_phys_internal(as, addr, val, DEVICE_BIG_ENDIAN);

void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
    address_space_rw(as, addr, (void *) &val, 8, 1);

void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
    val = cpu_to_le64(val);
    address_space_rw(as, addr, (void *) &val, 8, 1);

void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
    val = cpu_to_be64(val);
    address_space_rw(as, addr, (void *) &val, 8, 1);
/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(cpu, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
        l = (page + TARGET_PAGE_SIZE) - addr;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
            cpu_physical_memory_write_rom(cpu->as, phys_addr, buf, l);
            address_space_rw(cpu->as, phys_addr, buf, l, 0);

#if !defined(CONFIG_USER_ONLY)

/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool virtio_is_big_endian(void);
bool virtio_is_big_endian(void)
#if defined(TARGET_WORDS_BIGENDIAN)

#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
    mr = address_space_translate(&address_space_memory,
                                 phys_addr, &phys_addr, &l, false);

    return !(memory_region_is_ram(mr) ||
             memory_region_is_romd(mr));

void qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        func(block->host, block->offset, block->length, opaque);