/*
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <sys/types.h>
#include "qemu-common.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include "qemu.h"
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#endif
#include "exec/cpu-all.h"

#include "exec/cputlb.h"
#include "translate-all.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "qemu/cache-utils.h"

#include "qemu/range.h"

//#define DEBUG_SUBPAGE
#if !defined(CONFIG_USER_ONLY)
static bool in_migration;

RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUState *, current_cpu);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;
#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */
    uint32_t skip : 6;
     /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables.  */
#define ADDR_SPACE_BITS 64

#define P_L2_BITS 9
#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)

typedef PhysPageEntry Node[P_L2_SIZE];

typedef struct PhysPageMap {
    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3
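/*
 * Illustrative note (added, not part of the original source): with 4 KiB
 * target pages (TARGET_PAGE_BITS == 12), ADDR_SPACE_BITS 64 and P_L2_BITS 9,
 * the dispatch tree has P_L2_LEVELS == ((64 - 12 - 1) / 9) + 1 == 6 levels,
 * each indexed by a 9-bit slice of the page frame number.  A sketch of the
 * per-level index extraction used by phys_page_set_level()/phys_page_find():
 *
 *     hwaddr index = addr >> TARGET_PAGE_BITS;
 *     unsigned slot = (index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1);
 */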
static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;
#endif
#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
    }
}
static uint32_t phys_map_node_alloc(PhysPageMap *map)
{
    unsigned i;
    uint32_t ret;

    ret = map->nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);
    for (i = 0; i < P_L2_SIZE; ++i) {
        map->nodes[ret][i].skip = 1;
        map->nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}
static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map);
        p = map->nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < P_L2_SIZE; i++) {
                p[i].skip = 0;
                p[i].ptr = PHYS_SECTION_UNASSIGNED;
            }
        }
    } else {
        p = map->nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}
static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}
/* Compact a non leaf page entry. Simply detect that the entry has a single child,
 * and update our entry so we can skip it and go directly to the destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes, compacted);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}
static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    DECLARE_BITMAP(compacted, nodes_nb);

    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes, compacted);
    }
}
static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (sections[lp.ptr].size.hi ||
        range_covers_byte(sections[lp.ptr].offset_within_address_space,
                          sections[lp.ptr].size.lo, addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}
bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section;
    subpage_t *subpage;

    section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    return section;
}
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    diff = int128_sub(section->mr->size, int128_make64(addr));
    *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    return section;
}
static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
{
    if (memory_region_is_ram(mr)) {
        return !(is_write && mr->readonly);
    }
    if (memory_region_is_romd(mr)) {
        return !is_write;
    }

    return false;
}
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;
    hwaddr len = *plen;

    for (;;) {
        section = address_space_translate_internal(as->dispatch, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        len = MIN(len, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    if (memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        len = MIN(page, len);
    }

    *plen = len;
    *xlat = addr;
    return mr;
}
MemoryRegionSection *
address_space_translate_for_iotlb(AddressSpace *as, hwaddr addr, hwaddr *xlat,
                                  hwaddr *plen)
{
    MemoryRegionSection *section;
    section = address_space_translate_internal(as->dispatch, addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif
void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    qemu_mutex_init(&ram_list.mutex);
    memory_map_init();
    io_mem_init();
#endif
}
#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu->env_ptr, 1);

    return 0;
}

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

#endif
CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}
#if !defined(CONFIG_USER_ONLY)
void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as)
{
    /* We only support one address space per cpu at the moment.  */
    assert(cpu->as == as);

    if (cpu->tcg_as_listener) {
        memory_listener_unregister(cpu->tcg_as_listener);
    } else {
        cpu->tcg_as_listener = g_new0(MemoryListener, 1);
    }
    cpu->tcg_as_listener->commit = tcg_commit;
    memory_listener_register(cpu->tcg_as_listener, as);
}
#endif
void cpu_exec_init(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUState *some_cpu;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    cpu_index = 0;
    CPU_FOREACH(some_cpu) {
        cpu_index++;
    }
    cpu->cpu_index = cpu_index;
    QTAILQ_INIT(&cpu->breakpoints);
    QTAILQ_INIT(&cpu->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->as = &address_space_memory;
    cpu->thread_id = qemu_get_thread_id();
#endif
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
    }
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
    assert(cc->vmsd == NULL);
    assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}
#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    hwaddr phys = cpu_get_phys_page_debug(cpu, pc);
    if (phys != -1) {
        tb_invalidate_phys_addr(cpu->as,
                                phys | (pc & ~TARGET_PAGE_MASK));
    }
}
#endif
#endif /* TARGET_HAS_ICE */
#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
}

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    CPUArchState *env = cpu->env_ptr;
    vaddr len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len & (len - 1)) || (addr & ~len_mask) ||
            len == 0 || len > TARGET_PAGE_SIZE) {
        error_report("tried to set invalid watchpoint at %"
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
    }

    tlb_flush_page(env, addr);

    if (watchpoint) {
        *watchpoint = wp;
    }
    return 0;
}
/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    vaddr len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
            return 0;
        }
    }
    return -ENOENT;
}
/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
    CPUArchState *env = cpu->env_ptr;

    QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}
/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
        }
    }
}
#endif
/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
    }

    breakpoint_invalidate(cpu, pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
#else
    return -ENOSYS;
#endif
}
/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}
/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);

    breakpoint_invalidate(cpu, breakpoint->pc);

    g_free(breakpoint);
#endif
}
/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
        }
    }
#endif
}
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            CPUArchState *env = cpu->env_ptr;
            tb_flush(env);
        }
    }
#endif
}
void cpu_abort(CPUArchState *env, const char *fmt, ...)
{
    CPUState *cpu = ENV_GET_CPU(env);
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}
#if !defined(CONFIG_USER_ONLY)
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    /* The list is protected by the iothread lock here.  */
    block = ram_list.mru_block;
    if (block && addr - block->offset < block->length) {
        goto found;
    }
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    ram_list.mru_block = block;
    return block;
}
static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
    uintptr_t start1;
    RAMBlock *block;
    ram_addr_t end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)block->host + (start - block->offset);
    cpu_tlb_reset_dirty_all(start1, length);
}
/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t length,
                                     unsigned client)
{
    if (length == 0)
        return;
    cpu_physical_memory_clear_dirty_range(start, length, client);

    if (tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
    }
}
static void cpu_physical_memory_set_dirty_tracking(bool enable)
{
    in_migration = enable;
}
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        iotlb = section - section->address_space->dispatch->map.sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
843 #if !defined(CONFIG_USER_ONLY)
845 static int subpage_register (subpage_t
*mmio
, uint32_t start
, uint32_t end
,
847 static subpage_t
*subpage_init(AddressSpace
*as
, hwaddr base
);
849 static void *(*phys_mem_alloc
)(size_t size
) = qemu_anon_ram_alloc
;
/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this.  Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t))
{
    phys_mem_alloc = alloc;
}
static uint16_t phys_section_add(PhysPageMap *map,
                                 MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(map->sections_nb < TARGET_PAGE_SIZE);

    if (map->sections_nb == map->sections_nb_alloc) {
        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
        map->sections = g_renew(MemoryRegionSection, map->sections,
                                map->sections_nb_alloc);
    }
    map->sections[map->sections_nb] = *section;
    memory_region_ref(section->mr);
    return map->sections_nb++;
}
static void phys_section_destroy(MemoryRegion *mr)
{
    memory_region_unref(mr);

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        memory_region_destroy(&subpage->iomem);
        g_free(subpage);
    }
}
static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->sections);
    g_free(map->nodes);
}
static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
                                                   d->map.nodes, d->map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.address_space = d->as;
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&d->map, &subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end,
                     phys_section_add(&d->map, section));
}
static void register_multipage(AddressSpaceDispatch *d,
                               MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(&d->map, section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,
                                                    TARGET_PAGE_BITS));

    assert(num_pages);
    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
}
static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = as->next_dispatch;
    MemoryRegionSection now = *section, remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
                        - now.offset_within_address_space;
        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(d, &now);
    } else {
        now.size = int128_zero();
    }
    while (int128_ne(remain.size, now.size)) {
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);
        now = remain;
        if (int128_lt(remain.size, page_size)) {
            register_subpage(d, &now);
        } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
            now.size = page_size;
            register_subpage(d, &now);
        } else {
            now.size = int128_and(now.size, int128_neg(page_size));
            register_multipage(d, &now);
        }
    }
}
void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled()) {
        kvm_flush_coalesced_mmio_buffer();
    }
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}
#define HUGETLBFS_MAGIC       0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}

static sigjmp_buf sigjump;

static void sigbus_handler(int signal)
{
    siglongjmp(sigjump, 1);
}
static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    char *sanitized_name;
    char *c;
    void *area;
    int fd;
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    /* Make name safe to use with mkstemp by replacing '/' with '_'. */
    sanitized_name = g_strdup(block->mr->name);
    for (c = sanitized_name; *c != '\0'; c++) {
        if (*c == '/')
            *c = '_';
    }

    filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
                               sanitized_name);
    g_free(sanitized_name);

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        g_free(filename);
        return NULL;
    }
    unlink(filename);
    g_free(filename);

    memory = (memory+hpagesize-1) & ~(hpagesize-1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return NULL;
    }

    if (mem_prealloc) {
        int ret, i;
        struct sigaction act, oldact;
        sigset_t set, oldset;

        memset(&act, 0, sizeof(act));
        act.sa_handler = &sigbus_handler;
        act.sa_flags = 0;

        ret = sigaction(SIGBUS, &act, &oldact);
        if (ret) {
            perror("file_ram_alloc: failed to install signal handler");
            exit(1);
        }

        /* unblock SIGBUS */
        sigemptyset(&set);
        sigaddset(&set, SIGBUS);
        pthread_sigmask(SIG_UNBLOCK, &set, &oldset);

        if (sigsetjmp(sigjump, 1)) {
            fprintf(stderr, "file_ram_alloc: failed to preallocate pages\n");
            exit(1);
        }

        /* MAP_POPULATE silently ignores failures */
        for (i = 0; i < (memory/hpagesize); i++) {
            memset(area + (hpagesize*i), 0, 1);
        }

        ret = sigaction(SIGBUS, &oldact, NULL);
        if (ret) {
            perror("file_ram_alloc: failed to reinstall signal handler");
            exit(1);
        }

        pthread_sigmask(SIG_SETMASK, &oldset, NULL);
    }

    block->fd = fd;
    return area;
}
#else
static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    fprintf(stderr, "-mem-path not supported on this host\n");
    exit(1);
}
#endif
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    assert(size != 0); /* it would hand out same offset multiple times */

    if (QTAILQ_EMPTY(&ram_list.blocks))
        return 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->length;

        QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}
ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next)
        last = MAX(last, block->offset + block->length);

    return last;
}
static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
{
    int ret;

    /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
    if (!qemu_opt_get_bool(qemu_get_machine_opts(),
                           "dump-guest-core", true)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                            "but dump_guest_core=off specified\n");
        }
    }
}
void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block, *block;

    new_block = NULL;
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
            new_block = block;
            break;
        }
    }
    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev) {
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
    qemu_mutex_unlock_ramlist();
}
static int memory_try_enable_merging(void *addr, size_t len)
{
    if (!qemu_opt_get_bool(qemu_get_machine_opts(), "mem-merge", true)) {
        /* disabled by the user */
        return 0;
    }

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
}
ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr)
{
    RAMBlock *block, *new_block;
    ram_addr_t old_ram_size, new_ram_size;

    old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));
    new_block->fd = -1;

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    new_block->mr = mr;
    new_block->offset = find_ram_offset(size);
    if (host) {
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
    } else if (xen_enabled()) {
        if (mem_path) {
            fprintf(stderr, "-mem-path not supported with Xen\n");
            exit(1);
        }
        xen_ram_alloc(new_block->offset, size, mr);
    } else {
        if (mem_path) {
            if (phys_mem_alloc != qemu_anon_ram_alloc) {
                /*
                 * file_ram_alloc() needs to allocate just like
                 * phys_mem_alloc, but we haven't bothered to provide
                 * a hook there.
                 */
                fprintf(stderr,
                        "-mem-path not supported with this accelerator\n");
                exit(1);
            }
            new_block->host = file_ram_alloc(new_block, size, mem_path);
        }
        if (!new_block->host) {
            new_block->host = phys_mem_alloc(size);
            if (!new_block->host) {
                fprintf(stderr, "Cannot set up guest memory '%s': %s\n",
                        new_block->mr->name, strerror(errno));
                exit(1);
            }
            memory_try_enable_merging(new_block->host, size);
        }
    }
    new_block->length = size;

    /* Keep the list sorted from biggest to smallest block.  */
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->length < new_block->length) {
            break;
        }
    }
    if (block) {
        QTAILQ_INSERT_BEFORE(block, new_block, next);
    } else {
        QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
    }
    ram_list.mru_block = NULL;

    qemu_mutex_unlock_ramlist();

    new_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;

    if (new_ram_size > old_ram_size) {
        int i;
        for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
            ram_list.dirty_memory[i] =
                bitmap_zero_extend(ram_list.dirty_memory[i],
                                   old_ram_size, new_ram_size);
        }
    }
    cpu_physical_memory_set_dirty_range(new_block->offset, size);

    qemu_ram_setup_dump(new_block->host, size);
    qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);
    qemu_madvise(new_block->host, size, QEMU_MADV_DONTFORK);

    if (kvm_enabled()) {
        kvm_setup_guest_memory(new_block->host, size);
    }

    return new_block->offset;
}
ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
{
    return qemu_ram_alloc_from_ptr(size, NULL, mr);
}
void qemu_ram_free_from_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            g_free(block);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}
void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else if (xen_enabled()) {
                xen_invalidate_map_cache_entry(block->host);
            } else if (block->fd >= 0) {
                munmap(block->host, block->length);
                close(block->fd);
            } else {
                qemu_anon_ram_free(block->host, block->length);
            }
            g_free(block);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}
#ifndef _WIN32
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->length) {
            vaddr = block->host + offset;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else if (xen_enabled()) {
                abort();
            } else {
                flags = MAP_FIXED;
                munmap(vaddr, length);
                if (block->fd >= 0) {
#ifdef MAP_POPULATE
                    flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
                        MAP_PRIVATE;
#else
                    flags |= MAP_PRIVATE;
#endif
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, block->fd, offset);
                } else {
                    /*
                     * Remap needs to match alloc.  Accelerators that
                     * set phys_mem_alloc never remap.  If they did,
                     * we'd need a remap hook here.
                     */
                    assert(phys_mem_alloc == qemu_anon_ram_alloc);

                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                memory_try_enable_merging(vaddr, length);
                qemu_ram_setup_dump(vaddr, length);
            }
            return;
        }
    }
}
#endif /* !_WIN32 */
/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block = qemu_get_ram_block(addr);

    if (xen_enabled()) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map until the end of the page.
         */
        if (block->offset == 0) {
            return xen_map_cache(addr, 0, 0);
        } else if (block->host == NULL) {
            block->host =
                xen_map_cache(block->offset, block->length, 1);
        }
    }
    return block->host + (addr - block->offset);
}
/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
 * but takes a size argument */
static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
{
    RAMBlock *block;

    if (*size == 0) {
        return NULL;
    }
    if (xen_enabled()) {
        return xen_map_cache(addr, *size, 1);
    } else {
        QTAILQ_FOREACH(block, &ram_list.blocks, next) {
            if (addr - block->offset < block->length) {
                if (addr - block->offset + *size > block->length)
                    *size = block->length - addr + block->offset;
                return block->host + (addr - block->offset);
            }
        }

        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
        abort();
    }
}
/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_enabled()) {
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
        return qemu_get_ram_block(*ram_addr)->mr;
    }

    block = ram_list.mru_block;
    if (block && block->host && host - block->host < block->length) {
        goto found;
    }

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        /* This case occurs when the block is not mapped. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->length) {
            goto found;
        }
    }

    return NULL;

found:
    *ram_addr = block->offset + (host - block->host);
    return block->mr;
}
static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
                               uint64_t val, unsigned size)
{
    if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
        tb_invalidate_phys_page_fast(ram_addr, size);
    }
    switch (size) {
    case 1:
        stb_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 2:
        stw_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 4:
        stl_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    default:
        abort();
    }
    cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_MIGRATION);
    cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_VGA);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (!cpu_physical_memory_is_clean(ram_addr)) {
        CPUArchState *env = current_cpu->env_ptr;
        tlb_set_dirty(env, current_cpu->mem_io_vaddr);
    }
}
static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
                                 unsigned size, bool is_write)
{
    return is_write;
}

static const MemoryRegionOps notdirty_mem_ops = {
    .write = notdirty_mem_write,
    .valid.accepts = notdirty_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUState *cpu = current_cpu;
    CPUArchState *env = cpu->env_ptr;
    target_ulong pc, cs_base;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (cpu->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!cpu->watchpoint_hit) {
                cpu->watchpoint_hit = wp;
                tb_check_watchpoint(cpu);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    cpu->exception_index = EXCP_DEBUG;
                    cpu_loop_exit(cpu);
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
                    cpu_resume_from_signal(cpu, NULL);
                }
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint64_t watch_mem_read(void *opaque, hwaddr addr,
                               unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
    switch (size) {
    case 1: return ldub_phys(&address_space_memory, addr);
    case 2: return lduw_phys(&address_space_memory, addr);
    case 4: return ldl_phys(&address_space_memory, addr);
    default: abort();
    }
}
static void watch_mem_write(void *opaque, hwaddr addr,
                            uint64_t val, unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
    switch (size) {
    case 1:
        stb_phys(&address_space_memory, addr, val);
        break;
    case 2:
        stw_phys(&address_space_memory, addr, val);
        break;
    case 4:
        stl_phys(&address_space_memory, addr, val);
        break;
    default: abort();
    }
}

static const MemoryRegionOps watch_mem_ops = {
    .read = watch_mem_read,
    .write = watch_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
static uint64_t subpage_read(void *opaque, hwaddr addr,
                             unsigned len)
{
    subpage_t *subpage = opaque;
    uint8_t buf[4];

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
           subpage, len, addr);
#endif
    address_space_read(subpage->as, addr + subpage->base, buf, len);
    switch (len) {
    case 1:
        return ldub_p(buf);
    case 2:
        return lduw_p(buf);
    case 4:
        return ldl_p(buf);
    default:
        abort();
    }
}
static void subpage_write(void *opaque, hwaddr addr,
                          uint64_t value, unsigned len)
{
    subpage_t *subpage = opaque;
    uint8_t buf[4];

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx
           " value %"PRIx64"\n",
           __func__, subpage, len, addr, value);
#endif
    switch (len) {
    case 1:
        stb_p(buf, value);
        break;
    case 2:
        stw_p(buf, value);
        break;
    case 4:
        stl_p(buf, value);
        break;
    default:
        abort();
    }
    address_space_write(subpage->as, addr + subpage->base, buf, len);
}
static bool subpage_accepts(void *opaque, hwaddr addr,
                            unsigned len, bool is_write)
{
    subpage_t *subpage = opaque;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
           __func__, subpage, is_write ? 'w' : 'r', len, addr);
#endif

    return address_space_access_valid(subpage->as, addr + subpage->base,
                                      len, is_write);
}

static const MemoryRegionOps subpage_ops = {
    .read = subpage_read,
    .write = subpage_write,
    .valid.accepts = subpage_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
           __func__, mmio, start, end, idx, eidx, section);
#endif
    for (; idx <= eidx; idx++) {
        mmio->sub_section[idx] = section;
    }

    return 0;
}
static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
{
    subpage_t *mmio;

    mmio = g_malloc0(sizeof(subpage_t));

    mmio->as = as;
    mmio->base = base;
    memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
                          "subpage", TARGET_PAGE_SIZE);
    mmio->iomem.subpage = true;
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
           mmio, base, TARGET_PAGE_SIZE);
#endif
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);

    return mmio;
}
static uint16_t dummy_section(PhysPageMap *map, MemoryRegion *mr)
{
    MemoryRegionSection section = {
        .address_space = &address_space_memory,
        .mr = mr,
        .offset_within_address_space = 0,
        .offset_within_region = 0,
        .size = int128_2_64(),
    };

    return phys_section_add(map, &section);
}
MemoryRegion *iotlb_to_region(AddressSpace *as, hwaddr index)
{
    return as->dispatch->map.sections[index & ~TARGET_PAGE_MASK].mr;
}
static void io_mem_init(void)
{
    memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, "rom", UINT64_MAX);
    memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
                          "unassigned", UINT64_MAX);
    memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
                          "notdirty", UINT64_MAX);
    memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
                          "watch", UINT64_MAX);
}
static void mem_begin(MemoryListener *listener)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
    uint16_t n;

    n = dummy_section(&d->map, &io_mem_unassigned);
    assert(n == PHYS_SECTION_UNASSIGNED);
    n = dummy_section(&d->map, &io_mem_notdirty);
    assert(n == PHYS_SECTION_NOTDIRTY);
    n = dummy_section(&d->map, &io_mem_rom);
    assert(n == PHYS_SECTION_ROM);
    n = dummy_section(&d->map, &io_mem_watch);
    assert(n == PHYS_SECTION_WATCH);

    d->phys_map  = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
    d->as = as;
    as->next_dispatch = d;
}
static void mem_commit(MemoryListener *listener)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *cur = as->dispatch;
    AddressSpaceDispatch *next = as->next_dispatch;

    phys_page_compact_all(next, next->map.nodes_nb);

    as->dispatch = next;

    if (cur) {
        phys_sections_free(&cur->map);
        g_free(cur);
    }
}
static void tcg_commit(MemoryListener *listener)
{
    CPUState *cpu;

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    CPU_FOREACH(cpu) {
        CPUArchState *env = cpu->env_ptr;

        /* FIXME: Disentangle the cpu.h circular files deps so we can
           directly get the right CPU from listener.  */
        if (cpu->tcg_as_listener != listener) {
            continue;
        }
        tlb_flush(env, 1);
    }
}
static void core_log_global_start(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(true);
}

static void core_log_global_stop(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(false);
}

static MemoryListener core_memory_listener = {
    .log_global_start = core_log_global_start,
    .log_global_stop = core_log_global_stop,
    .priority = 1,
};
void address_space_init_dispatch(AddressSpace *as)
{
    as->dispatch = NULL;
    as->dispatch_listener = (MemoryListener) {
        .begin = mem_begin,
        .commit = mem_commit,
        .region_add = mem_add,
        .region_nop = mem_add,
        .priority = 0,
    };
    memory_listener_register(&as->dispatch_listener, as);
}
void address_space_destroy_dispatch(AddressSpace *as)
{
    AddressSpaceDispatch *d = as->dispatch;

    memory_listener_unregister(&as->dispatch_listener);
    g_free(d);
    as->dispatch = NULL;
}
static void memory_map_init(void)
{
    system_memory = g_malloc(sizeof(*system_memory));

    memory_region_init(system_memory, NULL, "system", UINT64_MAX);
    address_space_init(&address_space_memory, system_memory, "memory");

    system_io = g_malloc(sizeof(*system_io));
    memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
                          65536);
    address_space_init(&address_space_io, system_io, "I/O");

    memory_listener_register(&core_memory_listener, &address_space_memory);
}

MemoryRegion *get_system_memory(void)
{
    return system_memory;
}

MemoryRegion *get_system_io(void)
{
    return system_io;
}

#endif /* !defined(CONFIG_USER_ONLY) */
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

#else
static void invalidate_and_set_dirty(hwaddr addr,
                                     hwaddr length)
{
    if (cpu_physical_memory_is_clean(addr)) {
        /* invalidate code */
        tb_invalidate_phys_page_range(addr, addr + length, 0);
        /* set dirty bit */
        cpu_physical_memory_set_dirty_flag(addr, DIRTY_MEMORY_VGA);
        cpu_physical_memory_set_dirty_flag(addr, DIRTY_MEMORY_MIGRATION);
    }
    xen_modified_memory(addr, length);
}
static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
{
    unsigned access_size_max = mr->ops->valid.max_access_size;

    /* Regions are assumed to support 1-4 byte accesses unless
       otherwise specified.  */
    if (access_size_max == 0) {
        access_size_max = 4;
    }

    /* Bound the maximum access by the alignment of the address.  */
    if (!mr->ops->impl.unaligned) {
        unsigned align_size_max = addr & -addr;
        if (align_size_max != 0 && align_size_max < access_size_max) {
            access_size_max = align_size_max;
        }
    }

    /* Don't attempt accesses larger than the maximum.  */
    if (l > access_size_max) {
        l = access_size_max;
    }
    if (l & (l - 1)) {
        l = 1 << (qemu_fls(l) - 1);
    }

    return l;
}
bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
                      int len, bool is_write)
{
    hwaddr l;
    uint8_t *ptr;
    uint64_t val;
    hwaddr addr1;
    MemoryRegion *mr;
    bool error = false;

    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, is_write);

        if (is_write) {
            if (!memory_access_is_direct(mr, is_write)) {
                l = memory_access_size(mr, l, addr1);
                /* XXX: could force current_cpu to NULL to avoid
                   potential bugs */
                switch (l) {
                case 8:
                    /* 64 bit write access */
                    val = ldq_p(buf);
                    error |= io_mem_write(mr, addr1, val, 8);
                    break;
                case 4:
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    error |= io_mem_write(mr, addr1, val, 4);
                    break;
                case 2:
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    error |= io_mem_write(mr, addr1, val, 2);
                    break;
                case 1:
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    error |= io_mem_write(mr, addr1, val, 1);
                    break;
                default:
                    abort();
                }
            } else {
                addr1 += memory_region_get_ram_addr(mr);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(addr1, l);
            }
        } else {
            if (!memory_access_is_direct(mr, is_write)) {
                /* I/O case */
                l = memory_access_size(mr, l, addr1);
                switch (l) {
                case 8:
                    /* 64 bit read access */
                    error |= io_mem_read(mr, addr1, &val, 8);
                    stq_p(buf, val);
                    break;
                case 4:
                    /* 32 bit read access */
                    error |= io_mem_read(mr, addr1, &val, 4);
                    stl_p(buf, val);
                    break;
                case 2:
                    /* 16 bit read access */
                    error |= io_mem_read(mr, addr1, &val, 2);
                    stw_p(buf, val);
                    break;
                case 1:
                    /* 8 bit read access */
                    error |= io_mem_read(mr, addr1, &val, 1);
                    stb_p(buf, val);
                    break;
                default:
                    abort();
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }

    return error;
}
bool address_space_write(AddressSpace *as, hwaddr addr,
                         const uint8_t *buf, int len)
{
    return address_space_rw(as, addr, (uint8_t *)buf, len, true);
}

bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
{
    return address_space_rw(as, addr, buf, len, false);
}
void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
                            int len, int is_write)
{
    address_space_rw(&address_space_memory, addr, buf, len, is_write);
}
enum write_rom_type {
    WRITE_DATA,
    FLUSH_CACHE,
};

static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
    hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
{
    hwaddr l;
    uint8_t *ptr;
    hwaddr addr1;
    MemoryRegion *mr;

    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);

        if (!(memory_region_is_ram(mr) ||
              memory_region_is_romd(mr))) {
            /* do nothing */
        } else {
            addr1 += memory_region_get_ram_addr(mr);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            switch (type) {
            case WRITE_DATA:
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(addr1, l);
                break;
            case FLUSH_CACHE:
                flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
                break;
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
                                   const uint8_t *buf, int len)
{
    cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
}
void cpu_flush_icache_range(hwaddr start, int len)
{
    /*
     * This function should do the same thing as an icache flush that was
     * triggered from within the guest. For TCG we are always cache coherent,
     * so there is no need to flush anything. For KVM / Xen we need to flush
     * the host's instruction cache at least.
     */
    if (tcg_enabled()) {
        return;
    }

    cpu_physical_memory_write_rom_internal(&address_space_memory,
                                           start, NULL, len, FLUSH_CACHE);
}
typedef struct {
    MemoryRegion *mr;
    void *buffer;
    hwaddr addr;
    hwaddr len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);
void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = g_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

static void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    QLIST_REMOVE(client, link);
    g_free(client);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
    }
}
bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
{
    MemoryRegion *mr;
    hwaddr l, xlat;

    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (!memory_access_is_direct(mr, is_write)) {
            l = memory_access_size(mr, l, addr);
            if (!memory_region_access_valid(mr, xlat, l, is_write)) {
                return false;
            }
        }

        len -= l;
        addr += l;
    }
    return true;
}
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *address_space_map(AddressSpace *as,
                        hwaddr addr,
                        hwaddr *plen,
                        bool is_write)
{
    hwaddr len = *plen;
    hwaddr done = 0;
    hwaddr l, xlat, base;
    MemoryRegion *mr, *this_mr;
    ram_addr_t raddr;

    if (len == 0) {
        return NULL;
    }

    l = len;
    mr = address_space_translate(as, addr, &xlat, &l, is_write);
    if (!memory_access_is_direct(mr, is_write)) {
        if (bounce.buffer) {
            return NULL;
        }
        /* Avoid unbounded allocations */
        l = MIN(l, TARGET_PAGE_SIZE);
        bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
        bounce.addr = addr;
        bounce.len = l;

        memory_region_ref(mr);
        bounce.mr = mr;
        if (!is_write) {
            address_space_read(as, addr, bounce.buffer, l);
        }

        *plen = l;
        return bounce.buffer;
    }

    base = xlat;
    raddr = memory_region_get_ram_addr(mr);

    for (;;) {
        len -= l;
        addr += l;
        done += l;
        if (len == 0) {
            break;
        }

        l = len;
        this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (this_mr != mr || xlat != base + done) {
            break;
        }
    }

    memory_region_ref(mr);
    *plen = done;
    return qemu_ram_ptr_length(raddr + base, plen);
}
/* Unmaps a memory region previously mapped by address_space_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         int is_write, hwaddr access_len)
{
    if (buffer != bounce.buffer) {
        MemoryRegion *mr;
        ram_addr_t addr1;

        mr = qemu_ram_addr_from_host(buffer, &addr1);
        assert(mr != NULL);
        if (is_write) {
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                invalidate_and_set_dirty(addr1, l);
                addr1 += l;
                access_len -= l;
            }
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        memory_region_unref(mr);
        return;
    }
    if (is_write) {
        address_space_write(as, bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    memory_region_unref(bounce.mr);
    cpu_notify_map_clients();
}
void *cpu_physical_memory_map(hwaddr addr,
                              hwaddr *plen,
                              int is_write)
{
    return address_space_map(&address_space_memory, addr, plen, is_write);
}

void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                               int is_write, hwaddr access_len)
{
    return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
}
/* warning: addr must be aligned */
static inline uint32_t ldl_phys_internal(AddressSpace *as, hwaddr addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;

    mr = address_space_translate(as, addr, &addr1, &l, false);
    if (l < 4 || !memory_access_is_direct(mr, false)) {
        /* I/O case */
        io_mem_read(mr, addr1, &val, 4);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
{
    return ldl_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
{
    return ldl_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
{
    return ldl_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
}
/* warning: addr must be aligned */
static inline uint64_t ldq_phys_internal(AddressSpace *as, hwaddr addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 8;
    hwaddr addr1;

    mr = address_space_translate(as, addr, &addr1, &l,
                                 false);
    if (l < 8 || !memory_access_is_direct(mr, false)) {
        /* I/O case */
        io_mem_read(mr, addr1, &val, 8);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap64(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap64(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
    }
    return val;
}

uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
{
    return ldq_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
}

uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
{
    return ldq_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
}

uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
{
    return ldq_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
}
/* XXX: optimize */
uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
{
    uint8_t val;
    address_space_rw(as, addr, &val, 1, 0);
    return val;
}
/* warning: addr must be aligned */
static inline uint32_t lduw_phys_internal(AddressSpace *as, hwaddr addr,
                                          enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;

    mr = address_space_translate(as, addr, &addr1, &l,
                                 false);
    if (l < 2 || !memory_access_is_direct(mr, false)) {
        /* I/O case */
        io_mem_read(mr, addr1, &val, 2);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
{
    return lduw_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
{
    return lduw_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
{
    return lduw_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
}
/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;

    mr = address_space_translate(as, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        io_mem_write(mr, addr1, val, 4);
    } else {
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (cpu_physical_memory_is_clean(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flag(addr1,
                                                   DIRTY_MEMORY_MIGRATION);
                cpu_physical_memory_set_dirty_flag(addr1, DIRTY_MEMORY_VGA);
            }
        }
    }
}
/* warning: addr must be aligned */
static inline void stl_phys_internal(AddressSpace *as,
                                     hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;

    mr = address_space_translate(as, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        io_mem_write(mr, addr1, val, 4);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 4);
    }
}

void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    stl_phys_internal(as, addr, val, DEVICE_NATIVE_ENDIAN);
}

void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    stl_phys_internal(as, addr, val, DEVICE_LITTLE_ENDIAN);
}

void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    stl_phys_internal(as, addr, val, DEVICE_BIG_ENDIAN);
}
/* XXX: optimize */
void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    uint8_t v = val;
    address_space_rw(as, addr, &v, 1, 1);
}
/* warning: addr must be aligned */
static inline void stw_phys_internal(AddressSpace *as,
                                     hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;

    mr = address_space_translate(as, addr, &addr1, &l, true);
    if (l < 2 || !memory_access_is_direct(mr, true)) {
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        io_mem_write(mr, addr1, val, 2);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 2);
    }
}

void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    stw_phys_internal(as, addr, val, DEVICE_NATIVE_ENDIAN);
}

void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    stw_phys_internal(as, addr, val, DEVICE_LITTLE_ENDIAN);
}

void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    stw_phys_internal(as, addr, val, DEVICE_BIG_ENDIAN);
}
/* XXX: optimize */
void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    val = tswap64(val);
    address_space_rw(as, addr, (void *) &val, 8, 1);
}

void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    val = cpu_to_le64(val);
    address_space_rw(as, addr, (void *) &val, 8, 1);
}

void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    val = cpu_to_be64(val);
    address_space_rw(as, addr, (void *) &val, 8, 1);
}
/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(cpu, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write) {
            cpu_physical_memory_write_rom(cpu->as, phys_addr, buf, l);
        } else {
            address_space_rw(cpu->as, phys_addr, buf, l, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
#endif
#if !defined(CONFIG_USER_ONLY)

/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool virtio_is_big_endian(void);
bool virtio_is_big_endian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}

#endif
#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegion *mr;
    hwaddr l = 1;

    mr = address_space_translate(&address_space_memory,
                                 phys_addr, &phys_addr, &l, false);

    return !(memory_region_is_ram(mr) ||
             memory_region_is_romd(mr));
}
void qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
{
    RAMBlock *block;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        func(block->host, block->offset, block->length, opaque);
    }
}
#endif