/*
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <sys/types.h>

#include "qemu-common.h"

#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#endif

#include "exec/cpu-all.h"

#include "exec/cputlb.h"
#include "translate-all.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "qemu/cache-utils.h"

#include "qemu/range.h"

//#define DEBUG_SUBPAGE
#if !defined(CONFIG_USER_ONLY)
static bool in_migration;

RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);

/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUState *, current_cpu);

/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables.  */
#define ADDR_SPACE_BITS 64

#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)

typedef PhysPageEntry Node[P_L2_SIZE];
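
/* The dispatch structures below describe guest physical memory as a radix
 * tree: each Node is a table of P_L2_SIZE PhysPageEntry slots indexing
 * P_L2_BITS bits of the page number, leaves hold an index into the
 * sections array, and the skip field lets a lookup jump over levels that
 * have a single child (see phys_page_compact()).
 */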
typedef struct PhysPageMap {
    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;
#endif

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)

    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
static uint32_t phys_map_node_alloc(PhysPageMap *map)

    ret = map->nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);
    for (i = 0; i < P_L2_SIZE; ++i) {
        map->nodes[ret][i].skip = 1;
        map->nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;

static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,

    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map);
        p = map->nodes[lp->ptr];
        for (i = 0; i < P_L2_SIZE; i++) {
            p[i].ptr = PHYS_SECTION_UNASSIGNED;
        p = map->nodes[lp->ptr];
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,

    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);

/* Compact a non leaf page entry. Simply detect that the entry has a single child,
 * and update our entry so we can skip it and go directly to the destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)

    unsigned valid_ptr = P_L2_SIZE;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {

    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {

            phys_page_compact(&p[i], nodes, compacted);

    /* We can only compress if there's only one child. */

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip += p[valid_ptr].skip;
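
    /* Collapsing the single child into its parent adds the child's skip
     * count to the parent's, so a later lookup crosses the removed level
     * in one step instead of loading an intermediate Node.
     */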
static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)

    DECLARE_BITMAP(compacted, nodes_nb);

    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes, compacted);

static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)

    hwaddr index = addr >> TARGET_PAGE_BITS;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];

    if (sections[lp.ptr].size.hi ||
        range_covers_byte(sections[lp.ptr].offset_within_address_space,
                          sections[lp.ptr].size.lo, addr)) {
        return &sections[lp.ptr];
    return &sections[PHYS_SECTION_UNASSIGNED];
bool memory_region_is_unassigned(MemoryRegion *mr)

    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;

static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        bool resolve_subpage)

    MemoryRegionSection *section;

    section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];

static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)

    MemoryRegionSection *section;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    diff = int128_sub(section->mr->size, int128_make64(addr));
    *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
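
    /* The length returned through *plen never extends past the end of the
     * section's MemoryRegion, so callers that need more data loop and
     * translate again for the next chunk.
     */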
static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)

    if (memory_region_is_ram(mr)) {
        return !(is_write && mr->readonly);
    if (memory_region_is_romd(mr)) {

MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,

    MemoryRegionSection *section;

        section = address_space_translate_internal(as->dispatch, addr, &addr, plen, true);

        if (!mr->iommu_ops) {

        iotlb = mr->iommu_ops->translate(mr, addr);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        len = MIN(len, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;

        as = iotlb.target_as;

    if (memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        len = MIN(page, len);

MemoryRegionSection *
address_space_translate_for_iotlb(AddressSpace *as, hwaddr addr, hwaddr *xlat,

    MemoryRegionSection *section;
    section = address_space_translate_internal(as->dispatch, addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
void cpu_exec_init_all(void)

#if !defined(CONFIG_USER_ONLY)
    qemu_mutex_init(&ram_list.mutex);

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)

    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu->env_ptr, 1);

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()

CPUState *qemu_get_cpu(int index)

        if (cpu->cpu_index == index) {

#if !defined(CONFIG_USER_ONLY)
void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as)

    /* We only support one address space per cpu at the moment.  */
    assert(cpu->as == as);

    if (cpu->tcg_as_listener) {
        memory_listener_unregister(cpu->tcg_as_listener);
        cpu->tcg_as_listener = g_new0(MemoryListener, 1);
    cpu->tcg_as_listener->commit = tcg_commit;
    memory_listener_register(cpu->tcg_as_listener, as);

void cpu_exec_init(CPUArchState *env)

    CPUState *cpu = ENV_GET_CPU(env);
    CPUClass *cc = CPU_GET_CLASS(cpu);

#if defined(CONFIG_USER_ONLY)
    CPU_FOREACH(some_cpu) {
    cpu->cpu_index = cpu_index;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->as = &address_space_memory;
    cpu->thread_id = qemu_get_thread_id();
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
    assert(cc->vmsd == NULL);
    assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)

    tb_invalidate_phys_page_range(pc, pc + 1, 0);

static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)

    hwaddr phys = cpu_get_phys_page_debug(cpu, pc);
        tb_invalidate_phys_addr(cpu->as,
                                phys | (pc & ~TARGET_PAGE_MASK));

#endif /* TARGET_HAS_ICE */
#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)

int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)

/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)

    target_ulong len_mask = ~(len - 1);

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len & (len - 1)) || (addr & ~len_mask) ||
            len == 0 || len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);

    wp = g_malloc(sizeof(*wp));

    wp->len_mask = len_mask;

    /* keep all GDB-injected watchpoints in front */
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,

    target_ulong len_mask = ~(len - 1);

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)

    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)

    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)

#if defined(TARGET_HAS_ICE)
    bp = g_malloc(sizeof(*bp));

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(ENV_GET_CPU(env), pc);

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)

#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)

#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(ENV_GET_CPU(env), breakpoint->pc);

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUArchState *env, int mask)

#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)

#if defined(TARGET_HAS_ICE)
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
            kvm_update_guest_debug(cpu, 0);
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            CPUArchState *env = cpu->env_ptr;

void cpu_abort(CPUArchState *env, const char *fmt, ...)

    CPUState *cpu = ENV_GET_CPU(env);

    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
#if defined(CONFIG_USER_ONLY)
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
#if !defined(CONFIG_USER_ONLY)
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)

    /* The list is protected by the iothread lock here.  */
    block = ram_list.mru_block;
    if (block && addr - block->offset < block->length) {
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);

    ram_list.mru_block = block;

static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)block->host + (start - block->offset);
    cpu_tlb_reset_dirty_all(start1, length);

/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t length,

    cpu_physical_memory_clear_dirty_range(start, length, client);

        tlb_reset_dirty_range_all(start, length);

static void cpu_physical_memory_set_dirty_tracking(bool enable)

    in_migration = enable;

hwaddr memory_region_section_get_iotlb(CPUArchState *env,
                                       MemoryRegionSection *section,
                                       hwaddr paddr, hwaddr xlat,
                                       target_ulong *address)

    if (memory_region_is_ram(section->mr)) {
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
            iotlb |= PHYS_SECTION_ROM;
        iotlb = section - section->address_space->dispatch->map.sections;
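
    /* For RAM the iotlb value is the page's ram_addr ORed with a small
     * special-section index (NOTDIRTY or ROM); for MMIO it is the index
     * of the section itself, which iotlb_to_region() turns back into a
     * MemoryRegion.
     */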
    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;

#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size) = qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this.  Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t))

    phys_mem_alloc = alloc;

static uint16_t phys_section_add(PhysPageMap *map,
                                 MemoryRegionSection *section)

    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(map->sections_nb < TARGET_PAGE_SIZE);

    if (map->sections_nb == map->sections_nb_alloc) {
        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
        map->sections = g_renew(MemoryRegionSection, map->sections,
                                map->sections_nb_alloc);
    map->sections[map->sections_nb] = *section;
    memory_region_ref(section->mr);
    return map->sections_nb++;

static void phys_section_destroy(MemoryRegion *mr)

    memory_region_unref(mr);

        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        memory_region_destroy(&subpage->iomem);

static void phys_sections_free(PhysPageMap *map)

    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    g_free(map->sections);
static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)

    hwaddr base = section->offset_within_address_space
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
                                                   d->map.nodes, d->map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.address_space = d->as;
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&d->map, &subsection));
        subpage = container_of(existing->mr, subpage_t, iomem);
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end,
                     phys_section_add(&d->map, section));

static void register_multipage(AddressSpaceDispatch *d,
                               MemoryRegionSection *section)

    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(&d->map, section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,

    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);

static void mem_add(MemoryListener *listener, MemoryRegionSection *section)

    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = as->next_dispatch;
    MemoryRegionSection now = *section, remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
                        - now.offset_within_address_space;
        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(d, &now);
        now.size = int128_zero();
    while (int128_ne(remain.size, now.size)) {
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);
        if (int128_lt(remain.size, page_size)) {
            register_subpage(d, &now);
        } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
            now.size = page_size;
            register_subpage(d, &now);
            now.size = int128_and(now.size, int128_neg(page_size));
            register_multipage(d, &now);
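
    /* A section is registered in at most three pieces: an unaligned head
     * and tail go through subpages, while the page-aligned middle is
     * mapped directly as full pages by register_multipage().
     */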
void qemu_flush_coalesced_mmio_buffer(void)

        kvm_flush_coalesced_mmio_buffer();

void qemu_mutex_lock_ramlist(void)

    qemu_mutex_lock(&ram_list.mutex);

void qemu_mutex_unlock_ramlist(void)

    qemu_mutex_unlock(&ram_list.mutex);

#define HUGETLBFS_MAGIC       0x958458f6

static long gethugepagesize(const char *path)

        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

static sigjmp_buf sigjump;

static void sigbus_handler(int signal)

    siglongjmp(sigjump, 1);

static void *file_ram_alloc(RAMBlock *block,

    char *sanitized_name;
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);

    if (memory < hpagesize) {

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");

    /* Make name safe to use with mkstemp by replacing '/' with '_'. */
    sanitized_name = g_strdup(block->mr->name);
    for (c = sanitized_name; *c != '\0'; c++) {

    filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
    g_free(sanitized_name);

    fd = mkstemp(filename);
        perror("unable to create backing store for hugepages");

    memory = (memory + hpagesize - 1) & ~(hpagesize - 1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");

        struct sigaction act, oldact;
        sigset_t set, oldset;

        memset(&act, 0, sizeof(act));
        act.sa_handler = &sigbus_handler;

        ret = sigaction(SIGBUS, &act, &oldact);
            perror("file_ram_alloc: failed to install signal handler");

        /* unblock SIGBUS */
        sigaddset(&set, SIGBUS);
        pthread_sigmask(SIG_UNBLOCK, &set, &oldset);

        if (sigsetjmp(sigjump, 1)) {
            fprintf(stderr, "file_ram_alloc: failed to preallocate pages\n");

        /* MAP_POPULATE silently ignores failures */
        for (i = 0; i < (memory / hpagesize); i++) {
            memset(area + (hpagesize * i), 0, 1);

        ret = sigaction(SIGBUS, &oldact, NULL);
            perror("file_ram_alloc: failed to reinstall signal handler");

        pthread_sigmask(SIG_SETMASK, &oldset, NULL);
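
        /* Touching one byte of every huge page above forces the kernel to
         * back it immediately; if hugetlbfs runs out of pages the resulting
         * SIGBUS is caught by sigbus_handler() and control returns to the
         * sigsetjmp() above instead of the process crashing.
         */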
static void *file_ram_alloc(RAMBlock *block,

    fprintf(stderr, "-mem-path not supported on this host\n");

static ram_addr_t find_ram_offset(ram_addr_t size)

    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    assert(size != 0); /* it would hand out same offset multiple times */

    if (QTAILQ_EMPTY(&ram_list.blocks))

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->length;

        QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);

        if (next - end >= size && next - end < mingap) {
            mingap = next - end;

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",

ram_addr_t last_ram_offset(void)

    ram_addr_t last = 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next)
        last = MAX(last, block->offset + block->length);

static void qemu_ram_setup_dump(void *addr, ram_addr_t size)

    /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
    if (!qemu_opt_get_bool(qemu_get_machine_opts(),
                           "dump-guest-core", true)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
            perror("qemu_madvise");
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                    "but dump_guest_core=off specified\n");

void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)

    RAMBlock *new_block, *block;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->offset == addr) {

    assert(!new_block->idstr[0]);

        char *id = qdev_get_dev_path(dev);
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
    qemu_mutex_unlock_ramlist();
static int memory_try_enable_merging(void *addr, size_t len)

    if (!qemu_opt_get_bool(qemu_get_machine_opts(), "mem-merge", true)) {
        /* disabled by the user */

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);

ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,

    RAMBlock *block, *new_block;
    ram_addr_t old_ram_size, new_ram_size;

    old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();

    new_block->offset = find_ram_offset(size);
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
    } else if (xen_enabled()) {
            fprintf(stderr, "-mem-path not supported with Xen\n");
        xen_ram_alloc(new_block->offset, size, mr);
        if (phys_mem_alloc != qemu_anon_ram_alloc) {
            /*
             * file_ram_alloc() needs to allocate just like
             * phys_mem_alloc, but we haven't bothered to provide
             * a hook there.
             */
                    "-mem-path not supported with this accelerator\n");
            new_block->host = file_ram_alloc(new_block, size, mem_path);
        if (!new_block->host) {
            new_block->host = phys_mem_alloc(size);
            if (!new_block->host) {
                fprintf(stderr, "Cannot set up guest memory '%s': %s\n",
                        new_block->mr->name, strerror(errno));
            memory_try_enable_merging(new_block->host, size);

    new_block->length = size;

    /* Keep the list sorted from biggest to smallest block.  */
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->length < new_block->length) {
        QTAILQ_INSERT_BEFORE(block, new_block, next);
        QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
    ram_list.mru_block = NULL;

    qemu_mutex_unlock_ramlist();

    new_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;

    if (new_ram_size > old_ram_size) {
        for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
            ram_list.dirty_memory[i] =
                bitmap_zero_extend(ram_list.dirty_memory[i],
                                   old_ram_size, new_ram_size);
    cpu_physical_memory_set_dirty_range(new_block->offset, size);

    qemu_ram_setup_dump(new_block->host, size);
    qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);
    qemu_madvise(new_block->host, size, QEMU_MADV_DONTFORK);

        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;

ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)

    return qemu_ram_alloc_from_ptr(size, NULL, mr);
void qemu_ram_free_from_ptr(ram_addr_t addr)

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
    qemu_mutex_unlock_ramlist();

void qemu_ram_free(ram_addr_t addr)

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            if (block->flags & RAM_PREALLOC_MASK) {
            } else if (xen_enabled()) {
                xen_invalidate_map_cache_entry(block->host);
            } else if (block->fd >= 0) {
                munmap(block->host, block->length);
                qemu_anon_ram_free(block->host, block->length);
    qemu_mutex_unlock_ramlist();

void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->length) {
            vaddr = block->host + offset;
            if (block->flags & RAM_PREALLOC_MASK) {
            } else if (xen_enabled()) {
                munmap(vaddr, length);
                if (block->fd >= 0) {
                    flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
                    flags |= MAP_PRIVATE;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, block->fd, offset);
                    /*
                     * Remap needs to match alloc.  Accelerators that
                     * set phys_mem_alloc never remap.  If they did,
                     * we'd need a remap hook here.
                     */
                    assert(phys_mem_alloc == qemu_anon_ram_alloc);

                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                memory_try_enable_merging(vaddr, length);
                qemu_ram_setup_dump(vaddr, length);

#endif /* !_WIN32 */
/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)

    RAMBlock *block = qemu_get_ram_block(addr);

    if (xen_enabled()) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map until the end of the page.
         */
        if (block->offset == 0) {
            return xen_map_cache(addr, 0, 0);
        } else if (block->host == NULL) {
                xen_map_cache(block->offset, block->length, 1);

    return block->host + (addr - block->offset);

/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
 * but takes a size argument */
static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)

    if (xen_enabled()) {
        return xen_map_cache(addr, *size, 1);

        QTAILQ_FOREACH(block, &ram_list.blocks, next) {
            if (addr - block->offset < block->length) {
                if (addr - block->offset + *size > block->length)
                    *size = block->length - addr + block->offset;
                return block->host + (addr - block->offset);

        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);

/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)

    uint8_t *host = ptr;

    if (xen_enabled()) {
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
        return qemu_get_ram_block(*ram_addr)->mr;

    block = ram_list.mru_block;
    if (block && block->host && host - block->host < block->length) {

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        /* This case appears when the block is not mapped. */
        if (block->host == NULL) {
        if (host - block->host < block->length) {

    *ram_addr = block->offset + (host - block->host);

static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
                               uint64_t val, unsigned size)

    if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
        tb_invalidate_phys_page_fast(ram_addr, size);
        stb_p(qemu_get_ram_ptr(ram_addr), val);
        stw_p(qemu_get_ram_ptr(ram_addr), val);
        stl_p(qemu_get_ram_ptr(ram_addr), val);
    cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_MIGRATION);
    cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_VGA);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (!cpu_physical_memory_is_clean(ram_addr)) {
        CPUArchState *env = current_cpu->env_ptr;
        tlb_set_dirty(env, env->mem_io_vaddr);

static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
                                 unsigned size, bool is_write)

static const MemoryRegionOps notdirty_mem_ops = {
    .write = notdirty_mem_write,
    .valid.accepts = notdirty_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
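
/* A write that reaches notdirty_mem_ops hit a page whose code-dirty flag
 * was clear: notdirty_mem_write() invalidates any TBs for that page,
 * performs the store, and sets the dirty bits so later writes can go
 * straight to RAM.
 */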
/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len_mask, int flags)

    CPUArchState *env = current_cpu->env_ptr;
    target_ulong pc, cs_base;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(ENV_GET_CPU(env), CPU_INTERRUPT_DEBUG);

    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb_check_watchpoint(env);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                    cpu_resume_from_signal(env, NULL);
            wp->flags &= ~BP_WATCHPOINT_HIT;

/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   access routines.  */
static uint64_t watch_mem_read(void *opaque, hwaddr addr,

    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
    case 1: return ldub_phys(&address_space_memory, addr);
    case 2: return lduw_phys(&address_space_memory, addr);
    case 4: return ldl_phys(&address_space_memory, addr);

static void watch_mem_write(void *opaque, hwaddr addr,
                            uint64_t val, unsigned size)

    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
        stb_phys(&address_space_memory, addr, val);
        stw_phys(&address_space_memory, addr, val);
        stl_phys(&address_space_memory, addr, val);

static const MemoryRegionOps watch_mem_ops = {
    .read = watch_mem_read,
    .write = watch_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
static uint64_t subpage_read(void *opaque, hwaddr addr,

    subpage_t *subpage = opaque;

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
           subpage, len, addr);
    address_space_read(subpage->as, addr + subpage->base, buf, len);

static void subpage_write(void *opaque, hwaddr addr,
                          uint64_t value, unsigned len)

    subpage_t *subpage = opaque;

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx
           " value %"PRIx64"\n",
           __func__, subpage, len, addr, value);
    address_space_write(subpage->as, addr + subpage->base, buf, len);

static bool subpage_accepts(void *opaque, hwaddr addr,
                            unsigned len, bool is_write)

    subpage_t *subpage = opaque;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
           __func__, subpage, is_write ? 'w' : 'r', len, addr);

    return address_space_access_valid(subpage->as, addr + subpage->base,

static const MemoryRegionOps subpage_ops = {
    .read = subpage_read,
    .write = subpage_write,
    .valid.accepts = subpage_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
           __func__, mmio, start, end, idx, eidx, section);
    for (; idx <= eidx; idx++) {
        mmio->sub_section[idx] = section;

static subpage_t *subpage_init(AddressSpace *as, hwaddr base)

    mmio = g_malloc0(sizeof(subpage_t));

    memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
                          "subpage", TARGET_PAGE_SIZE);
    mmio->iomem.subpage = true;
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
           mmio, base, TARGET_PAGE_SIZE);
    subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, PHYS_SECTION_UNASSIGNED);
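
/* A subpage fans one guest page out to several sections: every byte
 * offset within the page has a sub_section entry naming the section that
 * covers it, and accesses are forwarded to the owning address space at
 * addr + subpage->base.
 */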
static uint16_t dummy_section(PhysPageMap *map, MemoryRegion *mr)

    MemoryRegionSection section = {
        .address_space = &address_space_memory,
        .offset_within_address_space = 0,
        .offset_within_region = 0,
        .size = int128_2_64(),

    return phys_section_add(map, &section);

MemoryRegion *iotlb_to_region(AddressSpace *as, hwaddr index)

    return as->dispatch->map.sections[index & ~TARGET_PAGE_MASK].mr;

static void io_mem_init(void)

    memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, "rom", UINT64_MAX);
    memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
                          "unassigned", UINT64_MAX);
    memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
                          "notdirty", UINT64_MAX);
    memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
                          "watch", UINT64_MAX);

static void mem_begin(MemoryListener *listener)

    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);

    n = dummy_section(&d->map, &io_mem_unassigned);
    assert(n == PHYS_SECTION_UNASSIGNED);
    n = dummy_section(&d->map, &io_mem_notdirty);
    assert(n == PHYS_SECTION_NOTDIRTY);
    n = dummy_section(&d->map, &io_mem_rom);
    assert(n == PHYS_SECTION_ROM);
    n = dummy_section(&d->map, &io_mem_watch);
    assert(n == PHYS_SECTION_WATCH);
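
    /* The dummy sections are added in a fixed order so that their indices
     * match the PHYS_SECTION_UNASSIGNED/NOTDIRTY/ROM/WATCH constants used
     * by the iotlb encoding; the asserts above guard that ordering.
     */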
    d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };

    as->next_dispatch = d;

static void mem_commit(MemoryListener *listener)

    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *cur = as->dispatch;
    AddressSpaceDispatch *next = as->next_dispatch;

    phys_page_compact_all(next, next->map.nodes_nb);

    as->dispatch = next;
        phys_sections_free(&cur->map);

static void tcg_commit(MemoryListener *listener)

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
        CPUArchState *env = cpu->env_ptr;

        /* FIXME: Disentangle the cpu.h circular files deps so we can
           directly get the right CPU from listener.  */
        if (cpu->tcg_as_listener != listener) {

static void core_log_global_start(MemoryListener *listener)

    cpu_physical_memory_set_dirty_tracking(true);

static void core_log_global_stop(MemoryListener *listener)

    cpu_physical_memory_set_dirty_tracking(false);

static MemoryListener core_memory_listener = {
    .log_global_start = core_log_global_start,
    .log_global_stop = core_log_global_stop,

void address_space_init_dispatch(AddressSpace *as)

    as->dispatch = NULL;
    as->dispatch_listener = (MemoryListener) {
        .commit = mem_commit,
        .region_add = mem_add,
        .region_nop = mem_add,
    memory_listener_register(&as->dispatch_listener, as);

void address_space_destroy_dispatch(AddressSpace *as)

    AddressSpaceDispatch *d = as->dispatch;

    memory_listener_unregister(&as->dispatch_listener);
    as->dispatch = NULL;

static void memory_map_init(void)

    system_memory = g_malloc(sizeof(*system_memory));

    memory_region_init(system_memory, NULL, "system", UINT64_MAX);
    address_space_init(&address_space_memory, system_memory, "memory");

    system_io = g_malloc(sizeof(*system_io));
    memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
    address_space_init(&address_space_io, system_io, "I/O");

    memory_listener_register(&core_memory_listener, &address_space_memory);

MemoryRegion *get_system_memory(void)

    return system_memory;

MemoryRegion *get_system_io(void)

#endif /* !defined(CONFIG_USER_ONLY) */
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)

        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            if (!(flags & PAGE_WRITE))
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
            unlock_user(p, addr, l);
            if (!(flags & PAGE_READ))
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
            unlock_user(p, addr, 0);

static void invalidate_and_set_dirty(hwaddr addr,

    if (cpu_physical_memory_is_clean(addr)) {
        /* invalidate code */
        tb_invalidate_phys_page_range(addr, addr + length, 0);
        cpu_physical_memory_set_dirty_flag(addr, DIRTY_MEMORY_VGA);
        cpu_physical_memory_set_dirty_flag(addr, DIRTY_MEMORY_MIGRATION);
    xen_modified_memory(addr, length);

static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)

    unsigned access_size_max = mr->ops->valid.max_access_size;

    /* Regions are assumed to support 1-4 byte accesses unless
       otherwise specified.  */
    if (access_size_max == 0) {
        access_size_max = 4;

    /* Bound the maximum access by the alignment of the address.  */
    if (!mr->ops->impl.unaligned) {
        unsigned align_size_max = addr & -addr;
        if (align_size_max != 0 && align_size_max < access_size_max) {
            access_size_max = align_size_max;

    /* Don't attempt accesses larger than the maximum.  */
    if (l > access_size_max) {
        l = access_size_max;
        l = 1 << (qemu_fls(l) - 1);
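
    /* The access size is bounded by the region's maximum access size and
     * by the alignment of the address, then rounded down to a power of
     * two so it can be issued as a single io_mem read or write.
     */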
bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
                      int len, bool is_write)

        mr = address_space_translate(as, addr, &addr1, &l, is_write);

            if (!memory_access_is_direct(mr, is_write)) {
                l = memory_access_size(mr, l, addr1);
                /* XXX: could force current_cpu to NULL to avoid
                   potential bugs */
                    /* 64 bit write access */
                    error |= io_mem_write(mr, addr1, val, 8);
                    /* 32 bit write access */
                    error |= io_mem_write(mr, addr1, val, 4);
                    /* 16 bit write access */
                    error |= io_mem_write(mr, addr1, val, 2);
                    /* 8 bit write access */
                    error |= io_mem_write(mr, addr1, val, 1);
                addr1 += memory_region_get_ram_addr(mr);
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(addr1, l);
            if (!memory_access_is_direct(mr, is_write)) {
                l = memory_access_size(mr, l, addr1);
                    /* 64 bit read access */
                    error |= io_mem_read(mr, addr1, &val, 8);
                    /* 32 bit read access */
                    error |= io_mem_read(mr, addr1, &val, 4);
                    /* 16 bit read access */
                    error |= io_mem_read(mr, addr1, &val, 2);
                    /* 8 bit read access */
                    error |= io_mem_read(mr, addr1, &val, 1);
                ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
                memcpy(buf, ptr, l);

bool address_space_write(AddressSpace *as, hwaddr addr,
                         const uint8_t *buf, int len)

    return address_space_rw(as, addr, (uint8_t *)buf, len, true);

bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)

    return address_space_rw(as, addr, buf, len, false);

void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
                            int len, int is_write)

    address_space_rw(&address_space_memory, addr, buf, len, is_write);

enum write_rom_type {

static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
    hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)

        mr = address_space_translate(as, addr, &addr1, &l, true);

        if (!(memory_region_is_ram(mr) ||
              memory_region_is_romd(mr))) {
            addr1 += memory_region_get_ram_addr(mr);
            ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(addr1, l);
                flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);

/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
                                   const uint8_t *buf, int len)

    cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);

void cpu_flush_icache_range(hwaddr start, int len)

    /*
     * This function should do the same thing as an icache flush that was
     * triggered from within the guest. For TCG we are always cache coherent,
     * so there is no need to flush anything. For KVM / Xen we need to flush
     * the host's instruction cache at least.
     */
    if (tcg_enabled()) {

    cpu_physical_memory_write_rom_internal(&address_space_memory,
                                           start, NULL, len, FLUSH_CACHE);
static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))

    MapClient *client = g_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    QLIST_INSERT_HEAD(&map_client_list, client, link);

static void cpu_unregister_map_client(void *_client)

    MapClient *client = (MapClient *)_client;

    QLIST_REMOVE(client, link);

static void cpu_notify_map_clients(void)

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);

bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)

        mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (!memory_access_is_direct(mr, is_write)) {
            l = memory_access_size(mr, l, addr);
            if (!memory_region_access_valid(mr, xlat, l, is_write)) {

/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *address_space_map(AddressSpace *as,

    hwaddr l, xlat, base;
    MemoryRegion *mr, *this_mr;

    mr = address_space_translate(as, addr, &xlat, &l, is_write);
    if (!memory_access_is_direct(mr, is_write)) {
        if (bounce.buffer) {
        /* Avoid unbounded allocations */
        l = MIN(l, TARGET_PAGE_SIZE);
        bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);

        memory_region_ref(mr);
            address_space_read(as, addr, bounce.buffer, l);

        return bounce.buffer;
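
    /* MMIO regions cannot be mapped directly: a single global bounce
     * buffer is handed out instead, so only one such mapping can be
     * outstanding at a time; cpu_register_map_client() lets callers be
     * notified when it is released by address_space_unmap().
     */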
    raddr = memory_region_get_ram_addr(mr);

        this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (this_mr != mr || xlat != base + done) {

    memory_region_ref(mr);
    return qemu_ram_ptr_length(raddr + base, plen);

/* Unmaps a memory region previously mapped by address_space_map().
 * Will also mark the memory as dirty if is_write == 1. access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         int is_write, hwaddr access_len)

    if (buffer != bounce.buffer) {

        mr = qemu_ram_addr_from_host(buffer, &addr1);
            while (access_len) {
                l = TARGET_PAGE_SIZE;
                invalidate_and_set_dirty(addr1, l);
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        memory_region_unref(mr);

        address_space_write(as, bounce.addr, bounce.buffer, access_len);
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    memory_region_unref(bounce.mr);
    cpu_notify_map_clients();

void *cpu_physical_memory_map(hwaddr addr,

    return address_space_map(&address_space_memory, addr, plen, is_write);

void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                               int is_write, hwaddr access_len)

    return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);

/* warning: addr must be aligned */
static inline uint32_t ldl_phys_internal(AddressSpace *as, hwaddr addr,
                                         enum device_endian endian)

    mr = address_space_translate(as, addr, &addr1, &l, false);
    if (l < 4 || !memory_access_is_direct(mr, false)) {
        io_mem_read(mr, addr1, &val, 4);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
        if (endian == DEVICE_BIG_ENDIAN) {
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);

uint32_t ldl_phys(AddressSpace *as, hwaddr addr)

    return ldl_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);

uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)

    return ldl_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);

uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)

    return ldl_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
/* warning: addr must be aligned */
static inline uint64_t ldq_phys_internal(AddressSpace *as, hwaddr addr,
                                         enum device_endian endian)

    mr = address_space_translate(as, addr, &addr1, &l,
    if (l < 8 || !memory_access_is_direct(mr, false)) {
        io_mem_read(mr, addr1, &val, 8);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
        if (endian == DEVICE_BIG_ENDIAN) {
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);

uint64_t ldq_phys(AddressSpace *as, hwaddr addr)

    return ldq_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);

uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)

    return ldq_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);

uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)

    return ldq_phys_internal(as, addr, DEVICE_BIG_ENDIAN);

uint32_t ldub_phys(AddressSpace *as, hwaddr addr)

    address_space_rw(as, addr, &val, 1, 0);

/* warning: addr must be aligned */
static inline uint32_t lduw_phys_internal(AddressSpace *as, hwaddr addr,
                                          enum device_endian endian)

    mr = address_space_translate(as, addr, &addr1, &l,
    if (l < 2 || !memory_access_is_direct(mr, false)) {
        io_mem_read(mr, addr1, &val, 2);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
        if (endian == DEVICE_BIG_ENDIAN) {
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);

uint32_t lduw_phys(AddressSpace *as, hwaddr addr)

    return lduw_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);

uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)

    return lduw_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);

uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)

    return lduw_phys_internal(as, addr, DEVICE_BIG_ENDIAN);

/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)

    mr = address_space_translate(as, addr, &addr1, &l,
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        io_mem_write(mr, addr1, val, 4);
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);

        if (unlikely(in_migration)) {
            if (cpu_physical_memory_is_clean(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                cpu_physical_memory_set_dirty_flag(addr1,
                                                   DIRTY_MEMORY_MIGRATION);
                cpu_physical_memory_set_dirty_flag(addr1, DIRTY_MEMORY_VGA);

/* warning: addr must be aligned */
static inline void stl_phys_internal(AddressSpace *as,
                                     hwaddr addr, uint32_t val,
                                     enum device_endian endian)

    mr = address_space_translate(as, addr, &addr1, &l,
    if (l < 4 || !memory_access_is_direct(mr, true)) {
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
        if (endian == DEVICE_BIG_ENDIAN) {
        io_mem_write(mr, addr1, val, 4);
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        case DEVICE_LITTLE_ENDIAN:
        case DEVICE_BIG_ENDIAN:
        invalidate_and_set_dirty(addr1, 4);

void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)

    stl_phys_internal(as, addr, val, DEVICE_NATIVE_ENDIAN);

void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)

    stl_phys_internal(as, addr, val, DEVICE_LITTLE_ENDIAN);

void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)

    stl_phys_internal(as, addr, val, DEVICE_BIG_ENDIAN);

void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)

    address_space_rw(as, addr, &v, 1, 1);

/* warning: addr must be aligned */
static inline void stw_phys_internal(AddressSpace *as,
                                     hwaddr addr, uint32_t val,
                                     enum device_endian endian)

    mr = address_space_translate(as, addr, &addr1, &l, true);
    if (l < 2 || !memory_access_is_direct(mr, true)) {
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
        if (endian == DEVICE_BIG_ENDIAN) {
        io_mem_write(mr, addr1, val, 2);
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        case DEVICE_LITTLE_ENDIAN:
        case DEVICE_BIG_ENDIAN:
        invalidate_and_set_dirty(addr1, 2);

void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)

    stw_phys_internal(as, addr, val, DEVICE_NATIVE_ENDIAN);

void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)

    stw_phys_internal(as, addr, val, DEVICE_LITTLE_ENDIAN);

void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)

    stw_phys_internal(as, addr, val, DEVICE_BIG_ENDIAN);

void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)

    address_space_rw(as, addr, (void *) &val, 8, 1);

void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)

    val = cpu_to_le64(val);
    address_space_rw(as, addr, (void *) &val, 8, 1);

void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)

    val = cpu_to_be64(val);
    address_space_rw(as, addr, (void *) &val, 8, 1);

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)

        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(cpu, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
        l = (page + TARGET_PAGE_SIZE) - addr;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
            cpu_physical_memory_write_rom(cpu->as, phys_addr, buf, l);
            address_space_rw(cpu->as, phys_addr, buf, l, 0);

#if !defined(CONFIG_USER_ONLY)

/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool virtio_is_big_endian(void);
bool virtio_is_big_endian(void)

#if defined(TARGET_WORDS_BIGENDIAN)

#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)

    mr = address_space_translate(&address_space_memory,
                                 phys_addr, &phys_addr, &l, false);

    return !(memory_region_is_ram(mr) ||
             memory_region_is_romd(mr));

void qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        func(block->host, block->offset, block->length, opaque);