4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 #include <sys/types.h>
25 #include "qemu-common.h"
30 #include "qemu/osdep.h"
31 #include "sysemu/kvm.h"
32 #include "sysemu/sysemu.h"
33 #include "hw/xen/xen.h"
34 #include "qemu/timer.h"
35 #include "qemu/config-file.h"
36 #include "qemu/error-report.h"
37 #include "exec/memory.h"
38 #include "sysemu/dma.h"
39 #include "exec/address-spaces.h"
40 #if defined(CONFIG_USER_ONLY)
42 #else /* !CONFIG_USER_ONLY */
43 #include "sysemu/xen-mapcache.h"
46 #include "exec/cpu-all.h"
48 #include "exec/cputlb.h"
49 #include "translate-all.h"
51 #include "exec/memory-internal.h"
52 #include "exec/ram_addr.h"
54 #include "qemu/range.h"
56 //#define DEBUG_SUBPAGE
58 #if !defined(CONFIG_USER_ONLY)
59 static bool in_migration
;
61 RAMList ram_list
= { .blocks
= QTAILQ_HEAD_INITIALIZER(ram_list
.blocks
) };
63 static MemoryRegion
*system_memory
;
64 static MemoryRegion
*system_io
;
66 AddressSpace address_space_io
;
67 AddressSpace address_space_memory
;
69 MemoryRegion io_mem_rom
, io_mem_notdirty
;
70 static MemoryRegion io_mem_unassigned
;
72 /* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
73 #define RAM_PREALLOC (1 << 0)
75 /* RAM is mmap-ed with MAP_SHARED */
76 #define RAM_SHARED (1 << 1)
80 struct CPUTailQ cpus
= QTAILQ_HEAD_INITIALIZER(cpus
);
81 /* current CPU in the current thread. It is only valid inside
83 DEFINE_TLS(CPUState
*, current_cpu
);
84 /* 0 = Do not count executed instructions.
85 1 = Precise instruction counting.
86 2 = Adaptive rate instruction counting. */
89 #if !defined(CONFIG_USER_ONLY)
91 typedef struct PhysPageEntry PhysPageEntry
;
93 struct PhysPageEntry
{
94 /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */
96 /* index into phys_sections (!skip) or phys_map_nodes (skip) */
100 #define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)
102 /* Size of the L2 (and L3, etc) page tables. */
103 #define ADDR_SPACE_BITS 64
106 #define P_L2_SIZE (1 << P_L2_BITS)
108 #define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
110 typedef PhysPageEntry Node
[P_L2_SIZE
];
112 typedef struct PhysPageMap
{
113 unsigned sections_nb
;
114 unsigned sections_nb_alloc
;
116 unsigned nodes_nb_alloc
;
118 MemoryRegionSection
*sections
;
121 struct AddressSpaceDispatch
{
122 /* This is a multi-level map on the physical address space.
123 * The bottom level has pointers to MemoryRegionSections.
125 PhysPageEntry phys_map
;
130 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
131 typedef struct subpage_t
{
135 uint16_t sub_section
[TARGET_PAGE_SIZE
];
138 #define PHYS_SECTION_UNASSIGNED 0
139 #define PHYS_SECTION_NOTDIRTY 1
140 #define PHYS_SECTION_ROM 2
141 #define PHYS_SECTION_WATCH 3
143 static void io_mem_init(void);
144 static void memory_map_init(void);
145 static void tcg_commit(MemoryListener
*listener
);
147 static MemoryRegion io_mem_watch
;
150 #if !defined(CONFIG_USER_ONLY)
152 static void phys_map_node_reserve(PhysPageMap
*map
, unsigned nodes
)
154 if (map
->nodes_nb
+ nodes
> map
->nodes_nb_alloc
) {
155 map
->nodes_nb_alloc
= MAX(map
->nodes_nb_alloc
* 2, 16);
156 map
->nodes_nb_alloc
= MAX(map
->nodes_nb_alloc
, map
->nodes_nb
+ nodes
);
157 map
->nodes
= g_renew(Node
, map
->nodes
, map
->nodes_nb_alloc
);
161 static uint32_t phys_map_node_alloc(PhysPageMap
*map
)
166 ret
= map
->nodes_nb
++;
167 assert(ret
!= PHYS_MAP_NODE_NIL
);
168 assert(ret
!= map
->nodes_nb_alloc
);
169 for (i
= 0; i
< P_L2_SIZE
; ++i
) {
170 map
->nodes
[ret
][i
].skip
= 1;
171 map
->nodes
[ret
][i
].ptr
= PHYS_MAP_NODE_NIL
;
176 static void phys_page_set_level(PhysPageMap
*map
, PhysPageEntry
*lp
,
177 hwaddr
*index
, hwaddr
*nb
, uint16_t leaf
,
182 hwaddr step
= (hwaddr
)1 << (level
* P_L2_BITS
);
184 if (lp
->skip
&& lp
->ptr
== PHYS_MAP_NODE_NIL
) {
185 lp
->ptr
= phys_map_node_alloc(map
);
186 p
= map
->nodes
[lp
->ptr
];
188 for (i
= 0; i
< P_L2_SIZE
; i
++) {
190 p
[i
].ptr
= PHYS_SECTION_UNASSIGNED
;
194 p
= map
->nodes
[lp
->ptr
];
196 lp
= &p
[(*index
>> (level
* P_L2_BITS
)) & (P_L2_SIZE
- 1)];
198 while (*nb
&& lp
< &p
[P_L2_SIZE
]) {
199 if ((*index
& (step
- 1)) == 0 && *nb
>= step
) {
205 phys_page_set_level(map
, lp
, index
, nb
, leaf
, level
- 1);
211 static void phys_page_set(AddressSpaceDispatch
*d
,
212 hwaddr index
, hwaddr nb
,
215 /* Wildly overreserve - it doesn't matter much. */
216 phys_map_node_reserve(&d
->map
, 3 * P_L2_LEVELS
);
218 phys_page_set_level(&d
->map
, &d
->phys_map
, &index
, &nb
, leaf
, P_L2_LEVELS
- 1);
221 /* Compact a non leaf page entry. Simply detect that the entry has a single child,
222 * and update our entry so we can skip it and go directly to the destination.
224 static void phys_page_compact(PhysPageEntry
*lp
, Node
*nodes
, unsigned long *compacted
)
226 unsigned valid_ptr
= P_L2_SIZE
;
231 if (lp
->ptr
== PHYS_MAP_NODE_NIL
) {
236 for (i
= 0; i
< P_L2_SIZE
; i
++) {
237 if (p
[i
].ptr
== PHYS_MAP_NODE_NIL
) {
244 phys_page_compact(&p
[i
], nodes
, compacted
);
248 /* We can only compress if there's only one child. */
253 assert(valid_ptr
< P_L2_SIZE
);
255 /* Don't compress if it won't fit in the # of bits we have. */
256 if (lp
->skip
+ p
[valid_ptr
].skip
>= (1 << 3)) {
260 lp
->ptr
= p
[valid_ptr
].ptr
;
261 if (!p
[valid_ptr
].skip
) {
262 /* If our only child is a leaf, make this a leaf. */
263 /* By design, we should have made this node a leaf to begin with so we
264 * should never reach here.
265 * But since it's so simple to handle this, let's do it just in case we
270 lp
->skip
+= p
[valid_ptr
].skip
;
274 static void phys_page_compact_all(AddressSpaceDispatch
*d
, int nodes_nb
)
276 DECLARE_BITMAP(compacted
, nodes_nb
);
278 if (d
->phys_map
.skip
) {
279 phys_page_compact(&d
->phys_map
, d
->map
.nodes
, compacted
);
283 static MemoryRegionSection
*phys_page_find(PhysPageEntry lp
, hwaddr addr
,
284 Node
*nodes
, MemoryRegionSection
*sections
)
287 hwaddr index
= addr
>> TARGET_PAGE_BITS
;
290 for (i
= P_L2_LEVELS
; lp
.skip
&& (i
-= lp
.skip
) >= 0;) {
291 if (lp
.ptr
== PHYS_MAP_NODE_NIL
) {
292 return §ions
[PHYS_SECTION_UNASSIGNED
];
295 lp
= p
[(index
>> (i
* P_L2_BITS
)) & (P_L2_SIZE
- 1)];
298 if (sections
[lp
.ptr
].size
.hi
||
299 range_covers_byte(sections
[lp
.ptr
].offset_within_address_space
,
300 sections
[lp
.ptr
].size
.lo
, addr
)) {
301 return §ions
[lp
.ptr
];
303 return §ions
[PHYS_SECTION_UNASSIGNED
];
307 bool memory_region_is_unassigned(MemoryRegion
*mr
)
309 return mr
!= &io_mem_rom
&& mr
!= &io_mem_notdirty
&& !mr
->rom_device
310 && mr
!= &io_mem_watch
;
313 static MemoryRegionSection
*address_space_lookup_region(AddressSpaceDispatch
*d
,
315 bool resolve_subpage
)
317 MemoryRegionSection
*section
;
320 section
= phys_page_find(d
->phys_map
, addr
, d
->map
.nodes
, d
->map
.sections
);
321 if (resolve_subpage
&& section
->mr
->subpage
) {
322 subpage
= container_of(section
->mr
, subpage_t
, iomem
);
323 section
= &d
->map
.sections
[subpage
->sub_section
[SUBPAGE_IDX(addr
)]];
328 static MemoryRegionSection
*
329 address_space_translate_internal(AddressSpaceDispatch
*d
, hwaddr addr
, hwaddr
*xlat
,
330 hwaddr
*plen
, bool resolve_subpage
)
332 MemoryRegionSection
*section
;
335 section
= address_space_lookup_region(d
, addr
, resolve_subpage
);
336 /* Compute offset within MemoryRegionSection */
337 addr
-= section
->offset_within_address_space
;
339 /* Compute offset within MemoryRegion */
340 *xlat
= addr
+ section
->offset_within_region
;
342 diff
= int128_sub(section
->mr
->size
, int128_make64(addr
));
343 *plen
= int128_get64(int128_min(diff
, int128_make64(*plen
)));
347 static inline bool memory_access_is_direct(MemoryRegion
*mr
, bool is_write
)
349 if (memory_region_is_ram(mr
)) {
350 return !(is_write
&& mr
->readonly
);
352 if (memory_region_is_romd(mr
)) {
359 MemoryRegion
*address_space_translate(AddressSpace
*as
, hwaddr addr
,
360 hwaddr
*xlat
, hwaddr
*plen
,
364 MemoryRegionSection
*section
;
369 section
= address_space_translate_internal(as
->dispatch
, addr
, &addr
, plen
, true);
372 if (!mr
->iommu_ops
) {
376 iotlb
= mr
->iommu_ops
->translate(mr
, addr
, is_write
);
377 addr
= ((iotlb
.translated_addr
& ~iotlb
.addr_mask
)
378 | (addr
& iotlb
.addr_mask
));
379 len
= MIN(len
, (addr
| iotlb
.addr_mask
) - addr
+ 1);
380 if (!(iotlb
.perm
& (1 << is_write
))) {
381 mr
= &io_mem_unassigned
;
385 as
= iotlb
.target_as
;
388 if (xen_enabled() && memory_access_is_direct(mr
, is_write
)) {
389 hwaddr page
= ((addr
& TARGET_PAGE_MASK
) + TARGET_PAGE_SIZE
) - addr
;
390 len
= MIN(page
, len
);
398 MemoryRegionSection
*
399 address_space_translate_for_iotlb(AddressSpace
*as
, hwaddr addr
, hwaddr
*xlat
,
402 MemoryRegionSection
*section
;
403 section
= address_space_translate_internal(as
->dispatch
, addr
, xlat
, plen
, false);
405 assert(!section
->mr
->iommu_ops
);
410 void cpu_exec_init_all(void)
412 #if !defined(CONFIG_USER_ONLY)
413 qemu_mutex_init(&ram_list
.mutex
);
419 #if !defined(CONFIG_USER_ONLY)
421 static int cpu_common_post_load(void *opaque
, int version_id
)
423 CPUState
*cpu
= opaque
;
425 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
426 version_id is increased. */
427 cpu
->interrupt_request
&= ~0x01;
433 static int cpu_common_pre_load(void *opaque
)
435 CPUState
*cpu
= opaque
;
437 cpu
->exception_index
= 0;
442 static bool cpu_common_exception_index_needed(void *opaque
)
444 CPUState
*cpu
= opaque
;
446 return cpu
->exception_index
!= 0;
449 static const VMStateDescription vmstate_cpu_common_exception_index
= {
450 .name
= "cpu_common/exception_index",
452 .minimum_version_id
= 1,
453 .fields
= (VMStateField
[]) {
454 VMSTATE_INT32(exception_index
, CPUState
),
455 VMSTATE_END_OF_LIST()
459 const VMStateDescription vmstate_cpu_common
= {
460 .name
= "cpu_common",
462 .minimum_version_id
= 1,
463 .pre_load
= cpu_common_pre_load
,
464 .post_load
= cpu_common_post_load
,
465 .fields
= (VMStateField
[]) {
466 VMSTATE_UINT32(halted
, CPUState
),
467 VMSTATE_UINT32(interrupt_request
, CPUState
),
468 VMSTATE_END_OF_LIST()
470 .subsections
= (VMStateSubsection
[]) {
472 .vmsd
= &vmstate_cpu_common_exception_index
,
473 .needed
= cpu_common_exception_index_needed
,
482 CPUState
*qemu_get_cpu(int index
)
487 if (cpu
->cpu_index
== index
) {
495 #if !defined(CONFIG_USER_ONLY)
496 void tcg_cpu_address_space_init(CPUState
*cpu
, AddressSpace
*as
)
498 /* We only support one address space per cpu at the moment. */
499 assert(cpu
->as
== as
);
501 if (cpu
->tcg_as_listener
) {
502 memory_listener_unregister(cpu
->tcg_as_listener
);
504 cpu
->tcg_as_listener
= g_new0(MemoryListener
, 1);
506 cpu
->tcg_as_listener
->commit
= tcg_commit
;
507 memory_listener_register(cpu
->tcg_as_listener
, as
);
511 void cpu_exec_init(CPUArchState
*env
)
513 CPUState
*cpu
= ENV_GET_CPU(env
);
514 CPUClass
*cc
= CPU_GET_CLASS(cpu
);
518 #if defined(CONFIG_USER_ONLY)
522 CPU_FOREACH(some_cpu
) {
525 cpu
->cpu_index
= cpu_index
;
527 QTAILQ_INIT(&cpu
->breakpoints
);
528 QTAILQ_INIT(&cpu
->watchpoints
);
529 #ifndef CONFIG_USER_ONLY
530 cpu
->as
= &address_space_memory
;
531 cpu
->thread_id
= qemu_get_thread_id();
533 QTAILQ_INSERT_TAIL(&cpus
, cpu
, node
);
534 #if defined(CONFIG_USER_ONLY)
537 if (qdev_get_vmsd(DEVICE(cpu
)) == NULL
) {
538 vmstate_register(NULL
, cpu_index
, &vmstate_cpu_common
, cpu
);
540 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
541 register_savevm(NULL
, "cpu", cpu_index
, CPU_SAVE_VERSION
,
542 cpu_save
, cpu_load
, env
);
543 assert(cc
->vmsd
== NULL
);
544 assert(qdev_get_vmsd(DEVICE(cpu
)) == NULL
);
546 if (cc
->vmsd
!= NULL
) {
547 vmstate_register(NULL
, cpu_index
, cc
->vmsd
, cpu
);
551 #if defined(TARGET_HAS_ICE)
552 #if defined(CONFIG_USER_ONLY)
553 static void breakpoint_invalidate(CPUState
*cpu
, target_ulong pc
)
555 tb_invalidate_phys_page_range(pc
, pc
+ 1, 0);
558 static void breakpoint_invalidate(CPUState
*cpu
, target_ulong pc
)
560 hwaddr phys
= cpu_get_phys_page_debug(cpu
, pc
);
562 tb_invalidate_phys_addr(cpu
->as
,
563 phys
| (pc
& ~TARGET_PAGE_MASK
));
567 #endif /* TARGET_HAS_ICE */
569 #if defined(CONFIG_USER_ONLY)
570 void cpu_watchpoint_remove_all(CPUState
*cpu
, int mask
)
575 int cpu_watchpoint_insert(CPUState
*cpu
, vaddr addr
, vaddr len
,
576 int flags
, CPUWatchpoint
**watchpoint
)
581 /* Add a watchpoint. */
582 int cpu_watchpoint_insert(CPUState
*cpu
, vaddr addr
, vaddr len
,
583 int flags
, CPUWatchpoint
**watchpoint
)
585 vaddr len_mask
= ~(len
- 1);
588 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
589 if ((len
& (len
- 1)) || (addr
& ~len_mask
) ||
590 len
== 0 || len
> TARGET_PAGE_SIZE
) {
591 error_report("tried to set invalid watchpoint at %"
592 VADDR_PRIx
", len=%" VADDR_PRIu
, addr
, len
);
595 wp
= g_malloc(sizeof(*wp
));
598 wp
->len_mask
= len_mask
;
601 /* keep all GDB-injected watchpoints in front */
602 if (flags
& BP_GDB
) {
603 QTAILQ_INSERT_HEAD(&cpu
->watchpoints
, wp
, entry
);
605 QTAILQ_INSERT_TAIL(&cpu
->watchpoints
, wp
, entry
);
608 tlb_flush_page(cpu
, addr
);
615 /* Remove a specific watchpoint. */
616 int cpu_watchpoint_remove(CPUState
*cpu
, vaddr addr
, vaddr len
,
619 vaddr len_mask
= ~(len
- 1);
622 QTAILQ_FOREACH(wp
, &cpu
->watchpoints
, entry
) {
623 if (addr
== wp
->vaddr
&& len_mask
== wp
->len_mask
624 && flags
== (wp
->flags
& ~BP_WATCHPOINT_HIT
)) {
625 cpu_watchpoint_remove_by_ref(cpu
, wp
);
632 /* Remove a specific watchpoint by reference. */
633 void cpu_watchpoint_remove_by_ref(CPUState
*cpu
, CPUWatchpoint
*watchpoint
)
635 QTAILQ_REMOVE(&cpu
->watchpoints
, watchpoint
, entry
);
637 tlb_flush_page(cpu
, watchpoint
->vaddr
);
642 /* Remove all matching watchpoints. */
643 void cpu_watchpoint_remove_all(CPUState
*cpu
, int mask
)
645 CPUWatchpoint
*wp
, *next
;
647 QTAILQ_FOREACH_SAFE(wp
, &cpu
->watchpoints
, entry
, next
) {
648 if (wp
->flags
& mask
) {
649 cpu_watchpoint_remove_by_ref(cpu
, wp
);
655 /* Add a breakpoint. */
656 int cpu_breakpoint_insert(CPUState
*cpu
, vaddr pc
, int flags
,
657 CPUBreakpoint
**breakpoint
)
659 #if defined(TARGET_HAS_ICE)
662 bp
= g_malloc(sizeof(*bp
));
667 /* keep all GDB-injected breakpoints in front */
668 if (flags
& BP_GDB
) {
669 QTAILQ_INSERT_HEAD(&cpu
->breakpoints
, bp
, entry
);
671 QTAILQ_INSERT_TAIL(&cpu
->breakpoints
, bp
, entry
);
674 breakpoint_invalidate(cpu
, pc
);
685 /* Remove a specific breakpoint. */
686 int cpu_breakpoint_remove(CPUState
*cpu
, vaddr pc
, int flags
)
688 #if defined(TARGET_HAS_ICE)
691 QTAILQ_FOREACH(bp
, &cpu
->breakpoints
, entry
) {
692 if (bp
->pc
== pc
&& bp
->flags
== flags
) {
693 cpu_breakpoint_remove_by_ref(cpu
, bp
);
703 /* Remove a specific breakpoint by reference. */
704 void cpu_breakpoint_remove_by_ref(CPUState
*cpu
, CPUBreakpoint
*breakpoint
)
706 #if defined(TARGET_HAS_ICE)
707 QTAILQ_REMOVE(&cpu
->breakpoints
, breakpoint
, entry
);
709 breakpoint_invalidate(cpu
, breakpoint
->pc
);
715 /* Remove all matching breakpoints. */
716 void cpu_breakpoint_remove_all(CPUState
*cpu
, int mask
)
718 #if defined(TARGET_HAS_ICE)
719 CPUBreakpoint
*bp
, *next
;
721 QTAILQ_FOREACH_SAFE(bp
, &cpu
->breakpoints
, entry
, next
) {
722 if (bp
->flags
& mask
) {
723 cpu_breakpoint_remove_by_ref(cpu
, bp
);
729 /* enable or disable single step mode. EXCP_DEBUG is returned by the
730 CPU loop after each instruction */
731 void cpu_single_step(CPUState
*cpu
, int enabled
)
733 #if defined(TARGET_HAS_ICE)
734 if (cpu
->singlestep_enabled
!= enabled
) {
735 cpu
->singlestep_enabled
= enabled
;
737 kvm_update_guest_debug(cpu
, 0);
739 /* must flush all the translated code to avoid inconsistencies */
740 /* XXX: only flush what is necessary */
741 CPUArchState
*env
= cpu
->env_ptr
;
748 void cpu_abort(CPUState
*cpu
, const char *fmt
, ...)
755 fprintf(stderr
, "qemu: fatal: ");
756 vfprintf(stderr
, fmt
, ap
);
757 fprintf(stderr
, "\n");
758 cpu_dump_state(cpu
, stderr
, fprintf
, CPU_DUMP_FPU
| CPU_DUMP_CCOP
);
759 if (qemu_log_enabled()) {
760 qemu_log("qemu: fatal: ");
761 qemu_log_vprintf(fmt
, ap2
);
763 log_cpu_state(cpu
, CPU_DUMP_FPU
| CPU_DUMP_CCOP
);
769 #if defined(CONFIG_USER_ONLY)
771 struct sigaction act
;
772 sigfillset(&act
.sa_mask
);
773 act
.sa_handler
= SIG_DFL
;
774 sigaction(SIGABRT
, &act
, NULL
);
780 #if !defined(CONFIG_USER_ONLY)
781 static RAMBlock
*qemu_get_ram_block(ram_addr_t addr
)
785 /* The list is protected by the iothread lock here. */
786 block
= ram_list
.mru_block
;
787 if (block
&& addr
- block
->offset
< block
->length
) {
790 QTAILQ_FOREACH(block
, &ram_list
.blocks
, next
) {
791 if (addr
- block
->offset
< block
->length
) {
796 fprintf(stderr
, "Bad ram offset %" PRIx64
"\n", (uint64_t)addr
);
800 ram_list
.mru_block
= block
;
804 static void tlb_reset_dirty_range_all(ram_addr_t start
, ram_addr_t length
)
810 end
= TARGET_PAGE_ALIGN(start
+ length
);
811 start
&= TARGET_PAGE_MASK
;
813 block
= qemu_get_ram_block(start
);
814 assert(block
== qemu_get_ram_block(end
- 1));
815 start1
= (uintptr_t)block
->host
+ (start
- block
->offset
);
816 cpu_tlb_reset_dirty_all(start1
, length
);
819 /* Note: start and end must be within the same ram block. */
820 void cpu_physical_memory_reset_dirty(ram_addr_t start
, ram_addr_t length
,
825 cpu_physical_memory_clear_dirty_range(start
, length
, client
);
828 tlb_reset_dirty_range_all(start
, length
);
832 static void cpu_physical_memory_set_dirty_tracking(bool enable
)
834 in_migration
= enable
;
837 hwaddr
memory_region_section_get_iotlb(CPUState
*cpu
,
838 MemoryRegionSection
*section
,
840 hwaddr paddr
, hwaddr xlat
,
842 target_ulong
*address
)
847 if (memory_region_is_ram(section
->mr
)) {
849 iotlb
= (memory_region_get_ram_addr(section
->mr
) & TARGET_PAGE_MASK
)
851 if (!section
->readonly
) {
852 iotlb
|= PHYS_SECTION_NOTDIRTY
;
854 iotlb
|= PHYS_SECTION_ROM
;
857 iotlb
= section
- section
->address_space
->dispatch
->map
.sections
;
861 /* Make accesses to pages with watchpoints go via the
862 watchpoint trap routines. */
863 QTAILQ_FOREACH(wp
, &cpu
->watchpoints
, entry
) {
864 if (vaddr
== (wp
->vaddr
& TARGET_PAGE_MASK
)) {
865 /* Avoid trapping reads of pages with a write breakpoint. */
866 if ((prot
& PAGE_WRITE
) || (wp
->flags
& BP_MEM_READ
)) {
867 iotlb
= PHYS_SECTION_WATCH
+ paddr
;
868 *address
|= TLB_MMIO
;
876 #endif /* defined(CONFIG_USER_ONLY) */
878 #if !defined(CONFIG_USER_ONLY)
880 static int subpage_register (subpage_t
*mmio
, uint32_t start
, uint32_t end
,
882 static subpage_t
*subpage_init(AddressSpace
*as
, hwaddr base
);
884 static void *(*phys_mem_alloc
)(size_t size
) = qemu_anon_ram_alloc
;
887 * Set a custom physical guest memory allocator.
888 * Accelerators with unusual needs may need this. Hopefully, we can
889 * get rid of it eventually.
891 void phys_mem_set_alloc(void *(*alloc
)(size_t))
893 phys_mem_alloc
= alloc
;
896 static uint16_t phys_section_add(PhysPageMap
*map
,
897 MemoryRegionSection
*section
)
899 /* The physical section number is ORed with a page-aligned
900 * pointer to produce the iotlb entries. Thus it should
901 * never overflow into the page-aligned value.
903 assert(map
->sections_nb
< TARGET_PAGE_SIZE
);
905 if (map
->sections_nb
== map
->sections_nb_alloc
) {
906 map
->sections_nb_alloc
= MAX(map
->sections_nb_alloc
* 2, 16);
907 map
->sections
= g_renew(MemoryRegionSection
, map
->sections
,
908 map
->sections_nb_alloc
);
910 map
->sections
[map
->sections_nb
] = *section
;
911 memory_region_ref(section
->mr
);
912 return map
->sections_nb
++;
915 static void phys_section_destroy(MemoryRegion
*mr
)
917 memory_region_unref(mr
);
920 subpage_t
*subpage
= container_of(mr
, subpage_t
, iomem
);
921 object_unref(OBJECT(&subpage
->iomem
));
926 static void phys_sections_free(PhysPageMap
*map
)
928 while (map
->sections_nb
> 0) {
929 MemoryRegionSection
*section
= &map
->sections
[--map
->sections_nb
];
930 phys_section_destroy(section
->mr
);
932 g_free(map
->sections
);
936 static void register_subpage(AddressSpaceDispatch
*d
, MemoryRegionSection
*section
)
939 hwaddr base
= section
->offset_within_address_space
941 MemoryRegionSection
*existing
= phys_page_find(d
->phys_map
, base
,
942 d
->map
.nodes
, d
->map
.sections
);
943 MemoryRegionSection subsection
= {
944 .offset_within_address_space
= base
,
945 .size
= int128_make64(TARGET_PAGE_SIZE
),
949 assert(existing
->mr
->subpage
|| existing
->mr
== &io_mem_unassigned
);
951 if (!(existing
->mr
->subpage
)) {
952 subpage
= subpage_init(d
->as
, base
);
953 subsection
.address_space
= d
->as
;
954 subsection
.mr
= &subpage
->iomem
;
955 phys_page_set(d
, base
>> TARGET_PAGE_BITS
, 1,
956 phys_section_add(&d
->map
, &subsection
));
958 subpage
= container_of(existing
->mr
, subpage_t
, iomem
);
960 start
= section
->offset_within_address_space
& ~TARGET_PAGE_MASK
;
961 end
= start
+ int128_get64(section
->size
) - 1;
962 subpage_register(subpage
, start
, end
,
963 phys_section_add(&d
->map
, section
));
967 static void register_multipage(AddressSpaceDispatch
*d
,
968 MemoryRegionSection
*section
)
970 hwaddr start_addr
= section
->offset_within_address_space
;
971 uint16_t section_index
= phys_section_add(&d
->map
, section
);
972 uint64_t num_pages
= int128_get64(int128_rshift(section
->size
,
976 phys_page_set(d
, start_addr
>> TARGET_PAGE_BITS
, num_pages
, section_index
);
979 static void mem_add(MemoryListener
*listener
, MemoryRegionSection
*section
)
981 AddressSpace
*as
= container_of(listener
, AddressSpace
, dispatch_listener
);
982 AddressSpaceDispatch
*d
= as
->next_dispatch
;
983 MemoryRegionSection now
= *section
, remain
= *section
;
984 Int128 page_size
= int128_make64(TARGET_PAGE_SIZE
);
986 if (now
.offset_within_address_space
& ~TARGET_PAGE_MASK
) {
987 uint64_t left
= TARGET_PAGE_ALIGN(now
.offset_within_address_space
)
988 - now
.offset_within_address_space
;
990 now
.size
= int128_min(int128_make64(left
), now
.size
);
991 register_subpage(d
, &now
);
993 now
.size
= int128_zero();
995 while (int128_ne(remain
.size
, now
.size
)) {
996 remain
.size
= int128_sub(remain
.size
, now
.size
);
997 remain
.offset_within_address_space
+= int128_get64(now
.size
);
998 remain
.offset_within_region
+= int128_get64(now
.size
);
1000 if (int128_lt(remain
.size
, page_size
)) {
1001 register_subpage(d
, &now
);
1002 } else if (remain
.offset_within_address_space
& ~TARGET_PAGE_MASK
) {
1003 now
.size
= page_size
;
1004 register_subpage(d
, &now
);
1006 now
.size
= int128_and(now
.size
, int128_neg(page_size
));
1007 register_multipage(d
, &now
);
1012 void qemu_flush_coalesced_mmio_buffer(void)
1015 kvm_flush_coalesced_mmio_buffer();
1018 void qemu_mutex_lock_ramlist(void)
1020 qemu_mutex_lock(&ram_list
.mutex
);
1023 void qemu_mutex_unlock_ramlist(void)
1025 qemu_mutex_unlock(&ram_list
.mutex
);
1030 #include <sys/vfs.h>
1032 #define HUGETLBFS_MAGIC 0x958458f6
1034 static long gethugepagesize(const char *path
)
1040 ret
= statfs(path
, &fs
);
1041 } while (ret
!= 0 && errno
== EINTR
);
1048 if (fs
.f_type
!= HUGETLBFS_MAGIC
)
1049 fprintf(stderr
, "Warning: path not on HugeTLBFS: %s\n", path
);
1054 static void *file_ram_alloc(RAMBlock
*block
,
1060 char *sanitized_name
;
1064 unsigned long hpagesize
;
1066 hpagesize
= gethugepagesize(path
);
1071 if (memory
< hpagesize
) {
1075 if (kvm_enabled() && !kvm_has_sync_mmu()) {
1077 "host lacks kvm mmu notifiers, -mem-path unsupported");
1081 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
1082 sanitized_name
= g_strdup(memory_region_name(block
->mr
));
1083 for (c
= sanitized_name
; *c
!= '\0'; c
++) {
1088 filename
= g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path
,
1090 g_free(sanitized_name
);
1092 fd
= mkstemp(filename
);
1094 error_setg_errno(errp
, errno
,
1095 "unable to create backing store for hugepages");
1102 memory
= (memory
+hpagesize
-1) & ~(hpagesize
-1);
1105 * ftruncate is not supported by hugetlbfs in older
1106 * hosts, so don't bother bailing out on errors.
1107 * If anything goes wrong with it under other filesystems,
1110 if (ftruncate(fd
, memory
)) {
1111 perror("ftruncate");
1114 area
= mmap(0, memory
, PROT_READ
| PROT_WRITE
,
1115 (block
->flags
& RAM_SHARED
? MAP_SHARED
: MAP_PRIVATE
),
1117 if (area
== MAP_FAILED
) {
1118 error_setg_errno(errp
, errno
,
1119 "unable to map backing store for hugepages");
1125 os_mem_prealloc(fd
, area
, memory
);
1139 static ram_addr_t
find_ram_offset(ram_addr_t size
)
1141 RAMBlock
*block
, *next_block
;
1142 ram_addr_t offset
= RAM_ADDR_MAX
, mingap
= RAM_ADDR_MAX
;
1144 assert(size
!= 0); /* it would hand out same offset multiple times */
1146 if (QTAILQ_EMPTY(&ram_list
.blocks
))
1149 QTAILQ_FOREACH(block
, &ram_list
.blocks
, next
) {
1150 ram_addr_t end
, next
= RAM_ADDR_MAX
;
1152 end
= block
->offset
+ block
->length
;
1154 QTAILQ_FOREACH(next_block
, &ram_list
.blocks
, next
) {
1155 if (next_block
->offset
>= end
) {
1156 next
= MIN(next
, next_block
->offset
);
1159 if (next
- end
>= size
&& next
- end
< mingap
) {
1161 mingap
= next
- end
;
1165 if (offset
== RAM_ADDR_MAX
) {
1166 fprintf(stderr
, "Failed to find gap of requested size: %" PRIu64
"\n",
1174 ram_addr_t
last_ram_offset(void)
1177 ram_addr_t last
= 0;
1179 QTAILQ_FOREACH(block
, &ram_list
.blocks
, next
)
1180 last
= MAX(last
, block
->offset
+ block
->length
);
1185 static void qemu_ram_setup_dump(void *addr
, ram_addr_t size
)
1189 /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
1190 if (!qemu_opt_get_bool(qemu_get_machine_opts(),
1191 "dump-guest-core", true)) {
1192 ret
= qemu_madvise(addr
, size
, QEMU_MADV_DONTDUMP
);
1194 perror("qemu_madvise");
1195 fprintf(stderr
, "madvise doesn't support MADV_DONTDUMP, "
1196 "but dump_guest_core=off specified\n");
1201 static RAMBlock
*find_ram_block(ram_addr_t addr
)
1205 QTAILQ_FOREACH(block
, &ram_list
.blocks
, next
) {
1206 if (block
->offset
== addr
) {
1214 void qemu_ram_set_idstr(ram_addr_t addr
, const char *name
, DeviceState
*dev
)
1216 RAMBlock
*new_block
= find_ram_block(addr
);
1220 assert(!new_block
->idstr
[0]);
1223 char *id
= qdev_get_dev_path(dev
);
1225 snprintf(new_block
->idstr
, sizeof(new_block
->idstr
), "%s/", id
);
1229 pstrcat(new_block
->idstr
, sizeof(new_block
->idstr
), name
);
1231 /* This assumes the iothread lock is taken here too. */
1232 qemu_mutex_lock_ramlist();
1233 QTAILQ_FOREACH(block
, &ram_list
.blocks
, next
) {
1234 if (block
!= new_block
&& !strcmp(block
->idstr
, new_block
->idstr
)) {
1235 fprintf(stderr
, "RAMBlock \"%s\" already registered, abort!\n",
1240 qemu_mutex_unlock_ramlist();
1243 void qemu_ram_unset_idstr(ram_addr_t addr
)
1245 RAMBlock
*block
= find_ram_block(addr
);
1248 memset(block
->idstr
, 0, sizeof(block
->idstr
));
1252 static int memory_try_enable_merging(void *addr
, size_t len
)
1254 if (!qemu_opt_get_bool(qemu_get_machine_opts(), "mem-merge", true)) {
1255 /* disabled by the user */
1259 return qemu_madvise(addr
, len
, QEMU_MADV_MERGEABLE
);
1262 static ram_addr_t
ram_block_add(RAMBlock
*new_block
, Error
**errp
)
1265 ram_addr_t old_ram_size
, new_ram_size
;
1267 old_ram_size
= last_ram_offset() >> TARGET_PAGE_BITS
;
1269 /* This assumes the iothread lock is taken here too. */
1270 qemu_mutex_lock_ramlist();
1271 new_block
->offset
= find_ram_offset(new_block
->length
);
1273 if (!new_block
->host
) {
1274 if (xen_enabled()) {
1275 xen_ram_alloc(new_block
->offset
, new_block
->length
, new_block
->mr
);
1277 new_block
->host
= phys_mem_alloc(new_block
->length
);
1278 if (!new_block
->host
) {
1279 error_setg_errno(errp
, errno
,
1280 "cannot set up guest memory '%s'",
1281 memory_region_name(new_block
->mr
));
1282 qemu_mutex_unlock_ramlist();
1285 memory_try_enable_merging(new_block
->host
, new_block
->length
);
1289 /* Keep the list sorted from biggest to smallest block. */
1290 QTAILQ_FOREACH(block
, &ram_list
.blocks
, next
) {
1291 if (block
->length
< new_block
->length
) {
1296 QTAILQ_INSERT_BEFORE(block
, new_block
, next
);
1298 QTAILQ_INSERT_TAIL(&ram_list
.blocks
, new_block
, next
);
1300 ram_list
.mru_block
= NULL
;
1303 qemu_mutex_unlock_ramlist();
1305 new_ram_size
= last_ram_offset() >> TARGET_PAGE_BITS
;
1307 if (new_ram_size
> old_ram_size
) {
1309 for (i
= 0; i
< DIRTY_MEMORY_NUM
; i
++) {
1310 ram_list
.dirty_memory
[i
] =
1311 bitmap_zero_extend(ram_list
.dirty_memory
[i
],
1312 old_ram_size
, new_ram_size
);
1315 cpu_physical_memory_set_dirty_range(new_block
->offset
, new_block
->length
);
1317 qemu_ram_setup_dump(new_block
->host
, new_block
->length
);
1318 qemu_madvise(new_block
->host
, new_block
->length
, QEMU_MADV_HUGEPAGE
);
1319 qemu_madvise(new_block
->host
, new_block
->length
, QEMU_MADV_DONTFORK
);
1321 if (kvm_enabled()) {
1322 kvm_setup_guest_memory(new_block
->host
, new_block
->length
);
1325 return new_block
->offset
;
1329 ram_addr_t
qemu_ram_alloc_from_file(ram_addr_t size
, MemoryRegion
*mr
,
1330 bool share
, const char *mem_path
,
1333 RAMBlock
*new_block
;
1335 Error
*local_err
= NULL
;
1337 if (xen_enabled()) {
1338 error_setg(errp
, "-mem-path not supported with Xen");
1342 if (phys_mem_alloc
!= qemu_anon_ram_alloc
) {
1344 * file_ram_alloc() needs to allocate just like
1345 * phys_mem_alloc, but we haven't bothered to provide
1349 "-mem-path not supported with this accelerator");
1353 size
= TARGET_PAGE_ALIGN(size
);
1354 new_block
= g_malloc0(sizeof(*new_block
));
1356 new_block
->length
= size
;
1357 new_block
->flags
= share
? RAM_SHARED
: 0;
1358 new_block
->host
= file_ram_alloc(new_block
, size
,
1360 if (!new_block
->host
) {
1365 addr
= ram_block_add(new_block
, &local_err
);
1368 error_propagate(errp
, local_err
);
1375 ram_addr_t
qemu_ram_alloc_from_ptr(ram_addr_t size
, void *host
,
1376 MemoryRegion
*mr
, Error
**errp
)
1378 RAMBlock
*new_block
;
1380 Error
*local_err
= NULL
;
1382 size
= TARGET_PAGE_ALIGN(size
);
1383 new_block
= g_malloc0(sizeof(*new_block
));
1385 new_block
->length
= size
;
1387 new_block
->host
= host
;
1389 new_block
->flags
|= RAM_PREALLOC
;
1391 addr
= ram_block_add(new_block
, &local_err
);
1394 error_propagate(errp
, local_err
);
1400 ram_addr_t
qemu_ram_alloc(ram_addr_t size
, MemoryRegion
*mr
, Error
**errp
)
1402 return qemu_ram_alloc_from_ptr(size
, NULL
, mr
, errp
);
1405 void qemu_ram_free_from_ptr(ram_addr_t addr
)
1409 /* This assumes the iothread lock is taken here too. */
1410 qemu_mutex_lock_ramlist();
1411 QTAILQ_FOREACH(block
, &ram_list
.blocks
, next
) {
1412 if (addr
== block
->offset
) {
1413 QTAILQ_REMOVE(&ram_list
.blocks
, block
, next
);
1414 ram_list
.mru_block
= NULL
;
1420 qemu_mutex_unlock_ramlist();
1423 void qemu_ram_free(ram_addr_t addr
)
1427 /* This assumes the iothread lock is taken here too. */
1428 qemu_mutex_lock_ramlist();
1429 QTAILQ_FOREACH(block
, &ram_list
.blocks
, next
) {
1430 if (addr
== block
->offset
) {
1431 QTAILQ_REMOVE(&ram_list
.blocks
, block
, next
);
1432 ram_list
.mru_block
= NULL
;
1434 if (block
->flags
& RAM_PREALLOC
) {
1436 } else if (xen_enabled()) {
1437 xen_invalidate_map_cache_entry(block
->host
);
1439 } else if (block
->fd
>= 0) {
1440 munmap(block
->host
, block
->length
);
1444 qemu_anon_ram_free(block
->host
, block
->length
);
1450 qemu_mutex_unlock_ramlist();
1455 void qemu_ram_remap(ram_addr_t addr
, ram_addr_t length
)
1462 QTAILQ_FOREACH(block
, &ram_list
.blocks
, next
) {
1463 offset
= addr
- block
->offset
;
1464 if (offset
< block
->length
) {
1465 vaddr
= block
->host
+ offset
;
1466 if (block
->flags
& RAM_PREALLOC
) {
1468 } else if (xen_enabled()) {
1472 munmap(vaddr
, length
);
1473 if (block
->fd
>= 0) {
1474 flags
|= (block
->flags
& RAM_SHARED
?
1475 MAP_SHARED
: MAP_PRIVATE
);
1476 area
= mmap(vaddr
, length
, PROT_READ
| PROT_WRITE
,
1477 flags
, block
->fd
, offset
);
1480 * Remap needs to match alloc. Accelerators that
1481 * set phys_mem_alloc never remap. If they did,
1482 * we'd need a remap hook here.
1484 assert(phys_mem_alloc
== qemu_anon_ram_alloc
);
1486 flags
|= MAP_PRIVATE
| MAP_ANONYMOUS
;
1487 area
= mmap(vaddr
, length
, PROT_READ
| PROT_WRITE
,
1490 if (area
!= vaddr
) {
1491 fprintf(stderr
, "Could not remap addr: "
1492 RAM_ADDR_FMT
"@" RAM_ADDR_FMT
"\n",
1496 memory_try_enable_merging(vaddr
, length
);
1497 qemu_ram_setup_dump(vaddr
, length
);
1503 #endif /* !_WIN32 */
1505 int qemu_get_ram_fd(ram_addr_t addr
)
1507 RAMBlock
*block
= qemu_get_ram_block(addr
);
1512 void *qemu_get_ram_block_host_ptr(ram_addr_t addr
)
1514 RAMBlock
*block
= qemu_get_ram_block(addr
);
1519 /* Return a host pointer to ram allocated with qemu_ram_alloc.
1520 With the exception of the softmmu code in this file, this should
1521 only be used for local memory (e.g. video ram) that the device owns,
1522 and knows it isn't going to access beyond the end of the block.
1524 It should not be used for general purpose DMA.
1525 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
1527 void *qemu_get_ram_ptr(ram_addr_t addr
)
1529 RAMBlock
*block
= qemu_get_ram_block(addr
);
1531 if (xen_enabled()) {
1532 /* We need to check if the requested address is in the RAM
1533 * because we don't want to map the entire memory in QEMU.
1534 * In that case just map until the end of the page.
1536 if (block
->offset
== 0) {
1537 return xen_map_cache(addr
, 0, 0);
1538 } else if (block
->host
== NULL
) {
1540 xen_map_cache(block
->offset
, block
->length
, 1);
1543 return block
->host
+ (addr
- block
->offset
);
1546 /* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
1547 * but takes a size argument */
1548 static void *qemu_ram_ptr_length(ram_addr_t addr
, hwaddr
*size
)
1553 if (xen_enabled()) {
1554 return xen_map_cache(addr
, *size
, 1);
1558 QTAILQ_FOREACH(block
, &ram_list
.blocks
, next
) {
1559 if (addr
- block
->offset
< block
->length
) {
1560 if (addr
- block
->offset
+ *size
> block
->length
)
1561 *size
= block
->length
- addr
+ block
->offset
;
1562 return block
->host
+ (addr
- block
->offset
);
1566 fprintf(stderr
, "Bad ram offset %" PRIx64
"\n", (uint64_t)addr
);
1571 /* Some of the softmmu routines need to translate from a host pointer
1572 (typically a TLB entry) back to a ram offset. */
1573 MemoryRegion
*qemu_ram_addr_from_host(void *ptr
, ram_addr_t
*ram_addr
)
1576 uint8_t *host
= ptr
;
1578 if (xen_enabled()) {
1579 *ram_addr
= xen_ram_addr_from_mapcache(ptr
);
1580 return qemu_get_ram_block(*ram_addr
)->mr
;
1583 block
= ram_list
.mru_block
;
1584 if (block
&& block
->host
&& host
- block
->host
< block
->length
) {
1588 QTAILQ_FOREACH(block
, &ram_list
.blocks
, next
) {
1589 /* This case append when the block is not mapped. */
1590 if (block
->host
== NULL
) {
1593 if (host
- block
->host
< block
->length
) {
1601 *ram_addr
= block
->offset
+ (host
- block
->host
);
1605 static void notdirty_mem_write(void *opaque
, hwaddr ram_addr
,
1606 uint64_t val
, unsigned size
)
1608 if (!cpu_physical_memory_get_dirty_flag(ram_addr
, DIRTY_MEMORY_CODE
)) {
1609 tb_invalidate_phys_page_fast(ram_addr
, size
);
1613 stb_p(qemu_get_ram_ptr(ram_addr
), val
);
1616 stw_p(qemu_get_ram_ptr(ram_addr
), val
);
1619 stl_p(qemu_get_ram_ptr(ram_addr
), val
);
1624 cpu_physical_memory_set_dirty_range_nocode(ram_addr
, size
);
1625 /* we remove the notdirty callback only if the code has been
1627 if (!cpu_physical_memory_is_clean(ram_addr
)) {
1628 CPUArchState
*env
= current_cpu
->env_ptr
;
1629 tlb_set_dirty(env
, current_cpu
->mem_io_vaddr
);
1633 static bool notdirty_mem_accepts(void *opaque
, hwaddr addr
,
1634 unsigned size
, bool is_write
)
1639 static const MemoryRegionOps notdirty_mem_ops
= {
1640 .write
= notdirty_mem_write
,
1641 .valid
.accepts
= notdirty_mem_accepts
,
1642 .endianness
= DEVICE_NATIVE_ENDIAN
,
1645 /* Generate a debug exception if a watchpoint has been hit. */
1646 static void check_watchpoint(int offset
, int len_mask
, int flags
)
1648 CPUState
*cpu
= current_cpu
;
1649 CPUArchState
*env
= cpu
->env_ptr
;
1650 target_ulong pc
, cs_base
;
1655 if (cpu
->watchpoint_hit
) {
1656 /* We re-entered the check after replacing the TB. Now raise
1657 * the debug interrupt so that is will trigger after the
1658 * current instruction. */
1659 cpu_interrupt(cpu
, CPU_INTERRUPT_DEBUG
);
1662 vaddr
= (cpu
->mem_io_vaddr
& TARGET_PAGE_MASK
) + offset
;
1663 QTAILQ_FOREACH(wp
, &cpu
->watchpoints
, entry
) {
1664 if ((vaddr
== (wp
->vaddr
& len_mask
) ||
1665 (vaddr
& wp
->len_mask
) == wp
->vaddr
) && (wp
->flags
& flags
)) {
1666 wp
->flags
|= BP_WATCHPOINT_HIT
;
1667 if (!cpu
->watchpoint_hit
) {
1668 cpu
->watchpoint_hit
= wp
;
1669 tb_check_watchpoint(cpu
);
1670 if (wp
->flags
& BP_STOP_BEFORE_ACCESS
) {
1671 cpu
->exception_index
= EXCP_DEBUG
;
1674 cpu_get_tb_cpu_state(env
, &pc
, &cs_base
, &cpu_flags
);
1675 tb_gen_code(cpu
, pc
, cs_base
, cpu_flags
, 1);
1676 cpu_resume_from_signal(cpu
, NULL
);
1680 wp
->flags
&= ~BP_WATCHPOINT_HIT
;
1685 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
1686 so these check for a hit then pass through to the normal out-of-line
1688 static uint64_t watch_mem_read(void *opaque
, hwaddr addr
,
1691 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, ~(size
- 1), BP_MEM_READ
);
1693 case 1: return ldub_phys(&address_space_memory
, addr
);
1694 case 2: return lduw_phys(&address_space_memory
, addr
);
1695 case 4: return ldl_phys(&address_space_memory
, addr
);
1700 static void watch_mem_write(void *opaque
, hwaddr addr
,
1701 uint64_t val
, unsigned size
)
1703 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, ~(size
- 1), BP_MEM_WRITE
);
1706 stb_phys(&address_space_memory
, addr
, val
);
1709 stw_phys(&address_space_memory
, addr
, val
);
1712 stl_phys(&address_space_memory
, addr
, val
);
1718 static const MemoryRegionOps watch_mem_ops
= {
1719 .read
= watch_mem_read
,
1720 .write
= watch_mem_write
,
1721 .endianness
= DEVICE_NATIVE_ENDIAN
,
1724 static uint64_t subpage_read(void *opaque
, hwaddr addr
,
1727 subpage_t
*subpage
= opaque
;
1730 #if defined(DEBUG_SUBPAGE)
1731 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
"\n", __func__
,
1732 subpage
, len
, addr
);
1734 address_space_read(subpage
->as
, addr
+ subpage
->base
, buf
, len
);
1747 static void subpage_write(void *opaque
, hwaddr addr
,
1748 uint64_t value
, unsigned len
)
1750 subpage_t
*subpage
= opaque
;
1753 #if defined(DEBUG_SUBPAGE)
1754 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
1755 " value %"PRIx64
"\n",
1756 __func__
, subpage
, len
, addr
, value
);
1771 address_space_write(subpage
->as
, addr
+ subpage
->base
, buf
, len
);
1774 static bool subpage_accepts(void *opaque
, hwaddr addr
,
1775 unsigned len
, bool is_write
)
1777 subpage_t
*subpage
= opaque
;
1778 #if defined(DEBUG_SUBPAGE)
1779 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx
"\n",
1780 __func__
, subpage
, is_write
? 'w' : 'r', len
, addr
);
1783 return address_space_access_valid(subpage
->as
, addr
+ subpage
->base
,
1787 static const MemoryRegionOps subpage_ops
= {
1788 .read
= subpage_read
,
1789 .write
= subpage_write
,
1790 .valid
.accepts
= subpage_accepts
,
1791 .endianness
= DEVICE_NATIVE_ENDIAN
,
1794 static int subpage_register (subpage_t
*mmio
, uint32_t start
, uint32_t end
,
1799 if (start
>= TARGET_PAGE_SIZE
|| end
>= TARGET_PAGE_SIZE
)
1801 idx
= SUBPAGE_IDX(start
);
1802 eidx
= SUBPAGE_IDX(end
);
1803 #if defined(DEBUG_SUBPAGE)
1804 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
1805 __func__
, mmio
, start
, end
, idx
, eidx
, section
);
1807 for (; idx
<= eidx
; idx
++) {
1808 mmio
->sub_section
[idx
] = section
;
1814 static subpage_t
*subpage_init(AddressSpace
*as
, hwaddr base
)
1818 mmio
= g_malloc0(sizeof(subpage_t
));
1822 memory_region_init_io(&mmio
->iomem
, NULL
, &subpage_ops
, mmio
,
1823 NULL
, TARGET_PAGE_SIZE
);
1824 mmio
->iomem
.subpage
= true;
1825 #if defined(DEBUG_SUBPAGE)
1826 printf("%s: %p base " TARGET_FMT_plx
" len %08x\n", __func__
,
1827 mmio
, base
, TARGET_PAGE_SIZE
);
1829 subpage_register(mmio
, 0, TARGET_PAGE_SIZE
-1, PHYS_SECTION_UNASSIGNED
);
1834 static uint16_t dummy_section(PhysPageMap
*map
, AddressSpace
*as
,
1838 MemoryRegionSection section
= {
1839 .address_space
= as
,
1841 .offset_within_address_space
= 0,
1842 .offset_within_region
= 0,
1843 .size
= int128_2_64(),
1846 return phys_section_add(map
, §ion
);
1849 MemoryRegion
*iotlb_to_region(AddressSpace
*as
, hwaddr index
)
1851 return as
->dispatch
->map
.sections
[index
& ~TARGET_PAGE_MASK
].mr
;
1854 static void io_mem_init(void)
1856 memory_region_init_io(&io_mem_rom
, NULL
, &unassigned_mem_ops
, NULL
, NULL
, UINT64_MAX
);
1857 memory_region_init_io(&io_mem_unassigned
, NULL
, &unassigned_mem_ops
, NULL
,
1859 memory_region_init_io(&io_mem_notdirty
, NULL
, ¬dirty_mem_ops
, NULL
,
1861 memory_region_init_io(&io_mem_watch
, NULL
, &watch_mem_ops
, NULL
,
1865 static void mem_begin(MemoryListener
*listener
)
1867 AddressSpace
*as
= container_of(listener
, AddressSpace
, dispatch_listener
);
1868 AddressSpaceDispatch
*d
= g_new0(AddressSpaceDispatch
, 1);
1871 n
= dummy_section(&d
->map
, as
, &io_mem_unassigned
);
1872 assert(n
== PHYS_SECTION_UNASSIGNED
);
1873 n
= dummy_section(&d
->map
, as
, &io_mem_notdirty
);
1874 assert(n
== PHYS_SECTION_NOTDIRTY
);
1875 n
= dummy_section(&d
->map
, as
, &io_mem_rom
);
1876 assert(n
== PHYS_SECTION_ROM
);
1877 n
= dummy_section(&d
->map
, as
, &io_mem_watch
);
1878 assert(n
== PHYS_SECTION_WATCH
);
1880 d
->phys_map
= (PhysPageEntry
) { .ptr
= PHYS_MAP_NODE_NIL
, .skip
= 1 };
1882 as
->next_dispatch
= d
;
1885 static void mem_commit(MemoryListener
*listener
)
1887 AddressSpace
*as
= container_of(listener
, AddressSpace
, dispatch_listener
);
1888 AddressSpaceDispatch
*cur
= as
->dispatch
;
1889 AddressSpaceDispatch
*next
= as
->next_dispatch
;
1891 phys_page_compact_all(next
, next
->map
.nodes_nb
);
1893 as
->dispatch
= next
;
1896 phys_sections_free(&cur
->map
);
1901 static void tcg_commit(MemoryListener
*listener
)
1905 /* since each CPU stores ram addresses in its TLB cache, we must
1906 reset the modified entries */
1909 /* FIXME: Disentangle the cpu.h circular files deps so we can
1910 directly get the right CPU from listener. */
1911 if (cpu
->tcg_as_listener
!= listener
) {
1918 static void core_log_global_start(MemoryListener
*listener
)
1920 cpu_physical_memory_set_dirty_tracking(true);
1923 static void core_log_global_stop(MemoryListener
*listener
)
1925 cpu_physical_memory_set_dirty_tracking(false);
1928 static MemoryListener core_memory_listener
= {
1929 .log_global_start
= core_log_global_start
,
1930 .log_global_stop
= core_log_global_stop
,
1934 void address_space_init_dispatch(AddressSpace
*as
)
1936 as
->dispatch
= NULL
;
1937 as
->dispatch_listener
= (MemoryListener
) {
1939 .commit
= mem_commit
,
1940 .region_add
= mem_add
,
1941 .region_nop
= mem_add
,
1944 memory_listener_register(&as
->dispatch_listener
, as
);
1947 void address_space_destroy_dispatch(AddressSpace
*as
)
1949 AddressSpaceDispatch
*d
= as
->dispatch
;
1951 memory_listener_unregister(&as
->dispatch_listener
);
1953 as
->dispatch
= NULL
;
1956 static void memory_map_init(void)
1958 system_memory
= g_malloc(sizeof(*system_memory
));
1960 memory_region_init(system_memory
, NULL
, "system", UINT64_MAX
);
1961 address_space_init(&address_space_memory
, system_memory
, "memory");
1963 system_io
= g_malloc(sizeof(*system_io
));
1964 memory_region_init_io(system_io
, NULL
, &unassigned_io_ops
, NULL
, "io",
1966 address_space_init(&address_space_io
, system_io
, "I/O");
1968 memory_listener_register(&core_memory_listener
, &address_space_memory
);
1971 MemoryRegion
*get_system_memory(void)
1973 return system_memory
;
1976 MemoryRegion
*get_system_io(void)
1981 #endif /* !defined(CONFIG_USER_ONLY) */
1983 /* physical memory access (slow version, mainly for debug) */
1984 #if defined(CONFIG_USER_ONLY)
1985 int cpu_memory_rw_debug(CPUState
*cpu
, target_ulong addr
,
1986 uint8_t *buf
, int len
, int is_write
)
1993 page
= addr
& TARGET_PAGE_MASK
;
1994 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
1997 flags
= page_get_flags(page
);
1998 if (!(flags
& PAGE_VALID
))
2001 if (!(flags
& PAGE_WRITE
))
2003 /* XXX: this code should not depend on lock_user */
2004 if (!(p
= lock_user(VERIFY_WRITE
, addr
, l
, 0)))
2007 unlock_user(p
, addr
, l
);
2009 if (!(flags
& PAGE_READ
))
2011 /* XXX: this code should not depend on lock_user */
2012 if (!(p
= lock_user(VERIFY_READ
, addr
, l
, 1)))
2015 unlock_user(p
, addr
, 0);
2026 static void invalidate_and_set_dirty(hwaddr addr
,
2029 if (cpu_physical_memory_is_clean(addr
)) {
2030 /* invalidate code */
2031 tb_invalidate_phys_page_range(addr
, addr
+ length
, 0);
2033 cpu_physical_memory_set_dirty_range_nocode(addr
, length
);
2035 xen_modified_memory(addr
, length
);
2038 static int memory_access_size(MemoryRegion
*mr
, unsigned l
, hwaddr addr
)
2040 unsigned access_size_max
= mr
->ops
->valid
.max_access_size
;
2042 /* Regions are assumed to support 1-4 byte accesses unless
2043 otherwise specified. */
2044 if (access_size_max
== 0) {
2045 access_size_max
= 4;
2048 /* Bound the maximum access by the alignment of the address. */
2049 if (!mr
->ops
->impl
.unaligned
) {
2050 unsigned align_size_max
= addr
& -addr
;
2051 if (align_size_max
!= 0 && align_size_max
< access_size_max
) {
2052 access_size_max
= align_size_max
;
2056 /* Don't attempt accesses larger than the maximum. */
2057 if (l
> access_size_max
) {
2058 l
= access_size_max
;
2061 l
= 1 << (qemu_fls(l
) - 1);
2067 bool address_space_rw(AddressSpace
*as
, hwaddr addr
, uint8_t *buf
,
2068 int len
, bool is_write
)
2079 mr
= address_space_translate(as
, addr
, &addr1
, &l
, is_write
);
2082 if (!memory_access_is_direct(mr
, is_write
)) {
2083 l
= memory_access_size(mr
, l
, addr1
);
2084 /* XXX: could force current_cpu to NULL to avoid
2088 /* 64 bit write access */
2090 error
|= io_mem_write(mr
, addr1
, val
, 8);
2093 /* 32 bit write access */
2095 error
|= io_mem_write(mr
, addr1
, val
, 4);
2098 /* 16 bit write access */
2100 error
|= io_mem_write(mr
, addr1
, val
, 2);
2103 /* 8 bit write access */
2105 error
|= io_mem_write(mr
, addr1
, val
, 1);
2111 addr1
+= memory_region_get_ram_addr(mr
);
2113 ptr
= qemu_get_ram_ptr(addr1
);
2114 memcpy(ptr
, buf
, l
);
2115 invalidate_and_set_dirty(addr1
, l
);
2118 if (!memory_access_is_direct(mr
, is_write
)) {
2120 l
= memory_access_size(mr
, l
, addr1
);
2123 /* 64 bit read access */
2124 error
|= io_mem_read(mr
, addr1
, &val
, 8);
2128 /* 32 bit read access */
2129 error
|= io_mem_read(mr
, addr1
, &val
, 4);
2133 /* 16 bit read access */
2134 error
|= io_mem_read(mr
, addr1
, &val
, 2);
2138 /* 8 bit read access */
2139 error
|= io_mem_read(mr
, addr1
, &val
, 1);
2147 ptr
= qemu_get_ram_ptr(mr
->ram_addr
+ addr1
);
2148 memcpy(buf
, ptr
, l
);
2159 bool address_space_write(AddressSpace
*as
, hwaddr addr
,
2160 const uint8_t *buf
, int len
)
2162 return address_space_rw(as
, addr
, (uint8_t *)buf
, len
, true);
2165 bool address_space_read(AddressSpace
*as
, hwaddr addr
, uint8_t *buf
, int len
)
2167 return address_space_rw(as
, addr
, buf
, len
, false);
2171 void cpu_physical_memory_rw(hwaddr addr
, uint8_t *buf
,
2172 int len
, int is_write
)
2174 address_space_rw(&address_space_memory
, addr
, buf
, len
, is_write
);
2177 enum write_rom_type
{
2182 static inline void cpu_physical_memory_write_rom_internal(AddressSpace
*as
,
2183 hwaddr addr
, const uint8_t *buf
, int len
, enum write_rom_type type
)
2192 mr
= address_space_translate(as
, addr
, &addr1
, &l
, true);
2194 if (!(memory_region_is_ram(mr
) ||
2195 memory_region_is_romd(mr
))) {
2198 addr1
+= memory_region_get_ram_addr(mr
);
2200 ptr
= qemu_get_ram_ptr(addr1
);
2203 memcpy(ptr
, buf
, l
);
2204 invalidate_and_set_dirty(addr1
, l
);
2207 flush_icache_range((uintptr_t)ptr
, (uintptr_t)ptr
+ l
);
2217 /* used for ROM loading : can write in RAM and ROM */
2218 void cpu_physical_memory_write_rom(AddressSpace
*as
, hwaddr addr
,
2219 const uint8_t *buf
, int len
)
2221 cpu_physical_memory_write_rom_internal(as
, addr
, buf
, len
, WRITE_DATA
);
2224 void cpu_flush_icache_range(hwaddr start
, int len
)
2227 * This function should do the same thing as an icache flush that was
2228 * triggered from within the guest. For TCG we are always cache coherent,
2229 * so there is no need to flush anything. For KVM / Xen we need to flush
2230 * the host's instruction cache at least.
2232 if (tcg_enabled()) {
2236 cpu_physical_memory_write_rom_internal(&address_space_memory
,
2237 start
, NULL
, len
, FLUSH_CACHE
);
2247 static BounceBuffer bounce
;
2249 typedef struct MapClient
{
2251 void (*callback
)(void *opaque
);
2252 QLIST_ENTRY(MapClient
) link
;
2255 static QLIST_HEAD(map_client_list
, MapClient
) map_client_list
2256 = QLIST_HEAD_INITIALIZER(map_client_list
);
2258 void *cpu_register_map_client(void *opaque
, void (*callback
)(void *opaque
))
2260 MapClient
*client
= g_malloc(sizeof(*client
));
2262 client
->opaque
= opaque
;
2263 client
->callback
= callback
;
2264 QLIST_INSERT_HEAD(&map_client_list
, client
, link
);
2268 static void cpu_unregister_map_client(void *_client
)
2270 MapClient
*client
= (MapClient
*)_client
;
2272 QLIST_REMOVE(client
, link
);
2276 static void cpu_notify_map_clients(void)
2280 while (!QLIST_EMPTY(&map_client_list
)) {
2281 client
= QLIST_FIRST(&map_client_list
);
2282 client
->callback(client
->opaque
);
2283 cpu_unregister_map_client(client
);
2287 bool address_space_access_valid(AddressSpace
*as
, hwaddr addr
, int len
, bool is_write
)
2294 mr
= address_space_translate(as
, addr
, &xlat
, &l
, is_write
);
2295 if (!memory_access_is_direct(mr
, is_write
)) {
2296 l
= memory_access_size(mr
, l
, addr
);
2297 if (!memory_region_access_valid(mr
, xlat
, l
, is_write
)) {
2308 /* Map a physical memory region into a host virtual address.
2309 * May map a subset of the requested range, given by and returned in *plen.
2310 * May return NULL if resources needed to perform the mapping are exhausted.
2311 * Use only for reads OR writes - not for read-modify-write operations.
2312 * Use cpu_register_map_client() to know when retrying the map operation is
2313 * likely to succeed.
2315 void *address_space_map(AddressSpace
*as
,
2322 hwaddr l
, xlat
, base
;
2323 MemoryRegion
*mr
, *this_mr
;
2331 mr
= address_space_translate(as
, addr
, &xlat
, &l
, is_write
);
2332 if (!memory_access_is_direct(mr
, is_write
)) {
2333 if (bounce
.buffer
) {
2336 /* Avoid unbounded allocations */
2337 l
= MIN(l
, TARGET_PAGE_SIZE
);
2338 bounce
.buffer
= qemu_memalign(TARGET_PAGE_SIZE
, l
);
2342 memory_region_ref(mr
);
2345 address_space_read(as
, addr
, bounce
.buffer
, l
);
2349 return bounce
.buffer
;
2353 raddr
= memory_region_get_ram_addr(mr
);
2364 this_mr
= address_space_translate(as
, addr
, &xlat
, &l
, is_write
);
2365 if (this_mr
!= mr
|| xlat
!= base
+ done
) {
2370 memory_region_ref(mr
);
2372 return qemu_ram_ptr_length(raddr
+ base
, plen
);
2375 /* Unmaps a memory region previously mapped by address_space_map().
2376 * Will also mark the memory as dirty if is_write == 1. access_len gives
2377 * the amount of memory that was actually read or written by the caller.
2379 void address_space_unmap(AddressSpace
*as
, void *buffer
, hwaddr len
,
2380 int is_write
, hwaddr access_len
)
2382 if (buffer
!= bounce
.buffer
) {
2386 mr
= qemu_ram_addr_from_host(buffer
, &addr1
);
2389 invalidate_and_set_dirty(addr1
, access_len
);
2391 if (xen_enabled()) {
2392 xen_invalidate_map_cache_entry(buffer
);
2394 memory_region_unref(mr
);
2398 address_space_write(as
, bounce
.addr
, bounce
.buffer
, access_len
);
2400 qemu_vfree(bounce
.buffer
);
2401 bounce
.buffer
= NULL
;
2402 memory_region_unref(bounce
.mr
);
2403 cpu_notify_map_clients();
2406 void *cpu_physical_memory_map(hwaddr addr
,
2410 return address_space_map(&address_space_memory
, addr
, plen
, is_write
);
2413 void cpu_physical_memory_unmap(void *buffer
, hwaddr len
,
2414 int is_write
, hwaddr access_len
)
2416 return address_space_unmap(&address_space_memory
, buffer
, len
, is_write
, access_len
);
2419 /* warning: addr must be aligned */
2420 static inline uint32_t ldl_phys_internal(AddressSpace
*as
, hwaddr addr
,
2421 enum device_endian endian
)
2429 mr
= address_space_translate(as
, addr
, &addr1
, &l
, false);
2430 if (l
< 4 || !memory_access_is_direct(mr
, false)) {
2432 io_mem_read(mr
, addr1
, &val
, 4);
2433 #if defined(TARGET_WORDS_BIGENDIAN)
2434 if (endian
== DEVICE_LITTLE_ENDIAN
) {
2438 if (endian
== DEVICE_BIG_ENDIAN
) {
2444 ptr
= qemu_get_ram_ptr((memory_region_get_ram_addr(mr
)
2448 case DEVICE_LITTLE_ENDIAN
:
2449 val
= ldl_le_p(ptr
);
2451 case DEVICE_BIG_ENDIAN
:
2452 val
= ldl_be_p(ptr
);
2462 uint32_t ldl_phys(AddressSpace
*as
, hwaddr addr
)
2464 return ldl_phys_internal(as
, addr
, DEVICE_NATIVE_ENDIAN
);
2467 uint32_t ldl_le_phys(AddressSpace
*as
, hwaddr addr
)
2469 return ldl_phys_internal(as
, addr
, DEVICE_LITTLE_ENDIAN
);
2472 uint32_t ldl_be_phys(AddressSpace
*as
, hwaddr addr
)
2474 return ldl_phys_internal(as
, addr
, DEVICE_BIG_ENDIAN
);
2477 /* warning: addr must be aligned */
2478 static inline uint64_t ldq_phys_internal(AddressSpace
*as
, hwaddr addr
,
2479 enum device_endian endian
)
2487 mr
= address_space_translate(as
, addr
, &addr1
, &l
,
2489 if (l
< 8 || !memory_access_is_direct(mr
, false)) {
2491 io_mem_read(mr
, addr1
, &val
, 8);
2492 #if defined(TARGET_WORDS_BIGENDIAN)
2493 if (endian
== DEVICE_LITTLE_ENDIAN
) {
2497 if (endian
== DEVICE_BIG_ENDIAN
) {
2503 ptr
= qemu_get_ram_ptr((memory_region_get_ram_addr(mr
)
2507 case DEVICE_LITTLE_ENDIAN
:
2508 val
= ldq_le_p(ptr
);
2510 case DEVICE_BIG_ENDIAN
:
2511 val
= ldq_be_p(ptr
);
2521 uint64_t ldq_phys(AddressSpace
*as
, hwaddr addr
)
2523 return ldq_phys_internal(as
, addr
, DEVICE_NATIVE_ENDIAN
);
2526 uint64_t ldq_le_phys(AddressSpace
*as
, hwaddr addr
)
2528 return ldq_phys_internal(as
, addr
, DEVICE_LITTLE_ENDIAN
);
2531 uint64_t ldq_be_phys(AddressSpace
*as
, hwaddr addr
)
2533 return ldq_phys_internal(as
, addr
, DEVICE_BIG_ENDIAN
);
2537 uint32_t ldub_phys(AddressSpace
*as
, hwaddr addr
)
2540 address_space_rw(as
, addr
, &val
, 1, 0);
2544 /* warning: addr must be aligned */
2545 static inline uint32_t lduw_phys_internal(AddressSpace
*as
, hwaddr addr
,
2546 enum device_endian endian
)
2554 mr
= address_space_translate(as
, addr
, &addr1
, &l
,
2556 if (l
< 2 || !memory_access_is_direct(mr
, false)) {
2558 io_mem_read(mr
, addr1
, &val
, 2);
2559 #if defined(TARGET_WORDS_BIGENDIAN)
2560 if (endian
== DEVICE_LITTLE_ENDIAN
) {
2564 if (endian
== DEVICE_BIG_ENDIAN
) {
2570 ptr
= qemu_get_ram_ptr((memory_region_get_ram_addr(mr
)
2574 case DEVICE_LITTLE_ENDIAN
:
2575 val
= lduw_le_p(ptr
);
2577 case DEVICE_BIG_ENDIAN
:
2578 val
= lduw_be_p(ptr
);
2588 uint32_t lduw_phys(AddressSpace
*as
, hwaddr addr
)
2590 return lduw_phys_internal(as
, addr
, DEVICE_NATIVE_ENDIAN
);
2593 uint32_t lduw_le_phys(AddressSpace
*as
, hwaddr addr
)
2595 return lduw_phys_internal(as
, addr
, DEVICE_LITTLE_ENDIAN
);
2598 uint32_t lduw_be_phys(AddressSpace
*as
, hwaddr addr
)
2600 return lduw_phys_internal(as
, addr
, DEVICE_BIG_ENDIAN
);
2603 /* warning: addr must be aligned. The ram page is not masked as dirty
2604 and the code inside is not invalidated. It is useful if the dirty
2605 bits are used to track modified PTEs */
2606 void stl_phys_notdirty(AddressSpace
*as
, hwaddr addr
, uint32_t val
)
2613 mr
= address_space_translate(as
, addr
, &addr1
, &l
,
2615 if (l
< 4 || !memory_access_is_direct(mr
, true)) {
2616 io_mem_write(mr
, addr1
, val
, 4);
2618 addr1
+= memory_region_get_ram_addr(mr
) & TARGET_PAGE_MASK
;
2619 ptr
= qemu_get_ram_ptr(addr1
);
2622 if (unlikely(in_migration
)) {
2623 if (cpu_physical_memory_is_clean(addr1
)) {
2624 /* invalidate code */
2625 tb_invalidate_phys_page_range(addr1
, addr1
+ 4, 0);
2627 cpu_physical_memory_set_dirty_range_nocode(addr1
, 4);
2633 /* warning: addr must be aligned */
2634 static inline void stl_phys_internal(AddressSpace
*as
,
2635 hwaddr addr
, uint32_t val
,
2636 enum device_endian endian
)
2643 mr
= address_space_translate(as
, addr
, &addr1
, &l
,
2645 if (l
< 4 || !memory_access_is_direct(mr
, true)) {
2646 #if defined(TARGET_WORDS_BIGENDIAN)
2647 if (endian
== DEVICE_LITTLE_ENDIAN
) {
2651 if (endian
== DEVICE_BIG_ENDIAN
) {
2655 io_mem_write(mr
, addr1
, val
, 4);
2658 addr1
+= memory_region_get_ram_addr(mr
) & TARGET_PAGE_MASK
;
2659 ptr
= qemu_get_ram_ptr(addr1
);
2661 case DEVICE_LITTLE_ENDIAN
:
2664 case DEVICE_BIG_ENDIAN
:
2671 invalidate_and_set_dirty(addr1
, 4);
2675 void stl_phys(AddressSpace
*as
, hwaddr addr
, uint32_t val
)
2677 stl_phys_internal(as
, addr
, val
, DEVICE_NATIVE_ENDIAN
);
2680 void stl_le_phys(AddressSpace
*as
, hwaddr addr
, uint32_t val
)
2682 stl_phys_internal(as
, addr
, val
, DEVICE_LITTLE_ENDIAN
);
2685 void stl_be_phys(AddressSpace
*as
, hwaddr addr
, uint32_t val
)
2687 stl_phys_internal(as
, addr
, val
, DEVICE_BIG_ENDIAN
);
2691 void stb_phys(AddressSpace
*as
, hwaddr addr
, uint32_t val
)
2694 address_space_rw(as
, addr
, &v
, 1, 1);
2697 /* warning: addr must be aligned */
2698 static inline void stw_phys_internal(AddressSpace
*as
,
2699 hwaddr addr
, uint32_t val
,
2700 enum device_endian endian
)
2707 mr
= address_space_translate(as
, addr
, &addr1
, &l
, true);
2708 if (l
< 2 || !memory_access_is_direct(mr
, true)) {
2709 #if defined(TARGET_WORDS_BIGENDIAN)
2710 if (endian
== DEVICE_LITTLE_ENDIAN
) {
2714 if (endian
== DEVICE_BIG_ENDIAN
) {
2718 io_mem_write(mr
, addr1
, val
, 2);
2721 addr1
+= memory_region_get_ram_addr(mr
) & TARGET_PAGE_MASK
;
2722 ptr
= qemu_get_ram_ptr(addr1
);
2724 case DEVICE_LITTLE_ENDIAN
:
2727 case DEVICE_BIG_ENDIAN
:
2734 invalidate_and_set_dirty(addr1
, 2);
2738 void stw_phys(AddressSpace
*as
, hwaddr addr
, uint32_t val
)
2740 stw_phys_internal(as
, addr
, val
, DEVICE_NATIVE_ENDIAN
);
2743 void stw_le_phys(AddressSpace
*as
, hwaddr addr
, uint32_t val
)
2745 stw_phys_internal(as
, addr
, val
, DEVICE_LITTLE_ENDIAN
);
2748 void stw_be_phys(AddressSpace
*as
, hwaddr addr
, uint32_t val
)
2750 stw_phys_internal(as
, addr
, val
, DEVICE_BIG_ENDIAN
);
2754 void stq_phys(AddressSpace
*as
, hwaddr addr
, uint64_t val
)
2757 address_space_rw(as
, addr
, (void *) &val
, 8, 1);
2760 void stq_le_phys(AddressSpace
*as
, hwaddr addr
, uint64_t val
)
2762 val
= cpu_to_le64(val
);
2763 address_space_rw(as
, addr
, (void *) &val
, 8, 1);
2766 void stq_be_phys(AddressSpace
*as
, hwaddr addr
, uint64_t val
)
2768 val
= cpu_to_be64(val
);
2769 address_space_rw(as
, addr
, (void *) &val
, 8, 1);
2772 /* virtual memory access for debug (includes writing to ROM) */
2773 int cpu_memory_rw_debug(CPUState
*cpu
, target_ulong addr
,
2774 uint8_t *buf
, int len
, int is_write
)
2781 page
= addr
& TARGET_PAGE_MASK
;
2782 phys_addr
= cpu_get_phys_page_debug(cpu
, page
);
2783 /* if no physical page mapped, return an error */
2784 if (phys_addr
== -1)
2786 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
2789 phys_addr
+= (addr
& ~TARGET_PAGE_MASK
);
2791 cpu_physical_memory_write_rom(cpu
->as
, phys_addr
, buf
, l
);
2793 address_space_rw(cpu
->as
, phys_addr
, buf
, l
, 0);
2804 * A helper function for the _utterly broken_ virtio device model to find out if
2805 * it's running on a big endian machine. Don't do this at home kids!
2807 bool target_words_bigendian(void);
2808 bool target_words_bigendian(void)
2810 #if defined(TARGET_WORDS_BIGENDIAN)
2817 #ifndef CONFIG_USER_ONLY
2818 bool cpu_physical_memory_is_io(hwaddr phys_addr
)
2823 mr
= address_space_translate(&address_space_memory
,
2824 phys_addr
, &phys_addr
, &l
, false);
2826 return !(memory_region_is_ram(mr
) ||
2827 memory_region_is_romd(mr
));
2830 void qemu_ram_foreach_block(RAMBlockIterFunc func
, void *opaque
)
2834 QTAILQ_FOREACH(block
, &ram_list
.blocks
, next
) {
2835 func(block
->host
, block
->offset
, block
->length
, opaque
);