/*
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <sys/types.h>

#include "qemu-common.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#endif
#include "exec/cpu-all.h"
#include "exec/cputlb.h"
#include "translate-all.h"
#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "qemu/range.h"

//#define DEBUG_SUBPAGE
#if !defined(CONFIG_USER_ONLY)
static bool in_migration;

RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC   (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED     (1 << 1)

#endif
struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);

/* current CPU in the current thread. It is only valid inside cpu_exec() */
DEFINE_TLS(CPUState *, current_cpu);

/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables.  */
#define ADDR_SPACE_BITS 64

#define P_L2_BITS 9
#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)

typedef PhysPageEntry Node[P_L2_SIZE];

typedef struct PhysPageMap {
    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3
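
/*
 * In outline, the structures above form a radix tree over the physical
 * address space: phys_map is the root PhysPageEntry, interior entries point
 * into PhysPageMap.nodes (each node holds P_L2_SIZE entries indexed by
 * P_L2_BITS bits of the page number), and leaf entries (skip == 0) index
 * into PhysPageMap.sections.  The PHYS_SECTION_* constants are well-known
 * section indexes installed for every dispatch map in mem_begin().
 */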
static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;
#endif
#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
    }
}
static uint32_t phys_map_node_alloc(PhysPageMap *map)
{
    unsigned i;
    uint32_t ret;

    ret = map->nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);
    for (i = 0; i < P_L2_SIZE; ++i) {
        map->nodes[ret][i].skip = 1;
        map->nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}
static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map);
        p = map->nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < P_L2_SIZE; i++) {
                p[i].skip = 0;
                p[i].ptr = PHYS_SECTION_UNASSIGNED;
            }
        }
    } else {
        p = map->nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}
static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}
/* Compact a non leaf page entry. Simply detect that the entry has a single child,
 * and update our entry so we can skip it and go directly to the destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }
        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes, compacted);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}
static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    DECLARE_BITMAP(compacted, nodes_nb);

    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes, compacted);
    }
}
static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (sections[lp.ptr].size.hi ||
        range_covers_byte(sections[lp.ptr].offset_within_address_space,
                          sections[lp.ptr].size.lo, addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}
bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section;
    subpage_t *subpage;

    section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    return section;
}
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    diff = int128_sub(section->mr->size, int128_make64(addr));
    *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    return section;
}
static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
{
    if (memory_region_is_ram(mr)) {
        return !(is_write && mr->readonly);
    }
    if (memory_region_is_romd(mr)) {
        return !is_write;
    }

    return false;
}
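
/*
 * Translate a physical address within an address space to a MemoryRegion
 * plus an offset (*xlat) into it, shrinking *plen so that the result does
 * not cross a region boundary.  IOMMU regions are followed iteratively
 * until a terminal region is reached.
 */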
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;
    hwaddr len = *plen;

    for (;;) {
        section = address_space_translate_internal(as->dispatch, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        len = MIN(len, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        len = MIN(page, len);
    }

    *plen = len;
    *xlat = addr;
    return mr;
}
MemoryRegionSection *
address_space_translate_for_iotlb(AddressSpace *as, hwaddr addr, hwaddr *xlat,
                                  hwaddr *plen)
{
    MemoryRegionSection *section;
    section = address_space_translate_internal(as->dispatch, addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif
void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    qemu_mutex_init(&ram_list.mutex);
    memory_map_init();
    io_mem_init();
#endif
}
#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;

    return 0;
}

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

#endif
CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}
#if !defined(CONFIG_USER_ONLY)
void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as)
{
    /* We only support one address space per cpu at the moment.  */
    assert(cpu->as == as);

    if (cpu->tcg_as_listener) {
        memory_listener_unregister(cpu->tcg_as_listener);
    } else {
        cpu->tcg_as_listener = g_new0(MemoryListener, 1);
    }
    cpu->tcg_as_listener->commit = tcg_commit;
    memory_listener_register(cpu->tcg_as_listener, as);
}
#endif
void cpu_exec_init(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUState *some_cpu;
    int cpu_index;

#ifdef TARGET_WORDS_BIGENDIAN
    cpu->bigendian = true;
#else
    cpu->bigendian = false;
#endif

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    cpu_index = 0;
    CPU_FOREACH(some_cpu) {
        cpu_index++;
    }
    cpu->cpu_index = cpu_index;
    QTAILQ_INIT(&cpu->breakpoints);
    QTAILQ_INIT(&cpu->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->as = &address_space_memory;
    cpu->thread_id = qemu_get_thread_id();
#endif
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
    }
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
    assert(cc->vmsd == NULL);
    assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}
#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    hwaddr phys = cpu_get_phys_page_debug(cpu, pc);
    if (phys != -1) {
        tb_invalidate_phys_addr(cpu->as,
                                phys | (pc & ~TARGET_PAGE_MASK));
    }
}
#endif
#endif /* TARGET_HAS_ICE */
#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
}

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    vaddr len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len & (len - 1)) || (addr & ~len_mask) ||
            len == 0 || len > TARGET_PAGE_SIZE) {
        error_report("tried to set invalid watchpoint at %"
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
    }

    tlb_flush_page(cpu, addr);

    if (watchpoint) {
        *watchpoint = wp;
    }
    return 0;
}
/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    vaddr len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
            return 0;
        }
    }
    return -ENOENT;
}
/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);

    tlb_flush_page(cpu, watchpoint->vaddr);

    g_free(watchpoint);
}
/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
        }
    }
}
#endif
/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
    }

    breakpoint_invalidate(cpu, pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
#else
    return -ENOSYS;
#endif
}
/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}
/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);

    breakpoint_invalidate(cpu, breakpoint->pc);

    g_free(breakpoint);
#endif
}
/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
        }
    }
#endif
}
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            CPUArchState *env = cpu->env_ptr;
            tb_flush(env);
        }
    }
#endif
}
void QEMU_NORETURN cpu_abort(CPUState *cpu, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}
#if !defined(CONFIG_USER_ONLY)
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    /* The list is protected by the iothread lock here.  */
    block = ram_list.mru_block;
    if (block && addr - block->offset < block->length) {
        goto found;
    }
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    ram_list.mru_block = block;
    return block;
}
static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t end;
    uintptr_t start1;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)block->host + (start - block->offset);
    cpu_tlb_reset_dirty_all(start1, length);
}
/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t length,
                                     unsigned client)
{
    cpu_physical_memory_clear_dirty_range(start, length, client);

    if (tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
    }
}
static void cpu_physical_memory_set_dirty_tracking(bool enable)
{
    in_migration = enable;
}
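
/*
 * Compute the iotlb value for a section: for RAM it is the ram_addr of the
 * page ORed with PHYS_SECTION_NOTDIRTY or PHYS_SECTION_ROM, for everything
 * else it is the section index within the dispatch map.  Pages covered by a
 * watchpoint are redirected to PHYS_SECTION_WATCH and marked TLB_MMIO.
 */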
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        iotlb = section - section->address_space->dispatch->map.sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* defined(CONFIG_USER_ONLY) */
#if !defined(CONFIG_USER_ONLY)

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size) = qemu_anon_ram_alloc;
/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this.  Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t))
{
    phys_mem_alloc = alloc;
}
static uint16_t phys_section_add(PhysPageMap *map,
                                 MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(map->sections_nb < TARGET_PAGE_SIZE);

    if (map->sections_nb == map->sections_nb_alloc) {
        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
        map->sections = g_renew(MemoryRegionSection, map->sections,
                                map->sections_nb_alloc);
    }
    map->sections[map->sections_nb] = *section;
    memory_region_ref(section->mr);
    return map->sections_nb++;
}
static void phys_section_destroy(MemoryRegion *mr)
{
    memory_region_unref(mr);

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        object_unref(OBJECT(&subpage->iomem));
        g_free(subpage);
    }
}
static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->sections);
    g_free(map->nodes);
}
static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
                                                   d->map.nodes, d->map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.address_space = d->as;
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&d->map, &subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end,
                     phys_section_add(&d->map, section));
}
static void register_multipage(AddressSpaceDispatch *d,
                               MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(&d->map, section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,
                                                    TARGET_PAGE_BITS));

    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
}
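
/*
 * mem_add() registers one MemoryRegionSection with the next dispatch map.
 * An unaligned head and tail (and any section smaller than a page) go
 * through register_subpage(); the page-aligned middle goes through
 * register_multipage() as a single multi-page leaf.
 */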
static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = as->next_dispatch;
    MemoryRegionSection now = *section, remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
                        - now.offset_within_address_space;
        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(d, &now);
    } else {
        now.size = int128_zero();
    }
    while (int128_ne(remain.size, now.size)) {
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);
        now = remain;
        if (int128_lt(remain.size, page_size)) {
            register_subpage(d, &now);
        } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
            now.size = page_size;
            register_subpage(d, &now);
        } else {
            now.size = int128_and(now.size, int128_neg(page_size));
            register_multipage(d, &now);
        }
    }
}
void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled()) {
        kvm_flush_coalesced_mmio_buffer();
    }
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}
#include <sys/vfs.h>

#define HUGETLBFS_MAGIC       0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}
static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path,
                            Error **errp)
{
    char *filename;
    char *sanitized_name;
    char *c;
    void * volatile area;
    int fd;
    uintptr_t hpagesize;

    hpagesize = gethugepagesize(path);

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        error_setg(errp,
                   "host lacks kvm mmu notifiers, -mem-path unsupported");
        return NULL;
    }

    /* Make name safe to use with mkstemp by replacing '/' with '_'. */
    sanitized_name = g_strdup(block->mr->name);
    for (c = sanitized_name; *c != '\0'; c++) {
        if (*c == '/') {
            *c = '_';
        }
    }

    filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
                               sanitized_name);
    g_free(sanitized_name);

    fd = mkstemp(filename);
    if (fd < 0) {
        error_setg_errno(errp, errno,
                         "unable to create backing store for hugepages");
        g_free(filename);
        return NULL;
    }
    g_free(filename);

    memory = (memory + hpagesize - 1) & ~(hpagesize - 1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory)) {
        perror("ftruncate");
    }

    area = mmap(0, memory, PROT_READ | PROT_WRITE,
                (block->flags & RAM_SHARED ? MAP_SHARED : MAP_PRIVATE),
                fd, 0);
    if (area == MAP_FAILED) {
        error_setg_errno(errp, errno,
                         "unable to map backing store for hugepages");
        close(fd);
        return NULL;
    }

    os_mem_prealloc(fd, area, memory);

    block->fd = fd;
    return area;
}
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    assert(size != 0); /* it would hand out same offset multiple times */

    if (QTAILQ_EMPTY(&ram_list.blocks))
        return 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->length;

        QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}
last_ram_offset(void)
1148 ram_addr_t last
= 0;
1150 QTAILQ_FOREACH(block
, &ram_list
.blocks
, next
)
1151 last
= MAX(last
, block
->offset
+ block
->length
);
static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
{
    int ret;

    /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
    if (!qemu_opt_get_bool(qemu_get_machine_opts(),
                           "dump-guest-core", true)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                            "but dump_guest_core=off specified\n");
        }
    }
}
static RAMBlock *find_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
            return block;
        }
    }

    return NULL;
}
void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block = find_ram_block(addr);
    RAMBlock *block;

    assert(!new_block->idstr[0]);

    if (dev) {
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
    qemu_mutex_unlock_ramlist();
}
void qemu_ram_unset_idstr(ram_addr_t addr)
{
    RAMBlock *block = find_ram_block(addr);

    if (block) {
        memset(block->idstr, 0, sizeof(block->idstr));
    }
}
static int memory_try_enable_merging(void *addr, size_t len)
{
    if (!qemu_opt_get_bool(qemu_get_machine_opts(), "mem-merge", true)) {
        /* disabled by the user */
        return 0;
    }

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
}
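
/*
 * ram_block_add() gives the new block an offset in the ram_addr_t space,
 * allocates its host memory unless it was preallocated (or Xen owns it),
 * inserts it into ram_list sorted by size, grows the dirty bitmaps if the
 * address space grew, and applies the usual madvise hints.
 */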
static ram_addr_t ram_block_add(RAMBlock *new_block)
{
    RAMBlock *block;
    ram_addr_t old_ram_size, new_ram_size;

    old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    new_block->offset = find_ram_offset(new_block->length);

    if (!new_block->host) {
        if (xen_enabled()) {
            xen_ram_alloc(new_block->offset, new_block->length, new_block->mr);
        } else {
            new_block->host = phys_mem_alloc(new_block->length);
            if (!new_block->host) {
                fprintf(stderr, "Cannot set up guest memory '%s': %s\n",
                        new_block->mr->name, strerror(errno));
                exit(1);
            }
            memory_try_enable_merging(new_block->host, new_block->length);
        }
    }

    /* Keep the list sorted from biggest to smallest block.  */
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->length < new_block->length) {
            break;
        }
    }
    if (block) {
        QTAILQ_INSERT_BEFORE(block, new_block, next);
    } else {
        QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
    }
    ram_list.mru_block = NULL;

    qemu_mutex_unlock_ramlist();

    new_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;

    if (new_ram_size > old_ram_size) {
        int i;
        for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
            ram_list.dirty_memory[i] =
                bitmap_zero_extend(ram_list.dirty_memory[i],
                                   old_ram_size, new_ram_size);
        }
    }
    cpu_physical_memory_set_dirty_range(new_block->offset, new_block->length);

    qemu_ram_setup_dump(new_block->host, new_block->length);
    qemu_madvise(new_block->host, new_block->length, QEMU_MADV_HUGEPAGE);
    qemu_madvise(new_block->host, new_block->length, QEMU_MADV_DONTFORK);

    if (kvm_enabled()) {
        kvm_setup_guest_memory(new_block->host, new_block->length);
    }

    return new_block->offset;
}
ram_addr_t qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
                                    bool share, const char *mem_path,
                                    Error **errp)
{
    RAMBlock *new_block;

    if (xen_enabled()) {
        error_setg(errp, "-mem-path not supported with Xen");
        return -1;
    }

    if (phys_mem_alloc != qemu_anon_ram_alloc) {
        /*
         * file_ram_alloc() needs to allocate just like
         * phys_mem_alloc, but we haven't bothered to provide
         * a hook there.
         */
        error_setg(errp,
                   "-mem-path not supported with this accelerator");
        return -1;
    }

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));
    new_block->mr = mr;
    new_block->length = size;
    new_block->flags = share ? RAM_SHARED : 0;
    new_block->host = file_ram_alloc(new_block, size,
                                     mem_path, errp);
    if (!new_block->host) {
        g_free(new_block);
        return -1;
    }

    return ram_block_add(new_block);
}
ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr)
{
    RAMBlock *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));
    new_block->mr = mr;
    new_block->length = size;
    new_block->host = host;
    if (host) {
        new_block->flags |= RAM_PREALLOC;
    }
    return ram_block_add(new_block);
}

ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
{
    return qemu_ram_alloc_from_ptr(size, NULL, mr);
}
void qemu_ram_free_from_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            g_free(block);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}
void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            if (block->flags & RAM_PREALLOC) {
                ;
            } else if (xen_enabled()) {
                xen_invalidate_map_cache_entry(block->host);
            } else if (block->fd >= 0) {
                munmap(block->host, block->length);
                close(block->fd);
            } else {
                qemu_anon_ram_free(block->host, block->length);
            }
            g_free(block);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}
#ifndef _WIN32
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->length) {
            vaddr = block->host + offset;
            if (block->flags & RAM_PREALLOC) {
                ;
            } else if (xen_enabled()) {
                abort();
            } else {
                flags = MAP_FIXED;
                munmap(vaddr, length);
                if (block->fd >= 0) {
                    flags |= (block->flags & RAM_SHARED ?
                              MAP_SHARED : MAP_PRIVATE);
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, block->fd, offset);
                } else {
                    /*
                     * Remap needs to match alloc.  Accelerators that
                     * set phys_mem_alloc never remap.  If they did,
                     * we'd need a remap hook here.
                     */
                    assert(phys_mem_alloc == qemu_anon_ram_alloc);

                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                memory_try_enable_merging(vaddr, length);
                qemu_ram_setup_dump(vaddr, length);
            }
        }
    }
}
#endif /* !_WIN32 */
int qemu_get_ram_fd(ram_addr_t addr)
{
    RAMBlock *block = qemu_get_ram_block(addr);

    return block->fd;
}

void *qemu_get_ram_block_host_ptr(ram_addr_t addr)
{
    RAMBlock *block = qemu_get_ram_block(addr);

    return block->host;
}
/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block = qemu_get_ram_block(addr);

    if (xen_enabled()) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map until the end of the page.
         */
        if (block->offset == 0) {
            return xen_map_cache(addr, 0, 0);
        } else if (block->host == NULL) {
            block->host =
                xen_map_cache(block->offset, block->length, 1);
        }
    }
    return block->host + (addr - block->offset);
}
/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
 * but takes a size argument */
static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
{
    RAMBlock *block;

    if (xen_enabled()) {
        return xen_map_cache(addr, *size, 1);
    } else {
        QTAILQ_FOREACH(block, &ram_list.blocks, next) {
            if (addr - block->offset < block->length) {
                if (addr - block->offset + *size > block->length)
                    *size = block->length - addr + block->offset;
                return block->host + (addr - block->offset);
            }
        }

        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
        abort();
    }
}
/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_enabled()) {
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
        return qemu_get_ram_block(*ram_addr)->mr;
    }

    block = ram_list.mru_block;
    if (block && block->host && host - block->host < block->length) {
        goto found;
    }

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        /* This case appears when the block is not mapped. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->length) {
            goto found;
        }
    }

    return NULL;

found:
    *ram_addr = block->offset + (host - block->host);
    return block->mr;
}
static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
                               uint64_t val, unsigned size)
{
    if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
        tb_invalidate_phys_page_fast(ram_addr, size);
    }
    switch (size) {
    case 1:
        stb_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 2:
        stw_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 4:
        stl_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    default:
        abort();
    }
    cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_MIGRATION);
    cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_VGA);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (!cpu_physical_memory_is_clean(ram_addr)) {
        CPUArchState *env = current_cpu->env_ptr;
        tlb_set_dirty(env, current_cpu->mem_io_vaddr);
    }
}
static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
                                 unsigned size, bool is_write)
{
    return is_write;
}

static const MemoryRegionOps notdirty_mem_ops = {
    .write = notdirty_mem_write,
    .valid.accepts = notdirty_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
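
/*
 * Writes to pages that still contain translated code are routed through
 * notdirty_mem_write above: the TB cache for the page is invalidated, the
 * write is performed on the underlying RAM, and the dirty bits are set.
 * Once the page is no longer clean, the TLB entry is switched back to a
 * direct RAM mapping via tlb_set_dirty().
 */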
/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUState *cpu = current_cpu;
    CPUArchState *env = cpu->env_ptr;
    target_ulong pc, cs_base;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (cpu->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!cpu->watchpoint_hit) {
                cpu->watchpoint_hit = wp;
                tb_check_watchpoint(cpu);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    cpu->exception_index = EXCP_DEBUG;
                    cpu_loop_exit(cpu);
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
                    cpu_resume_from_signal(cpu, NULL);
                }
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint64_t watch_mem_read(void *opaque, hwaddr addr,
                               unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
    switch (size) {
    case 1: return ldub_phys(&address_space_memory, addr);
    case 2: return lduw_phys(&address_space_memory, addr);
    case 4: return ldl_phys(&address_space_memory, addr);
    default: abort();
    }
}

static void watch_mem_write(void *opaque, hwaddr addr,
                            uint64_t val, unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
    switch (size) {
    case 1:
        stb_phys(&address_space_memory, addr, val);
        break;
    case 2:
        stw_phys(&address_space_memory, addr, val);
        break;
    case 4:
        stl_phys(&address_space_memory, addr, val);
        break;
    default: abort();
    }
}
= {
1673 .read
= watch_mem_read
,
1674 .write
= watch_mem_write
,
1675 .endianness
= DEVICE_NATIVE_ENDIAN
,
static uint64_t subpage_read(void *opaque, hwaddr addr,
                             unsigned len)
{
    subpage_t *subpage = opaque;
    uint8_t buf[4];

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
           subpage, len, addr);
#endif
    address_space_read(subpage->as, addr + subpage->base, buf, len);
    switch (len) {
    case 1:
        return ldub_p(buf);
    case 2:
        return lduw_p(buf);
    case 4:
        return ldl_p(buf);
    default:
        abort();
    }
}
static void subpage_write(void *opaque, hwaddr addr,
                          uint64_t value, unsigned len)
{
    subpage_t *subpage = opaque;
    uint8_t buf[4];

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx
           " value %"PRIx64"\n",
           __func__, subpage, len, addr, value);
#endif
    switch (len) {
    case 1:
        stb_p(buf, value);
        break;
    case 2:
        stw_p(buf, value);
        break;
    case 4:
        stl_p(buf, value);
        break;
    default:
        abort();
    }
    address_space_write(subpage->as, addr + subpage->base, buf, len);
}
static bool subpage_accepts(void *opaque, hwaddr addr,
                            unsigned len, bool is_write)
{
    subpage_t *subpage = opaque;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
           __func__, subpage, is_write ? 'w' : 'r', len, addr);
#endif

    return address_space_access_valid(subpage->as, addr + subpage->base,
                                      len, is_write);
}

static const MemoryRegionOps subpage_ops = {
    .read = subpage_read,
    .write = subpage_write,
    .valid.accepts = subpage_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
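
/*
 * A subpage_t covers one guest page that is split between several sections
 * (for example adjacent MMIO regions smaller than TARGET_PAGE_SIZE).
 * sub_section[] maps each byte offset within the page to a section index;
 * reads and writes are forwarded to the owning address space at
 * addr + subpage->base.
 */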
static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
           __func__, mmio, start, end, idx, eidx, section);
#endif
    for (; idx <= eidx; idx++) {
        mmio->sub_section[idx] = section;
    }

    return 0;
}
static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
{
    subpage_t *mmio;

    mmio = g_malloc0(sizeof(subpage_t));

    mmio->as = as;
    mmio->base = base;
    memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
                          NULL, TARGET_PAGE_SIZE);
    mmio->iomem.subpage = true;
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
           mmio, base, TARGET_PAGE_SIZE);
#endif
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);

    return mmio;
}
static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
                              MemoryRegion *mr)
{
    MemoryRegionSection section = {
        .address_space = as,
        .mr = mr,
        .offset_within_address_space = 0,
        .offset_within_region = 0,
        .size = int128_2_64(),
    };

    return phys_section_add(map, &section);
}
MemoryRegion *iotlb_to_region(AddressSpace *as, hwaddr index)
{
    return as->dispatch->map.sections[index & ~TARGET_PAGE_MASK].mr;
}
static void io_mem_init(void)
{
    memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
    memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
                          NULL, UINT64_MAX);
    memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
                          NULL, UINT64_MAX);
    memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
                          NULL, UINT64_MAX);
}
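
/*
 * Each AddressSpace rebuilds its dispatch tree in two phases: mem_begin()
 * starts a fresh AddressSpaceDispatch with the four well-known sections,
 * mem_add() populates it for every region in the new view, and mem_commit()
 * compacts the tree and replaces the old dispatch.
 */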
static void mem_begin(MemoryListener *listener)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
    uint16_t n;

    n = dummy_section(&d->map, as, &io_mem_unassigned);
    assert(n == PHYS_SECTION_UNASSIGNED);
    n = dummy_section(&d->map, as, &io_mem_notdirty);
    assert(n == PHYS_SECTION_NOTDIRTY);
    n = dummy_section(&d->map, as, &io_mem_rom);
    assert(n == PHYS_SECTION_ROM);
    n = dummy_section(&d->map, as, &io_mem_watch);
    assert(n == PHYS_SECTION_WATCH);

    d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
    d->as = as;
    as->next_dispatch = d;
}
static void mem_commit(MemoryListener *listener)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *cur = as->dispatch;
    AddressSpaceDispatch *next = as->next_dispatch;

    phys_page_compact_all(next, next->map.nodes_nb);

    as->dispatch = next;

    if (cur) {
        phys_sections_free(&cur->map);
        g_free(cur);
    }
}
static void tcg_commit(MemoryListener *listener)
{
    CPUState *cpu;

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    CPU_FOREACH(cpu) {
        /* FIXME: Disentangle the cpu.h circular files deps so we can
           directly get the right CPU from listener.  */
        if (cpu->tcg_as_listener != listener) {
            continue;
        }
        tlb_flush(cpu, 1);
    }
}
static void core_log_global_start(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(true);
}

static void core_log_global_stop(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(false);
}

static MemoryListener core_memory_listener = {
    .log_global_start = core_log_global_start,
    .log_global_stop = core_log_global_stop,
};
void address_space_init_dispatch(AddressSpace *as)
{
    as->dispatch = NULL;
    as->dispatch_listener = (MemoryListener) {
        .begin = mem_begin,
        .commit = mem_commit,
        .region_add = mem_add,
        .region_nop = mem_add,
    };
    memory_listener_register(&as->dispatch_listener, as);
}
void address_space_destroy_dispatch(AddressSpace *as)
{
    AddressSpaceDispatch *d = as->dispatch;

    memory_listener_unregister(&as->dispatch_listener);
    g_free(d);
    as->dispatch = NULL;
}
static void memory_map_init(void)
{
    system_memory = g_malloc(sizeof(*system_memory));

    memory_region_init(system_memory, NULL, "system", UINT64_MAX);
    address_space_init(&address_space_memory, system_memory, "memory");

    system_io = g_malloc(sizeof(*system_io));
    memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
                          65536);
    address_space_init(&address_space_io, system_io, "I/O");

    memory_listener_register(&core_memory_listener, &address_space_memory);
}
MemoryRegion *get_system_memory(void)
{
    return system_memory;
}

MemoryRegion *get_system_io(void)
{
    return system_io;
}

#endif /* !defined(CONFIG_USER_ONLY) */
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

#else
static void invalidate_and_set_dirty(hwaddr addr,
                                     hwaddr length)
{
    if (cpu_physical_memory_is_clean(addr)) {
        /* invalidate code */
        tb_invalidate_phys_page_range(addr, addr + length, 0);
        /* set dirty bit */
        cpu_physical_memory_set_dirty_flag(addr, DIRTY_MEMORY_VGA);
        cpu_physical_memory_set_dirty_flag(addr, DIRTY_MEMORY_MIGRATION);
    }
    xen_modified_memory(addr, length);
}
static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
{
    unsigned access_size_max = mr->ops->valid.max_access_size;

    /* Regions are assumed to support 1-4 byte accesses unless
       otherwise specified.  */
    if (access_size_max == 0) {
        access_size_max = 4;
    }

    /* Bound the maximum access by the alignment of the address.  */
    if (!mr->ops->impl.unaligned) {
        unsigned align_size_max = addr & -addr;
        if (align_size_max != 0 && align_size_max < access_size_max) {
            access_size_max = align_size_max;
        }
    }

    /* Don't attempt accesses larger than the maximum.  */
    if (l > access_size_max) {
        l = access_size_max;
    }
    if (l & (l - 1)) {
        l = 1 << (qemu_fls(l) - 1);
    }

    return l;
}
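
/*
 * address_space_rw() is the slow path for physical memory access: each
 * iteration translates as much of the buffer as fits in one MemoryRegion,
 * then either memcpy()s to/from RAM (marking it dirty on writes) or splits
 * the access into io_mem_read()/io_mem_write() calls no larger than the
 * region's maximum access size.
 */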
bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
                      int len, bool is_write)
{
    hwaddr l;
    uint8_t *ptr;
    uint64_t val;
    hwaddr addr1;
    MemoryRegion *mr;
    bool error = false;

    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, is_write);

        if (is_write) {
            if (!memory_access_is_direct(mr, is_write)) {
                l = memory_access_size(mr, l, addr1);
                /* XXX: could force current_cpu to NULL to avoid
                   potential bugs */
                switch (l) {
                case 8:
                    /* 64 bit write access */
                    val = ldq_p(buf);
                    error |= io_mem_write(mr, addr1, val, 8);
                    break;
                case 4:
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    error |= io_mem_write(mr, addr1, val, 4);
                    break;
                case 2:
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    error |= io_mem_write(mr, addr1, val, 2);
                    break;
                case 1:
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    error |= io_mem_write(mr, addr1, val, 1);
                    break;
                default:
                    abort();
                }
            } else {
                addr1 += memory_region_get_ram_addr(mr);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(addr1, l);
            }
        } else {
            if (!memory_access_is_direct(mr, is_write)) {
                /* I/O case */
                l = memory_access_size(mr, l, addr1);
                switch (l) {
                case 8:
                    /* 64 bit read access */
                    error |= io_mem_read(mr, addr1, &val, 8);
                    stq_p(buf, val);
                    break;
                case 4:
                    /* 32 bit read access */
                    error |= io_mem_read(mr, addr1, &val, 4);
                    stl_p(buf, val);
                    break;
                case 2:
                    /* 16 bit read access */
                    error |= io_mem_read(mr, addr1, &val, 2);
                    stw_p(buf, val);
                    break;
                case 1:
                    /* 8 bit read access */
                    error |= io_mem_read(mr, addr1, &val, 1);
                    stb_p(buf, val);
                    break;
                default:
                    abort();
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }

    return error;
}
*as
, hwaddr addr
,
2115 const uint8_t *buf
, int len
)
2117 return address_space_rw(as
, addr
, (uint8_t *)buf
, len
, true);
2120 bool address_space_read(AddressSpace
*as
, hwaddr addr
, uint8_t *buf
, int len
)
2122 return address_space_rw(as
, addr
, buf
, len
, false);
void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
                            int len, int is_write)
{
    address_space_rw(&address_space_memory, addr, buf, len, is_write);
}
enum write_rom_type {
    WRITE_DATA,
    FLUSH_CACHE,
};

static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
    hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
{
    hwaddr l;
    uint8_t *ptr;
    hwaddr addr1;
    MemoryRegion *mr;

    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);

        if (!(memory_region_is_ram(mr) ||
              memory_region_is_romd(mr))) {
            /* do nothing */
        } else {
            addr1 += memory_region_get_ram_addr(mr);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            switch (type) {
            case WRITE_DATA:
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(addr1, l);
                break;
            case FLUSH_CACHE:
                flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
                break;
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
                                   const uint8_t *buf, int len)
{
    cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
}
void cpu_flush_icache_range(hwaddr start, int len)
{
    /*
     * This function should do the same thing as an icache flush that was
     * triggered from within the guest. For TCG we are always cache coherent,
     * so there is no need to flush anything. For KVM / Xen we need to flush
     * the host's instruction cache at least.
     */
    if (tcg_enabled()) {
        return;
    }

    cpu_physical_memory_write_rom_internal(&address_space_memory,
                                           start, NULL, len, FLUSH_CACHE);
}
typedef struct {
    MemoryRegion *mr;
    void *buffer;
    hwaddr addr;
    hwaddr len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);
void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = g_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}
static void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    QLIST_REMOVE(client, link);
    g_free(client);
}
static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
    }
}
bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
{
    MemoryRegion *mr;
    hwaddr l, xlat;

    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (!memory_access_is_direct(mr, is_write)) {
            l = memory_access_size(mr, l, addr);
            if (!memory_region_access_valid(mr, xlat, l, is_write)) {
                return false;
            }
        }

        len -= l;
        addr += l;
    }
    return true;
}
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *address_space_map(AddressSpace *as,
                        hwaddr addr,
                        hwaddr *plen,
                        bool is_write)
{
    hwaddr len = *plen;
    hwaddr done = 0;
    hwaddr l, xlat, base;
    MemoryRegion *mr, *this_mr;
    ram_addr_t raddr;

    if (len == 0) {
        return NULL;
    }

    l = len;
    mr = address_space_translate(as, addr, &xlat, &l, is_write);
    if (!memory_access_is_direct(mr, is_write)) {
        if (bounce.buffer) {
            return NULL;
        }
        /* Avoid unbounded allocations */
        l = MIN(l, TARGET_PAGE_SIZE);
        bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
        bounce.addr = addr;
        bounce.len = l;

        memory_region_ref(mr);
        bounce.mr = mr;
        if (!is_write) {
            address_space_read(as, addr, bounce.buffer, l);
        }

        *plen = l;
        return bounce.buffer;
    }

    base = xlat;
    raddr = memory_region_get_ram_addr(mr);

    for (;;) {
        len -= l;
        addr += l;
        done += l;
        if (len == 0) {
            break;
        }

        l = len;
        this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (this_mr != mr || xlat != base + done) {
            break;
        }
    }

    memory_region_ref(mr);
    *plen = done;
    return qemu_ram_ptr_length(raddr + base, plen);
}
/* Unmaps a memory region previously mapped by address_space_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         int is_write, hwaddr access_len)
{
    if (buffer != bounce.buffer) {
        MemoryRegion *mr;
        ram_addr_t addr1;

        mr = qemu_ram_addr_from_host(buffer, &addr1);
        assert(mr != NULL);
        if (is_write) {
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                invalidate_and_set_dirty(addr1, l);
                addr1 += l;
                access_len -= l;
            }
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        memory_region_unref(mr);
        return;
    }
    if (is_write) {
        address_space_write(as, bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    memory_region_unref(bounce.mr);
    cpu_notify_map_clients();
}
,
2373 return address_space_map(&address_space_memory
, addr
, plen
, is_write
);
2376 void cpu_physical_memory_unmap(void *buffer
, hwaddr len
,
2377 int is_write
, hwaddr access_len
)
2379 return address_space_unmap(&address_space_memory
, buffer
, len
, is_write
, access_len
);
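
/*
 * Illustrative sketch of the map/unmap contract above (not part of the
 * original file; "dev_addr" and "dev_len" are hypothetical device values):
 *
 *     hwaddr len = dev_len;
 *     void *p = cpu_physical_memory_map(dev_addr, &len, 1);  // 1 = is_write
 *     if (!p) {
 *         // mapping resources exhausted (e.g. bounce buffer busy);
 *         // cpu_register_map_client() signals when a retry may succeed
 *     } else {
 *         // len may come back smaller than dev_len; only touch [p, p + len)
 *         memset(p, 0, len);
 *         cpu_physical_memory_unmap(p, len, 1, len);
 *     }
 */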
/* warning: addr must be aligned */
static inline uint32_t ldl_phys_internal(AddressSpace *as, hwaddr addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;

    mr = address_space_translate(as, addr, &addr1, &l, false);
    if (l < 4 || !memory_access_is_direct(mr, false)) {
        /* I/O case */
        io_mem_read(mr, addr1, &val, 4);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
    }
    return val;
}
uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
{
    return ldl_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
{
    return ldl_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
{
    return ldl_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
}
/* warning: addr must be aligned */
static inline uint64_t ldq_phys_internal(AddressSpace *as, hwaddr addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 8;
    hwaddr addr1;

    mr = address_space_translate(as, addr, &addr1, &l,
                                 false);
    if (l < 8 || !memory_access_is_direct(mr, false)) {
        /* I/O case */
        io_mem_read(mr, addr1, &val, 8);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap64(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap64(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
    }
    return val;
}
uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
{
    return ldq_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
}

uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
{
    return ldq_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
}

uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
{
    return ldq_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
}
uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
{
    uint8_t val;
    address_space_rw(as, addr, &val, 1, 0);
    return val;
}
/* warning: addr must be aligned */
static inline uint32_t lduw_phys_internal(AddressSpace *as, hwaddr addr,
                                          enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;

    mr = address_space_translate(as, addr, &addr1, &l,
                                 false);
    if (l < 2 || !memory_access_is_direct(mr, false)) {
        /* I/O case */
        io_mem_read(mr, addr1, &val, 2);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
    }
    return val;
}
uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
{
    return lduw_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
{
    return lduw_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
{
    return lduw_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
}
/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;

    mr = address_space_translate(as, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        io_mem_write(mr, addr1, val, 4);
    } else {
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (cpu_physical_memory_is_clean(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flag(addr1,
                                                   DIRTY_MEMORY_MIGRATION);
                cpu_physical_memory_set_dirty_flag(addr1, DIRTY_MEMORY_VGA);
            }
        }
    }
}
/* warning: addr must be aligned */
static inline void stl_phys_internal(AddressSpace *as,
                                     hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;

    mr = address_space_translate(as, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        io_mem_write(mr, addr1, val, 4);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 4);
    }
}
void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    stl_phys_internal(as, addr, val, DEVICE_NATIVE_ENDIAN);
}

void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    stl_phys_internal(as, addr, val, DEVICE_LITTLE_ENDIAN);
}

void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    stl_phys_internal(as, addr, val, DEVICE_BIG_ENDIAN);
}
void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    uint8_t v = val;
    address_space_rw(as, addr, &v, 1, 1);
}
/* warning: addr must be aligned */
static inline void stw_phys_internal(AddressSpace *as,
                                     hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;

    mr = address_space_translate(as, addr, &addr1, &l, true);
    if (l < 2 || !memory_access_is_direct(mr, true)) {
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        io_mem_write(mr, addr1, val, 2);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 2);
    }
}
void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    stw_phys_internal(as, addr, val, DEVICE_NATIVE_ENDIAN);
}

void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    stw_phys_internal(as, addr, val, DEVICE_LITTLE_ENDIAN);
}

void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    stw_phys_internal(as, addr, val, DEVICE_BIG_ENDIAN);
}
void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    val = tswap64(val);
    address_space_rw(as, addr, (void *) &val, 8, 1);
}

void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    val = cpu_to_le64(val);
    address_space_rw(as, addr, (void *) &val, 8, 1);
}

void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    val = cpu_to_be64(val);
    address_space_rw(as, addr, (void *) &val, 8, 1);
}
/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(cpu, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write) {
            cpu_physical_memory_write_rom(cpu->as, phys_addr, buf, l);
        } else {
            address_space_rw(cpu->as, phys_addr, buf, l, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
#endif
/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool target_words_bigendian(void);
bool target_words_bigendian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}
#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegion *mr;
    hwaddr l = 1;

    mr = address_space_translate(&address_space_memory,
                                 phys_addr, &phys_addr, &l, false);

    return !(memory_region_is_ram(mr) ||
             memory_region_is_romd(mr));
}
void qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
{
    RAMBlock *block;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        func(block->host, block->offset, block->length, opaque);
    }
}
#endif