4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 #include <sys/types.h>
25 #include "qemu-common.h"
30 #include "qemu/osdep.h"
31 #include "sysemu/kvm.h"
32 #include "sysemu/sysemu.h"
33 #include "hw/xen/xen.h"
34 #include "qemu/timer.h"
35 #include "qemu/config-file.h"
36 #include "qemu/error-report.h"
37 #include "exec/memory.h"
38 #include "sysemu/dma.h"
39 #include "exec/address-spaces.h"
40 #if defined(CONFIG_USER_ONLY)
42 #else /* !CONFIG_USER_ONLY */
43 #include "sysemu/xen-mapcache.h"
46 #include "exec/cpu-all.h"
48 #include "exec/cputlb.h"
49 #include "translate-all.h"
51 #include "exec/memory-internal.h"
52 #include "exec/ram_addr.h"
54 #include "qemu/range.h"
56 //#define DEBUG_SUBPAGE
58 #if !defined(CONFIG_USER_ONLY)
59 static bool in_migration
;
61 RAMList ram_list
= { .blocks
= QTAILQ_HEAD_INITIALIZER(ram_list
.blocks
) };
63 static MemoryRegion
*system_memory
;
64 static MemoryRegion
*system_io
;
66 AddressSpace address_space_io
;
67 AddressSpace address_space_memory
;
69 MemoryRegion io_mem_rom
, io_mem_notdirty
;
70 static MemoryRegion io_mem_unassigned
;
72 /* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
73 #define RAM_PREALLOC (1 << 0)
75 /* RAM is mmap-ed with MAP_SHARED */
76 #define RAM_SHARED (1 << 1)
80 struct CPUTailQ cpus
= QTAILQ_HEAD_INITIALIZER(cpus
);
81 /* current CPU in the current thread. It is only valid inside
83 DEFINE_TLS(CPUState
*, current_cpu
);
84 /* 0 = Do not count executed instructions.
85 1 = Precise instruction counting.
86 2 = Adaptive rate instruction counting. */
89 #if !defined(CONFIG_USER_ONLY)
91 typedef struct PhysPageEntry PhysPageEntry
;
93 struct PhysPageEntry
{
94 /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */
96 /* index into phys_sections (!skip) or phys_map_nodes (skip) */
100 #define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)
102 /* Size of the L2 (and L3, etc) page tables. */
103 #define ADDR_SPACE_BITS 64
106 #define P_L2_SIZE (1 << P_L2_BITS)
108 #define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
110 typedef PhysPageEntry Node
[P_L2_SIZE
];
112 typedef struct PhysPageMap
{
113 unsigned sections_nb
;
114 unsigned sections_nb_alloc
;
116 unsigned nodes_nb_alloc
;
118 MemoryRegionSection
*sections
;
121 struct AddressSpaceDispatch
{
122 /* This is a multi-level map on the physical address space.
123 * The bottom level has pointers to MemoryRegionSections.
125 PhysPageEntry phys_map
;
130 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
131 typedef struct subpage_t
{
135 uint16_t sub_section
[TARGET_PAGE_SIZE
];
138 #define PHYS_SECTION_UNASSIGNED 0
139 #define PHYS_SECTION_NOTDIRTY 1
140 #define PHYS_SECTION_ROM 2
141 #define PHYS_SECTION_WATCH 3
143 static void io_mem_init(void);
144 static void memory_map_init(void);
145 static void tcg_commit(MemoryListener
*listener
);
147 static MemoryRegion io_mem_watch
;
150 #if !defined(CONFIG_USER_ONLY)
152 static void phys_map_node_reserve(PhysPageMap
*map
, unsigned nodes
)
154 if (map
->nodes_nb
+ nodes
> map
->nodes_nb_alloc
) {
155 map
->nodes_nb_alloc
= MAX(map
->nodes_nb_alloc
* 2, 16);
156 map
->nodes_nb_alloc
= MAX(map
->nodes_nb_alloc
, map
->nodes_nb
+ nodes
);
157 map
->nodes
= g_renew(Node
, map
->nodes
, map
->nodes_nb_alloc
);
161 static uint32_t phys_map_node_alloc(PhysPageMap
*map
)
166 ret
= map
->nodes_nb
++;
167 assert(ret
!= PHYS_MAP_NODE_NIL
);
168 assert(ret
!= map
->nodes_nb_alloc
);
169 for (i
= 0; i
< P_L2_SIZE
; ++i
) {
170 map
->nodes
[ret
][i
].skip
= 1;
171 map
->nodes
[ret
][i
].ptr
= PHYS_MAP_NODE_NIL
;
176 static void phys_page_set_level(PhysPageMap
*map
, PhysPageEntry
*lp
,
177 hwaddr
*index
, hwaddr
*nb
, uint16_t leaf
,
182 hwaddr step
= (hwaddr
)1 << (level
* P_L2_BITS
);
184 if (lp
->skip
&& lp
->ptr
== PHYS_MAP_NODE_NIL
) {
185 lp
->ptr
= phys_map_node_alloc(map
);
186 p
= map
->nodes
[lp
->ptr
];
188 for (i
= 0; i
< P_L2_SIZE
; i
++) {
190 p
[i
].ptr
= PHYS_SECTION_UNASSIGNED
;
194 p
= map
->nodes
[lp
->ptr
];
196 lp
= &p
[(*index
>> (level
* P_L2_BITS
)) & (P_L2_SIZE
- 1)];
198 while (*nb
&& lp
< &p
[P_L2_SIZE
]) {
199 if ((*index
& (step
- 1)) == 0 && *nb
>= step
) {
205 phys_page_set_level(map
, lp
, index
, nb
, leaf
, level
- 1);
211 static void phys_page_set(AddressSpaceDispatch
*d
,
212 hwaddr index
, hwaddr nb
,
215 /* Wildly overreserve - it doesn't matter much. */
216 phys_map_node_reserve(&d
->map
, 3 * P_L2_LEVELS
);
218 phys_page_set_level(&d
->map
, &d
->phys_map
, &index
, &nb
, leaf
, P_L2_LEVELS
- 1);
221 /* Compact a non leaf page entry. Simply detect that the entry has a single child,
222 * and update our entry so we can skip it and go directly to the destination.
224 static void phys_page_compact(PhysPageEntry
*lp
, Node
*nodes
, unsigned long *compacted
)
226 unsigned valid_ptr
= P_L2_SIZE
;
231 if (lp
->ptr
== PHYS_MAP_NODE_NIL
) {
236 for (i
= 0; i
< P_L2_SIZE
; i
++) {
237 if (p
[i
].ptr
== PHYS_MAP_NODE_NIL
) {
244 phys_page_compact(&p
[i
], nodes
, compacted
);
248 /* We can only compress if there's only one child. */
253 assert(valid_ptr
< P_L2_SIZE
);
255 /* Don't compress if it won't fit in the # of bits we have. */
256 if (lp
->skip
+ p
[valid_ptr
].skip
>= (1 << 3)) {
260 lp
->ptr
= p
[valid_ptr
].ptr
;
261 if (!p
[valid_ptr
].skip
) {
262 /* If our only child is a leaf, make this a leaf. */
263 /* By design, we should have made this node a leaf to begin with so we
264 * should never reach here.
265 * But since it's so simple to handle this, let's do it just in case we
270 lp
->skip
+= p
[valid_ptr
].skip
;
274 static void phys_page_compact_all(AddressSpaceDispatch
*d
, int nodes_nb
)
276 DECLARE_BITMAP(compacted
, nodes_nb
);
278 if (d
->phys_map
.skip
) {
279 phys_page_compact(&d
->phys_map
, d
->map
.nodes
, compacted
);
283 static MemoryRegionSection
*phys_page_find(PhysPageEntry lp
, hwaddr addr
,
284 Node
*nodes
, MemoryRegionSection
*sections
)
287 hwaddr index
= addr
>> TARGET_PAGE_BITS
;
290 for (i
= P_L2_LEVELS
; lp
.skip
&& (i
-= lp
.skip
) >= 0;) {
291 if (lp
.ptr
== PHYS_MAP_NODE_NIL
) {
292 return &sections
[PHYS_SECTION_UNASSIGNED
];
295 lp
= p
[(index
>> (i
* P_L2_BITS
)) & (P_L2_SIZE
- 1)];
298 if (sections
[lp
.ptr
].size
.hi
||
299 range_covers_byte(sections
[lp
.ptr
].offset_within_address_space
,
300 sections
[lp
.ptr
].size
.lo
, addr
)) {
301 return &sections
[lp
.ptr
];
303 return &sections
[PHYS_SECTION_UNASSIGNED
];
307 bool memory_region_is_unassigned(MemoryRegion
*mr
)
309 return mr
!= &io_mem_rom
&& mr
!= &io_mem_notdirty
&& !mr
->rom_device
310 && mr
!= &io_mem_watch
;
313 static MemoryRegionSection
*address_space_lookup_region(AddressSpaceDispatch
*d
,
315 bool resolve_subpage
)
317 MemoryRegionSection
*section
;
320 section
= phys_page_find(d
->phys_map
, addr
, d
->map
.nodes
, d
->map
.sections
);
321 if (resolve_subpage
&& section
->mr
->subpage
) {
322 subpage
= container_of(section
->mr
, subpage_t
, iomem
);
323 section
= &d
->map
.sections
[subpage
->sub_section
[SUBPAGE_IDX(addr
)]];
328 static MemoryRegionSection
*
329 address_space_translate_internal(AddressSpaceDispatch
*d
, hwaddr addr
, hwaddr
*xlat
,
330 hwaddr
*plen
, bool resolve_subpage
)
332 MemoryRegionSection
*section
;
335 section
= address_space_lookup_region(d
, addr
, resolve_subpage
);
336 /* Compute offset within MemoryRegionSection */
337 addr
-= section
->offset_within_address_space
;
339 /* Compute offset within MemoryRegion */
340 *xlat
= addr
+ section
->offset_within_region
;
342 diff
= int128_sub(section
->mr
->size
, int128_make64(addr
));
343 *plen
= int128_get64(int128_min(diff
, int128_make64(*plen
)));
347 static inline bool memory_access_is_direct(MemoryRegion
*mr
, bool is_write
)
349 if (memory_region_is_ram(mr
)) {
350 return !(is_write
&& mr
->readonly
);
352 if (memory_region_is_romd(mr
)) {
359 MemoryRegion
*address_space_translate(AddressSpace
*as
, hwaddr addr
,
360 hwaddr
*xlat
, hwaddr
*plen
,
364 MemoryRegionSection
*section
;
369 section
= address_space_translate_internal(as
->dispatch
, addr
, &addr
, plen
, true);
372 if (!mr
->iommu_ops
) {
376 iotlb
= mr
->iommu_ops
->translate(mr
, addr
);
377 addr
= ((iotlb
.translated_addr
& ~iotlb
.addr_mask
)
378 | (addr
& iotlb
.addr_mask
));
379 len
= MIN(len
, (addr
| iotlb
.addr_mask
) - addr
+ 1);
380 if (!(iotlb
.perm
& (1 << is_write
))) {
381 mr
= &io_mem_unassigned
;
385 as
= iotlb
.target_as
;
388 if (xen_enabled() && memory_access_is_direct(mr
, is_write
)) {
389 hwaddr page
= ((addr
& TARGET_PAGE_MASK
) + TARGET_PAGE_SIZE
) - addr
;
390 len
= MIN(page
, len
);
398 MemoryRegionSection
*
399 address_space_translate_for_iotlb(AddressSpace
*as
, hwaddr addr
, hwaddr
*xlat
,
402 MemoryRegionSection
*section
;
403 section
= address_space_translate_internal(as
->dispatch
, addr
, xlat
, plen
, false);
405 assert(!section
->mr
->iommu_ops
);
410 void cpu_exec_init_all(void)
412 #if !defined(CONFIG_USER_ONLY)
413 qemu_mutex_init(&ram_list
.mutex
);
419 #if !defined(CONFIG_USER_ONLY)
421 static int cpu_common_post_load(void *opaque
, int version_id
)
423 CPUState
*cpu
= opaque
;
425 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
426 version_id is increased. */
427 cpu
->interrupt_request
&= ~0x01;
433 const VMStateDescription vmstate_cpu_common
= {
434 .name
= "cpu_common",
436 .minimum_version_id
= 1,
437 .post_load
= cpu_common_post_load
,
438 .fields
= (VMStateField
[]) {
439 VMSTATE_UINT32(halted
, CPUState
),
440 VMSTATE_UINT32(interrupt_request
, CPUState
),
441 VMSTATE_END_OF_LIST()
447 CPUState
*qemu_get_cpu(int index
)
452 if (cpu
->cpu_index
== index
) {
460 #if !defined(CONFIG_USER_ONLY)
461 void tcg_cpu_address_space_init(CPUState
*cpu
, AddressSpace
*as
)
463 /* We only support one address space per cpu at the moment. */
464 assert(cpu
->as
== as
);
466 if (cpu
->tcg_as_listener
) {
467 memory_listener_unregister(cpu
->tcg_as_listener
);
469 cpu
->tcg_as_listener
= g_new0(MemoryListener
, 1);
471 cpu
->tcg_as_listener
->commit
= tcg_commit
;
472 memory_listener_register(cpu
->tcg_as_listener
, as
);
476 void cpu_exec_init(CPUArchState
*env
)
478 CPUState
*cpu
= ENV_GET_CPU(env
);
479 CPUClass
*cc
= CPU_GET_CLASS(cpu
);
483 #ifdef TARGET_WORDS_BIGENDIAN
484 cpu
->bigendian
= true;
486 cpu
->bigendian
= false;
489 #if defined(CONFIG_USER_ONLY)
493 CPU_FOREACH(some_cpu
) {
496 cpu
->cpu_index
= cpu_index
;
498 QTAILQ_INIT(&cpu
->breakpoints
);
499 QTAILQ_INIT(&cpu
->watchpoints
);
500 #ifndef CONFIG_USER_ONLY
501 cpu
->as
= &address_space_memory
;
502 cpu
->thread_id
= qemu_get_thread_id();
504 QTAILQ_INSERT_TAIL(&cpus
, cpu
, node
);
505 #if defined(CONFIG_USER_ONLY)
508 if (qdev_get_vmsd(DEVICE(cpu
)) == NULL
) {
509 vmstate_register(NULL
, cpu_index
, &vmstate_cpu_common
, cpu
);
511 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
512 register_savevm(NULL
, "cpu", cpu_index
, CPU_SAVE_VERSION
,
513 cpu_save
, cpu_load
, env
);
514 assert(cc
->vmsd
== NULL
);
515 assert(qdev_get_vmsd(DEVICE(cpu
)) == NULL
);
517 if (cc
->vmsd
!= NULL
) {
518 vmstate_register(NULL
, cpu_index
, cc
->vmsd
, cpu
);
522 #if defined(TARGET_HAS_ICE)
523 #if defined(CONFIG_USER_ONLY)
524 static void breakpoint_invalidate(CPUState
*cpu
, target_ulong pc
)
526 tb_invalidate_phys_page_range(pc
, pc
+ 1, 0);
529 static void breakpoint_invalidate(CPUState
*cpu
, target_ulong pc
)
531 hwaddr phys
= cpu_get_phys_page_debug(cpu
, pc
);
533 tb_invalidate_phys_addr(cpu
->as
,
534 phys
| (pc
& ~TARGET_PAGE_MASK
));
538 #endif /* TARGET_HAS_ICE */
540 #if defined(CONFIG_USER_ONLY)
541 void cpu_watchpoint_remove_all(CPUState
*cpu
, int mask
)
546 int cpu_watchpoint_insert(CPUState
*cpu
, vaddr addr
, vaddr len
,
547 int flags
, CPUWatchpoint
**watchpoint
)
552 /* Add a watchpoint. */
553 int cpu_watchpoint_insert(CPUState
*cpu
, vaddr addr
, vaddr len
,
554 int flags
, CPUWatchpoint
**watchpoint
)
556 vaddr len_mask
= ~(len
- 1);
559 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
560 if ((len
& (len
- 1)) || (addr
& ~len_mask
) ||
561 len
== 0 || len
> TARGET_PAGE_SIZE
) {
562 error_report("tried to set invalid watchpoint at %"
563 VADDR_PRIx
", len=%" VADDR_PRIu
, addr
, len
);
566 wp
= g_malloc(sizeof(*wp
));
569 wp
->len_mask
= len_mask
;
572 /* keep all GDB-injected watchpoints in front */
573 if (flags
& BP_GDB
) {
574 QTAILQ_INSERT_HEAD(&cpu
->watchpoints
, wp
, entry
);
576 QTAILQ_INSERT_TAIL(&cpu
->watchpoints
, wp
, entry
);
579 tlb_flush_page(cpu
, addr
);
586 /* Remove a specific watchpoint. */
587 int cpu_watchpoint_remove(CPUState
*cpu
, vaddr addr
, vaddr len
,
590 vaddr len_mask
= ~(len
- 1);
593 QTAILQ_FOREACH(wp
, &cpu
->watchpoints
, entry
) {
594 if (addr
== wp
->vaddr
&& len_mask
== wp
->len_mask
595 && flags
== (wp
->flags
& ~BP_WATCHPOINT_HIT
)) {
596 cpu_watchpoint_remove_by_ref(cpu
, wp
);
603 /* Remove a specific watchpoint by reference. */
604 void cpu_watchpoint_remove_by_ref(CPUState
*cpu
, CPUWatchpoint
*watchpoint
)
606 QTAILQ_REMOVE(&cpu
->watchpoints
, watchpoint
, entry
);
608 tlb_flush_page(cpu
, watchpoint
->vaddr
);
613 /* Remove all matching watchpoints. */
614 void cpu_watchpoint_remove_all(CPUState
*cpu
, int mask
)
616 CPUWatchpoint
*wp
, *next
;
618 QTAILQ_FOREACH_SAFE(wp
, &cpu
->watchpoints
, entry
, next
) {
619 if (wp
->flags
& mask
) {
620 cpu_watchpoint_remove_by_ref(cpu
, wp
);
626 /* Add a breakpoint. */
627 int cpu_breakpoint_insert(CPUState
*cpu
, vaddr pc
, int flags
,
628 CPUBreakpoint
**breakpoint
)
630 #if defined(TARGET_HAS_ICE)
633 bp
= g_malloc(sizeof(*bp
));
638 /* keep all GDB-injected breakpoints in front */
639 if (flags
& BP_GDB
) {
640 QTAILQ_INSERT_HEAD(&cpu
->breakpoints
, bp
, entry
);
642 QTAILQ_INSERT_TAIL(&cpu
->breakpoints
, bp
, entry
);
645 breakpoint_invalidate(cpu
, pc
);
656 /* Remove a specific breakpoint. */
657 int cpu_breakpoint_remove(CPUState
*cpu
, vaddr pc
, int flags
)
659 #if defined(TARGET_HAS_ICE)
662 QTAILQ_FOREACH(bp
, &cpu
->breakpoints
, entry
) {
663 if (bp
->pc
== pc
&& bp
->flags
== flags
) {
664 cpu_breakpoint_remove_by_ref(cpu
, bp
);
674 /* Remove a specific breakpoint by reference. */
675 void cpu_breakpoint_remove_by_ref(CPUState
*cpu
, CPUBreakpoint
*breakpoint
)
677 #if defined(TARGET_HAS_ICE)
678 QTAILQ_REMOVE(&cpu
->breakpoints
, breakpoint
, entry
);
680 breakpoint_invalidate(cpu
, breakpoint
->pc
);
686 /* Remove all matching breakpoints. */
687 void cpu_breakpoint_remove_all(CPUState
*cpu
, int mask
)
689 #if defined(TARGET_HAS_ICE)
690 CPUBreakpoint
*bp
, *next
;
692 QTAILQ_FOREACH_SAFE(bp
, &cpu
->breakpoints
, entry
, next
) {
693 if (bp
->flags
& mask
) {
694 cpu_breakpoint_remove_by_ref(cpu
, bp
);
700 /* enable or disable single step mode. EXCP_DEBUG is returned by the
701 CPU loop after each instruction */
702 void cpu_single_step(CPUState
*cpu
, int enabled
)
704 #if defined(TARGET_HAS_ICE)
705 if (cpu
->singlestep_enabled
!= enabled
) {
706 cpu
->singlestep_enabled
= enabled
;
708 kvm_update_guest_debug(cpu
, 0);
710 /* must flush all the translated code to avoid inconsistencies */
711 /* XXX: only flush what is necessary */
712 CPUArchState
*env
= cpu
->env_ptr
;
719 void QEMU_NORETURN
cpu_abort(CPUState
*cpu
, const char *fmt
, ...)
726 fprintf(stderr
, "qemu: fatal: ");
727 vfprintf(stderr
, fmt
, ap
);
728 fprintf(stderr
, "\n");
729 cpu_dump_state(cpu
, stderr
, fprintf
, CPU_DUMP_FPU
| CPU_DUMP_CCOP
);
730 if (qemu_log_enabled()) {
731 qemu_log("qemu: fatal: ");
732 qemu_log_vprintf(fmt
, ap2
);
734 log_cpu_state(cpu
, CPU_DUMP_FPU
| CPU_DUMP_CCOP
);
740 #if defined(CONFIG_USER_ONLY)
742 struct sigaction act
;
743 sigfillset(&act
.sa_mask
);
744 act
.sa_handler
= SIG_DFL
;
745 sigaction(SIGABRT
, &act
, NULL
);
751 #if !defined(CONFIG_USER_ONLY)
752 static RAMBlock
*qemu_get_ram_block(ram_addr_t addr
)
756 /* The list is protected by the iothread lock here. */
757 block
= ram_list
.mru_block
;
758 if (block
&& addr
- block
->offset
< block
->length
) {
761 QTAILQ_FOREACH(block
, &ram_list
.blocks
, next
) {
762 if (addr
- block
->offset
< block
->length
) {
767 fprintf(stderr
, "Bad ram offset %" PRIx64
"\n", (uint64_t)addr
);
771 ram_list
.mru_block
= block
;
775 static void tlb_reset_dirty_range_all(ram_addr_t start
, ram_addr_t length
)
781 end
= TARGET_PAGE_ALIGN(start
+ length
);
782 start
&= TARGET_PAGE_MASK
;
784 block
= qemu_get_ram_block(start
);
785 assert(block
== qemu_get_ram_block(end
- 1));
786 start1
= (uintptr_t)block
->host
+ (start
- block
->offset
);
787 cpu_tlb_reset_dirty_all(start1
, length
);
790 /* Note: start and end must be within the same ram block. */
791 void cpu_physical_memory_reset_dirty(ram_addr_t start
, ram_addr_t length
,
796 cpu_physical_memory_clear_dirty_range(start
, length
, client
);
799 tlb_reset_dirty_range_all(start
, length
);
803 static void cpu_physical_memory_set_dirty_tracking(bool enable
)
805 in_migration
= enable
;
808 hwaddr
memory_region_section_get_iotlb(CPUState
*cpu
,
809 MemoryRegionSection
*section
,
811 hwaddr paddr
, hwaddr xlat
,
813 target_ulong
*address
)
818 if (memory_region_is_ram(section
->mr
)) {
820 iotlb
= (memory_region_get_ram_addr(section
->mr
) & TARGET_PAGE_MASK
)
822 if (!section
->readonly
) {
823 iotlb
|= PHYS_SECTION_NOTDIRTY
;
825 iotlb
|= PHYS_SECTION_ROM
;
828 iotlb
= section
- section
->address_space
->dispatch
->map
.sections
;
832 /* Make accesses to pages with watchpoints go via the
833 watchpoint trap routines. */
834 QTAILQ_FOREACH(wp
, &cpu
->watchpoints
, entry
) {
835 if (vaddr
== (wp
->vaddr
& TARGET_PAGE_MASK
)) {
836 /* Avoid trapping reads of pages with a write breakpoint. */
837 if ((prot
& PAGE_WRITE
) || (wp
->flags
& BP_MEM_READ
)) {
838 iotlb
= PHYS_SECTION_WATCH
+ paddr
;
839 *address
|= TLB_MMIO
;
847 #endif /* defined(CONFIG_USER_ONLY) */
849 #if !defined(CONFIG_USER_ONLY)
851 static int subpage_register (subpage_t
*mmio
, uint32_t start
, uint32_t end
,
853 static subpage_t
*subpage_init(AddressSpace
*as
, hwaddr base
);
855 static void *(*phys_mem_alloc
)(size_t size
) = qemu_anon_ram_alloc
;
858 * Set a custom physical guest memory allocator.
859 * Accelerators with unusual needs may need this. Hopefully, we can
860 * get rid of it eventually.
862 void phys_mem_set_alloc(void *(*alloc
)(size_t))
864 phys_mem_alloc
= alloc
;
867 static uint16_t phys_section_add(PhysPageMap
*map
,
868 MemoryRegionSection
*section
)
870 /* The physical section number is ORed with a page-aligned
871 * pointer to produce the iotlb entries. Thus it should
872 * never overflow into the page-aligned value.
874 assert(map
->sections_nb
< TARGET_PAGE_SIZE
);
876 if (map
->sections_nb
== map
->sections_nb_alloc
) {
877 map
->sections_nb_alloc
= MAX(map
->sections_nb_alloc
* 2, 16);
878 map
->sections
= g_renew(MemoryRegionSection
, map
->sections
,
879 map
->sections_nb_alloc
);
881 map
->sections
[map
->sections_nb
] = *section
;
882 memory_region_ref(section
->mr
);
883 return map
->sections_nb
++;
886 static void phys_section_destroy(MemoryRegion
*mr
)
888 memory_region_unref(mr
);
891 subpage_t
*subpage
= container_of(mr
, subpage_t
, iomem
);
892 object_unref(OBJECT(&subpage
->iomem
));
897 static void phys_sections_free(PhysPageMap
*map
)
899 while (map
->sections_nb
> 0) {
900 MemoryRegionSection
*section
= &map
->sections
[--map
->sections_nb
];
901 phys_section_destroy(section
->mr
);
903 g_free(map
->sections
);
907 static void register_subpage(AddressSpaceDispatch
*d
, MemoryRegionSection
*section
)
910 hwaddr base
= section
->offset_within_address_space
912 MemoryRegionSection
*existing
= phys_page_find(d
->phys_map
, base
,
913 d
->map
.nodes
, d
->map
.sections
);
914 MemoryRegionSection subsection
= {
915 .offset_within_address_space
= base
,
916 .size
= int128_make64(TARGET_PAGE_SIZE
),
920 assert(existing
->mr
->subpage
|| existing
->mr
== &io_mem_unassigned
);
922 if (!(existing
->mr
->subpage
)) {
923 subpage
= subpage_init(d
->as
, base
);
924 subsection
.address_space
= d
->as
;
925 subsection
.mr
= &subpage
->iomem
;
926 phys_page_set(d
, base
>> TARGET_PAGE_BITS
, 1,
927 phys_section_add(&d
->map
, &subsection
));
929 subpage
= container_of(existing
->mr
, subpage_t
, iomem
);
931 start
= section
->offset_within_address_space
& ~TARGET_PAGE_MASK
;
932 end
= start
+ int128_get64(section
->size
) - 1;
933 subpage_register(subpage
, start
, end
,
934 phys_section_add(&d
->map
, section
));
938 static void register_multipage(AddressSpaceDispatch
*d
,
939 MemoryRegionSection
*section
)
941 hwaddr start_addr
= section
->offset_within_address_space
;
942 uint16_t section_index
= phys_section_add(&d
->map
, section
);
943 uint64_t num_pages
= int128_get64(int128_rshift(section
->size
,
947 phys_page_set(d
, start_addr
>> TARGET_PAGE_BITS
, num_pages
, section_index
);
950 static void mem_add(MemoryListener
*listener
, MemoryRegionSection
*section
)
952 AddressSpace
*as
= container_of(listener
, AddressSpace
, dispatch_listener
);
953 AddressSpaceDispatch
*d
= as
->next_dispatch
;
954 MemoryRegionSection now
= *section
, remain
= *section
;
955 Int128 page_size
= int128_make64(TARGET_PAGE_SIZE
);
957 if (now
.offset_within_address_space
& ~TARGET_PAGE_MASK
) {
958 uint64_t left
= TARGET_PAGE_ALIGN(now
.offset_within_address_space
)
959 - now
.offset_within_address_space
;
961 now
.size
= int128_min(int128_make64(left
), now
.size
);
962 register_subpage(d
, &now
);
964 now
.size
= int128_zero();
966 while (int128_ne(remain
.size
, now
.size
)) {
967 remain
.size
= int128_sub(remain
.size
, now
.size
);
968 remain
.offset_within_address_space
+= int128_get64(now
.size
);
969 remain
.offset_within_region
+= int128_get64(now
.size
);
971 if (int128_lt(remain
.size
, page_size
)) {
972 register_subpage(d
, &now
);
973 } else if (remain
.offset_within_address_space
& ~TARGET_PAGE_MASK
) {
974 now
.size
= page_size
;
975 register_subpage(d
, &now
);
977 now
.size
= int128_and(now
.size
, int128_neg(page_size
));
978 register_multipage(d
, &now
);
983 void qemu_flush_coalesced_mmio_buffer(void)
986 kvm_flush_coalesced_mmio_buffer();
989 void qemu_mutex_lock_ramlist(void)
991 qemu_mutex_lock(&ram_list
.mutex
);
994 void qemu_mutex_unlock_ramlist(void)
996 qemu_mutex_unlock(&ram_list
.mutex
);
1001 #include <sys/vfs.h>
1003 #define HUGETLBFS_MAGIC 0x958458f6
1005 static long gethugepagesize(const char *path
)
1011 ret
= statfs(path
, &fs
);
1012 } while (ret
!= 0 && errno
== EINTR
);
1019 if (fs
.f_type
!= HUGETLBFS_MAGIC
)
1020 fprintf(stderr
, "Warning: path not on HugeTLBFS: %s\n", path
);
1025 static void *file_ram_alloc(RAMBlock
*block
,
1031 char *sanitized_name
;
1033 void * volatile area
;
1035 uintptr_t hpagesize
;
1037 hpagesize
= gethugepagesize(path
);
1042 if (memory
< hpagesize
) {
1046 if (kvm_enabled() && !kvm_has_sync_mmu()) {
1048 "host lacks kvm mmu notifiers, -mem-path unsupported");
1052 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
1053 sanitized_name
= g_strdup(block
->mr
->name
);
1054 for (c
= sanitized_name
; *c
!= '\0'; c
++) {
1059 filename
= g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path
,
1061 g_free(sanitized_name
);
1063 fd
= mkstemp(filename
);
1065 error_setg_errno(errp
, errno
,
1066 "unable to create backing store for hugepages");
1073 memory
= (memory
+hpagesize
-1) & ~(hpagesize
-1);
1076 * ftruncate is not supported by hugetlbfs in older
1077 * hosts, so don't bother bailing out on errors.
1078 * If anything goes wrong with it under other filesystems,
1081 if (ftruncate(fd
, memory
)) {
1082 perror("ftruncate");
1085 area
= mmap(0, memory
, PROT_READ
| PROT_WRITE
,
1086 (block
->flags
& RAM_SHARED
? MAP_SHARED
: MAP_PRIVATE
),
1088 if (area
== MAP_FAILED
) {
1089 error_setg_errno(errp
, errno
,
1090 "unable to map backing store for hugepages");
1096 os_mem_prealloc(fd
, area
, memory
);
1110 static ram_addr_t
find_ram_offset(ram_addr_t size
)
1112 RAMBlock
*block
, *next_block
;
1113 ram_addr_t offset
= RAM_ADDR_MAX
, mingap
= RAM_ADDR_MAX
;
1115 assert(size
!= 0); /* it would hand out same offset multiple times */
1117 if (QTAILQ_EMPTY(&ram_list
.blocks
))
1120 QTAILQ_FOREACH(block
, &ram_list
.blocks
, next
) {
1121 ram_addr_t end
, next
= RAM_ADDR_MAX
;
1123 end
= block
->offset
+ block
->length
;
1125 QTAILQ_FOREACH(next_block
, &ram_list
.blocks
, next
) {
1126 if (next_block
->offset
>= end
) {
1127 next
= MIN(next
, next_block
->offset
);
1130 if (next
- end
>= size
&& next
- end
< mingap
) {
1132 mingap
= next
- end
;
1136 if (offset
== RAM_ADDR_MAX
) {
1137 fprintf(stderr
, "Failed to find gap of requested size: %" PRIu64
"\n",
1145 ram_addr_t
last_ram_offset(void)
1148 ram_addr_t last
= 0;
1150 QTAILQ_FOREACH(block
, &ram_list
.blocks
, next
)
1151 last
= MAX(last
, block
->offset
+ block
->length
);
1156 static void qemu_ram_setup_dump(void *addr
, ram_addr_t size
)
1160 /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
1161 if (!qemu_opt_get_bool(qemu_get_machine_opts(),
1162 "dump-guest-core", true)) {
1163 ret
= qemu_madvise(addr
, size
, QEMU_MADV_DONTDUMP
);
1165 perror("qemu_madvise");
1166 fprintf(stderr
, "madvise doesn't support MADV_DONTDUMP, "
1167 "but dump_guest_core=off specified\n");
1172 static RAMBlock
*find_ram_block(ram_addr_t addr
)
1176 QTAILQ_FOREACH(block
, &ram_list
.blocks
, next
) {
1177 if (block
->offset
== addr
) {
1185 void qemu_ram_set_idstr(ram_addr_t addr
, const char *name
, DeviceState
*dev
)
1187 RAMBlock
*new_block
= find_ram_block(addr
);
1191 assert(!new_block
->idstr
[0]);
1194 char *id
= qdev_get_dev_path(dev
);
1196 snprintf(new_block
->idstr
, sizeof(new_block
->idstr
), "%s/", id
);
1200 pstrcat(new_block
->idstr
, sizeof(new_block
->idstr
), name
);
1202 /* This assumes the iothread lock is taken here too. */
1203 qemu_mutex_lock_ramlist();
1204 QTAILQ_FOREACH(block
, &ram_list
.blocks
, next
) {
1205 if (block
!= new_block
&& !strcmp(block
->idstr
, new_block
->idstr
)) {
1206 fprintf(stderr
, "RAMBlock \"%s\" already registered, abort!\n",
1211 qemu_mutex_unlock_ramlist();
1214 void qemu_ram_unset_idstr(ram_addr_t addr
)
1216 RAMBlock
*block
= find_ram_block(addr
);
1219 memset(block
->idstr
, 0, sizeof(block
->idstr
));
1223 static int memory_try_enable_merging(void *addr
, size_t len
)
1225 if (!qemu_opt_get_bool(qemu_get_machine_opts(), "mem-merge", true)) {
1226 /* disabled by the user */
1230 return qemu_madvise(addr
, len
, QEMU_MADV_MERGEABLE
);
1233 static ram_addr_t
ram_block_add(RAMBlock
*new_block
)
1236 ram_addr_t old_ram_size
, new_ram_size
;
1238 old_ram_size
= last_ram_offset() >> TARGET_PAGE_BITS
;
1240 /* This assumes the iothread lock is taken here too. */
1241 qemu_mutex_lock_ramlist();
1242 new_block
->offset
= find_ram_offset(new_block
->length
);
1244 if (!new_block
->host
) {
1245 if (xen_enabled()) {
1246 xen_ram_alloc(new_block
->offset
, new_block
->length
, new_block
->mr
);
1248 new_block
->host
= phys_mem_alloc(new_block
->length
);
1249 if (!new_block
->host
) {
1250 fprintf(stderr
, "Cannot set up guest memory '%s': %s\n",
1251 new_block
->mr
->name
, strerror(errno
));
1254 memory_try_enable_merging(new_block
->host
, new_block
->length
);
1258 /* Keep the list sorted from biggest to smallest block. */
1259 QTAILQ_FOREACH(block
, &ram_list
.blocks
, next
) {
1260 if (block
->length
< new_block
->length
) {
1265 QTAILQ_INSERT_BEFORE(block
, new_block
, next
);
1267 QTAILQ_INSERT_TAIL(&ram_list
.blocks
, new_block
, next
);
1269 ram_list
.mru_block
= NULL
;
1272 qemu_mutex_unlock_ramlist();
1274 new_ram_size
= last_ram_offset() >> TARGET_PAGE_BITS
;
1276 if (new_ram_size
> old_ram_size
) {
1278 for (i
= 0; i
< DIRTY_MEMORY_NUM
; i
++) {
1279 ram_list
.dirty_memory
[i
] =
1280 bitmap_zero_extend(ram_list
.dirty_memory
[i
],
1281 old_ram_size
, new_ram_size
);
1284 cpu_physical_memory_set_dirty_range(new_block
->offset
, new_block
->length
);
1286 qemu_ram_setup_dump(new_block
->host
, new_block
->length
);
1287 qemu_madvise(new_block
->host
, new_block
->length
, QEMU_MADV_HUGEPAGE
);
1288 qemu_madvise(new_block
->host
, new_block
->length
, QEMU_MADV_DONTFORK
);
1290 if (kvm_enabled()) {
1291 kvm_setup_guest_memory(new_block
->host
, new_block
->length
);
1294 return new_block
->offset
;
1298 ram_addr_t
qemu_ram_alloc_from_file(ram_addr_t size
, MemoryRegion
*mr
,
1299 bool share
, const char *mem_path
,
1302 RAMBlock
*new_block
;
1304 if (xen_enabled()) {
1305 error_setg(errp
, "-mem-path not supported with Xen");
1309 if (phys_mem_alloc
!= qemu_anon_ram_alloc
) {
1311 * file_ram_alloc() needs to allocate just like
1312 * phys_mem_alloc, but we haven't bothered to provide
1316 "-mem-path not supported with this accelerator");
1320 size
= TARGET_PAGE_ALIGN(size
);
1321 new_block
= g_malloc0(sizeof(*new_block
));
1323 new_block
->length
= size
;
1324 new_block
->flags
= share
? RAM_SHARED
: 0;
1325 new_block
->host
= file_ram_alloc(new_block
, size
,
1327 if (!new_block
->host
) {
1332 return ram_block_add(new_block
);
1336 ram_addr_t
qemu_ram_alloc_from_ptr(ram_addr_t size
, void *host
,
1339 RAMBlock
*new_block
;
1341 size
= TARGET_PAGE_ALIGN(size
);
1342 new_block
= g_malloc0(sizeof(*new_block
));
1344 new_block
->length
= size
;
1346 new_block
->host
= host
;
1348 new_block
->flags
|= RAM_PREALLOC
;
1350 return ram_block_add(new_block
);
1353 ram_addr_t
qemu_ram_alloc(ram_addr_t size
, MemoryRegion
*mr
)
1355 return qemu_ram_alloc_from_ptr(size
, NULL
, mr
);
1358 void qemu_ram_free_from_ptr(ram_addr_t addr
)
1362 /* This assumes the iothread lock is taken here too. */
1363 qemu_mutex_lock_ramlist();
1364 QTAILQ_FOREACH(block
, &ram_list
.blocks
, next
) {
1365 if (addr
== block
->offset
) {
1366 QTAILQ_REMOVE(&ram_list
.blocks
, block
, next
);
1367 ram_list
.mru_block
= NULL
;
1373 qemu_mutex_unlock_ramlist();
1376 void qemu_ram_free(ram_addr_t addr
)
1380 /* This assumes the iothread lock is taken here too. */
1381 qemu_mutex_lock_ramlist();
1382 QTAILQ_FOREACH(block
, &ram_list
.blocks
, next
) {
1383 if (addr
== block
->offset
) {
1384 QTAILQ_REMOVE(&ram_list
.blocks
, block
, next
);
1385 ram_list
.mru_block
= NULL
;
1387 if (block
->flags
& RAM_PREALLOC
) {
1389 } else if (xen_enabled()) {
1390 xen_invalidate_map_cache_entry(block
->host
);
1392 } else if (block
->fd
>= 0) {
1393 munmap(block
->host
, block
->length
);
1397 qemu_anon_ram_free(block
->host
, block
->length
);
1403 qemu_mutex_unlock_ramlist();
1408 void qemu_ram_remap(ram_addr_t addr
, ram_addr_t length
)
1415 QTAILQ_FOREACH(block
, &ram_list
.blocks
, next
) {
1416 offset
= addr
- block
->offset
;
1417 if (offset
< block
->length
) {
1418 vaddr
= block
->host
+ offset
;
1419 if (block
->flags
& RAM_PREALLOC
) {
1421 } else if (xen_enabled()) {
1425 munmap(vaddr
, length
);
1426 if (block
->fd
>= 0) {
1427 flags
|= (block
->flags
& RAM_SHARED
?
1428 MAP_SHARED
: MAP_PRIVATE
);
1429 area
= mmap(vaddr
, length
, PROT_READ
| PROT_WRITE
,
1430 flags
, block
->fd
, offset
);
1433 * Remap needs to match alloc. Accelerators that
1434 * set phys_mem_alloc never remap. If they did,
1435 * we'd need a remap hook here.
1437 assert(phys_mem_alloc
== qemu_anon_ram_alloc
);
1439 flags
|= MAP_PRIVATE
| MAP_ANONYMOUS
;
1440 area
= mmap(vaddr
, length
, PROT_READ
| PROT_WRITE
,
1443 if (area
!= vaddr
) {
1444 fprintf(stderr
, "Could not remap addr: "
1445 RAM_ADDR_FMT
"@" RAM_ADDR_FMT
"\n",
1449 memory_try_enable_merging(vaddr
, length
);
1450 qemu_ram_setup_dump(vaddr
, length
);
1456 #endif /* !_WIN32 */
1458 int qemu_get_ram_fd(ram_addr_t addr
)
1460 RAMBlock
*block
= qemu_get_ram_block(addr
);
1465 void *qemu_get_ram_block_host_ptr(ram_addr_t addr
)
1467 RAMBlock
*block
= qemu_get_ram_block(addr
);
1472 /* Return a host pointer to ram allocated with qemu_ram_alloc.
1473 With the exception of the softmmu code in this file, this should
1474 only be used for local memory (e.g. video ram) that the device owns,
1475 and knows it isn't going to access beyond the end of the block.
1477 It should not be used for general purpose DMA.
1478 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
1480 void *qemu_get_ram_ptr(ram_addr_t addr
)
1482 RAMBlock
*block
= qemu_get_ram_block(addr
);
1484 if (xen_enabled()) {
1485 /* We need to check if the requested address is in the RAM
1486 * because we don't want to map the entire memory in QEMU.
1487 * In that case just map until the end of the page.
1489 if (block
->offset
== 0) {
1490 return xen_map_cache(addr
, 0, 0);
1491 } else if (block
->host
== NULL
) {
1493 xen_map_cache(block
->offset
, block
->length
, 1);
1496 return block
->host
+ (addr
- block
->offset
);
1499 /* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
1500 * but takes a size argument */
1501 static void *qemu_ram_ptr_length(ram_addr_t addr
, hwaddr
*size
)
1506 if (xen_enabled()) {
1507 return xen_map_cache(addr
, *size
, 1);
1511 QTAILQ_FOREACH(block
, &ram_list
.blocks
, next
) {
1512 if (addr
- block
->offset
< block
->length
) {
1513 if (addr
- block
->offset
+ *size
> block
->length
)
1514 *size
= block
->length
- addr
+ block
->offset
;
1515 return block
->host
+ (addr
- block
->offset
);
1519 fprintf(stderr
, "Bad ram offset %" PRIx64
"\n", (uint64_t)addr
);
1524 /* Some of the softmmu routines need to translate from a host pointer
1525 (typically a TLB entry) back to a ram offset. */
1526 MemoryRegion
*qemu_ram_addr_from_host(void *ptr
, ram_addr_t
*ram_addr
)
1529 uint8_t *host
= ptr
;
1531 if (xen_enabled()) {
1532 *ram_addr
= xen_ram_addr_from_mapcache(ptr
);
1533 return qemu_get_ram_block(*ram_addr
)->mr
;
1536 block
= ram_list
.mru_block
;
1537 if (block
&& block
->host
&& host
- block
->host
< block
->length
) {
1541 QTAILQ_FOREACH(block
, &ram_list
.blocks
, next
) {
1542 /* This case append when the block is not mapped. */
1543 if (block
->host
== NULL
) {
1546 if (host
- block
->host
< block
->length
) {
1554 *ram_addr
= block
->offset
+ (host
- block
->host
);
1558 static void notdirty_mem_write(void *opaque
, hwaddr ram_addr
,
1559 uint64_t val
, unsigned size
)
1561 if (!cpu_physical_memory_get_dirty_flag(ram_addr
, DIRTY_MEMORY_CODE
)) {
1562 tb_invalidate_phys_page_fast(ram_addr
, size
);
1566 stb_p(qemu_get_ram_ptr(ram_addr
), val
);
1569 stw_p(qemu_get_ram_ptr(ram_addr
), val
);
1572 stl_p(qemu_get_ram_ptr(ram_addr
), val
);
1577 cpu_physical_memory_set_dirty_range_nocode(ram_addr
, size
);
1578 /* we remove the notdirty callback only if the code has been
1580 if (!cpu_physical_memory_is_clean(ram_addr
)) {
1581 CPUArchState
*env
= current_cpu
->env_ptr
;
1582 tlb_set_dirty(env
, current_cpu
->mem_io_vaddr
);
1586 static bool notdirty_mem_accepts(void *opaque
, hwaddr addr
,
1587 unsigned size
, bool is_write
)
1592 static const MemoryRegionOps notdirty_mem_ops
= {
1593 .write
= notdirty_mem_write
,
1594 .valid
.accepts
= notdirty_mem_accepts
,
1595 .endianness
= DEVICE_NATIVE_ENDIAN
,
1598 /* Generate a debug exception if a watchpoint has been hit. */
1599 static void check_watchpoint(int offset
, int len_mask
, int flags
)
1601 CPUState
*cpu
= current_cpu
;
1602 CPUArchState
*env
= cpu
->env_ptr
;
1603 target_ulong pc
, cs_base
;
1608 if (cpu
->watchpoint_hit
) {
1609 /* We re-entered the check after replacing the TB. Now raise
1610 * the debug interrupt so that is will trigger after the
1611 * current instruction. */
1612 cpu_interrupt(cpu
, CPU_INTERRUPT_DEBUG
);
1615 vaddr
= (cpu
->mem_io_vaddr
& TARGET_PAGE_MASK
) + offset
;
1616 QTAILQ_FOREACH(wp
, &cpu
->watchpoints
, entry
) {
1617 if ((vaddr
== (wp
->vaddr
& len_mask
) ||
1618 (vaddr
& wp
->len_mask
) == wp
->vaddr
) && (wp
->flags
& flags
)) {
1619 wp
->flags
|= BP_WATCHPOINT_HIT
;
1620 if (!cpu
->watchpoint_hit
) {
1621 cpu
->watchpoint_hit
= wp
;
1622 tb_check_watchpoint(cpu
);
1623 if (wp
->flags
& BP_STOP_BEFORE_ACCESS
) {
1624 cpu
->exception_index
= EXCP_DEBUG
;
1627 cpu_get_tb_cpu_state(env
, &pc
, &cs_base
, &cpu_flags
);
1628 tb_gen_code(cpu
, pc
, cs_base
, cpu_flags
, 1);
1629 cpu_resume_from_signal(cpu
, NULL
);
1633 wp
->flags
&= ~BP_WATCHPOINT_HIT
;
1638 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
1639 so these check for a hit then pass through to the normal out-of-line
1641 static uint64_t watch_mem_read(void *opaque
, hwaddr addr
,
1644 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, ~(size
- 1), BP_MEM_READ
);
1646 case 1: return ldub_phys(&address_space_memory
, addr
);
1647 case 2: return lduw_phys(&address_space_memory
, addr
);
1648 case 4: return ldl_phys(&address_space_memory
, addr
);
1653 static void watch_mem_write(void *opaque
, hwaddr addr
,
1654 uint64_t val
, unsigned size
)
1656 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, ~(size
- 1), BP_MEM_WRITE
);
1659 stb_phys(&address_space_memory
, addr
, val
);
1662 stw_phys(&address_space_memory
, addr
, val
);
1665 stl_phys(&address_space_memory
, addr
, val
);
1671 static const MemoryRegionOps watch_mem_ops
= {
1672 .read
= watch_mem_read
,
1673 .write
= watch_mem_write
,
1674 .endianness
= DEVICE_NATIVE_ENDIAN
,
1677 static uint64_t subpage_read(void *opaque
, hwaddr addr
,
1680 subpage_t
*subpage
= opaque
;
1683 #if defined(DEBUG_SUBPAGE)
1684 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
"\n", __func__
,
1685 subpage
, len
, addr
);
1687 address_space_read(subpage
->as
, addr
+ subpage
->base
, buf
, len
);
1700 static void subpage_write(void *opaque
, hwaddr addr
,
1701 uint64_t value
, unsigned len
)
1703 subpage_t
*subpage
= opaque
;
1706 #if defined(DEBUG_SUBPAGE)
1707 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
1708 " value %"PRIx64
"\n",
1709 __func__
, subpage
, len
, addr
, value
);
1724 address_space_write(subpage
->as
, addr
+ subpage
->base
, buf
, len
);
1727 static bool subpage_accepts(void *opaque
, hwaddr addr
,
1728 unsigned len
, bool is_write
)
1730 subpage_t
*subpage
= opaque
;
1731 #if defined(DEBUG_SUBPAGE)
1732 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx
"\n",
1733 __func__
, subpage
, is_write
? 'w' : 'r', len
, addr
);
1736 return address_space_access_valid(subpage
->as
, addr
+ subpage
->base
,
1740 static const MemoryRegionOps subpage_ops
= {
1741 .read
= subpage_read
,
1742 .write
= subpage_write
,
1743 .valid
.accepts
= subpage_accepts
,
1744 .endianness
= DEVICE_NATIVE_ENDIAN
,
1747 static int subpage_register (subpage_t
*mmio
, uint32_t start
, uint32_t end
,
1752 if (start
>= TARGET_PAGE_SIZE
|| end
>= TARGET_PAGE_SIZE
)
1754 idx
= SUBPAGE_IDX(start
);
1755 eidx
= SUBPAGE_IDX(end
);
1756 #if defined(DEBUG_SUBPAGE)
1757 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
1758 __func__
, mmio
, start
, end
, idx
, eidx
, section
);
1760 for (; idx
<= eidx
; idx
++) {
1761 mmio
->sub_section
[idx
] = section
;
1767 static subpage_t
*subpage_init(AddressSpace
*as
, hwaddr base
)
1771 mmio
= g_malloc0(sizeof(subpage_t
));
1775 memory_region_init_io(&mmio
->iomem
, NULL
, &subpage_ops
, mmio
,
1776 NULL
, TARGET_PAGE_SIZE
);
1777 mmio
->iomem
.subpage
= true;
1778 #if defined(DEBUG_SUBPAGE)
1779 printf("%s: %p base " TARGET_FMT_plx
" len %08x\n", __func__
,
1780 mmio
, base
, TARGET_PAGE_SIZE
);
1782 subpage_register(mmio
, 0, TARGET_PAGE_SIZE
-1, PHYS_SECTION_UNASSIGNED
);
1787 static uint16_t dummy_section(PhysPageMap
*map
, AddressSpace
*as
,
1791 MemoryRegionSection section
= {
1792 .address_space
= as
,
1794 .offset_within_address_space
= 0,
1795 .offset_within_region
= 0,
1796 .size
= int128_2_64(),
1799 return phys_section_add(map
, §ion
);
1802 MemoryRegion
*iotlb_to_region(AddressSpace
*as
, hwaddr index
)
1804 return as
->dispatch
->map
.sections
[index
& ~TARGET_PAGE_MASK
].mr
;
1807 static void io_mem_init(void)
1809 memory_region_init_io(&io_mem_rom
, NULL
, &unassigned_mem_ops
, NULL
, NULL
, UINT64_MAX
);
1810 memory_region_init_io(&io_mem_unassigned
, NULL
, &unassigned_mem_ops
, NULL
,
1812 memory_region_init_io(&io_mem_notdirty
, NULL
, ¬dirty_mem_ops
, NULL
,
1814 memory_region_init_io(&io_mem_watch
, NULL
, &watch_mem_ops
, NULL
,
1818 static void mem_begin(MemoryListener
*listener
)
1820 AddressSpace
*as
= container_of(listener
, AddressSpace
, dispatch_listener
);
1821 AddressSpaceDispatch
*d
= g_new0(AddressSpaceDispatch
, 1);
1824 n
= dummy_section(&d
->map
, as
, &io_mem_unassigned
);
1825 assert(n
== PHYS_SECTION_UNASSIGNED
);
1826 n
= dummy_section(&d
->map
, as
, &io_mem_notdirty
);
1827 assert(n
== PHYS_SECTION_NOTDIRTY
);
1828 n
= dummy_section(&d
->map
, as
, &io_mem_rom
);
1829 assert(n
== PHYS_SECTION_ROM
);
1830 n
= dummy_section(&d
->map
, as
, &io_mem_watch
);
1831 assert(n
== PHYS_SECTION_WATCH
);
1833 d
->phys_map
= (PhysPageEntry
) { .ptr
= PHYS_MAP_NODE_NIL
, .skip
= 1 };
1835 as
->next_dispatch
= d
;
1838 static void mem_commit(MemoryListener
*listener
)
1840 AddressSpace
*as
= container_of(listener
, AddressSpace
, dispatch_listener
);
1841 AddressSpaceDispatch
*cur
= as
->dispatch
;
1842 AddressSpaceDispatch
*next
= as
->next_dispatch
;
1844 phys_page_compact_all(next
, next
->map
.nodes_nb
);
1846 as
->dispatch
= next
;
1849 phys_sections_free(&cur
->map
);
1854 static void tcg_commit(MemoryListener
*listener
)
1858 /* since each CPU stores ram addresses in its TLB cache, we must
1859 reset the modified entries */
1862 /* FIXME: Disentangle the cpu.h circular files deps so we can
1863 directly get the right CPU from listener. */
1864 if (cpu
->tcg_as_listener
!= listener
) {
1871 static void core_log_global_start(MemoryListener
*listener
)
1873 cpu_physical_memory_set_dirty_tracking(true);
1876 static void core_log_global_stop(MemoryListener
*listener
)
1878 cpu_physical_memory_set_dirty_tracking(false);
1881 static MemoryListener core_memory_listener
= {
1882 .log_global_start
= core_log_global_start
,
1883 .log_global_stop
= core_log_global_stop
,
1887 void address_space_init_dispatch(AddressSpace
*as
)
1889 as
->dispatch
= NULL
;
1890 as
->dispatch_listener
= (MemoryListener
) {
1892 .commit
= mem_commit
,
1893 .region_add
= mem_add
,
1894 .region_nop
= mem_add
,
1897 memory_listener_register(&as
->dispatch_listener
, as
);
1900 void address_space_destroy_dispatch(AddressSpace
*as
)
1902 AddressSpaceDispatch
*d
= as
->dispatch
;
1904 memory_listener_unregister(&as
->dispatch_listener
);
1906 as
->dispatch
= NULL
;
1909 static void memory_map_init(void)
1911 system_memory
= g_malloc(sizeof(*system_memory
));
1913 memory_region_init(system_memory
, NULL
, "system", UINT64_MAX
);
1914 address_space_init(&address_space_memory
, system_memory
, "memory");
1916 system_io
= g_malloc(sizeof(*system_io
));
1917 memory_region_init_io(system_io
, NULL
, &unassigned_io_ops
, NULL
, "io",
1919 address_space_init(&address_space_io
, system_io
, "I/O");
1921 memory_listener_register(&core_memory_listener
, &address_space_memory
);
1924 MemoryRegion
*get_system_memory(void)
1926 return system_memory
;
1929 MemoryRegion
*get_system_io(void)
1934 #endif /* !defined(CONFIG_USER_ONLY) */
1936 /* physical memory access (slow version, mainly for debug) */
1937 #if defined(CONFIG_USER_ONLY)
1938 int cpu_memory_rw_debug(CPUState
*cpu
, target_ulong addr
,
1939 uint8_t *buf
, int len
, int is_write
)
1946 page
= addr
& TARGET_PAGE_MASK
;
1947 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
1950 flags
= page_get_flags(page
);
1951 if (!(flags
& PAGE_VALID
))
1954 if (!(flags
& PAGE_WRITE
))
1956 /* XXX: this code should not depend on lock_user */
1957 if (!(p
= lock_user(VERIFY_WRITE
, addr
, l
, 0)))
1960 unlock_user(p
, addr
, l
);
1962 if (!(flags
& PAGE_READ
))
1964 /* XXX: this code should not depend on lock_user */
1965 if (!(p
= lock_user(VERIFY_READ
, addr
, l
, 1)))
1968 unlock_user(p
, addr
, 0);
1979 static void invalidate_and_set_dirty(hwaddr addr
,
1982 if (cpu_physical_memory_is_clean(addr
)) {
1983 /* invalidate code */
1984 tb_invalidate_phys_page_range(addr
, addr
+ length
, 0);
1986 cpu_physical_memory_set_dirty_range_nocode(addr
, length
);
1988 xen_modified_memory(addr
, length
);
1991 static int memory_access_size(MemoryRegion
*mr
, unsigned l
, hwaddr addr
)
1993 unsigned access_size_max
= mr
->ops
->valid
.max_access_size
;
1995 /* Regions are assumed to support 1-4 byte accesses unless
1996 otherwise specified. */
1997 if (access_size_max
== 0) {
1998 access_size_max
= 4;
2001 /* Bound the maximum access by the alignment of the address. */
2002 if (!mr
->ops
->impl
.unaligned
) {
2003 unsigned align_size_max
= addr
& -addr
;
2004 if (align_size_max
!= 0 && align_size_max
< access_size_max
) {
2005 access_size_max
= align_size_max
;
2009 /* Don't attempt accesses larger than the maximum. */
2010 if (l
> access_size_max
) {
2011 l
= access_size_max
;
2014 l
= 1 << (qemu_fls(l
) - 1);
2020 bool address_space_rw(AddressSpace
*as
, hwaddr addr
, uint8_t *buf
,
2021 int len
, bool is_write
)
2032 mr
= address_space_translate(as
, addr
, &addr1
, &l
, is_write
);
2035 if (!memory_access_is_direct(mr
, is_write
)) {
2036 l
= memory_access_size(mr
, l
, addr1
);
2037 /* XXX: could force current_cpu to NULL to avoid
2041 /* 64 bit write access */
2043 error
|= io_mem_write(mr
, addr1
, val
, 8);
2046 /* 32 bit write access */
2048 error
|= io_mem_write(mr
, addr1
, val
, 4);
2051 /* 16 bit write access */
2053 error
|= io_mem_write(mr
, addr1
, val
, 2);
2056 /* 8 bit write access */
2058 error
|= io_mem_write(mr
, addr1
, val
, 1);
2064 addr1
+= memory_region_get_ram_addr(mr
);
2066 ptr
= qemu_get_ram_ptr(addr1
);
2067 memcpy(ptr
, buf
, l
);
2068 invalidate_and_set_dirty(addr1
, l
);
2071 if (!memory_access_is_direct(mr
, is_write
)) {
2073 l
= memory_access_size(mr
, l
, addr1
);
2076 /* 64 bit read access */
2077 error
|= io_mem_read(mr
, addr1
, &val
, 8);
2081 /* 32 bit read access */
2082 error
|= io_mem_read(mr
, addr1
, &val
, 4);
2086 /* 16 bit read access */
2087 error
|= io_mem_read(mr
, addr1
, &val
, 2);
2091 /* 8 bit read access */
2092 error
|= io_mem_read(mr
, addr1
, &val
, 1);
2100 ptr
= qemu_get_ram_ptr(mr
->ram_addr
+ addr1
);
2101 memcpy(buf
, ptr
, l
);
2112 bool address_space_write(AddressSpace
*as
, hwaddr addr
,
2113 const uint8_t *buf
, int len
)
2115 return address_space_rw(as
, addr
, (uint8_t *)buf
, len
, true);
2118 bool address_space_read(AddressSpace
*as
, hwaddr addr
, uint8_t *buf
, int len
)
2120 return address_space_rw(as
, addr
, buf
, len
, false);
2124 void cpu_physical_memory_rw(hwaddr addr
, uint8_t *buf
,
2125 int len
, int is_write
)
2127 address_space_rw(&address_space_memory
, addr
, buf
, len
, is_write
);
2130 enum write_rom_type
{
2135 static inline void cpu_physical_memory_write_rom_internal(AddressSpace
*as
,
2136 hwaddr addr
, const uint8_t *buf
, int len
, enum write_rom_type type
)
2145 mr
= address_space_translate(as
, addr
, &addr1
, &l
, true);
2147 if (!(memory_region_is_ram(mr
) ||
2148 memory_region_is_romd(mr
))) {
2151 addr1
+= memory_region_get_ram_addr(mr
);
2153 ptr
= qemu_get_ram_ptr(addr1
);
2156 memcpy(ptr
, buf
, l
);
2157 invalidate_and_set_dirty(addr1
, l
);
2160 flush_icache_range((uintptr_t)ptr
, (uintptr_t)ptr
+ l
);
2170 /* used for ROM loading : can write in RAM and ROM */
2171 void cpu_physical_memory_write_rom(AddressSpace
*as
, hwaddr addr
,
2172 const uint8_t *buf
, int len
)
2174 cpu_physical_memory_write_rom_internal(as
, addr
, buf
, len
, WRITE_DATA
);
2177 void cpu_flush_icache_range(hwaddr start
, int len
)
2180 * This function should do the same thing as an icache flush that was
2181 * triggered from within the guest. For TCG we are always cache coherent,
2182 * so there is no need to flush anything. For KVM / Xen we need to flush
2183 * the host's instruction cache at least.
2185 if (tcg_enabled()) {
2189 cpu_physical_memory_write_rom_internal(&address_space_memory
,
2190 start
, NULL
, len
, FLUSH_CACHE
);
2200 static BounceBuffer bounce
;
2202 typedef struct MapClient
{
2204 void (*callback
)(void *opaque
);
2205 QLIST_ENTRY(MapClient
) link
;
2208 static QLIST_HEAD(map_client_list
, MapClient
) map_client_list
2209 = QLIST_HEAD_INITIALIZER(map_client_list
);
2211 void *cpu_register_map_client(void *opaque
, void (*callback
)(void *opaque
))
2213 MapClient
*client
= g_malloc(sizeof(*client
));
2215 client
->opaque
= opaque
;
2216 client
->callback
= callback
;
2217 QLIST_INSERT_HEAD(&map_client_list
, client
, link
);
2221 static void cpu_unregister_map_client(void *_client
)
2223 MapClient
*client
= (MapClient
*)_client
;
2225 QLIST_REMOVE(client
, link
);
2229 static void cpu_notify_map_clients(void)
2233 while (!QLIST_EMPTY(&map_client_list
)) {
2234 client
= QLIST_FIRST(&map_client_list
);
2235 client
->callback(client
->opaque
);
2236 cpu_unregister_map_client(client
);
2240 bool address_space_access_valid(AddressSpace
*as
, hwaddr addr
, int len
, bool is_write
)
2247 mr
= address_space_translate(as
, addr
, &xlat
, &l
, is_write
);
2248 if (!memory_access_is_direct(mr
, is_write
)) {
2249 l
= memory_access_size(mr
, l
, addr
);
2250 if (!memory_region_access_valid(mr
, xlat
, l
, is_write
)) {
2261 /* Map a physical memory region into a host virtual address.
2262 * May map a subset of the requested range, given by and returned in *plen.
2263 * May return NULL if resources needed to perform the mapping are exhausted.
2264 * Use only for reads OR writes - not for read-modify-write operations.
2265 * Use cpu_register_map_client() to know when retrying the map operation is
2266 * likely to succeed.
2268 void *address_space_map(AddressSpace
*as
,
2275 hwaddr l
, xlat
, base
;
2276 MemoryRegion
*mr
, *this_mr
;
2284 mr
= address_space_translate(as
, addr
, &xlat
, &l
, is_write
);
2285 if (!memory_access_is_direct(mr
, is_write
)) {
2286 if (bounce
.buffer
) {
2289 /* Avoid unbounded allocations */
2290 l
= MIN(l
, TARGET_PAGE_SIZE
);
2291 bounce
.buffer
= qemu_memalign(TARGET_PAGE_SIZE
, l
);
2295 memory_region_ref(mr
);
2298 address_space_read(as
, addr
, bounce
.buffer
, l
);
2302 return bounce
.buffer
;
2306 raddr
= memory_region_get_ram_addr(mr
);
2317 this_mr
= address_space_translate(as
, addr
, &xlat
, &l
, is_write
);
2318 if (this_mr
!= mr
|| xlat
!= base
+ done
) {
2323 memory_region_ref(mr
);
2325 return qemu_ram_ptr_length(raddr
+ base
, plen
);
2328 /* Unmaps a memory region previously mapped by address_space_map().
2329 * Will also mark the memory as dirty if is_write == 1. access_len gives
2330 * the amount of memory that was actually read or written by the caller.
2332 void address_space_unmap(AddressSpace
*as
, void *buffer
, hwaddr len
,
2333 int is_write
, hwaddr access_len
)
2335 if (buffer
!= bounce
.buffer
) {
2339 mr
= qemu_ram_addr_from_host(buffer
, &addr1
);
2342 invalidate_and_set_dirty(addr1
, access_len
);
2344 if (xen_enabled()) {
2345 xen_invalidate_map_cache_entry(buffer
);
2347 memory_region_unref(mr
);
2351 address_space_write(as
, bounce
.addr
, bounce
.buffer
, access_len
);
2353 qemu_vfree(bounce
.buffer
);
2354 bounce
.buffer
= NULL
;
2355 memory_region_unref(bounce
.mr
);
2356 cpu_notify_map_clients();
2359 void *cpu_physical_memory_map(hwaddr addr
,
2363 return address_space_map(&address_space_memory
, addr
, plen
, is_write
);
2366 void cpu_physical_memory_unmap(void *buffer
, hwaddr len
,
2367 int is_write
, hwaddr access_len
)
2369 return address_space_unmap(&address_space_memory
, buffer
, len
, is_write
, access_len
);
2372 /* warning: addr must be aligned */
2373 static inline uint32_t ldl_phys_internal(AddressSpace
*as
, hwaddr addr
,
2374 enum device_endian endian
)
2382 mr
= address_space_translate(as
, addr
, &addr1
, &l
, false);
2383 if (l
< 4 || !memory_access_is_direct(mr
, false)) {
2385 io_mem_read(mr
, addr1
, &val
, 4);
2386 #if defined(TARGET_WORDS_BIGENDIAN)
2387 if (endian
== DEVICE_LITTLE_ENDIAN
) {
2391 if (endian
== DEVICE_BIG_ENDIAN
) {
2397 ptr
= qemu_get_ram_ptr((memory_region_get_ram_addr(mr
)
2401 case DEVICE_LITTLE_ENDIAN
:
2402 val
= ldl_le_p(ptr
);
2404 case DEVICE_BIG_ENDIAN
:
2405 val
= ldl_be_p(ptr
);
2415 uint32_t ldl_phys(AddressSpace
*as
, hwaddr addr
)
2417 return ldl_phys_internal(as
, addr
, DEVICE_NATIVE_ENDIAN
);
2420 uint32_t ldl_le_phys(AddressSpace
*as
, hwaddr addr
)
2422 return ldl_phys_internal(as
, addr
, DEVICE_LITTLE_ENDIAN
);
2425 uint32_t ldl_be_phys(AddressSpace
*as
, hwaddr addr
)
2427 return ldl_phys_internal(as
, addr
, DEVICE_BIG_ENDIAN
);
2430 /* warning: addr must be aligned */
2431 static inline uint64_t ldq_phys_internal(AddressSpace
*as
, hwaddr addr
,
2432 enum device_endian endian
)
2440 mr
= address_space_translate(as
, addr
, &addr1
, &l
,
2442 if (l
< 8 || !memory_access_is_direct(mr
, false)) {
2444 io_mem_read(mr
, addr1
, &val
, 8);
2445 #if defined(TARGET_WORDS_BIGENDIAN)
2446 if (endian
== DEVICE_LITTLE_ENDIAN
) {
2450 if (endian
== DEVICE_BIG_ENDIAN
) {
2456 ptr
= qemu_get_ram_ptr((memory_region_get_ram_addr(mr
)
2460 case DEVICE_LITTLE_ENDIAN
:
2461 val
= ldq_le_p(ptr
);
2463 case DEVICE_BIG_ENDIAN
:
2464 val
= ldq_be_p(ptr
);
2474 uint64_t ldq_phys(AddressSpace
*as
, hwaddr addr
)
2476 return ldq_phys_internal(as
, addr
, DEVICE_NATIVE_ENDIAN
);
2479 uint64_t ldq_le_phys(AddressSpace
*as
, hwaddr addr
)
2481 return ldq_phys_internal(as
, addr
, DEVICE_LITTLE_ENDIAN
);
2484 uint64_t ldq_be_phys(AddressSpace
*as
, hwaddr addr
)
2486 return ldq_phys_internal(as
, addr
, DEVICE_BIG_ENDIAN
);
2490 uint32_t ldub_phys(AddressSpace
*as
, hwaddr addr
)
2493 address_space_rw(as
, addr
, &val
, 1, 0);
2497 /* warning: addr must be aligned */
2498 static inline uint32_t lduw_phys_internal(AddressSpace
*as
, hwaddr addr
,
2499 enum device_endian endian
)
2507 mr
= address_space_translate(as
, addr
, &addr1
, &l
,
2509 if (l
< 2 || !memory_access_is_direct(mr
, false)) {
2511 io_mem_read(mr
, addr1
, &val
, 2);
2512 #if defined(TARGET_WORDS_BIGENDIAN)
2513 if (endian
== DEVICE_LITTLE_ENDIAN
) {
2517 if (endian
== DEVICE_BIG_ENDIAN
) {
2523 ptr
= qemu_get_ram_ptr((memory_region_get_ram_addr(mr
)
2527 case DEVICE_LITTLE_ENDIAN
:
2528 val
= lduw_le_p(ptr
);
2530 case DEVICE_BIG_ENDIAN
:
2531 val
= lduw_be_p(ptr
);
2541 uint32_t lduw_phys(AddressSpace
*as
, hwaddr addr
)
2543 return lduw_phys_internal(as
, addr
, DEVICE_NATIVE_ENDIAN
);
2546 uint32_t lduw_le_phys(AddressSpace
*as
, hwaddr addr
)
2548 return lduw_phys_internal(as
, addr
, DEVICE_LITTLE_ENDIAN
);
2551 uint32_t lduw_be_phys(AddressSpace
*as
, hwaddr addr
)
2553 return lduw_phys_internal(as
, addr
, DEVICE_BIG_ENDIAN
);
2556 /* warning: addr must be aligned. The ram page is not masked as dirty
2557 and the code inside is not invalidated. It is useful if the dirty
2558 bits are used to track modified PTEs */
2559 void stl_phys_notdirty(AddressSpace
*as
, hwaddr addr
, uint32_t val
)
2566 mr
= address_space_translate(as
, addr
, &addr1
, &l
,
2568 if (l
< 4 || !memory_access_is_direct(mr
, true)) {
2569 io_mem_write(mr
, addr1
, val
, 4);
2571 addr1
+= memory_region_get_ram_addr(mr
) & TARGET_PAGE_MASK
;
2572 ptr
= qemu_get_ram_ptr(addr1
);
2575 if (unlikely(in_migration
)) {
2576 if (cpu_physical_memory_is_clean(addr1
)) {
2577 /* invalidate code */
2578 tb_invalidate_phys_page_range(addr1
, addr1
+ 4, 0);
2580 cpu_physical_memory_set_dirty_range_nocode(addr1
, 4);
2586 /* warning: addr must be aligned */
2587 static inline void stl_phys_internal(AddressSpace
*as
,
2588 hwaddr addr
, uint32_t val
,
2589 enum device_endian endian
)
2596 mr
= address_space_translate(as
, addr
, &addr1
, &l
,
2598 if (l
< 4 || !memory_access_is_direct(mr
, true)) {
2599 #if defined(TARGET_WORDS_BIGENDIAN)
2600 if (endian
== DEVICE_LITTLE_ENDIAN
) {
2604 if (endian
== DEVICE_BIG_ENDIAN
) {
2608 io_mem_write(mr
, addr1
, val
, 4);
2611 addr1
+= memory_region_get_ram_addr(mr
) & TARGET_PAGE_MASK
;
2612 ptr
= qemu_get_ram_ptr(addr1
);
2614 case DEVICE_LITTLE_ENDIAN
:
2617 case DEVICE_BIG_ENDIAN
:
2624 invalidate_and_set_dirty(addr1
, 4);
2628 void stl_phys(AddressSpace
*as
, hwaddr addr
, uint32_t val
)
2630 stl_phys_internal(as
, addr
, val
, DEVICE_NATIVE_ENDIAN
);
2633 void stl_le_phys(AddressSpace
*as
, hwaddr addr
, uint32_t val
)
2635 stl_phys_internal(as
, addr
, val
, DEVICE_LITTLE_ENDIAN
);
2638 void stl_be_phys(AddressSpace
*as
, hwaddr addr
, uint32_t val
)
2640 stl_phys_internal(as
, addr
, val
, DEVICE_BIG_ENDIAN
);
2644 void stb_phys(AddressSpace
*as
, hwaddr addr
, uint32_t val
)
2647 address_space_rw(as
, addr
, &v
, 1, 1);
2650 /* warning: addr must be aligned */
2651 static inline void stw_phys_internal(AddressSpace
*as
,
2652 hwaddr addr
, uint32_t val
,
2653 enum device_endian endian
)
2660 mr
= address_space_translate(as
, addr
, &addr1
, &l
, true);
2661 if (l
< 2 || !memory_access_is_direct(mr
, true)) {
2662 #if defined(TARGET_WORDS_BIGENDIAN)
2663 if (endian
== DEVICE_LITTLE_ENDIAN
) {
2667 if (endian
== DEVICE_BIG_ENDIAN
) {
2671 io_mem_write(mr
, addr1
, val
, 2);
2674 addr1
+= memory_region_get_ram_addr(mr
) & TARGET_PAGE_MASK
;
2675 ptr
= qemu_get_ram_ptr(addr1
);
2677 case DEVICE_LITTLE_ENDIAN
:
2680 case DEVICE_BIG_ENDIAN
:
2687 invalidate_and_set_dirty(addr1
, 2);
2691 void stw_phys(AddressSpace
*as
, hwaddr addr
, uint32_t val
)
2693 stw_phys_internal(as
, addr
, val
, DEVICE_NATIVE_ENDIAN
);
2696 void stw_le_phys(AddressSpace
*as
, hwaddr addr
, uint32_t val
)
2698 stw_phys_internal(as
, addr
, val
, DEVICE_LITTLE_ENDIAN
);
2701 void stw_be_phys(AddressSpace
*as
, hwaddr addr
, uint32_t val
)
2703 stw_phys_internal(as
, addr
, val
, DEVICE_BIG_ENDIAN
);
2707 void stq_phys(AddressSpace
*as
, hwaddr addr
, uint64_t val
)
2710 address_space_rw(as
, addr
, (void *) &val
, 8, 1);
2713 void stq_le_phys(AddressSpace
*as
, hwaddr addr
, uint64_t val
)
2715 val
= cpu_to_le64(val
);
2716 address_space_rw(as
, addr
, (void *) &val
, 8, 1);
2719 void stq_be_phys(AddressSpace
*as
, hwaddr addr
, uint64_t val
)
2721 val
= cpu_to_be64(val
);
2722 address_space_rw(as
, addr
, (void *) &val
, 8, 1);
2725 /* virtual memory access for debug (includes writing to ROM) */
2726 int cpu_memory_rw_debug(CPUState
*cpu
, target_ulong addr
,
2727 uint8_t *buf
, int len
, int is_write
)
2734 page
= addr
& TARGET_PAGE_MASK
;
2735 phys_addr
= cpu_get_phys_page_debug(cpu
, page
);
2736 /* if no physical page mapped, return an error */
2737 if (phys_addr
== -1)
2739 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
2742 phys_addr
+= (addr
& ~TARGET_PAGE_MASK
);
2744 cpu_physical_memory_write_rom(cpu
->as
, phys_addr
, buf
, l
);
2746 address_space_rw(cpu
->as
, phys_addr
, buf
, l
, 0);
/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool target_words_bigendian(void);
bool target_words_bigendian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}
2770 #ifndef CONFIG_USER_ONLY
2771 bool cpu_physical_memory_is_io(hwaddr phys_addr
)
2776 mr
= address_space_translate(&address_space_memory
,
2777 phys_addr
, &phys_addr
, &l
, false);
2779 return !(memory_region_is_ram(mr
) ||
2780 memory_region_is_romd(mr
));
2783 void qemu_ram_foreach_block(RAMBlockIterFunc func
, void *opaque
)
2787 QTAILQ_FOREACH(block
, &ram_list
.blocks
, next
) {
2788 func(block
->host
, block
->offset
, block
->length
, opaque
);