/*
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <sys/types.h>

#include "qemu-common.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#endif
#include "exec/cpu-all.h"

#include "exec/cputlb.h"
#include "translate-all.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "qemu/cache-utils.h"

#include "qemu/range.h"

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
static bool in_migration;

RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);

/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUState *, current_cpu);

/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */

#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables.  */
#define ADDR_SPACE_BITS 64

#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
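
/*
 * Illustrative sketch (added for clarity, not part of the original file):
 * how a page-frame index is decomposed into per-level radix-tree indices of
 * P_L2_BITS each.  The helper name phys_map_level_index() is hypothetical;
 * phys_page_set_level() and phys_page_find() below do the same arithmetic
 * inline.
 */
static inline uint32_t phys_map_level_index(hwaddr index, int level)
{
    /* Level P_L2_LEVELS - 1 is the root, level 0 holds the leaf entries. */
    return (index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1);
}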
typedef PhysPageEntry Node[P_L2_SIZE];

typedef struct PhysPageMap {
    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;
#endif

#if !defined(CONFIG_USER_ONLY)
static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
    }
}

static uint32_t phys_map_node_alloc(PhysPageMap *map)
{
    ret = map->nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);
    for (i = 0; i < P_L2_SIZE; ++i) {
        map->nodes[ret][i].skip = 1;
        map->nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
}
static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map);
        p = map->nodes[lp->ptr];
        for (i = 0; i < P_L2_SIZE; i++) {
            p[i].ptr = PHYS_SECTION_UNASSIGNED;
        }
    } else {
        p = map->nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}
/* Compact a non leaf page entry. Simply detect that the entry has a single child,
 * and update our entry so we can skip it and go directly to the destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
{
    unsigned valid_ptr = P_L2_SIZE;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
    }

    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
        }
        phys_page_compact(&p[i], nodes, compacted);
    }

    /* We can only compress if there's only one child. */
    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}

static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    DECLARE_BITMAP(compacted, nodes_nb);

    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes, compacted);
    }
}
static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    hwaddr index = addr >> TARGET_PAGE_BITS;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (sections[lp.ptr].size.hi ||
        range_covers_byte(sections[lp.ptr].offset_within_address_space,
                          sections[lp.ptr].size.lo, addr)) {
        return &sections[lp.ptr];
    }
    return &sections[PHYS_SECTION_UNASSIGNED];
}
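
/*
 * Illustrative sketch (added for clarity, not part of the original file):
 * a minimal wrapper showing how the radix tree above is queried.  The name
 * lookup_section_example() is hypothetical; address_space_lookup_region()
 * below is the real in-tree user of phys_page_find().
 */
static inline MemoryRegionSection *lookup_section_example(AddressSpaceDispatch *d,
                                                          hwaddr addr)
{
    /* Falls back to the PHYS_SECTION_UNASSIGNED dummy section on a miss. */
    return phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
}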
bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section;

    section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
}

static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    diff = int128_sub(section->mr->size, int128_make64(addr));
    *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
}
static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
{
    if (memory_region_is_ram(mr)) {
        return !(is_write && mr->readonly);
    }
    if (memory_region_is_romd(mr)) {
    }
}

MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    MemoryRegionSection *section;

    section = address_space_translate_internal(as->dispatch, addr, &addr, plen, true);

        if (!mr->iommu_ops) {
        }

        iotlb = mr->iommu_ops->translate(mr, addr);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        len = MIN(len, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
        }

        as = iotlb.target_as;

    if (memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        len = MIN(page, len);
    }
}

MemoryRegionSection *
address_space_translate_for_iotlb(AddressSpace *as, hwaddr addr, hwaddr *xlat,
                                  hwaddr *plen)
{
    MemoryRegionSection *section;
    section = address_space_translate_internal(as->dispatch, addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
}
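
/*
 * Illustrative sketch (added for clarity, not part of the original file):
 * a typical caller loop around address_space_translate().  On each pass the
 * length is clamped to what a single MemoryRegion covers, so the loop walks
 * the range region by region.  The helper name is hypothetical.
 */
static inline bool phys_range_is_ram_example(AddressSpace *as, hwaddr addr,
                                             hwaddr len)
{
    while (len > 0) {
        hwaddr xlat, l = len;
        MemoryRegion *mr = address_space_translate(as, addr, &xlat, &l, false);

        if (!memory_region_is_ram(mr)) {
            return false;
        }
        addr += l;
        len -= l;
    }
    return true;
}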
void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    qemu_mutex_init(&ram_list.mutex);
#endif
}

#if !defined(CONFIG_USER_ONLY)
static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu->env_ptr, 1);
}

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
#endif

CPUState *qemu_get_cpu(int index)
{
        if (cpu->cpu_index == index) {
        }
}

#if !defined(CONFIG_USER_ONLY)
void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as)
{
    /* We only support one address space per cpu at the moment.  */
    assert(cpu->as == as);

    if (cpu->tcg_as_listener) {
        memory_listener_unregister(cpu->tcg_as_listener);
    } else {
        cpu->tcg_as_listener = g_new0(MemoryListener, 1);
    }
    cpu->tcg_as_listener->commit = tcg_commit;
    memory_listener_register(cpu->tcg_as_listener, as);
}
#endif

void cpu_exec_init(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUClass *cc = CPU_GET_CLASS(cpu);

#if defined(CONFIG_USER_ONLY)
#endif
    CPU_FOREACH(some_cpu) {
    }
    cpu->cpu_index = cpu_index;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->as = &address_space_memory;
    cpu->thread_id = qemu_get_thread_id();
#endif
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
#endif
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
    }
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
    assert(cc->vmsd == NULL);
    assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    hwaddr phys = cpu_get_phys_page_debug(cpu, pc);

    tb_invalidate_phys_addr(cpu->as,
                            phys | (pc & ~TARGET_PAGE_MASK));
}
#endif
#endif /* TARGET_HAS_ICE */
#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
}

int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len & (len - 1)) || (addr & ~len_mask) ||
        len == 0 || len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
    }
    wp = g_malloc(sizeof(*wp));

    wp->len_mask = len_mask;

    /* keep all GDB-injected watchpoints in front */
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);
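
/*
 * Illustrative usage sketch (added for clarity, not part of the original
 * file): inserting a 4-byte write watchpoint.  The wrapper name is
 * hypothetical; as checked above, the length must be a power of two and the
 * address aligned to it.
 */
static inline int debug_watch_word_example(CPUArchState *env, target_ulong addr)
{
    CPUWatchpoint *wp;

    return cpu_watchpoint_insert(env, addr, 4, BP_MEM_WRITE | BP_GDB, &wp);
}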
/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
            && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
        }
    }
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    bp = g_malloc(sizeof(*bp));

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
    }

    breakpoint_invalidate(ENV_GET_CPU(env), pc);
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
        }
    }
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(ENV_GET_CPU(env), breakpoint->pc);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
            kvm_update_guest_debug(cpu, 0);
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            CPUArchState *env = cpu->env_ptr;
    }
#endif
}

void cpu_abort(CPUArchState *env, const char *fmt, ...)
{
    CPUState *cpu = ENV_GET_CPU(env);

    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    }
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
}
#if !defined(CONFIG_USER_ONLY)
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    /* The list is protected by the iothread lock here.  */
    block = ram_list.mru_block;
    if (block && addr - block->offset < block->length) {
    }
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);

    ram_list.mru_block = block;
}

static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)block->host + (start - block->offset);
    cpu_tlb_reset_dirty_all(start1, length);
}

/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t length,
                                     unsigned client)
{
    cpu_physical_memory_clear_dirty_range(start, length, client);

    tlb_reset_dirty_range_all(start, length);
}

static void cpu_physical_memory_set_dirty_tracking(bool enable)
{
    in_migration = enable;
}

hwaddr memory_region_section_get_iotlb(CPUArchState *env,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    if (memory_region_is_ram(section->mr)) {
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        iotlb = section - section->address_space->dispatch->map.sections;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
            }
        }
    }
}

#endif /* defined(CONFIG_USER_ONLY) */
#if !defined(CONFIG_USER_ONLY)

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size) = qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this.  Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t))
{
    phys_mem_alloc = alloc;
}

static uint16_t phys_section_add(PhysPageMap *map,
                                 MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(map->sections_nb < TARGET_PAGE_SIZE);

    if (map->sections_nb == map->sections_nb_alloc) {
        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
        map->sections = g_renew(MemoryRegionSection, map->sections,
                                map->sections_nb_alloc);
    }
    map->sections[map->sections_nb] = *section;
    memory_region_ref(section->mr);
    return map->sections_nb++;
}

static void phys_section_destroy(MemoryRegion *mr)
{
    memory_region_unref(mr);

    {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        memory_region_destroy(&subpage->iomem);
    }
}

static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->sections);
}
static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
                                                   d->map.nodes, d->map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.address_space = d->as;
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&d->map, &subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end,
                     phys_section_add(&d->map, section));
}

static void register_multipage(AddressSpaceDispatch *d,
                               MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(&d->map, section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,

    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
}
static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = as->next_dispatch;
    MemoryRegionSection now = *section, remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
                        - now.offset_within_address_space;

        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(d, &now);
    } else {
        now.size = int128_zero();
    }
    while (int128_ne(remain.size, now.size)) {
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);
        if (int128_lt(remain.size, page_size)) {
            register_subpage(d, &now);
        } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
            now.size = page_size;
            register_subpage(d, &now);
        } else {
            now.size = int128_and(now.size, int128_neg(page_size));
            register_multipage(d, &now);
        }
    }
}
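
/*
 * Worked example (added for clarity, not part of the original file): with
 * 4 KiB target pages, a section covering [0x1800, 0x5800) is registered by
 * mem_add() as a subpage for [0x1800, 0x2000), whole pages for
 * [0x2000, 0x5000) via register_multipage(), and a trailing subpage for
 * [0x5000, 0x5800).
 */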
void qemu_flush_coalesced_mmio_buffer(void)
{
        kvm_flush_coalesced_mmio_buffer();
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}

#define HUGETLBFS_MAGIC       0x958458f6

static long gethugepagesize(const char *path)
{
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
}

static sigjmp_buf sigjump;

static void sigbus_handler(int signal)
{
    siglongjmp(sigjump, 1);
}

static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *sanitized_name;
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);

    if (memory < hpagesize) {
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
    }

    /* Make name safe to use with mkstemp by replacing '/' with '_'. */
    sanitized_name = g_strdup(block->mr->name);
    for (c = sanitized_name; *c != '\0'; c++) {
    }

    filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
    g_free(sanitized_name);

    fd = mkstemp(filename);
        perror("unable to create backing store for hugepages");

    memory = (memory+hpagesize-1) & ~(hpagesize-1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
    }

    {
        struct sigaction act, oldact;
        sigset_t set, oldset;

        memset(&act, 0, sizeof(act));
        act.sa_handler = &sigbus_handler;

        ret = sigaction(SIGBUS, &act, &oldact);
            perror("file_ram_alloc: failed to install signal handler");

        /* unblock SIGBUS */
        sigaddset(&set, SIGBUS);
        pthread_sigmask(SIG_UNBLOCK, &set, &oldset);

        if (sigsetjmp(sigjump, 1)) {
            fprintf(stderr, "file_ram_alloc: failed to preallocate pages\n");
        }

        /* MAP_POPULATE silently ignores failures */
        for (i = 0; i < (memory/hpagesize); i++) {
            memset(area + (hpagesize*i), 0, 1);
        }

        ret = sigaction(SIGBUS, &oldact, NULL);
            perror("file_ram_alloc: failed to reinstall signal handler");

        pthread_sigmask(SIG_SETMASK, &oldset, NULL);
    }
}
static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    fprintf(stderr, "-mem-path not supported on this host\n");
}

static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    assert(size != 0); /* it would hand out same offset multiple times */

    if (QTAILQ_EMPTY(&ram_list.blocks))

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->length;

        QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
    }
}

ram_addr_t last_ram_offset(void)
{
    ram_addr_t last = 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next)
        last = MAX(last, block->offset + block->length);
}

static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
{
    /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
    if (!qemu_opt_get_bool(qemu_get_machine_opts(),
                           "dump-guest-core", true)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
            perror("qemu_madvise");
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                            "but dump_guest_core=off specified\n");
    }
}

void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block, *block;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
        }
    }
    assert(!new_block->idstr[0]);

    {
        char *id = qdev_get_dev_path(dev);
        snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
        }
    }
    qemu_mutex_unlock_ramlist();
}

static int memory_try_enable_merging(void *addr, size_t len)
{
    if (!qemu_opt_get_bool(qemu_get_machine_opts(), "mem-merge", true)) {
        /* disabled by the user */
    }

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
}
ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr)
{
    RAMBlock *block, *new_block;
    ram_addr_t old_ram_size, new_ram_size;

    old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    new_block->offset = find_ram_offset(size);
    if (host) {
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
    } else if (xen_enabled()) {
        if (mem_path) {
            fprintf(stderr, "-mem-path not supported with Xen\n");
        }
        xen_ram_alloc(new_block->offset, size, mr);
    } else {
        if (mem_path) {
            if (phys_mem_alloc != qemu_anon_ram_alloc) {
                /*
                 * file_ram_alloc() needs to allocate just like
                 * phys_mem_alloc, but we haven't bothered to provide
                 * a hook there.
                 */
                fprintf(stderr,
                        "-mem-path not supported with this accelerator\n");
            }
            new_block->host = file_ram_alloc(new_block, size, mem_path);
        }
        if (!new_block->host) {
            new_block->host = phys_mem_alloc(size);
            if (!new_block->host) {
                fprintf(stderr, "Cannot set up guest memory '%s': %s\n",
                        new_block->mr->name, strerror(errno));
            }
            memory_try_enable_merging(new_block->host, size);
        }
    }
    new_block->length = size;

    /* Keep the list sorted from biggest to smallest block.  */
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->length < new_block->length) {
            break;
        }
    }
    if (block) {
        QTAILQ_INSERT_BEFORE(block, new_block, next);
    } else {
        QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
    }
    ram_list.mru_block = NULL;

    qemu_mutex_unlock_ramlist();

    new_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;

    if (new_ram_size > old_ram_size) {
        for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
            ram_list.dirty_memory[i] =
                bitmap_zero_extend(ram_list.dirty_memory[i],
                                   old_ram_size, new_ram_size);
        }
    }
    cpu_physical_memory_set_dirty_range(new_block->offset, size);

    qemu_ram_setup_dump(new_block->host, size);
    qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);
    qemu_madvise(new_block->host, size, QEMU_MADV_DONTFORK);

        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}

ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
{
    return qemu_ram_alloc_from_ptr(size, NULL, mr);
}
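
/*
 * Illustrative sketch (added for clarity, not part of the original file):
 * pairing qemu_ram_alloc() with qemu_get_ram_ptr() to obtain a host pointer
 * to the freshly allocated block.  The helper name is hypothetical.
 */
static inline void *alloc_and_map_example(ram_addr_t size, MemoryRegion *mr)
{
    ram_addr_t offset = qemu_ram_alloc(size, mr);

    /* Valid until the block is released with qemu_ram_free(). */
    return qemu_get_ram_ptr(offset);
}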
void qemu_ram_free_from_ptr(ram_addr_t addr)
{
    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
        }
    }
    qemu_mutex_unlock_ramlist();
}

void qemu_ram_free(ram_addr_t addr)
{
    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else if (xen_enabled()) {
                xen_invalidate_map_cache_entry(block->host);
            } else if (block->fd >= 0) {
                munmap(block->host, block->length);
            } else {
                qemu_anon_ram_free(block->host, block->length);
            }
        }
    }
    qemu_mutex_unlock_ramlist();
}

void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->length) {
            vaddr = block->host + offset;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else if (xen_enabled()) {
            } else {
                munmap(vaddr, length);
                if (block->fd >= 0) {
                    flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
                    flags |= MAP_PRIVATE;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, block->fd, offset);
                } else {
                    /*
                     * Remap needs to match alloc.  Accelerators that
                     * set phys_mem_alloc never remap.  If they did,
                     * we'd need a remap hook here.
                     */
                    assert(phys_mem_alloc == qemu_anon_ram_alloc);

                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                }
                memory_try_enable_merging(vaddr, length);
                qemu_ram_setup_dump(vaddr, length);
            }
        }
    }
}
#endif /* !_WIN32 */
/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block = qemu_get_ram_block(addr);

    if (xen_enabled()) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map until the end of the page.
         */
        if (block->offset == 0) {
            return xen_map_cache(addr, 0, 0);
        } else if (block->host == NULL) {
                xen_map_cache(block->offset, block->length, 1);
        }
    }
    return block->host + (addr - block->offset);
}

/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
 * but takes a size argument */
static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
{
    if (xen_enabled()) {
        return xen_map_cache(addr, *size, 1);
    } else {
        QTAILQ_FOREACH(block, &ram_list.blocks, next) {
            if (addr - block->offset < block->length) {
                if (addr - block->offset + *size > block->length)
                    *size = block->length - addr + block->offset;
                return block->host + (addr - block->offset);
            }
        }

        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    }
}

/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    uint8_t *host = ptr;

    if (xen_enabled()) {
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
        return qemu_get_ram_block(*ram_addr)->mr;
    }

    block = ram_list.mru_block;
    if (block && block->host && host - block->host < block->length) {
    }

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        /* This case happens when the block is not mapped. */
        if (block->host == NULL) {
        }
        if (host - block->host < block->length) {
        }
    }

    *ram_addr = block->offset + (host - block->host);
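
/*
 * Illustrative sketch (added for clarity, not part of the original file):
 * round-tripping a host pointer back to its ram_addr_t.  The helper name is
 * hypothetical.
 */
static inline ram_addr_t host_to_ram_addr_example(void *host)
{
    ram_addr_t ram_addr;

    if (qemu_ram_addr_from_host(host, &ram_addr) == NULL) {
        /* The pointer does not belong to any registered RAMBlock. */
        return RAM_ADDR_MAX;
    }
    return ram_addr;
}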
static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
                               uint64_t val, unsigned size)
{
    if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
        tb_invalidate_phys_page_fast(ram_addr, size);
    }
        stb_p(qemu_get_ram_ptr(ram_addr), val);
        stw_p(qemu_get_ram_ptr(ram_addr), val);
        stl_p(qemu_get_ram_ptr(ram_addr), val);
    cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_MIGRATION);
    cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_VGA);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (!cpu_physical_memory_is_clean(ram_addr)) {
        CPUArchState *env = current_cpu->env_ptr;
        tlb_set_dirty(env, env->mem_io_vaddr);
    }
}

static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
                                 unsigned size, bool is_write)
{
}

static const MemoryRegionOps notdirty_mem_ops = {
    .write = notdirty_mem_write,
    .valid.accepts = notdirty_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUArchState *env = current_cpu->env_ptr;
    target_ulong pc, cs_base;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(ENV_GET_CPU(env), CPU_INTERRUPT_DEBUG);
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb_check_watchpoint(env);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                    cpu_resume_from_signal(env, NULL);
                }
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   handlers. */
static uint64_t watch_mem_read(void *opaque, hwaddr addr,
                               unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
    case 1: return ldub_phys(&address_space_memory, addr);
    case 2: return lduw_phys(&address_space_memory, addr);
    case 4: return ldl_phys(&address_space_memory, addr);
}

static void watch_mem_write(void *opaque, hwaddr addr,
                            uint64_t val, unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
        stb_phys(&address_space_memory, addr, val);
        stw_phys(&address_space_memory, addr, val);
        stl_phys(&address_space_memory, addr, val);
}

static const MemoryRegionOps watch_mem_ops = {
    .read = watch_mem_read,
    .write = watch_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
static uint64_t subpage_read(void *opaque, hwaddr addr,
                             unsigned len)
{
    subpage_t *subpage = opaque;

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
           subpage, len, addr);
#endif
    address_space_read(subpage->as, addr + subpage->base, buf, len);
}

static void subpage_write(void *opaque, hwaddr addr,
                          uint64_t value, unsigned len)
{
    subpage_t *subpage = opaque;

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx
           " value %"PRIx64"\n",
           __func__, subpage, len, addr, value);
#endif
    address_space_write(subpage->as, addr + subpage->base, buf, len);
}

static bool subpage_accepts(void *opaque, hwaddr addr,
                            unsigned len, bool is_write)
{
    subpage_t *subpage = opaque;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
           __func__, subpage, is_write ? 'w' : 'r', len, addr);
#endif

    return address_space_access_valid(subpage->as, addr + subpage->base,
}

static const MemoryRegionOps subpage_ops = {
    .read = subpage_read,
    .write = subpage_write,
    .valid.accepts = subpage_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section)
{
    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)

    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
           __func__, mmio, start, end, idx, eidx, section);
#endif
    for (; idx <= eidx; idx++) {
        mmio->sub_section[idx] = section;
    }
}

static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
{
    mmio = g_malloc0(sizeof(subpage_t));

    memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
                          "subpage", TARGET_PAGE_SIZE);
    mmio->iomem.subpage = true;
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
           mmio, base, TARGET_PAGE_SIZE);
#endif
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
}
static uint16_t dummy_section(PhysPageMap *map, MemoryRegion *mr)
{
    MemoryRegionSection section = {
        .address_space = &address_space_memory,
        .offset_within_address_space = 0,
        .offset_within_region = 0,
        .size = int128_2_64(),
    };

    return phys_section_add(map, &section);
}

MemoryRegion *iotlb_to_region(AddressSpace *as, hwaddr index)
{
    return as->dispatch->map.sections[index & ~TARGET_PAGE_MASK].mr;
}

static void io_mem_init(void)
{
    memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, "rom", UINT64_MAX);
    memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
                          "unassigned", UINT64_MAX);
    memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
                          "notdirty", UINT64_MAX);
    memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
                          "watch", UINT64_MAX);
}
static void mem_begin(MemoryListener *listener)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);

    n = dummy_section(&d->map, &io_mem_unassigned);
    assert(n == PHYS_SECTION_UNASSIGNED);
    n = dummy_section(&d->map, &io_mem_notdirty);
    assert(n == PHYS_SECTION_NOTDIRTY);
    n = dummy_section(&d->map, &io_mem_rom);
    assert(n == PHYS_SECTION_ROM);
    n = dummy_section(&d->map, &io_mem_watch);
    assert(n == PHYS_SECTION_WATCH);

    d->phys_map  = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };

    as->next_dispatch = d;
}

static void mem_commit(MemoryListener *listener)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *cur = as->dispatch;
    AddressSpaceDispatch *next = as->next_dispatch;

    phys_page_compact_all(next, next->map.nodes_nb);

    as->dispatch = next;

        phys_sections_free(&cur->map);
}

static void tcg_commit(MemoryListener *listener)
{
    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
        CPUArchState *env = cpu->env_ptr;

        /* FIXME: Disentangle the cpu.h circular files deps so we can
           directly get the right CPU from listener.  */
        if (cpu->tcg_as_listener != listener) {
        }
}
static void core_log_global_start(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(true);
}

static void core_log_global_stop(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(false);
}

static MemoryListener core_memory_listener = {
    .log_global_start = core_log_global_start,
    .log_global_stop = core_log_global_stop,
};

void address_space_init_dispatch(AddressSpace *as)
{
    as->dispatch = NULL;
    as->dispatch_listener = (MemoryListener) {
        .commit = mem_commit,
        .region_add = mem_add,
        .region_nop = mem_add,
    };
    memory_listener_register(&as->dispatch_listener, as);
}

void address_space_destroy_dispatch(AddressSpace *as)
{
    AddressSpaceDispatch *d = as->dispatch;

    memory_listener_unregister(&as->dispatch_listener);

    as->dispatch = NULL;
}

static void memory_map_init(void)
{
    system_memory = g_malloc(sizeof(*system_memory));

    memory_region_init(system_memory, NULL, "system", UINT64_MAX);
    address_space_init(&address_space_memory, system_memory, "memory");

    system_io = g_malloc(sizeof(*system_io));
    memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",

    address_space_init(&address_space_io, system_io, "I/O");

    memory_listener_register(&core_memory_listener, &address_space_memory);
}

MemoryRegion *get_system_memory(void)
{
    return system_memory;
}
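
/*
 * Illustrative sketch (added for clarity, not part of the original file):
 * board code typically attaches its regions to the tree returned by
 * get_system_memory().  The helper name is hypothetical.
 */
static inline void map_example_region(MemoryRegion *mr, hwaddr base)
{
    memory_region_add_subregion(get_system_memory(), base, mr);
}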
MemoryRegion *get_system_io(void)
{
}

#endif /* !defined(CONFIG_USER_ONLY) */

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;

        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))

        if (is_write) {
            if (!(flags & PAGE_WRITE))
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
            unlock_user(p, addr, 0);
        }
}

#else

static void invalidate_and_set_dirty(hwaddr addr,
                                     hwaddr length)
{
    if (cpu_physical_memory_is_clean(addr)) {
        /* invalidate code */
        tb_invalidate_phys_page_range(addr, addr + length, 0);

        cpu_physical_memory_set_dirty_flag(addr, DIRTY_MEMORY_VGA);
        cpu_physical_memory_set_dirty_flag(addr, DIRTY_MEMORY_MIGRATION);
    }
    xen_modified_memory(addr, length);
}
static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
{
    unsigned access_size_max = mr->ops->valid.max_access_size;

    /* Regions are assumed to support 1-4 byte accesses unless
       otherwise specified.  */
    if (access_size_max == 0) {
        access_size_max = 4;
    }

    /* Bound the maximum access by the alignment of the address.  */
    if (!mr->ops->impl.unaligned) {
        unsigned align_size_max = addr & -addr;
        if (align_size_max != 0 && align_size_max < access_size_max) {
            access_size_max = align_size_max;
        }
    }

    /* Don't attempt accesses larger than the maximum.  */
    if (l > access_size_max) {
        l = access_size_max;
    }
        l = 1 << (qemu_fls(l) - 1);
}
bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
                      int len, bool is_write)
{
        mr = address_space_translate(as, addr, &addr1, &l, is_write);

        if (is_write) {
            if (!memory_access_is_direct(mr, is_write)) {
                l = memory_access_size(mr, l, addr1);
                /* XXX: could force current_cpu to NULL to avoid
                   potential bugs */
                    /* 64 bit write access */
                    error |= io_mem_write(mr, addr1, val, 8);
                    /* 32 bit write access */
                    error |= io_mem_write(mr, addr1, val, 4);
                    /* 16 bit write access */
                    error |= io_mem_write(mr, addr1, val, 2);
                    /* 8 bit write access */
                    error |= io_mem_write(mr, addr1, val, 1);
            } else {
                addr1 += memory_region_get_ram_addr(mr);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(addr1, l);
            }
        } else {
            if (!memory_access_is_direct(mr, is_write)) {
                /* I/O case */
                l = memory_access_size(mr, l, addr1);
                    /* 64 bit read access */
                    error |= io_mem_read(mr, addr1, &val, 8);
                    /* 32 bit read access */
                    error |= io_mem_read(mr, addr1, &val, 4);
                    /* 16 bit read access */
                    error |= io_mem_read(mr, addr1, &val, 2);
                    /* 8 bit read access */
                    error |= io_mem_read(mr, addr1, &val, 1);
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
                memcpy(buf, ptr, l);
            }
        }
}

bool address_space_write(AddressSpace *as, hwaddr addr,
                         const uint8_t *buf, int len)
{
    return address_space_rw(as, addr, (uint8_t *)buf, len, true);
}

bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
{
    return address_space_rw(as, addr, buf, len, false);
}
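
/*
 * Illustrative usage sketch (added for clarity, not part of the original
 * file): reading a 32-bit little-endian value with address_space_read().
 * The helper name is hypothetical; ldl_le_phys() below does this directly.
 */
static inline uint32_t read_u32_le_example(AddressSpace *as, hwaddr addr)
{
    uint8_t buf[4];

    address_space_read(as, addr, buf, sizeof(buf));
    return ldl_le_p(buf);
}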
void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
                            int len, int is_write)
{
    address_space_rw(&address_space_memory, addr, buf, len, is_write);
}

enum write_rom_type {
    WRITE_DATA,
    FLUSH_CACHE,
};

static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
    hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
{
        mr = address_space_translate(as, addr, &addr1, &l, true);

        if (!(memory_region_is_ram(mr) ||
              memory_region_is_romd(mr))) {
        } else {
            addr1 += memory_region_get_ram_addr(mr);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(addr1, l);
                flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
        }
}

/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
                                   const uint8_t *buf, int len)
{
    cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
}

void cpu_flush_icache_range(hwaddr start, int len)
{
    /*
     * This function should do the same thing as an icache flush that was
     * triggered from within the guest. For TCG we are always cache coherent,
     * so there is no need to flush anything. For KVM / Xen we need to flush
     * the host's instruction cache at least.
     */
    if (tcg_enabled()) {
        return;
    }

    cpu_physical_memory_write_rom_internal(&address_space_memory,
                                           start, NULL, len, FLUSH_CACHE);
}
static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = g_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
}

static void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    QLIST_REMOVE(client, link);
}

static void cpu_notify_map_clients(void)
{
    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
    }
}
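
/*
 * Illustrative sketch (added for clarity, not part of the original file):
 * how a DMA user might combine address_space_map() with the map-client
 * notification above.  Both helper names are hypothetical.
 */
static void remap_notify_example_cb(void *opaque)
{
    /* Called from cpu_notify_map_clients() once the bounce buffer is
     * released; a real caller would retry its mapping here. */
}

static inline void *map_or_register_example(AddressSpace *as, hwaddr addr,
                                            hwaddr *plen, bool is_write)
{
    void *p = address_space_map(as, addr, plen, is_write);

    if (!p) {
        cpu_register_map_client(NULL, remap_notify_example_cb);
    }
    return p;
}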
bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
{
        mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (!memory_access_is_direct(mr, is_write)) {
            l = memory_access_size(mr, l, addr);
            if (!memory_region_access_valid(mr, xlat, l, is_write)) {
            }
        }
}

/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *address_space_map(AddressSpace *as,
                        hwaddr addr,
                        hwaddr *plen,
                        bool is_write)
{
    hwaddr l, xlat, base;
    MemoryRegion *mr, *this_mr;

    mr = address_space_translate(as, addr, &xlat, &l, is_write);
    if (!memory_access_is_direct(mr, is_write)) {
        if (bounce.buffer) {
        }
        /* Avoid unbounded allocations */
        l = MIN(l, TARGET_PAGE_SIZE);
        bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);

        memory_region_ref(mr);
            address_space_read(as, addr, bounce.buffer, l);

        return bounce.buffer;
    }

    raddr = memory_region_get_ram_addr(mr);

        this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (this_mr != mr || xlat != base + done) {
        }

    memory_region_ref(mr);
    return qemu_ram_ptr_length(raddr + base, plen);
}
/* Unmaps a memory region previously mapped by address_space_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         int is_write, hwaddr access_len)
{
    if (buffer != bounce.buffer) {
        mr = qemu_ram_addr_from_host(buffer, &addr1);
        if (is_write) {
            while (access_len) {
                l = TARGET_PAGE_SIZE;
                invalidate_and_set_dirty(addr1, l);
            }
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        memory_region_unref(mr);
        return;
    }
    if (is_write) {
        address_space_write(as, bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    memory_region_unref(bounce.mr);
    cpu_notify_map_clients();
}

void *cpu_physical_memory_map(hwaddr addr,
                              hwaddr *plen,
                              int is_write)
{
    return address_space_map(&address_space_memory, addr, plen, is_write);
}

void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                               int is_write, hwaddr access_len)
{
    return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
}
/* warning: addr must be aligned */
static inline uint32_t ldl_phys_internal(AddressSpace *as, hwaddr addr,
                                         enum device_endian endian)
{
    mr = address_space_translate(as, addr, &addr1, &l, false);
    if (l < 4 || !memory_access_is_direct(mr, false)) {
        /* I/O case */
        io_mem_read(mr, addr1, &val, 4);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
    }
}

uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
{
    return ldl_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
{
    return ldl_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
{
    return ldl_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
}

/* warning: addr must be aligned */
static inline uint64_t ldq_phys_internal(AddressSpace *as, hwaddr addr,
                                         enum device_endian endian)
{
    mr = address_space_translate(as, addr, &addr1, &l,
    if (l < 8 || !memory_access_is_direct(mr, false)) {
        /* I/O case */
        io_mem_read(mr, addr1, &val, 8);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
    }
}

uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
{
    return ldq_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
}

uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
{
    return ldq_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
}

uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
{
    return ldq_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
}
uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
{
    address_space_rw(as, addr, &val, 1, 0);
}

/* warning: addr must be aligned */
static inline uint32_t lduw_phys_internal(AddressSpace *as, hwaddr addr,
                                          enum device_endian endian)
{
    mr = address_space_translate(as, addr, &addr1, &l,
    if (l < 2 || !memory_access_is_direct(mr, false)) {
        /* I/O case */
        io_mem_read(mr, addr1, &val, 2);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
    }
}

uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
{
    return lduw_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
{
    return lduw_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
{
    return lduw_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
}
/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
{
    mr = address_space_translate(as, addr, &addr1, &l,
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        io_mem_write(mr, addr1, val, 4);
    } else {
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);

        if (unlikely(in_migration)) {
            if (cpu_physical_memory_is_clean(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);

                cpu_physical_memory_set_dirty_flag(addr1,
                                                   DIRTY_MEMORY_MIGRATION);
                cpu_physical_memory_set_dirty_flag(addr1, DIRTY_MEMORY_VGA);
            }
        }
    }
}

/* warning: addr must be aligned */
static inline void stl_phys_internal(AddressSpace *as,
                                     hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    mr = address_space_translate(as, addr, &addr1, &l,
    if (l < 4 || !memory_access_is_direct(mr, true)) {
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
        }
#endif
        io_mem_write(mr, addr1, val, 4);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        case DEVICE_LITTLE_ENDIAN:
        case DEVICE_BIG_ENDIAN:
        invalidate_and_set_dirty(addr1, 4);
    }
}
void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    stl_phys_internal(as, addr, val, DEVICE_NATIVE_ENDIAN);
}

void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    stl_phys_internal(as, addr, val, DEVICE_LITTLE_ENDIAN);
}

void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    stl_phys_internal(as, addr, val, DEVICE_BIG_ENDIAN);
}

void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_rw(as, addr, &v, 1, 1);
}

/* warning: addr must be aligned */
static inline void stw_phys_internal(AddressSpace *as,
                                     hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    mr = address_space_translate(as, addr, &addr1, &l, true);
    if (l < 2 || !memory_access_is_direct(mr, true)) {
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
        }
#endif
        io_mem_write(mr, addr1, val, 2);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        case DEVICE_LITTLE_ENDIAN:
        case DEVICE_BIG_ENDIAN:
        invalidate_and_set_dirty(addr1, 2);
    }
}

void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    stw_phys_internal(as, addr, val, DEVICE_NATIVE_ENDIAN);
}

void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    stw_phys_internal(as, addr, val, DEVICE_LITTLE_ENDIAN);
}

void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    stw_phys_internal(as, addr, val, DEVICE_BIG_ENDIAN);
}

void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_rw(as, addr, (void *) &val, 8, 1);
}

void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    val = cpu_to_le64(val);
    address_space_rw(as, addr, (void *) &val, 8, 1);
}

void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    val = cpu_to_be64(val);
    address_space_rw(as, addr, (void *) &val, 8, 1);
}
/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(cpu, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)

        l = (page + TARGET_PAGE_SIZE) - addr;

        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write) {
            cpu_physical_memory_write_rom(cpu->as, phys_addr, buf, l);
        } else {
            address_space_rw(cpu->as, phys_addr, buf, l, 0);
        }
}

#if !defined(CONFIG_USER_ONLY)

/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool virtio_is_big_endian(void);
bool virtio_is_big_endian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
#endif
}

#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    mr = address_space_translate(&address_space_memory,
                                 phys_addr, &phys_addr, &l, false);

    return !(memory_region_is_ram(mr) ||
             memory_region_is_romd(mr));
}

void qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
{
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        func(block->host, block->offset, block->length, opaque);
    }
}