/*
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <sys/types.h>

#include "qemu-common.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#endif
#include "exec/cpu-all.h"

#include "exec/cputlb.h"
#include "translate-all.h"

#include "exec/memory-internal.h"

//#define DEBUG_SUBPAGE
#if !defined(CONFIG_USER_ONLY)
static int in_migration;

RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;
DMAContext dma_context_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

#endif

CPUArchState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUArchState *,cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;
#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    uint16_t is_leaf : 1;
     /* index into phys_sections (is_leaf) or phys_map_nodes (!is_leaf) */
    uint16_t ptr : 15;
};

struct AddressSpaceDispatch {
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    MemoryListener listener;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

static MemoryRegionSection *phys_sections;
static unsigned phys_sections_nb, phys_sections_nb_alloc;
static uint16_t phys_section_unassigned;
static uint16_t phys_section_notdirty;
static uint16_t phys_section_rom;
static uint16_t phys_section_watch;

/* Simple allocator for PhysPageEntry nodes */
static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;

#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)
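
/*
 * The structures above describe the guest physical address space as a
 * radix tree: interior PhysPageEntry nodes (!is_leaf) index into
 * phys_map_nodes[], while leaves index into phys_sections[], so every
 * TARGET_PAGE_SIZE-sized page ultimately resolves to one
 * MemoryRegionSection.
 */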
static void io_mem_init(void);
static void memory_map_init(void);
static void *qemu_safe_ram_ptr(ram_addr_t addr);

static MemoryRegion io_mem_watch;
#endif

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(unsigned nodes)
{
    if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
        typedef PhysPageEntry Node[L2_SIZE];
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
                                      phys_map_nodes_nb + nodes);
        phys_map_nodes = g_renew(Node, phys_map_nodes,
                                 phys_map_nodes_nb_alloc);
    }
}
static uint16_t phys_map_node_alloc(void)
{
    unsigned i;
    uint16_t ret;

    ret = phys_map_nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != phys_map_nodes_nb_alloc);
    for (i = 0; i < L2_SIZE; ++i) {
        phys_map_nodes[ret][i].is_leaf = 0;
        phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

static void phys_map_nodes_reset(void)
{
    phys_map_nodes_nb = 0;
}
static void phys_page_set_level(PhysPageEntry *lp, hwaddr *index,
                                hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    hwaddr step = (hwaddr)1 << (level * L2_BITS);

    if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc();
        p = phys_map_nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < L2_SIZE; i++) {
                p[i].is_leaf = 1;
                p[i].ptr = phys_section_unassigned;
            }
        }
    } else {
        p = phys_map_nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];

    while (*nb && lp < &p[L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->is_leaf = true;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(3 * P_L2_LEVELS);

    phys_page_set_level(&d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}
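
/*
 * phys_page_set() above maps "nb" consecutive pages starting at page
 * number "index" to phys_sections[leaf].  Runs that are aligned to and at
 * least as large as a whole subtree (L2_SIZE^level pages) are recorded as
 * a single leaf at that level rather than being expanded page by page.
 */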
static MemoryRegionSection *phys_page_find(AddressSpaceDispatch *d, hwaddr index)
{
    PhysPageEntry lp = d->phys_map;
    PhysPageEntry *p;
    int i;

    for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &phys_sections[phys_section_unassigned];
        }
        p = phys_map_nodes[lp.ptr];
        lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }
    return &phys_sections[lp.ptr];
}

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

static MemoryRegionSection *address_space_lookup_region(AddressSpace *as,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section;
    subpage_t *subpage;

    section = phys_page_find(as->dispatch, addr >> TARGET_PAGE_BITS);
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &phys_sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    return section;
}
static MemoryRegionSection *
address_space_translate_internal(AddressSpace *as, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    Int128 diff;

    section = address_space_lookup_region(as, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    diff = int128_sub(section->mr->size, int128_make64(addr));
    *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    return section;
}
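
/*
 * On return from address_space_translate_internal(), *xlat holds the
 * offset of the access within the returned section's MemoryRegion and
 * *plen has been clipped so the access does not run past the end of that
 * region; callers loop, advancing by the clipped length.
 */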
MemoryRegionSection *address_space_translate(AddressSpace *as, hwaddr addr,
                                             hwaddr *xlat, hwaddr *plen,
                                             bool is_write)
{
    return address_space_translate_internal(as, addr, xlat, plen, true);
}

MemoryRegionSection *
address_space_translate_for_iotlb(AddressSpace *as, hwaddr addr, hwaddr *xlat,
                                  hwaddr *plen)
{
    return address_space_translate_internal(as, addr, xlat, plen, false);
}
#endif

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    qemu_mutex_init(&ram_list.mutex);
    memory_map_init();
    io_mem_init();
#endif
}
#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu->env_ptr, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields      = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
#else
#define vmstate_cpu_common vmstate_dummy
#endif
CPUState *qemu_get_cpu(int index)
{
    CPUArchState *env = first_cpu;
    CPUState *cpu = NULL;

    while (env) {
        cpu = ENV_GET_CPU(env);
        if (cpu->cpu_index == index) {
            break;
        }
        env = env->next_cpu;
    }

    return env ? cpu : NULL;
}

void qemu_for_each_cpu(void (*func)(CPUState *cpu, void *data), void *data)
{
    CPUArchState *env = first_cpu;

    while (env) {
        func(ENV_GET_CPU(env), data);
        env = env->next_cpu;
    }
}
void cpu_exec_init(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUArchState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    cpu->cpu_index = cpu_index;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
    assert(cc->vmsd == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}
#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc) |
            (pc & ~TARGET_PAGE_MASK));
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
}

int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len & (len - 1)) || (addr & ~len_mask) ||
            len == 0 || len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif
/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUArchState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(env, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}
void cpu_exit(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);

    cpu->exit_request = 1;
    cpu->tcg_exit_req = 1;
}

void cpu_abort(CPUArchState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(env, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(env, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

CPUArchState *cpu_copy(CPUArchState *env)
{
    CPUArchState *new_env = cpu_init(env->cpu_model_str);
    CPUArchState *next_cpu = new_env->next_cpu;
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    memcpy(new_env, env, sizeof(CPUArchState));

    /* Preserve chaining. */
    new_env->next_cpu = next_cpu;

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}
#if !defined(CONFIG_USER_ONLY)
static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
                                      uintptr_t length)
{
    uintptr_t start1;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (uintptr_t)qemu_safe_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
            != (end - 1) - start) {
        abort();
    }
    cpu_tlb_reset_dirty_all(start1, length);
}

/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    uintptr_t length;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    if (tcg_enabled()) {
        tlb_reset_dirty_range_all(start, end, length);
    }
}

static int cpu_physical_memory_set_dirty_tracking(int enable)
{
    int ret = 0;

    in_migration = enable;
    return ret;
}
hwaddr memory_region_section_get_iotlb(CPUArchState *env,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + xlat;
        if (!section->readonly) {
            iotlb |= phys_section_notdirty;
        } else {
            iotlb |= phys_section_rom;
        }
    } else {
        iotlb = section - phys_sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = phys_section_watch + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
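
/*
 * The iotlb value built above is either the page-aligned ram_addr of the
 * backing RAM, possibly ORed with the notdirty/rom section indexes, or,
 * for MMIO, the index of the MemoryRegionSection itself plus the offset
 * within it; iotlb_to_region() later recovers the MemoryRegion from the
 * sub-page bits.
 */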
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void destroy_page_desc(uint16_t section_index)
{
    MemoryRegionSection *section = &phys_sections[section_index];
    MemoryRegion *mr = section->mr;

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        memory_region_destroy(&subpage->iomem);
        g_free(subpage);
    }
}

static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
{
    unsigned i;
    PhysPageEntry *p;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = phys_map_nodes[lp->ptr];
    for (i = 0; i < L2_SIZE; ++i) {
        if (!p[i].is_leaf) {
            destroy_l2_mapping(&p[i], level - 1);
        } else {
            destroy_page_desc(p[i].ptr);
        }
    }
    lp->is_leaf = 0;
    lp->ptr = PHYS_MAP_NODE_NIL;
}

static void destroy_all_mappings(AddressSpaceDispatch *d)
{
    destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
    phys_map_nodes_reset();
}

static uint16_t phys_section_add(MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(phys_sections_nb < TARGET_PAGE_SIZE);

    if (phys_sections_nb == phys_sections_nb_alloc) {
        phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
        phys_sections = g_renew(MemoryRegionSection, phys_sections,
                                phys_sections_nb_alloc);
    }
    phys_sections[phys_sections_nb] = *section;
    return phys_sections_nb++;
}

static void phys_sections_clear(void)
{
    phys_sections_nb = 0;
}
static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d, base >> TARGET_PAGE_BITS);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = TARGET_PAGE_SIZE,
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + section->size - 1;
    subpage_register(subpage, start, end, phys_section_add(section));
}
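
/*
 * A subpage is created whenever a MemoryRegionSection does not cover its
 * target page completely: the page is backed by a subpage_t MemoryRegion
 * whose sub_section[] table maps each offset within the page to the
 * section that really owns it.
 */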
static void register_multipage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = section->size;
    hwaddr addr;
    uint16_t section_index = phys_section_add(section);

    assert(size);

    addr = start_addr;
    phys_page_set(d, addr >> TARGET_PAGE_BITS, size >> TARGET_PAGE_BITS,
                  section_index);
}

QEMU_BUILD_BUG_ON(TARGET_PHYS_ADDR_SPACE_BITS > MAX_PHYS_ADDR_SPACE_BITS)

static MemoryRegionSection limit(MemoryRegionSection section)
{
    section.size = MIN(section.offset_within_address_space + section.size,
                       MAX_PHYS_ADDR + 1)
                   - section.offset_within_address_space;

    return section;
}
static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
    MemoryRegionSection now = limit(*section), remain = limit(*section);

    if ((now.offset_within_address_space & ~TARGET_PAGE_MASK)
        || (now.size < TARGET_PAGE_SIZE)) {
        now.size = MIN(TARGET_PAGE_ALIGN(now.offset_within_address_space)
                       - now.offset_within_address_space,
                       now.size);
        register_subpage(d, &now);
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
    }
    while (remain.size >= TARGET_PAGE_SIZE) {
        now = remain;
        if (remain.offset_within_region & ~TARGET_PAGE_MASK) {
            now.size = TARGET_PAGE_SIZE;
            register_subpage(d, &now);
        } else {
            now.size &= TARGET_PAGE_MASK;
            register_multipage(d, &now);
        }
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
    }
    now = remain;
    if (now.size) {
        register_subpage(d, &now);
    }
}
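
/*
 * mem_add() above splits an incoming section into an unaligned head, a
 * run of whole target pages (register_multipage) and an unaligned tail;
 * the head and tail go through the subpage machinery.
 */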
void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}

#if defined(__linux__) && !defined(TARGET_S390X)

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC       0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}
static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    char *sanitized_name;
    char *c;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    /* Make name safe to use with mkstemp by replacing '/' with '_'. */
    sanitized_name = g_strdup(block->mr->name);
    for (c = sanitized_name; *c != '\0'; c++) {
        if (*c == '/')
            *c = '_';
    }

    filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
                               sanitized_name);
    g_free(sanitized_name);

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        g_free(filename);
        return NULL;
    }
    unlink(filename);
    g_free(filename);

    memory = (memory+hpagesize-1) & ~(hpagesize-1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return NULL;
    }
    block->fd = fd;
    return area;
}
#endif
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    assert(size != 0); /* it would hand out same offset multiple times */

    if (QTAILQ_EMPTY(&ram_list.blocks))
        return 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->length;

        QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}

ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next)
        last = MAX(last, block->offset + block->length);

    return last;
}

static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
{
    int ret;
    QemuOpts *machine_opts;

    /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
    machine_opts = qemu_opts_find(qemu_find_opts("machine"), 0);
    if (machine_opts &&
        !qemu_opt_get_bool(machine_opts, "dump-guest-core", true)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                            "but dump_guest_core=off specified\n");
        }
    }
}

void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block, *block;

    new_block = NULL;
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
            new_block = block;
            break;
        }
    }
    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev) {
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
    qemu_mutex_unlock_ramlist();
}

static int memory_try_enable_merging(void *addr, size_t len)
{
    QemuOpts *opts;

    opts = qemu_opts_find(qemu_find_opts("machine"), 0);
    if (opts && !qemu_opt_get_bool(opts, "mem-merge", true)) {
        /* disabled by the user */
        return 0;
    }

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
}
ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr)
{
    RAMBlock *block, *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    new_block->mr = mr;
    new_block->offset = find_ram_offset(size);
    if (host) {
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
    } else {
        if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
            new_block->host = file_ram_alloc(new_block, size, mem_path);
            if (!new_block->host) {
                new_block->host = qemu_anon_ram_alloc(size);
                memory_try_enable_merging(new_block->host, size);
            }
#else
            fprintf(stderr, "-mem-path option unsupported\n");
            exit(1);
#endif
        } else {
            if (xen_enabled()) {
                xen_ram_alloc(new_block->offset, size, mr);
            } else if (kvm_enabled()) {
                /* some s390/kvm configurations have special constraints */
                new_block->host = kvm_ram_alloc(size);
            } else {
                new_block->host = qemu_anon_ram_alloc(size);
            }
            memory_try_enable_merging(new_block->host, size);
        }
    }
    new_block->length = size;

    /* Keep the list sorted from biggest to smallest block.  */
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->length < new_block->length) {
            break;
        }
    }
    if (block) {
        QTAILQ_INSERT_BEFORE(block, new_block, next);
    } else {
        QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
    }
    ram_list.mru_block = NULL;

    qemu_mutex_unlock_ramlist();

    ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
                                    last_ram_offset() >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0, size >> TARGET_PAGE_BITS);
    cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);

    qemu_ram_setup_dump(new_block->host, size);
    qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}
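
/*
 * Summary of the allocation paths above: a caller-supplied "host" pointer
 * is used as-is (RAM_PREALLOC), -mem-path memory is backed by a hugetlbfs
 * file via file_ram_alloc(), Xen guests obtain their RAM from the
 * hypervisor, some KVM setups use kvm_ram_alloc(), and everything else
 * falls back to anonymous host memory.
 */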
ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
{
    return qemu_ram_alloc_from_ptr(size, NULL, mr);
}

void qemu_ram_free_from_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            g_free(block);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}

void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
                if (block->fd) {
                    munmap(block->host, block->length);
                    close(block->fd);
                } else {
                    qemu_anon_ram_free(block->host, block->length);
                }
#else
                abort();
#endif
            } else {
                if (xen_enabled()) {
                    xen_invalidate_map_cache_entry(block->host);
                } else {
                    qemu_anon_ram_free(block->host, block->length);
                }
            }
            g_free(block);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}

#ifndef _WIN32
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->length) {
            vaddr = block->host + offset;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else {
                flags = MAP_FIXED;
                munmap(vaddr, length);
                if (mem_path) {
#if defined(__linux__) && !defined(TARGET_S390X)
                    if (block->fd) {
#ifdef MAP_POPULATE
                        flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
                            MAP_PRIVATE;
#else
                        flags |= MAP_PRIVATE;
#endif
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, block->fd, offset);
                    } else {
                        flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, -1, 0);
                    }
#else
                    abort();
#endif
                } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                    flags |= MAP_SHARED | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
                                flags, -1, 0);
#else
                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
#endif
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                memory_try_enable_merging(vaddr, length);
                qemu_ram_setup_dump(vaddr, length);
            }
            return;
        }
    }
}
#endif /* !_WIN32 */
/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    /* The list is protected by the iothread lock here.  */
    block = ram_list.mru_block;
    if (block && addr - block->offset < block->length) {
        goto found;
    }
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    ram_list.mru_block = block;
    if (xen_enabled()) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map until the end of the page.
         */
        if (block->offset == 0) {
            return xen_map_cache(addr, 0, 0);
        } else if (block->host == NULL) {
            block->host =
                xen_map_cache(block->offset, block->length, 1);
        }
    }
    return block->host + (addr - block->offset);
}
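
/*
 * ram_list.mru_block caches the block found by the last lookup, so
 * repeated accesses to the same RAMBlock avoid walking the whole
 * size-sorted block list.
 */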
/* Return a host pointer to ram allocated with qemu_ram_alloc.  Same as
 * qemu_get_ram_ptr but do not touch ram_list.mru_block.
 *
 * ??? Is this still necessary?
 */
static void *qemu_safe_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    /* The list is protected by the iothread lock here.  */
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            if (xen_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                    block->host =
                        xen_map_cache(block->offset, block->length, 1);
                }
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}

/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
 * but takes a size argument */
static void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
{
    if (*size == 0) {
        return NULL;
    }
    if (xen_enabled()) {
        return xen_map_cache(addr, *size, 1);
    } else {
        RAMBlock *block;

        QTAILQ_FOREACH(block, &ram_list.blocks, next) {
            if (addr - block->offset < block->length) {
                if (addr - block->offset + *size > block->length)
                    *size = block->length - addr + block->offset;
                return block->host + (addr - block->offset);
            }
        }

        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
        abort();
    }
}

int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_enabled()) {
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
        return 0;
    }

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        /* This case appears when the block is not mapped. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->length) {
            *ram_addr = block->offset + (host - block->host);
            return 0;
        }
    }

    return -1;
}

/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }
    return ram_addr;
}
static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
                               uint64_t val, unsigned size)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
        tb_invalidate_phys_page_fast(ram_addr, size);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    }
    switch (size) {
    case 1:
        stb_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 2:
        stw_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 4:
        stl_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    default:
        abort();
    }
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
                                 unsigned size, bool is_write)
{
    return is_write;
}

static const MemoryRegionOps notdirty_mem_ops = {
    .write = notdirty_mem_write,
    .valid.accepts = notdirty_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
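
/*
 * io_mem_notdirty backs write TLB entries for pages that are not yet
 * fully dirty (e.g. pages containing translated code or pages tracked
 * during migration): the write is performed here, affected TBs are
 * invalidated, the dirty flags are set, and once the page is fully dirty
 * (0xff) the TLB entry is flipped back to a direct RAM write.
 */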
/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUArchState *env = cpu_single_env;
    target_ulong pc, cs_base;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(ENV_GET_CPU(env), CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb_check_watchpoint(env);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                    cpu_loop_exit(env);
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                    cpu_resume_from_signal(env, NULL);
                }
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}

/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   memory routines.  */
static uint64_t watch_mem_read(void *opaque, hwaddr addr,
                               unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
    switch (size) {
    case 1: return ldub_phys(addr);
    case 2: return lduw_phys(addr);
    case 4: return ldl_phys(addr);
    default: abort();
    }
}

static void watch_mem_write(void *opaque, hwaddr addr,
                            uint64_t val, unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
    switch (size) {
    case 1:
        stb_phys(addr, val);
        break;
    case 2:
        stw_phys(addr, val);
        break;
    case 4:
        stl_phys(addr, val);
        break;
    default: abort();
    }
}

static const MemoryRegionOps watch_mem_ops = {
    .read = watch_mem_read,
    .write = watch_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
static uint64_t subpage_read(void *opaque, hwaddr addr,
                             unsigned len)
{
    subpage_t *subpage = opaque;
    uint8_t buf[4];

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx "\n", __func__,
           subpage, len, addr);
#endif
    address_space_read(subpage->as, addr + subpage->base, buf, len);
    switch (len) {
    case 1:
        return ldub_p(buf);
    case 2:
        return lduw_p(buf);
    case 4:
        return ldl_p(buf);
    default:
        abort();
    }
}

static void subpage_write(void *opaque, hwaddr addr,
                          uint64_t value, unsigned len)
{
    subpage_t *subpage = opaque;
    uint8_t buf[4];

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx
           " value %"PRIx64"\n",
           __func__, subpage, len, addr, value);
#endif
    switch (len) {
    case 1:
        stb_p(buf, value);
        break;
    case 2:
        stw_p(buf, value);
        break;
    case 4:
        stl_p(buf, value);
        break;
    default:
        abort();
    }
    address_space_write(subpage->as, addr + subpage->base, buf, len);
}

static bool subpage_accepts(void *opaque, hwaddr addr,
                            unsigned size, bool is_write)
{
    subpage_t *subpage = opaque;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p %c len %d addr " TARGET_FMT_plx "\n",
           __func__, subpage, is_write ? 'w' : 'r', size, addr);
#endif

    return address_space_access_valid(subpage->as, addr + subpage->base,
                                      size, is_write);
}

static const MemoryRegionOps subpage_ops = {
    .read = subpage_read,
    .write = subpage_write,
    .valid.accepts = subpage_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    for (; idx <= eidx; idx++) {
        mmio->sub_section[idx] = section;
    }

    return 0;
}

static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
{
    subpage_t *mmio;

    mmio = g_malloc0(sizeof(subpage_t));

    mmio->as = as;
    mmio->base = base;
    memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
                          "subpage", TARGET_PAGE_SIZE);
    mmio->iomem.subpage = true;
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned);

    return mmio;
}

static uint16_t dummy_section(MemoryRegion *mr)
{
    MemoryRegionSection section = {
        .mr = mr,
        .offset_within_address_space = 0,
        .offset_within_region = 0,
        .size = UINT64_MAX,
    };

    return phys_section_add(&section);
}

MemoryRegion *iotlb_to_region(hwaddr index)
{
    return phys_sections[index & ~TARGET_PAGE_MASK].mr;
}
static void io_mem_init(void)
{
    memory_region_init_io(&io_mem_rom, &unassigned_mem_ops, NULL, "rom", UINT64_MAX);
    memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
                          "unassigned", UINT64_MAX);
    memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
                          "notdirty", UINT64_MAX);
    memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
                          "watch", UINT64_MAX);
}

static void mem_begin(MemoryListener *listener)
{
    AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);

    destroy_all_mappings(d);
    d->phys_map.ptr = PHYS_MAP_NODE_NIL;
}

static void core_begin(MemoryListener *listener)
{
    phys_sections_clear();
    phys_section_unassigned = dummy_section(&io_mem_unassigned);
    phys_section_notdirty = dummy_section(&io_mem_notdirty);
    phys_section_rom = dummy_section(&io_mem_rom);
    phys_section_watch = dummy_section(&io_mem_watch);
}

static void tcg_commit(MemoryListener *listener)
{
    CPUArchState *env;

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}

static void core_log_global_start(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(1);
}

static void core_log_global_stop(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(0);
}

static void io_region_add(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    MemoryRegionIORange *mrio = g_new(MemoryRegionIORange, 1);

    mrio->mr = section->mr;
    mrio->offset = section->offset_within_region;
    iorange_init(&mrio->iorange, &memory_region_iorange_ops,
                 section->offset_within_address_space, section->size);
    ioport_register(&mrio->iorange);
}

static void io_region_del(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    isa_unassign_ioport(section->offset_within_address_space, section->size);
}

static MemoryListener core_memory_listener = {
    .begin = core_begin,
    .log_global_start = core_log_global_start,
    .log_global_stop = core_log_global_stop,
};

static MemoryListener io_memory_listener = {
    .region_add = io_region_add,
    .region_del = io_region_del,
};

static MemoryListener tcg_memory_listener = {
    .commit = tcg_commit,
};

void address_space_init_dispatch(AddressSpace *as)
{
    AddressSpaceDispatch *d = g_new(AddressSpaceDispatch, 1);

    d->phys_map  = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };
    d->listener = (MemoryListener) {
        .begin = mem_begin,
        .region_add = mem_add,
        .region_nop = mem_add,
    };
    d->as = as;
    as->dispatch = d;
    memory_listener_register(&d->listener, as);
}

void address_space_destroy_dispatch(AddressSpace *as)
{
    AddressSpaceDispatch *d = as->dispatch;

    memory_listener_unregister(&d->listener);
    destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
    g_free(d);
    as->dispatch = NULL;
}

static void memory_map_init(void)
{
    system_memory = g_malloc(sizeof(*system_memory));
    memory_region_init(system_memory, "system", INT64_MAX);
    address_space_init(&address_space_memory, system_memory);
    address_space_memory.name = "memory";

    system_io = g_malloc(sizeof(*system_io));
    memory_region_init(system_io, "io", 65536);
    address_space_init(&address_space_io, system_io);
    address_space_io.name = "I/O";

    memory_listener_register(&core_memory_listener, &address_space_memory);
    memory_listener_register(&io_memory_listener, &address_space_io);
    memory_listener_register(&tcg_memory_listener, &address_space_memory);

    dma_context_init(&dma_context_memory, &address_space_memory,
                     NULL, NULL, NULL);
}

MemoryRegion *get_system_memory(void)
{
    return system_memory;
}

MemoryRegion *get_system_io(void)
{
    return system_io;
}

#endif /* !defined(CONFIG_USER_ONLY) */
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

#else

static void invalidate_and_set_dirty(hwaddr addr,
                                     hwaddr length)
{
    if (!cpu_physical_memory_is_dirty(addr)) {
        /* invalidate code */
        tb_invalidate_phys_page_range(addr, addr + length, 0);
        /* set dirty bit */
        cpu_physical_memory_set_dirty_flags(addr, (0xff & ~CODE_DIRTY_FLAG));
    }
    xen_modified_memory(addr, length);
}

static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
{
    if (memory_region_is_ram(mr)) {
        return !(is_write && mr->readonly);
    }
    if (memory_region_is_romd(mr)) {
        return !is_write;
    }

    return false;
}

static inline int memory_access_size(MemoryRegion *mr, int l, hwaddr addr)
{
    if (l >= 4 && (((addr & 3) == 0 || mr->ops->impl.unaligned))) {
        return 4;
    }
    if (l >= 2 && (((addr & 1) == 0) || mr->ops->impl.unaligned)) {
        return 2;
    }
    return 1;
}
bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
                      int len, bool is_write)
{
    hwaddr l;
    uint8_t *ptr;
    uint64_t val;
    hwaddr addr1;
    MemoryRegionSection *section;
    bool error = false;

    while (len > 0) {
        l = len;
        section = address_space_translate(as, addr, &addr1, &l, is_write);

        if (is_write) {
            if (!memory_access_is_direct(section->mr, is_write)) {
                l = memory_access_size(section->mr, l, addr1);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l == 4) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    error |= io_mem_write(section->mr, addr1, val, 4);
                } else if (l == 2) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    error |= io_mem_write(section->mr, addr1, val, 2);
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    error |= io_mem_write(section->mr, addr1, val, 1);
                }
            } else {
                addr1 += memory_region_get_ram_addr(section->mr);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(addr1, l);
            }
        } else {
            if (!memory_access_is_direct(section->mr, is_write)) {
                /* I/O case */
                l = memory_access_size(section->mr, l, addr1);
                if (l == 4) {
                    /* 32 bit read access */
                    error |= io_mem_read(section->mr, addr1, &val, 4);
                    stl_p(buf, val);
                } else if (l == 2) {
                    /* 16 bit read access */
                    error |= io_mem_read(section->mr, addr1, &val, 2);
                    stw_p(buf, val);
                } else {
                    /* 8 bit read access */
                    error |= io_mem_read(section->mr, addr1, &val, 1);
                    stb_p(buf, val);
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(section->mr->ram_addr + addr1);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }

    return error;
}
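
/*
 * Typical use of the slow path above: device emulation and the DMA
 * helpers funnel through cpu_physical_memory_rw()/address_space_rw(),
 * e.g. a DMA write of a 512-byte sector ends up as
 *     address_space_rw(&address_space_memory, gpa, sector_buf, 512, true);
 * which is split here into per-section chunks, using memcpy for RAM and
 * io_mem_write() for MMIO regions.
 */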
bool address_space_write(AddressSpace *as, hwaddr addr,
                         const uint8_t *buf, int len)
{
    return address_space_rw(as, addr, (uint8_t *)buf, len, true);
}

bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
{
    return address_space_rw(as, addr, buf, len, false);
}

void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
                            int len, int is_write)
{
    address_space_rw(&address_space_memory, addr, buf, len, is_write);
}

/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(hwaddr addr,
                                   const uint8_t *buf, int len)
{
    hwaddr l;
    uint8_t *ptr;
    hwaddr addr1;
    MemoryRegionSection *section;

    while (len > 0) {
        l = len;
        section = address_space_translate(&address_space_memory,
                                          addr, &addr1, &l, true);

        if (!(memory_region_is_ram(section->mr) ||
              memory_region_is_romd(section->mr))) {
            /* do nothing */
        } else {
            addr1 += memory_region_get_ram_addr(section->mr);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
            invalidate_and_set_dirty(addr1, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
typedef struct {
    void *buffer;
    hwaddr addr;
    hwaddr len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = g_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

static void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    QLIST_REMOVE(client, link);
    g_free(client);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
    }
}

bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
{
    MemoryRegionSection *section;
    hwaddr l, xlat;

    while (len > 0) {
        l = len;
        section = address_space_translate(as, addr, &xlat, &l, is_write);
        if (!memory_access_is_direct(section->mr, is_write)) {
            l = memory_access_size(section->mr, l, addr);
            if (!memory_region_access_valid(section->mr, xlat, l, is_write)) {
                return false;
            }
        }

        len -= l;
        addr += l;
    }
    return true;
}
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *address_space_map(AddressSpace *as,
                        hwaddr addr,
                        hwaddr *plen,
                        bool is_write)
{
    hwaddr len = *plen;
    hwaddr todo = 0;
    hwaddr l, xlat;
    MemoryRegionSection *section;
    ram_addr_t raddr = RAM_ADDR_MAX;
    ram_addr_t rlen;
    void *ret;

    while (len > 0) {
        l = len;
        section = address_space_translate(as, addr, &xlat, &l, is_write);

        if (!memory_access_is_direct(section->mr, is_write)) {
            if (todo || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                address_space_read(as, addr, bounce.buffer, l);
            }

            *plen = l;
            return bounce.buffer;
        }
        if (!todo) {
            raddr = memory_region_get_ram_addr(section->mr) + xlat;
        } else {
            if (memory_region_get_ram_addr(section->mr) + xlat != raddr + todo) {
                break;
            }
        }

        len -= l;
        addr += l;
        todo += l;
    }
    rlen = todo;
    ret = qemu_ram_ptr_length(raddr, &rlen);
    *plen = rlen;
    return ret;
}
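
/*
 * If the first piece of the range is not directly accessible RAM, the map
 * falls back to the single static bounce buffer: reads are staged into it
 * here and writes are flushed back in address_space_unmap(), so only one
 * bounced mapping can be outstanding at a time.
 */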
/* Unmaps a memory region previously mapped by address_space_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         int is_write, hwaddr access_len)
{
    if (buffer != bounce.buffer) {
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                invalidate_and_set_dirty(addr1, l);
                addr1 += l;
                access_len -= l;
            }
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        return;
    }
    if (is_write) {
        address_space_write(as, bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}

void *cpu_physical_memory_map(hwaddr addr,
                              hwaddr *plen,
                              int is_write)
{
    return address_space_map(&address_space_memory, addr, plen, is_write);
}

void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                               int is_write, hwaddr access_len)
{
    return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
}
/* warning: addr must be aligned */
static inline uint32_t ldl_phys_internal(hwaddr addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegionSection *section;
    hwaddr l = 4;
    hwaddr addr1;

    section = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                      false);
    if (l < 4 || !memory_access_is_direct(section->mr, false)) {
        /* I/O case */
        io_mem_read(section->mr, addr1, &val, 4);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t ldl_phys(hwaddr addr)
{
    return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t ldl_le_phys(hwaddr addr)
{
    return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t ldl_be_phys(hwaddr addr)
{
    return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
}
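
/*
 * The ldl/ldq/lduw and stl/stw/stb helpers in this part of the file all
 * follow the same pattern: translate the physical address, use
 * io_mem_read()/io_mem_write() for MMIO (byte-swapping when the requested
 * endianness differs from the target's) and a direct host load/store for
 * RAM.
 */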
/* warning: addr must be aligned */
static inline uint64_t ldq_phys_internal(hwaddr addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegionSection *section;
    hwaddr l = 8;
    hwaddr addr1;

    section = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                      false);
    if (l < 8 || !memory_access_is_direct(section->mr, false)) {
        /* I/O case */
        io_mem_read(section->mr, addr1, &val, 8);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap64(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap64(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
    }
    return val;
}

uint64_t ldq_phys(hwaddr addr)
{
    return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint64_t ldq_le_phys(hwaddr addr)
{
    return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint64_t ldq_be_phys(hwaddr addr)
{
    return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
uint32_t ldub_phys(hwaddr addr)
{
    uint8_t val;

    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* warning: addr must be aligned */
static inline uint32_t lduw_phys_internal(hwaddr addr,
                                          enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegionSection *section;
    hwaddr l = 2;
    hwaddr addr1;

    section = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                      false);
    if (l < 2 || !memory_access_is_direct(section->mr, false)) {
        /* I/O case */
        io_mem_read(section->mr, addr1, &val, 2);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t lduw_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t lduw_le_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t lduw_be_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(hwaddr addr, uint32_t val)
{
    uint8_t *ptr;
    MemoryRegionSection *section;
    hwaddr l = 4;
    hwaddr addr1;

    section = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                      true);
    if (l < 4 || !memory_access_is_direct(section->mr, true)) {
        io_mem_write(section->mr, addr1, val, 4);
    } else {
        addr1 += memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}
/* warning: addr must be aligned */
static inline void stl_phys_internal(hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegionSection *section;
    hwaddr l = 4;
    hwaddr addr1;

    section = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                      true);
    if (l < 4 || !memory_access_is_direct(section->mr, true)) {
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        io_mem_write(section->mr, addr1, val, 4);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 4);
    }
}

void stl_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stl_le_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stl_be_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stb_phys(hwaddr addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* warning: addr must be aligned */
static inline void stw_phys_internal(hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegionSection *section;
    hwaddr l = 2;
    hwaddr addr1;

    section = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                      true);
    if (l < 2 || !memory_access_is_direct(section->mr, true)) {
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        io_mem_write(section->mr, addr1, val, 2);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 2);
    }
}

void stw_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stw_le_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stw_be_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stq_phys(hwaddr addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_le_phys(hwaddr addr, uint64_t val)
{
    val = cpu_to_le64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_be_phys(hwaddr addr, uint64_t val)
{
    val = cpu_to_be64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
#endif

#if !defined(CONFIG_USER_ONLY)

/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool virtio_is_big_endian(void);
bool virtio_is_big_endian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}

#endif

#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegionSection *section;
    hwaddr l = 1;

    section = address_space_translate(&address_space_memory,
                                      phys_addr, &phys_addr, &l, false);

    return !(memory_region_is_ram(section->mr) ||
             memory_region_is_romd(section->mr));
}
#endif