/*
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <sys/types.h>

#include "qemu-common.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#endif
#include "exec/cpu-all.h"

#include "exec/cputlb.h"
#include "translate-all.h"

#include "exec/memory-internal.h"
//#define DEBUG_UNASSIGNED
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
static int in_migration;

RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;
DMAContext dma_context_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned, io_mem_subpage_ram;

#endif

CPUArchState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUArchState *,cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;
#if !defined(CONFIG_USER_ONLY)

static MemoryRegionSection *phys_sections;
static unsigned phys_sections_nb, phys_sections_nb_alloc;
static uint16_t phys_section_unassigned;
static uint16_t phys_section_notdirty;
static uint16_t phys_section_rom;
static uint16_t phys_section_watch;

/* Simple allocator for PhysPageEntry nodes */
static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;

#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)

static void io_mem_init(void);
static void memory_map_init(void);
static void *qemu_safe_ram_ptr(ram_addr_t addr);

static MemoryRegion io_mem_watch;
#endif
#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(unsigned nodes)
{
    if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
        typedef PhysPageEntry Node[L2_SIZE];
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
                                      phys_map_nodes_nb + nodes);
        phys_map_nodes = g_renew(Node, phys_map_nodes,
                                 phys_map_nodes_nb_alloc);
    }
}
static uint16_t phys_map_node_alloc(void)
{
    unsigned i;
    uint16_t ret;

    ret = phys_map_nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != phys_map_nodes_nb_alloc);
    for (i = 0; i < L2_SIZE; ++i) {
        phys_map_nodes[ret][i].is_leaf = 0;
        phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

static void phys_map_nodes_reset(void)
{
    phys_map_nodes_nb = 0;
}
static void phys_page_set_level(PhysPageEntry *lp, hwaddr *index,
                                hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    hwaddr step = (hwaddr)1 << (level * L2_BITS);

    if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc();
        p = phys_map_nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < L2_SIZE; i++) {
                p[i].is_leaf = 1;
                p[i].ptr = phys_section_unassigned;
            }
        }
    } else {
        p = phys_map_nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];

    while (*nb && lp < &p[L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->is_leaf = true;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}
static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(3 * P_L2_LEVELS);

    phys_page_set_level(&d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}
MemoryRegionSection *phys_page_find(AddressSpaceDispatch *d, hwaddr index)
{
    PhysPageEntry lp = d->phys_map;
    PhysPageEntry *p;
    int i;

    for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &phys_sections[phys_section_unassigned];
        }
        p = phys_map_nodes[lp.ptr];
        lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }
    return &phys_sections[lp.ptr];
}
bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}
#endif
void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    qemu_mutex_init(&ram_list.mutex);
    memory_map_init();
    io_mem_init();
#endif
}
#if !defined(CONFIG_USER_ONLY)
static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu->env_ptr, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields      = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
#else
#define vmstate_cpu_common vmstate_dummy
#endif
CPUState *qemu_get_cpu(int index)
{
    CPUArchState *env = first_cpu;
    CPUState *cpu = NULL;

    while (env) {
        cpu = ENV_GET_CPU(env);
        if (cpu->cpu_index == index) {
            break;
        }
        env = env->next_cpu;
    }

    return env ? cpu : NULL;
}
void qemu_for_each_cpu(void (*func)(CPUState *cpu, void *data), void *data)
{
    CPUArchState *env = first_cpu;

    while (env) {
        func(ENV_GET_CPU(env), data);
        env = env->next_cpu;
    }
}
void cpu_exec_init(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUArchState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    cpu->cpu_index = cpu_index;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
    assert(cc->vmsd == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}
#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc) |
            (pc & ~TARGET_PAGE_MASK));
}
#endif
#endif /* TARGET_HAS_ICE */
#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
}

int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len & (len - 1)) || (addr & ~len_mask) ||
            len == 0 || len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}
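
/* Usage sketch: how a caller such as the gdbstub might drive the watchpoint
 * API above to trap 4-byte writes to a guest virtual address.  The helper,
 * its name and the chosen flags are illustrative only.
 */
static int example_trap_guest_writes(CPUArchState *env, target_ulong guest_va)
{
    CPUWatchpoint *wp;
    int ret;

    /* len must be a power of two and guest_va must be aligned to it */
    ret = cpu_watchpoint_insert(env, guest_va, 4, BP_MEM_WRITE | BP_GDB, &wp);
    if (ret < 0) {
        return ret;                 /* e.g. -EINVAL for a misaligned request */
    }
    /* ... run the guest; check_watchpoint() marks wp with BP_WATCHPOINT_HIT ... */
    cpu_watchpoint_remove_by_ref(env, wp);
    return 0;
}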
/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}
/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}
/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif
/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}
/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}
/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    g_free(breakpoint);
#endif
}
/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUArchState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(env, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}
void cpu_exit(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);

    cpu->exit_request = 1;
    cpu->tcg_exit_req = 1;
}
void cpu_abort(CPUArchState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(env, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(env, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}
CPUArchState *cpu_copy(CPUArchState *env)
{
    CPUArchState *new_env = cpu_init(env->cpu_model_str);
    CPUArchState *next_cpu = new_env->next_cpu;
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    memcpy(new_env, env, sizeof(CPUArchState));

    /* Preserve chaining. */
    new_env->next_cpu = next_cpu;

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}
#if !defined(CONFIG_USER_ONLY)
static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
                                      uintptr_t length)
{
    uintptr_t start1;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (uintptr_t)qemu_safe_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
            != (end - 1) - start) {
        abort();
    }
    cpu_tlb_reset_dirty_all(start1, length);
}

/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    uintptr_t length;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    if (tcg_enabled()) {
        tlb_reset_dirty_range_all(start, end, length);
    }
}
static int cpu_physical_memory_set_dirty_tracking(int enable)
{
    int ret = 0;

    in_migration = enable;
    return ret;
}
hwaddr memory_region_section_get_iotlb(CPUArchState *env,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, paddr);
        if (!section->readonly) {
            iotlb |= phys_section_notdirty;
        } else {
            iotlb |= phys_section_rom;
        }
    } else {
        iotlb = section - phys_sections;
        iotlb += memory_region_section_addr(section, paddr);
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = phys_section_watch + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* defined(CONFIG_USER_ONLY) */
#if !defined(CONFIG_USER_ONLY)

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section);
static subpage_t *subpage_init(hwaddr base);
static void destroy_page_desc(uint16_t section_index)
{
    MemoryRegionSection *section = &phys_sections[section_index];
    MemoryRegion *mr = section->mr;

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        memory_region_destroy(&subpage->iomem);
        g_free(subpage);
    }
}
static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
{
    unsigned i;
    PhysPageEntry *p;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = phys_map_nodes[lp->ptr];
    for (i = 0; i < L2_SIZE; ++i) {
        if (!p[i].is_leaf) {
            destroy_l2_mapping(&p[i], level - 1);
        } else {
            destroy_page_desc(p[i].ptr);
        }
    }
    lp->is_leaf = 0;
    lp->ptr = PHYS_MAP_NODE_NIL;
}

static void destroy_all_mappings(AddressSpaceDispatch *d)
{
    destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
    phys_map_nodes_reset();
}
static uint16_t phys_section_add(MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(phys_sections_nb < TARGET_PAGE_SIZE);

    if (phys_sections_nb == phys_sections_nb_alloc) {
        phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
        phys_sections = g_renew(MemoryRegionSection, phys_sections,
                                phys_sections_nb_alloc);
    }
    phys_sections[phys_sections_nb] = *section;
    return phys_sections_nb++;
}

static void phys_sections_clear(void)
{
    phys_sections_nb = 0;
}
static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d, base >> TARGET_PAGE_BITS);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = TARGET_PAGE_SIZE,
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(base);
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + section->size - 1;
    subpage_register(subpage, start, end, phys_section_add(section));
}
static void register_multipage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = section->size;
    hwaddr addr;
    uint16_t section_index = phys_section_add(section);

    assert(size);

    addr = start_addr;
    phys_page_set(d, addr >> TARGET_PAGE_BITS, size >> TARGET_PAGE_BITS,
                  section_index);
}

QEMU_BUILD_BUG_ON(TARGET_PHYS_ADDR_SPACE_BITS > MAX_PHYS_ADDR_SPACE_BITS)
static MemoryRegionSection limit(MemoryRegionSection section)
{
    section.size = MIN(section.offset_within_address_space + section.size,
                       MAX_PHYS_ADDR + 1)
                   - section.offset_within_address_space;

    return section;
}
static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
    MemoryRegionSection now = limit(*section), remain = limit(*section);

    if ((now.offset_within_address_space & ~TARGET_PAGE_MASK)
        || (now.size < TARGET_PAGE_SIZE)) {
        now.size = MIN(TARGET_PAGE_ALIGN(now.offset_within_address_space)
                       - now.offset_within_address_space,
                       now.size);
        register_subpage(d, &now);
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
    }
    while (remain.size >= TARGET_PAGE_SIZE) {
        now = remain;
        if (remain.offset_within_region & ~TARGET_PAGE_MASK) {
            now.size = TARGET_PAGE_SIZE;
            register_subpage(d, &now);
        } else {
            now.size &= TARGET_PAGE_MASK;
            register_multipage(d, &now);
        }
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
    }
    now = remain;
    if (now.size) {
        register_subpage(d, &now);
    }
}
void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}
#if defined(__linux__) && !defined(TARGET_S390X)

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC       0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}
static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    char *sanitized_name;
    char *c;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    /* Make name safe to use with mkstemp by replacing '/' with '_'. */
    sanitized_name = g_strdup(block->mr->name);
    for (c = sanitized_name; *c != '\0'; c++) {
        if (*c == '/')
            *c = '_';
    }

    filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
                               sanitized_name);
    g_free(sanitized_name);

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        g_free(filename);
        return NULL;
    }
    unlink(filename);
    g_free(filename);

    memory = (memory+hpagesize-1) & ~(hpagesize-1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return NULL;
    }
    block->fd = fd;
    return area;
}
#endif
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    assert(size != 0); /* it would hand out same offset multiple times */

    if (QTAILQ_EMPTY(&ram_list.blocks))
        return 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->length;

        QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}

ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next)
        last = MAX(last, block->offset + block->length);

    return last;
}
static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
{
    int ret;
    QemuOpts *machine_opts;

    /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
    machine_opts = qemu_opts_find(qemu_find_opts("machine"), 0);
    if (machine_opts &&
        !qemu_opt_get_bool(machine_opts, "dump-guest-core", true)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                            "but dump_guest_core=off specified\n");
        }
    }
}
void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block, *block;

    new_block = NULL;
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
            new_block = block;
            break;
        }
    }
    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev) {
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
    qemu_mutex_unlock_ramlist();
}
static int memory_try_enable_merging(void *addr, size_t len)
{
    QemuOpts *opts;

    opts = qemu_opts_find(qemu_find_opts("machine"), 0);
    if (opts && !qemu_opt_get_bool(opts, "mem-merge", true)) {
        /* disabled by the user */
        return 0;
    }

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
}
ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr)
{
    RAMBlock *block, *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    new_block->mr = mr;
    new_block->offset = find_ram_offset(size);
    if (host) {
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
    } else {
        if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
            new_block->host = file_ram_alloc(new_block, size, mem_path);
            if (!new_block->host) {
                new_block->host = qemu_anon_ram_alloc(size);
                memory_try_enable_merging(new_block->host, size);
            }
#else
            fprintf(stderr, "-mem-path option unsupported\n");
            exit(1);
#endif
        } else {
            if (xen_enabled()) {
                xen_ram_alloc(new_block->offset, size, mr);
            } else if (kvm_enabled()) {
                /* some s390/kvm configurations have special constraints */
                new_block->host = kvm_ram_alloc(size);
            } else {
                new_block->host = qemu_anon_ram_alloc(size);
            }
            memory_try_enable_merging(new_block->host, size);
        }
    }
    new_block->length = size;

    /* Keep the list sorted from biggest to smallest block.  */
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->length < new_block->length) {
            break;
        }
    }
    if (block) {
        QTAILQ_INSERT_BEFORE(block, new_block, next);
    } else {
        QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
    }
    ram_list.mru_block = NULL;

    qemu_mutex_unlock_ramlist();

    ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
                                    last_ram_offset() >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0, size >> TARGET_PAGE_BITS);
    cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);

    qemu_ram_setup_dump(new_block->host, size);
    qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}
ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
{
    return qemu_ram_alloc_from_ptr(size, NULL, mr);
}
void qemu_ram_free_from_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            g_free(block);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}
void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
                if (block->fd) {
                    munmap(block->host, block->length);
                    close(block->fd);
                } else {
                    qemu_anon_ram_free(block->host, block->length);
                }
#else
                abort();
#endif
            } else {
                if (xen_enabled()) {
                    xen_invalidate_map_cache_entry(block->host);
                } else {
                    qemu_anon_ram_free(block->host, block->length);
                }
            }
            g_free(block);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}
#ifndef _WIN32
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->length) {
            vaddr = block->host + offset;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else {
                flags = MAP_FIXED;
                munmap(vaddr, length);
                if (mem_path) {
#if defined(__linux__) && !defined(TARGET_S390X)
                    if (block->fd) {
#ifdef MAP_POPULATE
                        flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
                            MAP_PRIVATE;
#else
                        flags |= MAP_PRIVATE;
#endif
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, block->fd, offset);
                    } else {
                        flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, -1, 0);
                    }
#else
                    abort();
#endif
                } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                    flags |= MAP_SHARED | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
                                flags, -1, 0);
#else
                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
#endif
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                memory_try_enable_merging(vaddr, length);
                qemu_ram_setup_dump(vaddr, length);
            }
            return;
        }
    }
}
#endif /* !_WIN32 */
/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    /* The list is protected by the iothread lock here.  */
    block = ram_list.mru_block;
    if (block && addr - block->offset < block->length) {
        goto found;
    }
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    ram_list.mru_block = block;
    if (xen_enabled()) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map until the end of the page.
         */
        if (block->offset == 0) {
            return xen_map_cache(addr, 0, 0);
        } else if (block->host == NULL) {
            block->host =
                xen_map_cache(block->offset, block->length, 1);
        }
    }
    return block->host + (addr - block->offset);
}
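
/* Usage sketch: per the comment above, a device model that owns a RAM-backed
 * region might allocate it and then resolve a host pointer for purely local
 * access.  The helper name and the 1 MiB size are illustrative only.
 */
static ram_addr_t example_alloc_local_vram(MemoryRegion *mr)
{
    ram_addr_t offset = qemu_ram_alloc(1024 * 1024, mr); /* guest RAM offset */
    void *host = qemu_get_ram_ptr(offset);               /* host view of it  */

    memset(host, 0, 1024 * 1024);                        /* device-local init */
    return offset;
}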
/* Return a host pointer to ram allocated with qemu_ram_alloc.  Same as
 * qemu_get_ram_ptr but do not touch ram_list.mru_block.
 *
 * ??? Is this still necessary?
 */
static void *qemu_safe_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    /* The list is protected by the iothread lock here.  */
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            if (xen_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                    block->host =
                        xen_map_cache(block->offset, block->length, 1);
                }
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}
/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
 * but takes a size argument */
static void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
{
    if (*size == 0) {
        return NULL;
    }
    if (xen_enabled()) {
        return xen_map_cache(addr, *size, 1);
    } else {
        RAMBlock *block;

        QTAILQ_FOREACH(block, &ram_list.blocks, next) {
            if (addr - block->offset < block->length) {
                if (addr - block->offset + *size > block->length)
                    *size = block->length - addr + block->offset;
                return block->host + (addr - block->offset);
            }
        }

        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
        abort();
    }
}
int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_enabled()) {
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
        return 0;
    }

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        /* This case appears when the block is not mapped. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->length) {
            *ram_addr = block->offset + (host - block->host);
            return 0;
        }
    }

    return -1;
}
/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }
    return ram_addr;
}
static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
                                    unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
#endif
    return 0;
}

static void unassigned_mem_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
#endif
}

static const MemoryRegionOps unassigned_mem_ops = {
    .read = unassigned_mem_read,
    .write = unassigned_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
                               uint64_t val, unsigned size)
{
    int dirty_flags;

    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
        tb_invalidate_phys_page_fast(ram_addr, size);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    }
    switch (size) {
    case 1:
        stb_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 2:
        stw_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 4:
        stl_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    default:
        abort();
    }
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static const MemoryRegionOps notdirty_mem_ops = {
    .read = unassigned_mem_read,
    .write = notdirty_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUArchState *env = cpu_single_env;
    target_ulong pc, cs_base;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(ENV_GET_CPU(env), CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb_check_watchpoint(env);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                    cpu_loop_exit(env);
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                    cpu_resume_from_signal(env, NULL);
                }
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint64_t watch_mem_read(void *opaque, hwaddr addr,
                               unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
    switch (size) {
    case 1: return ldub_phys(addr);
    case 2: return lduw_phys(addr);
    case 4: return ldl_phys(addr);
    default: abort();
    }
}

static void watch_mem_write(void *opaque, hwaddr addr,
                            uint64_t val, unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
    switch (size) {
    case 1:
        stb_phys(addr, val);
        break;
    case 2:
        stw_phys(addr, val);
        break;
    case 4:
        stl_phys(addr, val);
        break;
    default: abort();
    }
}

static const MemoryRegionOps watch_mem_ops = {
    .read = watch_mem_read,
    .write = watch_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
static uint64_t subpage_read(void *opaque, hwaddr addr,
                             unsigned len)
{
    subpage_t *mmio = opaque;
    unsigned int idx = SUBPAGE_IDX(addr);
    MemoryRegionSection *section;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif

    section = &phys_sections[mmio->sub_section[idx]];
    addr += mmio->base;
    addr -= section->offset_within_address_space;
    addr += section->offset_within_region;
    return io_mem_read(section->mr, addr, len);
}

static void subpage_write(void *opaque, hwaddr addr,
                          uint64_t value, unsigned len)
{
    subpage_t *mmio = opaque;
    unsigned int idx = SUBPAGE_IDX(addr);
    MemoryRegionSection *section;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx
           " idx %d value %"PRIx64"\n",
           __func__, mmio, len, addr, idx, value);
#endif

    section = &phys_sections[mmio->sub_section[idx]];
    addr += mmio->base;
    addr -= section->offset_within_address_space;
    addr += section->offset_within_region;
    io_mem_write(section->mr, addr, value, len);
}

static const MemoryRegionOps subpage_ops = {
    .read = subpage_read,
    .write = subpage_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
static uint64_t subpage_ram_read(void *opaque, hwaddr addr,
                                 unsigned size)
{
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    switch (size) {
    case 1: return ldub_p(ptr);
    case 2: return lduw_p(ptr);
    case 4: return ldl_p(ptr);
    default: abort();
    }
}

static void subpage_ram_write(void *opaque, hwaddr addr,
                              uint64_t value, unsigned size)
{
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    switch (size) {
    case 1: return stb_p(ptr, value);
    case 2: return stw_p(ptr, value);
    case 4: return stl_p(ptr, value);
    default: abort();
    }
}

static const MemoryRegionOps subpage_ram_ops = {
    .read = subpage_ram_read,
    .write = subpage_ram_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    if (memory_region_is_ram(phys_sections[section].mr)) {
        MemoryRegionSection new_section = phys_sections[section];
        new_section.mr = &io_mem_subpage_ram;
        section = phys_section_add(&new_section);
    }
    for (; idx <= eidx; idx++) {
        mmio->sub_section[idx] = section;
    }

    return 0;
}
static subpage_t *subpage_init(hwaddr base)
{
    subpage_t *mmio;

    mmio = g_malloc0(sizeof(subpage_t));

    mmio->base = base;
    memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
                          "subpage", TARGET_PAGE_SIZE);
    mmio->iomem.subpage = true;
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned);

    return mmio;
}
static uint16_t dummy_section(MemoryRegion *mr)
{
    MemoryRegionSection section = {
        .mr = mr,
        .offset_within_address_space = 0,
        .offset_within_region = 0,
        .size = UINT64_MAX,
    };

    return phys_section_add(&section);
}

MemoryRegion *iotlb_to_region(hwaddr index)
{
    return phys_sections[index & ~TARGET_PAGE_MASK].mr;
}
static void io_mem_init(void)
{
    memory_region_init_io(&io_mem_rom, &unassigned_mem_ops, NULL, "rom", UINT64_MAX);
    memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
                          "unassigned", UINT64_MAX);
    memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
                          "notdirty", UINT64_MAX);
    memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
                          "subpage-ram", UINT64_MAX);
    memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
                          "watch", UINT64_MAX);
}
static void mem_begin(MemoryListener *listener)
{
    AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);

    destroy_all_mappings(d);
    d->phys_map.ptr = PHYS_MAP_NODE_NIL;
}

static void core_begin(MemoryListener *listener)
{
    phys_sections_clear();
    phys_section_unassigned = dummy_section(&io_mem_unassigned);
    phys_section_notdirty = dummy_section(&io_mem_notdirty);
    phys_section_rom = dummy_section(&io_mem_rom);
    phys_section_watch = dummy_section(&io_mem_watch);
}
static void tcg_commit(MemoryListener *listener)
{
    CPUArchState *env;

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}

static void core_log_global_start(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(1);
}

static void core_log_global_stop(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(0);
}
static void io_region_add(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    MemoryRegionIORange *mrio = g_new(MemoryRegionIORange, 1);

    mrio->mr = section->mr;
    mrio->offset = section->offset_within_region;
    iorange_init(&mrio->iorange, &memory_region_iorange_ops,
                 section->offset_within_address_space, section->size);
    ioport_register(&mrio->iorange);
}

static void io_region_del(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    isa_unassign_ioport(section->offset_within_address_space, section->size);
}
static MemoryListener core_memory_listener = {
    .begin = core_begin,
    .log_global_start = core_log_global_start,
    .log_global_stop = core_log_global_stop,
};

static MemoryListener io_memory_listener = {
    .region_add = io_region_add,
    .region_del = io_region_del,
};

static MemoryListener tcg_memory_listener = {
    .commit = tcg_commit,
};
void address_space_init_dispatch(AddressSpace *as)
{
    AddressSpaceDispatch *d = g_new(AddressSpaceDispatch, 1);

    d->phys_map  = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };
    d->listener = (MemoryListener) {
        .begin = mem_begin,
        .region_add = mem_add,
        .region_nop = mem_add,
    };
    as->dispatch = d;
    memory_listener_register(&d->listener, as);
}

void address_space_destroy_dispatch(AddressSpace *as)
{
    AddressSpaceDispatch *d = as->dispatch;

    memory_listener_unregister(&d->listener);
    destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
    g_free(d);
    as->dispatch = NULL;
}
static void memory_map_init(void)
{
    system_memory = g_malloc(sizeof(*system_memory));
    memory_region_init(system_memory, "system", INT64_MAX);
    address_space_init(&address_space_memory, system_memory);
    address_space_memory.name = "memory";

    system_io = g_malloc(sizeof(*system_io));
    memory_region_init(system_io, "io", 65536);
    address_space_init(&address_space_io, system_io);
    address_space_io.name = "I/O";

    memory_listener_register(&core_memory_listener, &address_space_memory);
    memory_listener_register(&io_memory_listener, &address_space_io);
    memory_listener_register(&tcg_memory_listener, &address_space_memory);

    dma_context_init(&dma_context_memory, &address_space_memory,
                     NULL, NULL, NULL);
}

MemoryRegion *get_system_memory(void)
{
    return system_memory;
}

MemoryRegion *get_system_io(void)
{
    return system_io;
}

#endif /* !defined(CONFIG_USER_ONLY) */
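
/* Usage sketch: board code typically creates a RAM-backed region and maps it
 * into the tree returned by get_system_memory().  The region name, size and
 * base address below are illustrative, assuming the memory API of this era
 * (memory_region_init_ram() without an owner argument).
 */
static void example_map_board_ram(ram_addr_t size)
{
    MemoryRegion *ram = g_malloc(sizeof(*ram));

    memory_region_init_ram(ram, "example.ram", size);
    memory_region_add_subregion(get_system_memory(), 0, ram);
}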
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

#else
static void invalidate_and_set_dirty(hwaddr addr,
                                     hwaddr length)
{
    if (!cpu_physical_memory_is_dirty(addr)) {
        /* invalidate code */
        tb_invalidate_phys_page_range(addr, addr + length, 0);
        /* set dirty bit */
        cpu_physical_memory_set_dirty_flags(addr, (0xff & ~CODE_DIRTY_FLAG));
    }
    xen_modified_memory(addr, length);
}
void address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
                      int len, bool is_write)
{
    AddressSpaceDispatch *d = as->dispatch;
    int l;
    uint8_t *ptr;
    uint32_t val;
    hwaddr page;
    MemoryRegionSection *section;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        section = phys_page_find(d, page >> TARGET_PAGE_BITS);

        if (is_write) {
            if (!memory_region_is_ram(section->mr)) {
                hwaddr addr1;
                addr1 = memory_region_section_addr(section, addr);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write(section->mr, addr1, val, 4);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write(section->mr, addr1, val, 2);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write(section->mr, addr1, val, 1);
                    l = 1;
                }
            } else if (!section->readonly) {
                ram_addr_t addr1;
                addr1 = memory_region_get_ram_addr(section->mr)
                    + memory_region_section_addr(section, addr);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(addr1, l);
            }
        } else {
            if (!(memory_region_is_ram(section->mr) ||
                  memory_region_is_romd(section->mr))) {
                hwaddr addr1;
                /* I/O case */
                addr1 = memory_region_section_addr(section, addr);
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read(section->mr, addr1, 4);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read(section->mr, addr1, 2);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read(section->mr, addr1, 1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(section->mr->ram_addr
                                       + memory_region_section_addr(section,
                                                                    addr));
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
void address_space_write(AddressSpace *as, hwaddr addr,
                         const uint8_t *buf, int len)
{
    address_space_rw(as, addr, (uint8_t *)buf, len, true);
}
/**
 * address_space_read: read from an address space.
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @buf: buffer with the data transferred
 */
void address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
{
    address_space_rw(as, addr, buf, len, false);
}
void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
                            int len, int is_write)
{
    return address_space_rw(&address_space_memory, addr, buf, len, is_write);
}
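
/* Usage sketch: reading and writing guest-physical memory through the
 * wrapper above.  The address and the payload are illustrative only.
 */
static void example_guest_phys_copy(hwaddr gpa)
{
    uint8_t out[4] = { 0xde, 0xad, 0xbe, 0xef };
    uint8_t in[4];

    cpu_physical_memory_rw(gpa, out, sizeof(out), 1);  /* write to the guest */
    cpu_physical_memory_rw(gpa, in, sizeof(in), 0);    /* read it back */
}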
/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(hwaddr addr,
                                   const uint8_t *buf, int len)
{
    AddressSpaceDispatch *d = address_space_memory.dispatch;
    int l;
    uint8_t *ptr;
    hwaddr page;
    MemoryRegionSection *section;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        section = phys_page_find(d, page >> TARGET_PAGE_BITS);

        if (!(memory_region_is_ram(section->mr) ||
              memory_region_is_romd(section->mr))) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = memory_region_get_ram_addr(section->mr)
                + memory_region_section_addr(section, addr);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
            invalidate_and_set_dirty(addr1, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
;
2016 typedef struct MapClient
{
2018 void (*callback
)(void *opaque
);
2019 QLIST_ENTRY(MapClient
) link
;
2022 static QLIST_HEAD(map_client_list
, MapClient
) map_client_list
2023 = QLIST_HEAD_INITIALIZER(map_client_list
);
2025 void *cpu_register_map_client(void *opaque
, void (*callback
)(void *opaque
))
2027 MapClient
*client
= g_malloc(sizeof(*client
));
2029 client
->opaque
= opaque
;
2030 client
->callback
= callback
;
2031 QLIST_INSERT_HEAD(&map_client_list
, client
, link
);
2035 static void cpu_unregister_map_client(void *_client
)
2037 MapClient
*client
= (MapClient
*)_client
;
2039 QLIST_REMOVE(client
, link
);
2043 static void cpu_notify_map_clients(void)
2047 while (!QLIST_EMPTY(&map_client_list
)) {
2048 client
= QLIST_FIRST(&map_client_list
);
2049 client
->callback(client
->opaque
);
2050 cpu_unregister_map_client(client
);
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *address_space_map(AddressSpace *as,
                        hwaddr addr,
                        hwaddr *plen,
                        bool is_write)
{
    AddressSpaceDispatch *d = as->dispatch;
    hwaddr len = *plen;
    hwaddr todo = 0;
    int l;
    hwaddr page;
    MemoryRegionSection *section;
    ram_addr_t raddr = RAM_ADDR_MAX;
    ram_addr_t rlen;
    void *ret;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        section = phys_page_find(d, page >> TARGET_PAGE_BITS);

        if (!(memory_region_is_ram(section->mr) && !section->readonly)) {
            if (todo || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                address_space_read(as, addr, bounce.buffer, l);
            }

            *plen = l;
            return bounce.buffer;
        }
        if (!todo) {
            raddr = memory_region_get_ram_addr(section->mr)
                + memory_region_section_addr(section, addr);
        }

        len -= l;
        addr += l;
        todo += l;
    }
    rlen = todo;
    ret = qemu_ram_ptr_length(raddr, &rlen);
    *plen = rlen;
    return ret;
}
/* Unmaps a memory region previously mapped by address_space_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         int is_write, hwaddr access_len)
{
    if (buffer != bounce.buffer) {
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                invalidate_and_set_dirty(addr1, l);
                addr1 += l;
                access_len -= l;
            }
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        return;
    }
    if (is_write) {
        address_space_write(as, bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}
void *cpu_physical_memory_map(hwaddr addr,
                              hwaddr *plen,
                              int is_write)
{
    return address_space_map(&address_space_memory, addr, plen, is_write);
}

void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                               int is_write, hwaddr access_len)
{
    return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
}
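
/* Usage sketch: the intended zero-copy DMA pattern for the map/unmap pair
 * above - map, transfer directly into the returned host buffer, then unmap
 * with the number of bytes actually used.  The device callback that would
 * retry via cpu_register_map_client() on a NULL return is omitted here.
 */
static void example_dma_write(hwaddr gpa, const uint8_t *data, hwaddr size)
{
    hwaddr plen = size;
    void *host = cpu_physical_memory_map(gpa, &plen, 1 /* is_write */);

    if (!host) {
        return;                 /* resources exhausted; caller should retry */
    }
    memcpy(host, data, plen);   /* plen may be smaller than the request */
    cpu_physical_memory_unmap(host, plen, 1, plen);
}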
/* warning: addr must be aligned */
static inline uint32_t ldl_phys_internal(hwaddr addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint32_t val;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);

    if (!(memory_region_is_ram(section->mr) ||
          memory_region_is_romd(section->mr))) {
        /* I/O case */
        addr = memory_region_section_addr(section, addr);
        val = io_mem_read(section->mr, addr, 4);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t ldl_phys(hwaddr addr)
{
    return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t ldl_le_phys(hwaddr addr)
{
    return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t ldl_be_phys(hwaddr addr)
{
    return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
}
/* warning: addr must be aligned */
static inline uint64_t ldq_phys_internal(hwaddr addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);

    if (!(memory_region_is_ram(section->mr) ||
          memory_region_is_romd(section->mr))) {
        /* I/O case */
        addr = memory_region_section_addr(section, addr);

        /* XXX This is broken when device endian != cpu endian.
               Fix and add "endian" variable check */
#ifdef TARGET_WORDS_BIGENDIAN
        val = io_mem_read(section->mr, addr, 4) << 32;
        val |= io_mem_read(section->mr, addr + 4, 4);
#else
        val = io_mem_read(section->mr, addr, 4);
        val |= io_mem_read(section->mr, addr + 4, 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
    }
    return val;
}

uint64_t ldq_phys(hwaddr addr)
{
    return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint64_t ldq_le_phys(hwaddr addr)
{
    return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint64_t ldq_be_phys(hwaddr addr)
{
    return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
}
uint32_t ldub_phys(hwaddr addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}
/* warning: addr must be aligned */
static inline uint32_t lduw_phys_internal(hwaddr addr,
                                          enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);

    if (!(memory_region_is_ram(section->mr) ||
          memory_region_is_romd(section->mr))) {
        /* I/O case */
        addr = memory_region_section_addr(section, addr);
        val = io_mem_read(section->mr, addr, 2);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t lduw_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t lduw_le_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t lduw_be_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
}
/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(hwaddr addr, uint32_t val)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
        io_mem_write(section->mr, addr, val, 4);
    } else {
        unsigned long addr1 = (memory_region_get_ram_addr(section->mr)
                               & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, addr);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}
/* warning: addr must be aligned */
static inline void stl_phys_internal(hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        io_mem_write(section->mr, addr, val, 4);
    } else {
        unsigned long addr1;
        addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, addr);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 4);
    }
}

void stl_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stl_le_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stl_be_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}
void stb_phys(hwaddr addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}
/* warning: addr must be aligned */
static inline void stw_phys_internal(hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        io_mem_write(section->mr, addr, val, 2);
    } else {
        unsigned long addr1;
        addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, addr);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 2);
    }
}

void stw_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stw_le_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stw_be_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}
void stq_phys(hwaddr addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_le_phys(hwaddr addr, uint64_t val)
{
    val = cpu_to_le64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_be_phys(hwaddr addr, uint64_t val)
{
    val = cpu_to_be64(val);
    cpu_physical_memory_write(addr, &val, 8);
}
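
/* Usage sketch: a device that lays out a descriptor in guest memory with an
 * explicit byte order uses the _le/_be accessors above rather than the
 * target-endian ones.  The descriptor layout below is illustrative only.
 */
static void example_write_le_descriptor(hwaddr desc, hwaddr buf, uint32_t len)
{
    stq_le_phys(desc + 0, buf);      /* 64-bit buffer address, little endian */
    stl_le_phys(desc + 8, len);      /* 32-bit length */
    stw_le_phys(desc + 12, 0x0001);  /* 16-bit flags */
}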
/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
#endif
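
/* Usage sketch: how a debugger-style caller might peek at guest virtual
 * memory using the helper above; the chosen 32-bit width is illustrative.
 */
static int example_peek_guest_u32(CPUArchState *env, target_ulong va,
                                  uint32_t *value)
{
    uint8_t buf[4];

    if (cpu_memory_rw_debug(env, va, buf, sizeof(buf), 0) < 0) {
        return -1;              /* no physical page mapped at this address */
    }
    *value = ldl_p(buf);        /* interpret in target byte order */
    return 0;
}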
#if !defined(CONFIG_USER_ONLY)

/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool virtio_is_big_endian(void);
bool virtio_is_big_endian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}

#endif
2573 bool cpu_physical_memory_is_io(hwaddr phys_addr
)
2575 MemoryRegionSection
*section
;
2577 section
= phys_page_find(address_space_memory
.dispatch
,
2578 phys_addr
>> TARGET_PAGE_BITS
);
2580 return !(memory_region_is_ram(section
->mr
) ||
2581 memory_region_is_romd(section
->mr
));