/*
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <sys/types.h>

#include "qemu-common.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include "qemu.h"
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#endif
#include "exec/cpu-all.h"

#include "exec/cputlb.h"
#include "translate-all.h"

#include "exec/memory-internal.h"

//#define DEBUG_UNASSIGNED
//#define DEBUG_SUBPAGE
#if !defined(CONFIG_USER_ONLY)
static int in_migration;

RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;
DMAContext dma_context_memory;

MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
static MemoryRegion io_mem_subpage_ram;

#endif

CPUArchState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUArchState *, cpu_single_env);

/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */

#if !defined(CONFIG_USER_ONLY)

static MemoryRegionSection *phys_sections;
static unsigned phys_sections_nb, phys_sections_nb_alloc;
static uint16_t phys_section_unassigned;
static uint16_t phys_section_notdirty;
static uint16_t phys_section_rom;
static uint16_t phys_section_watch;

/* Simple allocator for PhysPageEntry nodes */
static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;

#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)

static void io_mem_init(void);
static void memory_map_init(void);
static void *qemu_safe_ram_ptr(ram_addr_t addr);

static MemoryRegion io_mem_watch;
#endif
#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(unsigned nodes)
{
    if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
        typedef PhysPageEntry Node[L2_SIZE];
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
                                      phys_map_nodes_nb + nodes);
        phys_map_nodes = g_renew(Node, phys_map_nodes,
                                 phys_map_nodes_nb_alloc);
    }
}

static uint16_t phys_map_node_alloc(void)
{
    unsigned i;
    uint16_t ret;

    ret = phys_map_nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != phys_map_nodes_nb_alloc);
    for (i = 0; i < L2_SIZE; ++i) {
        phys_map_nodes[ret][i].is_leaf = 0;
        phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

static void phys_map_nodes_reset(void)
{
    phys_map_nodes_nb = 0;
}
static void phys_page_set_level(PhysPageEntry *lp, hwaddr *index,
                                hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    hwaddr step = (hwaddr)1 << (level * L2_BITS);

    if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc();
        p = phys_map_nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < L2_SIZE; i++) {
                p[i].is_leaf = 1;
                p[i].ptr = phys_section_unassigned;
            }
        }
    } else {
        p = phys_map_nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];

    while (*nb && lp < &p[L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->is_leaf = true;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(3 * P_L2_LEVELS);

    phys_page_set_level(&d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}
MemoryRegionSection *phys_page_find(AddressSpaceDispatch *d, hwaddr index)
{
    PhysPageEntry lp = d->phys_map;
    PhysPageEntry *p;
    int i;
    uint16_t s_index = phys_section_unassigned;

    for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            goto not_found;
        }
        p = phys_map_nodes[lp.ptr];
        lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }

    s_index = lp.ptr;
not_found:
    return &phys_sections[s_index];
}
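
/* Illustrative sketch (not part of the original file): how the radix map
 * above is consumed.  A guest physical address is first reduced to a page
 * index, and phys_page_find() then walks the multi-level table down to the
 * MemoryRegionSection covering that page, falling back to the "unassigned"
 * section for unmapped addresses.  The dispatch pointer and address used
 * here are hypothetical.
 */
#if 0
static MemoryRegionSection *example_lookup(AddressSpaceDispatch *d, hwaddr addr)
{
    /* Strip the sub-page bits; the map is indexed by page number. */
    MemoryRegionSection *section = phys_page_find(d, addr >> TARGET_PAGE_BITS);

    /* Unmapped addresses come back as the unassigned section, never NULL. */
    if (section->mr == &io_mem_unassigned) {
        return NULL;
    }
    return section;
}
#endif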
bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_ram && mr != &io_mem_rom
        && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

#endif

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    qemu_mutex_init(&ram_list.mutex);
    memory_map_init();
    io_mem_init();
#endif
}
#if !defined(CONFIG_USER_ONLY)
static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu->env_ptr, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
#else
#define vmstate_cpu_common vmstate_dummy
#endif

CPUState *qemu_get_cpu(int index)
{
    CPUArchState *env = first_cpu;
    CPUState *cpu = NULL;

    while (env) {
        cpu = ENV_GET_CPU(env);
        if (cpu->cpu_index == index) {
            break;
        }
        env = env->next_cpu;
    }

    return env ? cpu : NULL;
}
void cpu_exec_init(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUArchState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    cpu->cpu_index = cpu_index;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
    assert(cc->vmsd == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}
#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc) |
            (pc & ~TARGET_PAGE_MASK));
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
}

int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len & (len - 1)) || (addr & ~len_mask) ||
            len == 0 || len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}
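
/* Illustrative sketch (not part of the original file): what the sanity check
 * above accepts.  Lengths must be a power of two no larger than a target
 * page, and the address must be aligned to that length.  The env pointer and
 * addresses are hypothetical.
 */
#if 0
static void example_watchpoints(CPUArchState *env)
{
    CPUWatchpoint *wp;

    /* OK: 4-byte watchpoint on a 4-byte aligned address. */
    cpu_watchpoint_insert(env, 0x1000, 4, BP_MEM_WRITE, &wp);

    /* Rejected: length 3 is not a power of two. */
    cpu_watchpoint_insert(env, 0x1000, 3, BP_MEM_WRITE, NULL);

    /* Rejected: address not aligned to the 8-byte length. */
    cpu_watchpoint_insert(env, 0x1004, 8, BP_MEM_READ, NULL);

    cpu_watchpoint_remove_by_ref(env, wp);
}
#endif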
/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif
/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUArchState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(env, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}

void cpu_reset_interrupt(CPUArchState *env, int mask)
{
    CPUState *cpu = ENV_GET_CPU(env);

    cpu->interrupt_request &= ~mask;
}

void cpu_exit(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);

    cpu->exit_request = 1;
    cpu->tcg_exit_req = 1;
}
void cpu_abort(CPUArchState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(env, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(env, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}
CPUArchState *cpu_copy(CPUArchState *env)
{
    CPUArchState *new_env = cpu_init(env->cpu_model_str);
    CPUArchState *next_cpu = new_env->next_cpu;
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    memcpy(new_env, env, sizeof(CPUArchState));

    /* Preserve chaining. */
    new_env->next_cpu = next_cpu;

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}
#if !defined(CONFIG_USER_ONLY)
static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
                                      uintptr_t length)
{
    uintptr_t start1;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (uintptr_t)qemu_safe_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
            != (end - 1) - start) {
        abort();
    }
    cpu_tlb_reset_dirty_all(start1, length);
}

/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    uintptr_t length;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    if (tcg_enabled()) {
        tlb_reset_dirty_range_all(start, end, length);
    }
}

static int cpu_physical_memory_set_dirty_tracking(int enable)
{
    int ret = 0;

    in_migration = enable;
    return ret;
}
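
/* Illustrative sketch (not part of the original file): how a dirty-tracking
 * client such as live migration typically consumes the bits managed above.
 * It scans the dirty bitmap page by page, copies pages that are marked
 * dirty, and then clears the migration dirty flag so further guest writes
 * are caught again.  The start/length values, the flag name and the
 * send_page() callback are hypothetical.
 */
#if 0
static void example_sync_dirty_pages(ram_addr_t start, ram_addr_t length,
                                     void (*send_page)(ram_addr_t))
{
    ram_addr_t addr;

    for (addr = start; addr < start + length; addr += TARGET_PAGE_SIZE) {
        if (cpu_physical_memory_get_dirty(addr, TARGET_PAGE_SIZE,
                                          MIGRATION_DIRTY_FLAG)) {
            send_page(addr);
            /* Both ends lie in the same RAM block, as required above. */
            cpu_physical_memory_reset_dirty(addr, addr + TARGET_PAGE_SIZE,
                                            MIGRATION_DIRTY_FLAG);
        }
    }
}
#endif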
hwaddr memory_region_section_get_iotlb(CPUArchState *env,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, paddr);
        if (!section->readonly) {
            iotlb |= phys_section_notdirty;
        } else {
            iotlb |= phys_section_rom;
        }
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
        iotlb = section - phys_sections;
        iotlb += memory_region_section_addr(section, paddr);
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = phys_section_watch + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}

#endif /* defined(CONFIG_USER_ONLY) */
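
/* Illustrative sketch (not part of the original file): the iotlb encoding
 * produced above.  For RAM the value carries a ram_addr plus a special
 * section index in its low bits; for MMIO it is a section index plus the
 * page's offset within the region, and the softmmu slow path turns it back
 * into a MemoryRegion with iotlb_to_region().  The iotlb value used here is
 * hypothetical.
 */
#if 0
static MemoryRegion *example_resolve_iotlb(hwaddr iotlb)
{
    /* iotlb_to_region() keeps only the section index stored in the
     * sub-page bits and looks it up in phys_sections[]. */
    return iotlb_to_region(iotlb);
}
#endif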
#if !defined(CONFIG_USER_ONLY)

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section);
static subpage_t *subpage_init(hwaddr base);
static void destroy_page_desc(uint16_t section_index)
{
    MemoryRegionSection *section = &phys_sections[section_index];
    MemoryRegion *mr = section->mr;

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        memory_region_destroy(&subpage->iomem);
        g_free(subpage);
    }
}
static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
{
    unsigned i;
    PhysPageEntry *p;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = phys_map_nodes[lp->ptr];
    for (i = 0; i < L2_SIZE; ++i) {
        if (!p[i].is_leaf) {
            destroy_l2_mapping(&p[i], level - 1);
        } else {
            destroy_page_desc(p[i].ptr);
        }
    }
    lp->is_leaf = 0;
    lp->ptr = PHYS_MAP_NODE_NIL;
}

static void destroy_all_mappings(AddressSpaceDispatch *d)
{
    destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
    phys_map_nodes_reset();
}

static uint16_t phys_section_add(MemoryRegionSection *section)
{
    if (phys_sections_nb == phys_sections_nb_alloc) {
        phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
        phys_sections = g_renew(MemoryRegionSection, phys_sections,
                                phys_sections_nb_alloc);
    }
    phys_sections[phys_sections_nb] = *section;
    return phys_sections_nb++;
}

static void phys_sections_clear(void)
{
    phys_sections_nb = 0;
}
static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d, base >> TARGET_PAGE_BITS);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = TARGET_PAGE_SIZE,
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(base);
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + section->size - 1;
    subpage_register(subpage, start, end, phys_section_add(section));
}


static void register_multipage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = section->size;
    hwaddr addr;
    uint16_t section_index = phys_section_add(section);

    assert(size);

    addr = start_addr;
    phys_page_set(d, addr >> TARGET_PAGE_BITS, size >> TARGET_PAGE_BITS,
                  section_index);
}
static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
    MemoryRegionSection now = *section, remain = *section;

    if ((now.offset_within_address_space & ~TARGET_PAGE_MASK)
        || (now.size < TARGET_PAGE_SIZE)) {
        now.size = MIN(TARGET_PAGE_ALIGN(now.offset_within_address_space)
                       - now.offset_within_address_space,
                       now.size);
        register_subpage(d, &now);
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
    }
    while (remain.size >= TARGET_PAGE_SIZE) {
        now = remain;
        if (remain.offset_within_region & ~TARGET_PAGE_MASK) {
            now.size = TARGET_PAGE_SIZE;
            register_subpage(d, &now);
        } else {
            now.size &= TARGET_PAGE_MASK;
            register_multipage(d, &now);
        }
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
    }
    now = remain;
    if (now.size) {
        register_subpage(d, &now);
    }
}

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}
#if defined(__linux__) && !defined(TARGET_S390X)

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC       0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}

static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    filename = g_strdup_printf("%s/qemu_back_mem.XXXXXX", path);

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        g_free(filename);
        return NULL;
    }
    unlink(filename);
    g_free(filename);

    memory = (memory+hpagesize-1) & ~(hpagesize-1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return NULL;
    }
    block->fd = fd;
    return area;
}
#endif
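
/* Illustrative sketch (not part of the original file): the flag selection the
 * MAP_POPULATE comment above describes, in isolation.  With preallocation the
 * mapping is made MAP_SHARED so that MAP_POPULATE really faults in every
 * page; otherwise a plain MAP_PRIVATE mapping is used.  The length and file
 * descriptor are hypothetical.
 */
#if 0
#include <sys/mman.h>

static void *example_hugepage_map(int fd, size_t len, bool prealloc)
{
    int flags = prealloc ? (MAP_POPULATE | MAP_SHARED) : MAP_PRIVATE;

    return mmap(NULL, len, PROT_READ | PROT_WRITE, flags, fd, 0);
}
#endif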
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    if (QTAILQ_EMPTY(&ram_list.blocks))
        return 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->length;

        QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}

ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next)
        last = MAX(last, block->offset + block->length);

    return last;
}
static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
{
    int ret;
    QemuOpts *machine_opts;

    /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
    machine_opts = qemu_opts_find(qemu_find_opts("machine"), 0);
    if (machine_opts &&
        !qemu_opt_get_bool(machine_opts, "dump-guest-core", true)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                            "but dump_guest_core=off specified\n");
        }
    }
}

void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block, *block;

    new_block = NULL;
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
            new_block = block;
            break;
        }
    }
    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev) {
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
    qemu_mutex_unlock_ramlist();
}

static int memory_try_enable_merging(void *addr, size_t len)
{
    QemuOpts *opts;

    opts = qemu_opts_find(qemu_find_opts("machine"), 0);
    if (opts && !qemu_opt_get_bool(opts, "mem-merge", true)) {
        /* disabled by the user */
        return 0;
    }

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
}
ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr)
{
    RAMBlock *block, *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    new_block->offset = find_ram_offset(size);
    if (host) {
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
    } else {
        if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
            new_block->host = file_ram_alloc(new_block, size, mem_path);
            if (!new_block->host) {
                new_block->host = qemu_vmalloc(size);
                memory_try_enable_merging(new_block->host, size);
            }
#else
            fprintf(stderr, "-mem-path option unsupported\n");
            exit(1);
#endif
        } else {
            if (xen_enabled()) {
                xen_ram_alloc(new_block->offset, size, mr);
            } else if (kvm_enabled()) {
                /* some s390/kvm configurations have special constraints */
                new_block->host = kvm_vmalloc(size);
            } else {
                new_block->host = qemu_vmalloc(size);
            }
            memory_try_enable_merging(new_block->host, size);
        }
    }
    new_block->length = size;

    /* Keep the list sorted from biggest to smallest block.  */
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->length < new_block->length) {
            break;
        }
    }
    if (block) {
        QTAILQ_INSERT_BEFORE(block, new_block, next);
    } else {
        QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
    }
    ram_list.mru_block = NULL;

    qemu_mutex_unlock_ramlist();

    ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
                                    last_ram_offset() >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0, size >> TARGET_PAGE_BITS);
    cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);

    qemu_ram_setup_dump(new_block->host, size);
    qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}

ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
{
    return qemu_ram_alloc_from_ptr(size, NULL, mr);
}
void qemu_ram_free_from_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            g_free(block);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}

void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
                if (block->fd) {
                    munmap(block->host, block->length);
                    close(block->fd);
                } else {
                    qemu_vfree(block->host);
                }
#else
                abort();
#endif
            } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                munmap(block->host, block->length);
#else
                if (xen_enabled()) {
                    xen_invalidate_map_cache_entry(block->host);
                } else {
                    qemu_vfree(block->host);
                }
#endif
            }
            g_free(block);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}
#ifndef _WIN32
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->length) {
            vaddr = block->host + offset;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else {
                flags = MAP_FIXED;
                munmap(vaddr, length);
                if (mem_path) {
#if defined(__linux__) && !defined(TARGET_S390X)
                    if (block->fd) {
#ifdef MAP_POPULATE
                        flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
                            MAP_PRIVATE;
#else
                        flags |= MAP_PRIVATE;
#endif
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, block->fd, offset);
                    } else {
                        flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, -1, 0);
                    }
#else
                    abort();
#endif
                } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                    flags |= MAP_SHARED | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
                                flags, -1, 0);
#else
                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
#endif
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                memory_try_enable_merging(vaddr, length);
                qemu_ram_setup_dump(vaddr, length);
            }
            return;
        }
    }
}
#endif /* !_WIN32 */
/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    /* The list is protected by the iothread lock here.  */
    block = ram_list.mru_block;
    if (block && addr - block->offset < block->length) {
        goto found;
    }
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    ram_list.mru_block = block;
    if (xen_enabled()) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map until the end of the page.
         */
        if (block->offset == 0) {
            return xen_map_cache(addr, 0, 0);
        } else if (block->host == NULL) {
            block->host =
                xen_map_cache(block->offset, block->length, 1);
        }
    }
    return block->host + (addr - block->offset);
}
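
/* Illustrative sketch (not part of the original file): the access style the
 * comment above recommends.  Device code doing guest-directed ("general
 * purpose") DMA should go through cpu_physical_memory_rw(), which handles
 * MMIO, dirty tracking and block boundaries, instead of dereferencing a raw
 * qemu_get_ram_ptr() pointer.  The guest address and buffer are hypothetical.
 */
#if 0
static void example_dma_write(hwaddr guest_addr, const uint8_t *data, int len)
{
    /* Safe for arbitrary guest addresses, including MMIO and ROM. */
    cpu_physical_memory_rw(guest_addr, (uint8_t *)data, len, 1);
}
#endif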
/* Return a host pointer to ram allocated with qemu_ram_alloc.  Same as
 * qemu_get_ram_ptr but do not touch ram_list.mru_block.
 *
 * ??? Is this still necessary?
 */
static void *qemu_safe_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    /* The list is protected by the iothread lock here.  */
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            if (xen_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                    block->host =
                        xen_map_cache(block->offset, block->length, 1);
                }
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}

/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
 * but takes a size argument */
static void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
{
    if (*size == 0) {
        return NULL;
    }
    if (xen_enabled()) {
        return xen_map_cache(addr, *size, 1);
    } else {
        RAMBlock *block;

        QTAILQ_FOREACH(block, &ram_list.blocks, next) {
            if (addr - block->offset < block->length) {
                if (addr - block->offset + *size > block->length)
                    *size = block->length - addr + block->offset;
                return block->host + (addr - block->offset);
            }
        }

        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
        abort();
    }
}

void qemu_put_ram_ptr(void *addr)
{
    trace_qemu_put_ram_ptr(addr);
}
int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_enabled()) {
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
        return 0;
    }

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        /* This case can happen when the block is not mapped. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->length) {
            *ram_addr = block->offset + (host - block->host);
            return 0;
        }
    }

    return -1;
}

/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }
    return ram_addr;
}
static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
                                    unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
#endif
    return 0;
}

static void unassigned_mem_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
#endif
}

static const MemoryRegionOps unassigned_mem_ops = {
    .read = unassigned_mem_read,
    .write = unassigned_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t error_mem_read(void *opaque, hwaddr addr,
                               unsigned size)
{
    abort();
}

static void error_mem_write(void *opaque, hwaddr addr,
                            uint64_t value, unsigned size)
{
    abort();
}

static const MemoryRegionOps error_mem_ops = {
    .read = error_mem_read,
    .write = error_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static const MemoryRegionOps rom_mem_ops = {
    .read = error_mem_read,
    .write = unassigned_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
                               uint64_t val, unsigned size)
{
    int dirty_flags;

    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, size);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    switch (size) {
    case 1:
        stb_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 2:
        stw_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 4:
        stl_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    default:
        abort();
    }
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static const MemoryRegionOps notdirty_mem_ops = {
    .read = error_mem_read,
    .write = notdirty_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
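
/* Illustrative sketch (not part of the original file): the life cycle the
 * comments above imply.  A RAM page that holds translated code keeps
 * CODE_DIRTY_FLAG clear, so its TLB entries point at io_mem_notdirty and
 * every store lands in notdirty_mem_write(), which invalidates the TBs,
 * performs the store and marks the page fully dirty; only then is the TLB
 * entry switched back to a direct RAM mapping.  The ram_addr used here is
 * hypothetical.
 */
#if 0
static bool example_page_needs_slow_writes(ram_addr_t ram_addr)
{
    int flags = cpu_physical_memory_get_dirty_flags(ram_addr);

    /* CODE_DIRTY_FLAG clear means translated code may live in this page. */
    return !(flags & CODE_DIRTY_FLAG);
}
#endif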
/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUArchState *env = cpu_single_env;
    target_ulong pc, cs_base;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb_check_watchpoint(env);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                    cpu_loop_exit(env);
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                    cpu_resume_from_signal(env, NULL);
                }
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   load/store functions. */
static uint64_t watch_mem_read(void *opaque, hwaddr addr,
                               unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
    switch (size) {
    case 1: return ldub_phys(addr);
    case 2: return lduw_phys(addr);
    case 4: return ldl_phys(addr);
    default: abort();
    }
}

static void watch_mem_write(void *opaque, hwaddr addr,
                            uint64_t val, unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
    switch (size) {
    case 1:
        stb_phys(addr, val);
        break;
    case 2:
        stw_phys(addr, val);
        break;
    case 4:
        stl_phys(addr, val);
        break;
    default: abort();
    }
}

static const MemoryRegionOps watch_mem_ops = {
    .read = watch_mem_read,
    .write = watch_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
static uint64_t subpage_read(void *opaque, hwaddr addr,
                             unsigned len)
{
    subpage_t *mmio = opaque;
    unsigned int idx = SUBPAGE_IDX(addr);
    MemoryRegionSection *section;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif

    section = &phys_sections[mmio->sub_section[idx]];
    addr += mmio->base;
    addr -= section->offset_within_address_space;
    addr += section->offset_within_region;
    return io_mem_read(section->mr, addr, len);
}

static void subpage_write(void *opaque, hwaddr addr,
                          uint64_t value, unsigned len)
{
    subpage_t *mmio = opaque;
    unsigned int idx = SUBPAGE_IDX(addr);
    MemoryRegionSection *section;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx
           " idx %d value %"PRIx64"\n",
           __func__, mmio, len, addr, idx, value);
#endif

    section = &phys_sections[mmio->sub_section[idx]];
    addr += mmio->base;
    addr -= section->offset_within_address_space;
    addr += section->offset_within_region;
    io_mem_write(section->mr, addr, value, len);
}

static const MemoryRegionOps subpage_ops = {
    .read = subpage_read,
    .write = subpage_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
static uint64_t subpage_ram_read(void *opaque, hwaddr addr,
                                 unsigned size)
{
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    switch (size) {
    case 1: return ldub_p(ptr);
    case 2: return lduw_p(ptr);
    case 4: return ldl_p(ptr);
    default: abort();
    }
}

static void subpage_ram_write(void *opaque, hwaddr addr,
                              uint64_t value, unsigned size)
{
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    switch (size) {
    case 1: return stb_p(ptr, value);
    case 2: return stw_p(ptr, value);
    case 4: return stl_p(ptr, value);
    default: abort();
    }
}

static const MemoryRegionOps subpage_ram_ops = {
    .read = subpage_ram_read,
    .write = subpage_ram_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    if (memory_region_is_ram(phys_sections[section].mr)) {
        MemoryRegionSection new_section = phys_sections[section];
        new_section.mr = &io_mem_subpage_ram;
        section = phys_section_add(&new_section);
    }
    for (; idx <= eidx; idx++) {
        mmio->sub_section[idx] = section;
    }

    return 0;
}

static subpage_t *subpage_init(hwaddr base)
{
    subpage_t *mmio;

    mmio = g_malloc0(sizeof(subpage_t));

    mmio->base = base;
    memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
                          "subpage", TARGET_PAGE_SIZE);
    mmio->iomem.subpage = true;
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned);

    return mmio;
}
static uint16_t dummy_section(MemoryRegion *mr)
{
    MemoryRegionSection section = {
        .mr = mr,
        .offset_within_address_space = 0,
        .offset_within_region = 0,
        .size = UINT64_MAX,
    };

    return phys_section_add(&section);
}

MemoryRegion *iotlb_to_region(hwaddr index)
{
    return phys_sections[index & ~TARGET_PAGE_MASK].mr;
}
static void io_mem_init(void)
{
    memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
    memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
    memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
                          "unassigned", UINT64_MAX);
    memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
                          "notdirty", UINT64_MAX);
    memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
                          "subpage-ram", UINT64_MAX);
    memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
                          "watch", UINT64_MAX);
}
static void mem_begin(MemoryListener *listener)
{
    AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);

    destroy_all_mappings(d);
    d->phys_map.ptr = PHYS_MAP_NODE_NIL;
}

static void core_begin(MemoryListener *listener)
{
    phys_sections_clear();
    phys_section_unassigned = dummy_section(&io_mem_unassigned);
    phys_section_notdirty = dummy_section(&io_mem_notdirty);
    phys_section_rom = dummy_section(&io_mem_rom);
    phys_section_watch = dummy_section(&io_mem_watch);
}

static void tcg_commit(MemoryListener *listener)
{
    CPUArchState *env;

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}

static void core_log_global_start(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(1);
}

static void core_log_global_stop(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(0);
}
static void io_region_add(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    MemoryRegionIORange *mrio = g_new(MemoryRegionIORange, 1);

    mrio->mr = section->mr;
    mrio->offset = section->offset_within_region;
    iorange_init(&mrio->iorange, &memory_region_iorange_ops,
                 section->offset_within_address_space, section->size);
    ioport_register(&mrio->iorange);
}

static void io_region_del(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    isa_unassign_ioport(section->offset_within_address_space, section->size);
}

static MemoryListener core_memory_listener = {
    .begin = core_begin,
    .log_global_start = core_log_global_start,
    .log_global_stop = core_log_global_stop,
};

static MemoryListener io_memory_listener = {
    .region_add = io_region_add,
    .region_del = io_region_del,
};

static MemoryListener tcg_memory_listener = {
    .commit = tcg_commit,
};
void address_space_init_dispatch(AddressSpace *as)
{
    AddressSpaceDispatch *d = g_new(AddressSpaceDispatch, 1);

    d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };
    d->listener = (MemoryListener) {
        .begin = mem_begin,
        .region_add = mem_add,
        .region_nop = mem_add,
    };
    as->dispatch = d;
    memory_listener_register(&d->listener, as);
}

void address_space_destroy_dispatch(AddressSpace *as)
{
    AddressSpaceDispatch *d = as->dispatch;

    memory_listener_unregister(&d->listener);
    destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
    g_free(d);
    as->dispatch = NULL;
}
static void memory_map_init(void)
{
    system_memory = g_malloc(sizeof(*system_memory));
    memory_region_init(system_memory, "system", INT64_MAX);
    address_space_init(&address_space_memory, system_memory);
    address_space_memory.name = "memory";

    system_io = g_malloc(sizeof(*system_io));
    memory_region_init(system_io, "io", 65536);
    address_space_init(&address_space_io, system_io);
    address_space_io.name = "I/O";

    memory_listener_register(&core_memory_listener, &address_space_memory);
    memory_listener_register(&io_memory_listener, &address_space_io);
    memory_listener_register(&tcg_memory_listener, &address_space_memory);

    dma_context_init(&dma_context_memory, &address_space_memory,
                     NULL, NULL, NULL);
}

MemoryRegion *get_system_memory(void)
{
    return system_memory;
}

MemoryRegion *get_system_io(void)
{
    return system_io;
}

#endif /* !defined(CONFIG_USER_ONLY) */
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

#else
static void invalidate_and_set_dirty(hwaddr addr,
                                     hwaddr length)
{
    if (!cpu_physical_memory_is_dirty(addr)) {
        /* invalidate code */
        tb_invalidate_phys_page_range(addr, addr + length, 0);
        /* set dirty bit */
        cpu_physical_memory_set_dirty_flags(addr, (0xff & ~CODE_DIRTY_FLAG));
    }
    xen_modified_memory(addr, length);
}
void address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
                      int len, bool is_write)
{
    AddressSpaceDispatch *d = as->dispatch;
    int l;
    uint8_t *ptr;
    uint32_t val;
    hwaddr page;
    MemoryRegionSection *section;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        section = phys_page_find(d, page >> TARGET_PAGE_BITS);

        if (is_write) {
            if (!memory_region_is_ram(section->mr)) {
                hwaddr addr1;
                addr1 = memory_region_section_addr(section, addr);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write(section->mr, addr1, val, 4);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write(section->mr, addr1, val, 2);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write(section->mr, addr1, val, 1);
                    l = 1;
                }
            } else if (!section->readonly) {
                ram_addr_t addr1;
                addr1 = memory_region_get_ram_addr(section->mr)
                    + memory_region_section_addr(section, addr);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(addr1, l);
                qemu_put_ram_ptr(ptr);
            }
        } else {
            if (!(memory_region_is_ram(section->mr) ||
                  memory_region_is_romd(section->mr))) {
                hwaddr addr1;
                /* I/O case */
                addr1 = memory_region_section_addr(section, addr);
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read(section->mr, addr1, 4);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read(section->mr, addr1, 2);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read(section->mr, addr1, 1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(section->mr->ram_addr
                                       + memory_region_section_addr(section,
                                                                    addr));
                memcpy(buf, ptr, l);
                qemu_put_ram_ptr(ptr);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

void address_space_write(AddressSpace *as, hwaddr addr,
                         const uint8_t *buf, int len)
{
    address_space_rw(as, addr, (uint8_t *)buf, len, true);
}

/**
 * address_space_read: read from an address space.
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @buf: buffer with the data transferred
 */
void address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
{
    address_space_rw(as, addr, buf, len, false);
}
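
/* Illustrative sketch (not part of the original file): reading and writing
 * guest-physical memory through the system address space with the helpers
 * above.  The address and buffer contents are hypothetical.
 */
#if 0
static void example_address_space_access(void)
{
    uint8_t buf[4] = { 0x12, 0x34, 0x56, 0x78 };

    /* Write four bytes at guest-physical 0x1000, then read them back. */
    address_space_write(&address_space_memory, 0x1000, buf, sizeof(buf));
    address_space_read(&address_space_memory, 0x1000, buf, sizeof(buf));
}
#endif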
void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
                            int len, int is_write)
{
    return address_space_rw(&address_space_memory, addr, buf, len, is_write);
}

/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(hwaddr addr,
                                   const uint8_t *buf, int len)
{
    AddressSpaceDispatch *d = address_space_memory.dispatch;
    int l;
    uint8_t *ptr;
    hwaddr page;
    MemoryRegionSection *section;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        section = phys_page_find(d, page >> TARGET_PAGE_BITS);

        if (!(memory_region_is_ram(section->mr) ||
              memory_region_is_romd(section->mr))) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = memory_region_get_ram_addr(section->mr)
                + memory_region_section_addr(section, addr);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
            invalidate_and_set_dirty(addr1, l);
            qemu_put_ram_ptr(ptr);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
typedef struct {
    void *buffer;
    hwaddr addr;
    hwaddr len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = g_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

static void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    QLIST_REMOVE(client, link);
    g_free(client);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
    }
}
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *address_space_map(AddressSpace *as,
                        hwaddr addr,
                        hwaddr *plen,
                        bool is_write)
{
    AddressSpaceDispatch *d = as->dispatch;
    hwaddr len = *plen;
    hwaddr todo = 0;
    int l;
    hwaddr page;
    MemoryRegionSection *section;
    ram_addr_t raddr = RAM_ADDR_MAX;
    ram_addr_t rlen;
    void *ret;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        section = phys_page_find(d, page >> TARGET_PAGE_BITS);

        if (!(memory_region_is_ram(section->mr) && !section->readonly)) {
            if (todo || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                address_space_read(as, addr, bounce.buffer, l);
            }

            *plen = l;
            return bounce.buffer;
        }
        if (!todo) {
            raddr = memory_region_get_ram_addr(section->mr)
                + memory_region_section_addr(section, addr);
        }

        len -= l;
        addr += l;
        todo += l;
    }
    rlen = todo;
    ret = qemu_ram_ptr_length(raddr, &rlen);
    *plen = rlen;
    return ret;
}
/* Unmaps a memory region previously mapped by address_space_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         int is_write, hwaddr access_len)
{
    if (buffer != bounce.buffer) {
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                invalidate_and_set_dirty(addr1, l);
                addr1 += l;
                access_len -= l;
            }
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        return;
    }
    if (is_write) {
        address_space_write(as, bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}
void *cpu_physical_memory_map(hwaddr addr,
                              hwaddr *plen,
                              int is_write)
{
    return address_space_map(&address_space_memory, addr, plen, is_write);
}

void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                               int is_write, hwaddr access_len)
{
    return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
}
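
/* Illustrative sketch (not part of the original file): the map/unmap pattern
 * the comments above describe.  The mapping may be shortened (or fall back
 * to the single bounce buffer), so callers must check *plen and always unmap
 * with the length they actually used.  The guest address and fill pattern
 * are hypothetical.
 */
#if 0
static void example_map_and_fill(hwaddr guest_addr, hwaddr len)
{
    hwaddr plen = len;
    void *host = cpu_physical_memory_map(guest_addr, &plen, 1 /* is_write */);

    if (!host) {
        /* Resources exhausted; retry later via cpu_register_map_client(). */
        return;
    }
    memset(host, 0, plen);               /* only plen bytes are mapped */
    cpu_physical_memory_unmap(host, plen, 1, plen);
}
#endif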
/* warning: addr must be aligned */
static inline uint32_t ldl_phys_internal(hwaddr addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint32_t val;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);

    if (!(memory_region_is_ram(section->mr) ||
          memory_region_is_romd(section->mr))) {
        /* I/O case */
        addr = memory_region_section_addr(section, addr);
        val = io_mem_read(section->mr, addr, 4);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t ldl_phys(hwaddr addr)
{
    return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t ldl_le_phys(hwaddr addr)
{
    return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t ldl_be_phys(hwaddr addr)
{
    return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
}
/* warning: addr must be aligned */
static inline uint64_t ldq_phys_internal(hwaddr addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);

    if (!(memory_region_is_ram(section->mr) ||
          memory_region_is_romd(section->mr))) {
        /* I/O case */
        addr = memory_region_section_addr(section, addr);

        /* XXX This is broken when device endian != cpu endian.
               Fix and add "endian" variable check */
#ifdef TARGET_WORDS_BIGENDIAN
        val = io_mem_read(section->mr, addr, 4) << 32;
        val |= io_mem_read(section->mr, addr + 4, 4);
#else
        val = io_mem_read(section->mr, addr, 4);
        val |= io_mem_read(section->mr, addr + 4, 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
    }
    return val;
}

uint64_t ldq_phys(hwaddr addr)
{
    return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint64_t ldq_le_phys(hwaddr addr)
{
    return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint64_t ldq_be_phys(hwaddr addr)
{
    return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
uint32_t ldub_phys(hwaddr addr)
{
    uint8_t val;

    cpu_physical_memory_read(addr, &val, 1);
    return val;
}
/* warning: addr must be aligned */
static inline uint32_t lduw_phys_internal(hwaddr addr,
                                          enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);

    if (!(memory_region_is_ram(section->mr) ||
          memory_region_is_romd(section->mr))) {
        /* I/O case */
        addr = memory_region_section_addr(section, addr);
        val = io_mem_read(section->mr, addr, 2);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t lduw_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t lduw_le_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t lduw_be_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
}
/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(hwaddr addr, uint32_t val)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
        io_mem_write(section->mr, addr, val, 4);
    } else {
        unsigned long addr1 = (memory_region_get_ram_addr(section->mr)
                               & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, addr);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}
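
/* Illustrative sketch (not part of the original file): the use case the
 * comment above hints at.  When target MMU emulation updates an
 * accessed/dirty bit inside a guest page-table entry, it can use
 * stl_phys_notdirty() so that the write itself does not mark the page dirty
 * or invalidate translated code covering it.  The PTE address and bit are
 * hypothetical.
 */
#if 0
static void example_set_pte_dirty_bit(hwaddr pte_addr, uint32_t dirty_bit)
{
    uint32_t pte = ldl_phys(pte_addr);

    if (!(pte & dirty_bit)) {
        stl_phys_notdirty(pte_addr, pte | dirty_bit);
    }
}
#endif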
void stq_phys_notdirty(hwaddr addr, uint64_t val)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write(section->mr, addr, val >> 32, 4);
        io_mem_write(section->mr, addr + 4, (uint32_t)val, 4);
#else
        io_mem_write(section->mr, addr, (uint32_t)val, 4);
        io_mem_write(section->mr, addr + 4, val >> 32, 4);
#endif
    } else {
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        stq_p(ptr, val);
    }
}
/* warning: addr must be aligned */
static inline void stl_phys_internal(hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        io_mem_write(section->mr, addr, val, 4);
    } else {
        unsigned long addr1;
        addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, addr);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 4);
    }
}

void stl_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stl_le_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stl_be_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stb_phys(hwaddr addr, uint32_t val)
{
    uint8_t v = val;

    cpu_physical_memory_write(addr, &v, 1);
}
/* warning: addr must be aligned */
static inline void stw_phys_internal(hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        io_mem_write(section->mr, addr, val, 2);
    } else {
        unsigned long addr1;
        addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, addr);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 2);
    }
}

void stw_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stw_le_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stw_be_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}
/* XXX: optimize */
void stq_phys(hwaddr addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_le_phys(hwaddr addr, uint64_t val)
{
    val = cpu_to_le64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_be_phys(hwaddr addr, uint64_t val)
{
    val = cpu_to_be64(val);
    cpu_physical_memory_write(addr, &val, 8);
}
/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
#endif
#if !defined(CONFIG_USER_ONLY)

/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool virtio_is_big_endian(void);
bool virtio_is_big_endian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}

#endif

#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch,
                             phys_addr >> TARGET_PAGE_BITS);

    return !(memory_region_is_ram(section->mr) ||
             memory_region_is_romd(section->mr));
}
#endif