/*
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <sys/types.h>

#include "qemu-common.h"

#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#endif
#include "exec/cpu-all.h"

#include "exec/cputlb.h"
#include "translate-all.h"

#include "exec/memory-internal.h"

//#define DEBUG_UNASSIGNED
//#define DEBUG_SUBPAGE
#if !defined(CONFIG_USER_ONLY)
static int in_migration;

RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;
DMAContext dma_context_memory;

MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
static MemoryRegion io_mem_subpage_ram;

#endif

CPUArchState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUArchState *, cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */

#if !defined(CONFIG_USER_ONLY)

static MemoryRegionSection *phys_sections;
static unsigned phys_sections_nb, phys_sections_nb_alloc;
static uint16_t phys_section_unassigned;
static uint16_t phys_section_notdirty;
static uint16_t phys_section_rom;
static uint16_t phys_section_watch;

/* Simple allocator for PhysPageEntry nodes */
static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;

#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)

static void io_mem_init(void);
static void memory_map_init(void);
static void *qemu_safe_ram_ptr(ram_addr_t addr);

static MemoryRegion io_mem_watch;
#endif

#if !defined(CONFIG_USER_ONLY)
static void phys_map_node_reserve(unsigned nodes)
{
    if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
        typedef PhysPageEntry Node[L2_SIZE];
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
                                      phys_map_nodes_nb + nodes);
        phys_map_nodes = g_renew(Node, phys_map_nodes,
                                 phys_map_nodes_nb_alloc);
    }
}

static uint16_t phys_map_node_alloc(void)
{
    unsigned i;
    uint16_t ret;

    ret = phys_map_nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != phys_map_nodes_nb_alloc);
    for (i = 0; i < L2_SIZE; ++i) {
        phys_map_nodes[ret][i].is_leaf = 0;
        phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

static void phys_map_nodes_reset(void)
{
    phys_map_nodes_nb = 0;
}
static void phys_page_set_level(PhysPageEntry *lp, hwaddr *index,
                                hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    hwaddr step = (hwaddr)1 << (level * L2_BITS);

    if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc();
        p = phys_map_nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < L2_SIZE; i++) {
                p[i].is_leaf = 1;
                p[i].ptr = phys_section_unassigned;
            }
        }
    } else {
        p = phys_map_nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];

    while (*nb && lp < &p[L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->is_leaf = true;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(3 * P_L2_LEVELS);

    phys_page_set_level(&d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}
MemoryRegionSection *phys_page_find(AddressSpaceDispatch *d, hwaddr index)
{
    PhysPageEntry lp = d->phys_map;
    PhysPageEntry *p;
    int i;
    uint16_t s_index = phys_section_unassigned;

    for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            goto not_found;
        }
        p = phys_map_nodes[lp.ptr];
        lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }

    s_index = lp.ptr;
not_found:
    return &phys_sections[s_index];
}
bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_ram && mr != &io_mem_rom
        && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    qemu_mutex_init(&ram_list.mutex);
#endif
}
#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu->env_ptr, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
#else
#define vmstate_cpu_common vmstate_dummy
#endif
CPUState *qemu_get_cpu(int index)
{
    CPUArchState *env = first_cpu;
    CPUState *cpu = NULL;

    while (env) {
        cpu = ENV_GET_CPU(env);
        if (cpu->cpu_index == index) {
            break;
        }
        env = env->next_cpu;
    }

    return env ? cpu : NULL;
}
void cpu_exec_init(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUArchState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    cpu->cpu_index = cpu_index;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
    assert(cc->vmsd == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc) |
            (pc & ~TARGET_PAGE_MASK));
}
#endif
#endif /* TARGET_HAS_ICE */
#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
}

int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len & (len - 1)) || (addr & ~len_mask) ||
            len == 0 || len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}
/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUArchState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(env, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}

void cpu_exit(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);

    cpu->exit_request = 1;
    cpu->tcg_exit_req = 1;
}

void cpu_abort(CPUArchState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(env, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        log_cpu_state(env, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}
CPUArchState *cpu_copy(CPUArchState *env)
{
    CPUArchState *new_env = cpu_init(env->cpu_model_str);
    CPUArchState *next_cpu = new_env->next_cpu;
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    memcpy(new_env, env, sizeof(CPUArchState));

    /* Preserve chaining. */
    new_env->next_cpu = next_cpu;

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}
#if !defined(CONFIG_USER_ONLY)
static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
                                      uintptr_t length)
{
    uintptr_t start1;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (uintptr_t)qemu_safe_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
            != (end - 1) - start) {
        abort();
    }
    cpu_tlb_reset_dirty_all(start1, length);
}
/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    uintptr_t length;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    if (tcg_enabled()) {
        tlb_reset_dirty_range_all(start, end, length);
    }
}
static int cpu_physical_memory_set_dirty_tracking(int enable)
{
    int ret = 0;

    in_migration = enable;
    return ret;
}
hwaddr memory_region_section_get_iotlb(CPUArchState *env,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, paddr);
        if (!section->readonly) {
            iotlb |= phys_section_notdirty;
        } else {
            iotlb |= phys_section_rom;
        }
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
        iotlb = section - phys_sections;
        iotlb += memory_region_section_addr(section, paddr);
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = phys_section_watch + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* defined(CONFIG_USER_ONLY) */
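
/*
 * Note (added for clarity, not part of the original file): the iotlb value
 * computed above is overloaded.  For RAM it is a ram_addr_t with
 * phys_section_notdirty or phys_section_rom OR-ed into the page-offset
 * bits; for MMIO it carries the phys_sections index in the low bits plus
 * the page-aligned offset, which iotlb_to_region() later decodes with
 * (index & ~TARGET_PAGE_MASK).
 */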
#if !defined(CONFIG_USER_ONLY)

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section);
static subpage_t *subpage_init(hwaddr base);
static void destroy_page_desc(uint16_t section_index)
{
    MemoryRegionSection *section = &phys_sections[section_index];
    MemoryRegion *mr = section->mr;

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        memory_region_destroy(&subpage->iomem);
        g_free(subpage);
    }
}
static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
{
    unsigned i;
    PhysPageEntry *p;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = phys_map_nodes[lp->ptr];
    for (i = 0; i < L2_SIZE; ++i) {
        if (!p[i].is_leaf) {
            destroy_l2_mapping(&p[i], level - 1);
        } else {
            destroy_page_desc(p[i].ptr);
        }
    }
    lp->ptr = PHYS_MAP_NODE_NIL;
}

static void destroy_all_mappings(AddressSpaceDispatch *d)
{
    destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
    phys_map_nodes_reset();
}
static uint16_t phys_section_add(MemoryRegionSection *section)
{
    if (phys_sections_nb == phys_sections_nb_alloc) {
        phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
        phys_sections = g_renew(MemoryRegionSection, phys_sections,
                                phys_sections_nb_alloc);
    }
    phys_sections[phys_sections_nb] = *section;
    return phys_sections_nb++;
}

static void phys_sections_clear(void)
{
    phys_sections_nb = 0;
}
static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d, base >> TARGET_PAGE_BITS);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = TARGET_PAGE_SIZE,
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(base);
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + section->size - 1;
    subpage_register(subpage, start, end, phys_section_add(section));
}
static void register_multipage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = section->size;
    hwaddr addr;
    uint16_t section_index = phys_section_add(section);

    addr = start_addr;
    phys_page_set(d, addr >> TARGET_PAGE_BITS, size >> TARGET_PAGE_BITS,
                  section_index);
}
static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
    MemoryRegionSection now = *section, remain = *section;

    if ((now.offset_within_address_space & ~TARGET_PAGE_MASK)
        || (now.size < TARGET_PAGE_SIZE)) {
        now.size = MIN(TARGET_PAGE_ALIGN(now.offset_within_address_space)
                       - now.offset_within_address_space,
                       now.size);
        register_subpage(d, &now);
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
    }
    while (remain.size >= TARGET_PAGE_SIZE) {
        now = remain;
        if (remain.offset_within_region & ~TARGET_PAGE_MASK) {
            now.size = TARGET_PAGE_SIZE;
            register_subpage(d, &now);
        } else {
            now.size &= TARGET_PAGE_MASK;
            register_multipage(d, &now);
        }
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
    }
    now = remain;
    if (now.size) {
        register_subpage(d, &now);
    }
}
void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}
#if defined(__linux__) && !defined(TARGET_S390X)

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC       0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}
static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    char *sanitized_name;
    char *c;
    void *area;
    int fd;
    int flags;
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    /* Make name safe to use with mkstemp by replacing '/' with '_'. */
    sanitized_name = g_strdup(block->mr->name);
    for (c = sanitized_name; *c != '\0'; c++) {
        if (*c == '/')
            *c = '_';
    }

    filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
                               sanitized_name);
    g_free(sanitized_name);

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        g_free(filename);
        return NULL;
    }
    g_free(filename);

    memory = (memory + hpagesize - 1) & ~(hpagesize - 1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
#ifdef MAP_POPULATE
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return NULL;
    }

    block->fd = fd;
    return area;
}
#endif
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    assert(size != 0); /* it would hand out same offset multiple times */

    if (QTAILQ_EMPTY(&ram_list.blocks))
        return 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->length;

        QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}
last_ram_offset(void)
963 QTAILQ_FOREACH(block
, &ram_list
.blocks
, next
)
964 last
= MAX(last
, block
->offset
+ block
->length
);
static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
{
    int ret;
    QemuOpts *machine_opts;

    /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
    machine_opts = qemu_opts_find(qemu_find_opts("machine"), 0);
    if (machine_opts &&
        !qemu_opt_get_bool(machine_opts, "dump-guest-core", true)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                            "but dump_guest_core=off specified\n");
        }
    }
}
void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block, *block;

    new_block = NULL;
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
            new_block = block;
            break;
        }
    }
    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev) {
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
    qemu_mutex_unlock_ramlist();
}
static int memory_try_enable_merging(void *addr, size_t len)
{
    QemuOpts *opts;

    opts = qemu_opts_find(qemu_find_opts("machine"), 0);
    if (opts && !qemu_opt_get_bool(opts, "mem-merge", true)) {
        /* disabled by the user */
        return 0;
    }

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
}
ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr)
{
    RAMBlock *block, *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    new_block->offset = find_ram_offset(size);
    if (host) {
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
    } else {
        if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
            new_block->host = file_ram_alloc(new_block, size, mem_path);
            if (!new_block->host) {
                new_block->host = qemu_vmalloc(size);
                memory_try_enable_merging(new_block->host, size);
            }
#else
            fprintf(stderr, "-mem-path option unsupported\n");
            exit(1);
#endif
        } else {
            if (xen_enabled()) {
                xen_ram_alloc(new_block->offset, size, mr);
            } else if (kvm_enabled()) {
                /* some s390/kvm configurations have special constraints */
                new_block->host = kvm_vmalloc(size);
            } else {
                new_block->host = qemu_vmalloc(size);
            }
            memory_try_enable_merging(new_block->host, size);
        }
    }
    new_block->length = size;

    /* Keep the list sorted from biggest to smallest block.  */
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->length < new_block->length) {
            break;
        }
    }
    if (block) {
        QTAILQ_INSERT_BEFORE(block, new_block, next);
    } else {
        QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
    }
    ram_list.mru_block = NULL;

    qemu_mutex_unlock_ramlist();

    ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
                                    last_ram_offset() >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0, size >> TARGET_PAGE_BITS);
    cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);

    qemu_ram_setup_dump(new_block->host, size);
    qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}
ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
{
    return qemu_ram_alloc_from_ptr(size, NULL, mr);
}
void qemu_ram_free_from_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            g_free(block);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}
void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
                if (block->fd) {
                    munmap(block->host, block->length);
                    close(block->fd);
                } else {
                    qemu_vfree(block->host);
                }
#else
                abort();
#endif
            } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                munmap(block->host, block->length);
#else
                if (xen_enabled()) {
                    xen_invalidate_map_cache_entry(block->host);
                } else {
                    qemu_vfree(block->host);
                }
#endif
            }
            g_free(block);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}
#ifndef _WIN32
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->length) {
            vaddr = block->host + offset;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else {
                flags = MAP_FIXED;
                munmap(vaddr, length);
                if (mem_path) {
#if defined(__linux__) && !defined(TARGET_S390X)
                    if (block->fd) {
#ifdef MAP_POPULATE
                        flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
                            MAP_PRIVATE;
#else
                        flags |= MAP_PRIVATE;
#endif
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, block->fd, offset);
                    } else {
                        flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, -1, 0);
                    }
#else
                    abort();
#endif
                } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                    flags |= MAP_SHARED | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
                                flags, -1, 0);
#else
                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
#endif
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                memory_try_enable_merging(vaddr, length);
                qemu_ram_setup_dump(vaddr, length);
            }
            return;
        }
    }
}
#endif /* !_WIN32 */
/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    /* The list is protected by the iothread lock here.  */
    block = ram_list.mru_block;
    if (block && addr - block->offset < block->length) {
        goto found;
    }
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    ram_list.mru_block = block;
    if (xen_enabled()) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map until the end of the page.
         */
        if (block->offset == 0) {
            return xen_map_cache(addr, 0, 0);
        } else if (block->host == NULL) {
            block->host =
                xen_map_cache(block->offset, block->length, 1);
        }
    }
    return block->host + (addr - block->offset);
}
/* Return a host pointer to ram allocated with qemu_ram_alloc.  Same as
 * qemu_get_ram_ptr but do not touch ram_list.mru_block.
 *
 * ??? Is this still necessary?
 */
static void *qemu_safe_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    /* The list is protected by the iothread lock here.  */
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            if (xen_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                    block->host =
                        xen_map_cache(block->offset, block->length, 1);
                }
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}
/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
 * but takes a size argument */
static void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
{
    if (xen_enabled()) {
        return xen_map_cache(addr, *size, 1);
    } else {
        RAMBlock *block;

        QTAILQ_FOREACH(block, &ram_list.blocks, next) {
            if (addr - block->offset < block->length) {
                if (addr - block->offset + *size > block->length)
                    *size = block->length - addr + block->offset;
                return block->host + (addr - block->offset);
            }
        }

        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
        abort();
    }
}

void qemu_put_ram_ptr(void *addr)
{
    trace_qemu_put_ram_ptr(addr);
}
int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_enabled()) {
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
        return 0;
    }

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        /* This case happens when the block is not mapped. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->length) {
            *ram_addr = block->offset + (host - block->host);
            return 0;
        }
    }

    return -1;
}

/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }
    return ram_addr;
}
static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
                                    unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
#endif
    return 0;
}

static void unassigned_mem_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
#endif
}

static const MemoryRegionOps unassigned_mem_ops = {
    .read = unassigned_mem_read,
    .write = unassigned_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t error_mem_read(void *opaque, hwaddr addr,
                               unsigned size)
{
    abort();
}

static void error_mem_write(void *opaque, hwaddr addr,
                            uint64_t value, unsigned size)
{
    abort();
}

static const MemoryRegionOps error_mem_ops = {
    .read = error_mem_read,
    .write = error_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static const MemoryRegionOps rom_mem_ops = {
    .read = error_mem_read,
    .write = unassigned_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
                               uint64_t val, unsigned size)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, size);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    switch (size) {
    case 1:
        stb_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 2:
        stw_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 4:
        stl_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    default:
        abort();
    }
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static const MemoryRegionOps notdirty_mem_ops = {
    .read = error_mem_read,
    .write = notdirty_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUArchState *env = cpu_single_env;
    target_ulong pc, cs_base;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that is will trigger after the
         * current instruction. */
        cpu_interrupt(ENV_GET_CPU(env), CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb_check_watchpoint(env);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                    cpu_loop_exit(env);
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                    cpu_resume_from_signal(env, NULL);
                }
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint64_t watch_mem_read(void *opaque, hwaddr addr,
                               unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
    switch (size) {
    case 1: return ldub_phys(addr);
    case 2: return lduw_phys(addr);
    case 4: return ldl_phys(addr);
    default: abort();
    }
}

static void watch_mem_write(void *opaque, hwaddr addr,
                            uint64_t val, unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
    switch (size) {
    case 1:
        stb_phys(addr, val);
        break;
    case 2:
        stw_phys(addr, val);
        break;
    case 4:
        stl_phys(addr, val);
        break;
    default: abort();
    }
}

static const MemoryRegionOps watch_mem_ops = {
    .read = watch_mem_read,
    .write = watch_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
static uint64_t subpage_read(void *opaque, hwaddr addr,
                             unsigned len)
{
    subpage_t *mmio = opaque;
    unsigned int idx = SUBPAGE_IDX(addr);
    MemoryRegionSection *section;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif

    section = &phys_sections[mmio->sub_section[idx]];
    addr += mmio->base;
    addr -= section->offset_within_address_space;
    addr += section->offset_within_region;
    return io_mem_read(section->mr, addr, len);
}

static void subpage_write(void *opaque, hwaddr addr,
                          uint64_t value, unsigned len)
{
    subpage_t *mmio = opaque;
    unsigned int idx = SUBPAGE_IDX(addr);
    MemoryRegionSection *section;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx
           " idx %d value %"PRIx64"\n",
           __func__, mmio, len, addr, idx, value);
#endif

    section = &phys_sections[mmio->sub_section[idx]];
    addr += mmio->base;
    addr -= section->offset_within_address_space;
    addr += section->offset_within_region;
    io_mem_write(section->mr, addr, value, len);
}

static const MemoryRegionOps subpage_ops = {
    .read = subpage_read,
    .write = subpage_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
static uint64_t subpage_ram_read(void *opaque, hwaddr addr,
                                 unsigned size)
{
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    switch (size) {
    case 1: return ldub_p(ptr);
    case 2: return lduw_p(ptr);
    case 4: return ldl_p(ptr);
    default: abort();
    }
}

static void subpage_ram_write(void *opaque, hwaddr addr,
                              uint64_t value, unsigned size)
{
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    switch (size) {
    case 1: return stb_p(ptr, value);
    case 2: return stw_p(ptr, value);
    case 4: return stl_p(ptr, value);
    default: abort();
    }
}

static const MemoryRegionOps subpage_ram_ops = {
    .read = subpage_ram_read,
    .write = subpage_ram_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    if (memory_region_is_ram(phys_sections[section].mr)) {
        MemoryRegionSection new_section = phys_sections[section];
        new_section.mr = &io_mem_subpage_ram;
        section = phys_section_add(&new_section);
    }
    for (; idx <= eidx; idx++) {
        mmio->sub_section[idx] = section;
    }

    return 0;
}
static subpage_t *subpage_init(hwaddr base)
{
    subpage_t *mmio;

    mmio = g_malloc0(sizeof(subpage_t));

    mmio->base = base;
    memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
                          "subpage", TARGET_PAGE_SIZE);
    mmio->iomem.subpage = true;
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned);

    return mmio;
}
static uint16_t dummy_section(MemoryRegion *mr)
{
    MemoryRegionSection section = {
        .mr = mr,
        .offset_within_address_space = 0,
        .offset_within_region = 0,
        .size = UINT64_MAX,
    };

    return phys_section_add(&section);
}

MemoryRegion *iotlb_to_region(hwaddr index)
{
    return phys_sections[index & ~TARGET_PAGE_MASK].mr;
}
static void io_mem_init(void)
{
    memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
    memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
    memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
                          "unassigned", UINT64_MAX);
    memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
                          "notdirty", UINT64_MAX);
    memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
                          "subpage-ram", UINT64_MAX);
    memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
                          "watch", UINT64_MAX);
}
static void mem_begin(MemoryListener *listener)
{
    AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);

    destroy_all_mappings(d);
    d->phys_map.ptr = PHYS_MAP_NODE_NIL;
}

static void core_begin(MemoryListener *listener)
{
    phys_sections_clear();
    phys_section_unassigned = dummy_section(&io_mem_unassigned);
    phys_section_notdirty = dummy_section(&io_mem_notdirty);
    phys_section_rom = dummy_section(&io_mem_rom);
    phys_section_watch = dummy_section(&io_mem_watch);
}

static void tcg_commit(MemoryListener *listener)
{
    CPUArchState *env;

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}

static void core_log_global_start(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(1);
}

static void core_log_global_stop(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(0);
}
static void io_region_add(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    MemoryRegionIORange *mrio = g_new(MemoryRegionIORange, 1);

    mrio->mr = section->mr;
    mrio->offset = section->offset_within_region;
    iorange_init(&mrio->iorange, &memory_region_iorange_ops,
                 section->offset_within_address_space, section->size);
    ioport_register(&mrio->iorange);
}

static void io_region_del(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    isa_unassign_ioport(section->offset_within_address_space, section->size);
}
static MemoryListener core_memory_listener = {
    .begin = core_begin,
    .log_global_start = core_log_global_start,
    .log_global_stop = core_log_global_stop,
};

static MemoryListener io_memory_listener = {
    .region_add = io_region_add,
    .region_del = io_region_del,
};

static MemoryListener tcg_memory_listener = {
    .commit = tcg_commit,
};
void address_space_init_dispatch(AddressSpace *as)
{
    AddressSpaceDispatch *d = g_new(AddressSpaceDispatch, 1);

    d->phys_map  = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };
    d->listener = (MemoryListener) {
        .begin = mem_begin,
        .region_add = mem_add,
        .region_nop = mem_add,
    };
    as->dispatch = d;
    memory_listener_register(&d->listener, as);
}

void address_space_destroy_dispatch(AddressSpace *as)
{
    AddressSpaceDispatch *d = as->dispatch;

    memory_listener_unregister(&d->listener);
    destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
    g_free(d);
    as->dispatch = NULL;
}
static void memory_map_init(void)
{
    system_memory = g_malloc(sizeof(*system_memory));
    memory_region_init(system_memory, "system", INT64_MAX);
    address_space_init(&address_space_memory, system_memory);
    address_space_memory.name = "memory";

    system_io = g_malloc(sizeof(*system_io));
    memory_region_init(system_io, "io", 65536);
    address_space_init(&address_space_io, system_io);
    address_space_io.name = "I/O";

    memory_listener_register(&core_memory_listener, &address_space_memory);
    memory_listener_register(&io_memory_listener, &address_space_io);
    memory_listener_register(&tcg_memory_listener, &address_space_memory);

    dma_context_init(&dma_context_memory, &address_space_memory,
                     NULL, NULL, NULL);
}

MemoryRegion *get_system_memory(void)
{
    return system_memory;
}
MemoryRegion *get_system_io(void)
{
    return system_io;
}

#endif /* !defined(CONFIG_USER_ONLY) */
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

#else
,
1875 if (!cpu_physical_memory_is_dirty(addr
)) {
1876 /* invalidate code */
1877 tb_invalidate_phys_page_range(addr
, addr
+ length
, 0);
1879 cpu_physical_memory_set_dirty_flags(addr
, (0xff & ~CODE_DIRTY_FLAG
));
1881 xen_modified_memory(addr
, length
);
void address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
                      int len, bool is_write)
{
    AddressSpaceDispatch *d = as->dispatch;
    int l;
    uint8_t *ptr;
    uint32_t val;
    hwaddr page;
    MemoryRegionSection *section;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        section = phys_page_find(d, page >> TARGET_PAGE_BITS);

        if (is_write) {
            if (!memory_region_is_ram(section->mr)) {
                hwaddr addr1;
                addr1 = memory_region_section_addr(section, addr);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write(section->mr, addr1, val, 4);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write(section->mr, addr1, val, 2);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write(section->mr, addr1, val, 1);
                    l = 1;
                }
            } else if (!section->readonly) {
                ram_addr_t addr1;
                addr1 = memory_region_get_ram_addr(section->mr)
                    + memory_region_section_addr(section, addr);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(addr1, l);
                qemu_put_ram_ptr(ptr);
            }
        } else {
            if (!(memory_region_is_ram(section->mr) ||
                  memory_region_is_romd(section->mr))) {
                hwaddr addr1;
                /* I/O case */
                addr1 = memory_region_section_addr(section, addr);
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read(section->mr, addr1, 4);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read(section->mr, addr1, 2);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read(section->mr, addr1, 1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(section->mr->ram_addr
                                       + memory_region_section_addr(section,
                                                                    addr));
                memcpy(buf, ptr, l);
                qemu_put_ram_ptr(ptr);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
void address_space_write(AddressSpace *as, hwaddr addr,
                         const uint8_t *buf, int len)
{
    address_space_rw(as, addr, (uint8_t *)buf, len, true);
}

/**
 * address_space_read: read from an address space.
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @buf: buffer with the data transferred
 */
void address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
{
    address_space_rw(as, addr, buf, len, false);
}
void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
                            int len, int is_write)
{
    return address_space_rw(&address_space_memory, addr, buf, len, is_write);
}
1996 void cpu_physical_memory_write_rom(hwaddr addr
,
1997 const uint8_t *buf
, int len
)
1999 AddressSpaceDispatch
*d
= address_space_memory
.dispatch
;
2003 MemoryRegionSection
*section
;
2006 page
= addr
& TARGET_PAGE_MASK
;
2007 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
2010 section
= phys_page_find(d
, page
>> TARGET_PAGE_BITS
);
2012 if (!(memory_region_is_ram(section
->mr
) ||
2013 memory_region_is_romd(section
->mr
))) {
2016 unsigned long addr1
;
2017 addr1
= memory_region_get_ram_addr(section
->mr
)
2018 + memory_region_section_addr(section
, addr
);
2020 ptr
= qemu_get_ram_ptr(addr1
);
2021 memcpy(ptr
, buf
, l
);
2022 invalidate_and_set_dirty(addr1
, l
);
2023 qemu_put_ram_ptr(ptr
);
typedef struct {
    void *buffer;
    hwaddr addr;
    hwaddr len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = g_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

static void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    QLIST_REMOVE(client, link);
    g_free(client);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
    }
}
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *address_space_map(AddressSpace *as,
                        hwaddr addr,
                        hwaddr *plen,
                        bool is_write)
{
    AddressSpaceDispatch *d = as->dispatch;
    hwaddr len = *plen;
    hwaddr todo = 0;
    int l;
    hwaddr page;
    MemoryRegionSection *section;
    ram_addr_t raddr = RAM_ADDR_MAX;
    ram_addr_t rlen;
    void *ret;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        section = phys_page_find(d, page >> TARGET_PAGE_BITS);

        if (!(memory_region_is_ram(section->mr) && !section->readonly)) {
            if (todo || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                address_space_read(as, addr, bounce.buffer, l);
            }

            *plen = l;
            return bounce.buffer;
        }
        if (!todo) {
            raddr = memory_region_get_ram_addr(section->mr)
                + memory_region_section_addr(section, addr);
        }

        len -= l;
        addr += l;
        todo += l;
    }
    rlen = todo;
    ret = qemu_ram_ptr_length(raddr, &rlen);
    *plen = rlen;
    return ret;
}
/* Unmaps a memory region previously mapped by address_space_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         int is_write, hwaddr access_len)
{
    if (buffer != bounce.buffer) {
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                invalidate_and_set_dirty(addr1, l);
                addr1 += l;
                access_len -= l;
            }
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        return;
    }
    if (is_write) {
        address_space_write(as, bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}
void *cpu_physical_memory_map(hwaddr addr,
                              hwaddr *plen,
                              int is_write)
{
    return address_space_map(&address_space_memory, addr, plen, is_write);
}

void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                               int is_write, hwaddr access_len)
{
    return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
}
/* warning: addr must be aligned */
static inline uint32_t ldl_phys_internal(hwaddr addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint32_t val;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);

    if (!(memory_region_is_ram(section->mr) ||
          memory_region_is_romd(section->mr))) {
        /* I/O case */
        addr = memory_region_section_addr(section, addr);
        val = io_mem_read(section->mr, addr, 4);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t ldl_phys(hwaddr addr)
{
    return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t ldl_le_phys(hwaddr addr)
{
    return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t ldl_be_phys(hwaddr addr)
{
    return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
}
/* warning: addr must be aligned */
static inline uint64_t ldq_phys_internal(hwaddr addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);

    if (!(memory_region_is_ram(section->mr) ||
          memory_region_is_romd(section->mr))) {
        /* I/O case */
        addr = memory_region_section_addr(section, addr);

        /* XXX This is broken when device endian != cpu endian.
           Fix and add "endian" variable check */
#ifdef TARGET_WORDS_BIGENDIAN
        val = io_mem_read(section->mr, addr, 4) << 32;
        val |= io_mem_read(section->mr, addr + 4, 4);
#else
        val = io_mem_read(section->mr, addr, 4);
        val |= io_mem_read(section->mr, addr + 4, 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
    }
    return val;
}

uint64_t ldq_phys(hwaddr addr)
{
    return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint64_t ldq_le_phys(hwaddr addr)
{
    return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint64_t ldq_be_phys(hwaddr addr)
{
    return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
}
/* XXX: optimize */
uint32_t ldub_phys(hwaddr addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}
/* warning: addr must be aligned */
static inline uint32_t lduw_phys_internal(hwaddr addr,
                                          enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);

    if (!(memory_region_is_ram(section->mr) ||
          memory_region_is_romd(section->mr))) {
        /* I/O case */
        addr = memory_region_section_addr(section, addr);
        val = io_mem_read(section->mr, addr, 2);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t lduw_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t lduw_le_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t lduw_be_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
}
/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(hwaddr addr, uint32_t val)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
        io_mem_write(section->mr, addr, val, 4);
    } else {
        unsigned long addr1 = (memory_region_get_ram_addr(section->mr)
                               & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, addr);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}
void stq_phys_notdirty(hwaddr addr, uint64_t val)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write(section->mr, addr, val >> 32, 4);
        io_mem_write(section->mr, addr + 4, (uint32_t)val, 4);
#else
        io_mem_write(section->mr, addr, (uint32_t)val, 4);
        io_mem_write(section->mr, addr + 4, val >> 32, 4);
#endif
    } else {
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        stq_p(ptr, val);
    }
}
/* warning: addr must be aligned */
static inline void stl_phys_internal(hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        io_mem_write(section->mr, addr, val, 4);
    } else {
        unsigned long addr1;
        addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, addr);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 4);
    }
}

void stl_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stl_le_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stl_be_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}
/* XXX: optimize */
void stb_phys(hwaddr addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}
/* warning: addr must be aligned */
static inline void stw_phys_internal(hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        io_mem_write(section->mr, addr, val, 2);
    } else {
        unsigned long addr1;
        addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, addr);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 2);
    }
}

void stw_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stw_le_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stw_be_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}
/* XXX: optimize */
void stq_phys(hwaddr addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_le_phys(hwaddr addr, uint64_t val)
{
    val = cpu_to_le64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_be_phys(hwaddr addr, uint64_t val)
{
    val = cpu_to_be64(val);
    cpu_physical_memory_write(addr, &val, 8);
}
/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
#endif
#if !defined(CONFIG_USER_ONLY)

/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool virtio_is_big_endian(void);
bool virtio_is_big_endian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}

#endif

#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch,
                             phys_addr >> TARGET_PAGE_BITS);

    return !(memory_region_is_ram(section->mr) ||
             memory_region_is_romd(section->mr));
}
#endif