/*
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <sys/types.h>

#include "qemu-common.h"
#include "qemu-timer.h"
#include "exec-memory.h"
#if defined(CONFIG_USER_ONLY)
#else /* !CONFIG_USER_ONLY */
#include "xen-mapcache.h"
#endif
#include "translate-all.h"

#include "memory-internal.h"

//#define DEBUG_UNASSIGNED
//#define DEBUG_SUBPAGE
#if !defined(CONFIG_USER_ONLY)
static int in_migration;

RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;
DMAContext dma_context_memory;

MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
static MemoryRegion io_mem_subpage_ram;

CPUArchState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUArchState *,cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
#if !defined(CONFIG_USER_ONLY)

static MemoryRegionSection *phys_sections;
static unsigned phys_sections_nb, phys_sections_nb_alloc;
static uint16_t phys_section_unassigned;
static uint16_t phys_section_notdirty;
static uint16_t phys_section_rom;
static uint16_t phys_section_watch;

/* Simple allocator for PhysPageEntry nodes */
static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;

#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)

static void io_mem_init(void);
static void memory_map_init(void);
static void *qemu_safe_ram_ptr(ram_addr_t addr);

static MemoryRegion io_mem_watch;
#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(unsigned nodes)
    if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
        typedef PhysPageEntry Node[L2_SIZE];
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
                                      phys_map_nodes_nb + nodes);
        phys_map_nodes = g_renew(Node, phys_map_nodes,
                                 phys_map_nodes_nb_alloc);

static uint16_t phys_map_node_alloc(void)
    ret = phys_map_nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != phys_map_nodes_nb_alloc);
    for (i = 0; i < L2_SIZE; ++i) {
        phys_map_nodes[ret][i].is_leaf = 0;
        phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;

static void phys_map_nodes_reset(void)
    phys_map_nodes_nb = 0;
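
/*
 * The physical page map is a multi-level table: each node holds L2_SIZE
 * PhysPageEntry slots and leaf entries index into phys_sections[].
 * phys_page_set_level() below walks (and lazily allocates) one level of
 * that table while registering a range of pages.
 */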
static void phys_page_set_level(PhysPageEntry *lp, hwaddr *index,
                                hwaddr *nb, uint16_t leaf,
    hwaddr step = (hwaddr)1 << (level * L2_BITS);

    if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc();
        p = phys_map_nodes[lp->ptr];
            for (i = 0; i < L2_SIZE; i++) {
                p[i].ptr = phys_section_unassigned;
        p = phys_map_nodes[lp->ptr];
    lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];

    while (*nb && lp < &p[L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            phys_page_set_level(lp, index, nb, leaf, level - 1);

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(3 * P_L2_LEVELS);

    phys_page_set_level(&d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
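
/*
 * Look up the MemoryRegionSection covering a physical page index by
 * walking the multi-level map; unmapped pages resolve to the
 * phys_section_unassigned entry.
 */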
MemoryRegionSection *phys_page_find(AddressSpaceDispatch *d, hwaddr index)
    PhysPageEntry lp = d->phys_map;
    uint16_t s_index = phys_section_unassigned;

    for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
        p = phys_map_nodes[lp.ptr];
        lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];

    return &phys_sections[s_index];

bool memory_region_is_unassigned(MemoryRegion *mr)
    return mr != &io_mem_ram && mr != &io_mem_rom
        && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
void cpu_exec_init_all(void)
#if !defined(CONFIG_USER_ONLY)

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
static int cpu_common_post_load(void *opaque, int version_id)
    CPUArchState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUArchState),
        VMSTATE_UINT32(interrupt_request, CPUArchState),
        VMSTATE_END_OF_LIST()

CPUArchState *qemu_get_cpu(int cpu)
    CPUArchState *env = first_cpu;

        if (env->cpu_index == cpu)

void cpu_exec_init(CPUArchState *env)
#ifndef CONFIG_USER_ONLY
    CPUState *cpu = ENV_GET_CPU(env);
#if defined(CONFIG_USER_ONLY)
    env->next_cpu = NULL;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
    env->cpu_index = cpu_index;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();
#if defined(CONFIG_USER_ONLY)
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
    tb_invalidate_phys_page_range(pc, pc + 1, 0);

static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
    tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc) |
            (pc & ~TARGET_PAGE_MASK));
#endif /* TARGET_HAS_ICE */
#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)

int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)

/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
    target_ulong len_mask = ~(len - 1);

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len & (len - 1)) || (addr & ~len_mask) ||
            len == 0 || len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);

    wp = g_malloc(sizeof(*wp));

    wp->len_mask = len_mask;

    /* keep all GDB-injected watchpoints in front */
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
    target_ulong len_mask = ~(len - 1);

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
#if defined(TARGET_HAS_ICE)
    bp = g_malloc(sizeof(*bp));

    /* keep all GDB-injected breakpoints in front */
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUArchState *env, int enabled)
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
            kvm_update_guest_debug(env, 0);
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */

void cpu_reset_interrupt(CPUArchState *env, int mask)
    env->interrupt_request &= ~mask;

void cpu_exit(CPUArchState *env)
    env->exit_request = 1;

void cpu_abort(CPUArchState *env, const char *fmt, ...)
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(env, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        log_cpu_state(env, CPU_DUMP_FPU | CPU_DUMP_CCOP);
#if defined(CONFIG_USER_ONLY)
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);

CPUArchState *cpu_copy(CPUArchState *env)
    CPUArchState *new_env = cpu_init(env->cpu_model_str);
    CPUArchState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
#if defined(TARGET_HAS_ICE)
    memcpy(new_env, env, sizeof(CPUArchState));

    /* Preserve chaining and index. */
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
#if !defined(CONFIG_USER_ONLY)
static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (uintptr_t)qemu_safe_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
            != (end - 1) - start) {
    cpu_tlb_reset_dirty_all(start1, length);

/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    tlb_reset_dirty_range_all(start, end, length);

static int cpu_physical_memory_set_dirty_tracking(int enable)
    in_migration = enable;
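
/*
 * Build the IOTLB value for a page: RAM pages encode the ram_addr plus the
 * notdirty/rom sections, while MMIO pages encode the section index.  Pages
 * covered by a watchpoint are redirected to the watch section so that
 * accesses trap into the watchpoint handlers.
 */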
hwaddr memory_region_section_get_iotlb(CPUArchState *env,
                                       MemoryRegionSection *section,
                                       target_ulong *address)
    if (memory_region_is_ram(section->mr)) {
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, paddr);
        if (!section->readonly) {
            iotlb |= phys_section_notdirty;
            iotlb |= phys_section_rom;
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
        iotlb = section - phys_sections;
        iotlb += memory_region_section_addr(section, paddr);

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = phys_section_watch + paddr;
                *address |= TLB_MMIO;

#endif /* defined(CONFIG_USER_ONLY) */
#if !defined(CONFIG_USER_ONLY)

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    uint16_t sub_section[TARGET_PAGE_SIZE];

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
static subpage_t *subpage_init(hwaddr base);

static void destroy_page_desc(uint16_t section_index)
    MemoryRegionSection *section = &phys_sections[section_index];
    MemoryRegion *mr = section->mr;

        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        memory_region_destroy(&subpage->iomem);

static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
    if (lp->ptr == PHYS_MAP_NODE_NIL) {
    p = phys_map_nodes[lp->ptr];
    for (i = 0; i < L2_SIZE; ++i) {
            destroy_l2_mapping(&p[i], level - 1);
            destroy_page_desc(p[i].ptr);
    lp->ptr = PHYS_MAP_NODE_NIL;

static void destroy_all_mappings(AddressSpaceDispatch *d)
    destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
    phys_map_nodes_reset();
static uint16_t phys_section_add(MemoryRegionSection *section)
    if (phys_sections_nb == phys_sections_nb_alloc) {
        phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
        phys_sections = g_renew(MemoryRegionSection, phys_sections,
                                phys_sections_nb_alloc);
    phys_sections[phys_sections_nb] = *section;
    return phys_sections_nb++;

static void phys_sections_clear(void)
    phys_sections_nb = 0;
static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
    hwaddr base = section->offset_within_address_space
    MemoryRegionSection *existing = phys_page_find(d, base >> TARGET_PAGE_BITS);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = TARGET_PAGE_SIZE,

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(base);
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&subsection));
        subpage = container_of(existing->mr, subpage_t, iomem);
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + section->size - 1;
    subpage_register(subpage, start, end, phys_section_add(section));

static void register_multipage(AddressSpaceDispatch *d, MemoryRegionSection *section)
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = section->size;
    uint16_t section_index = phys_section_add(section);

    phys_page_set(d, addr >> TARGET_PAGE_BITS, size >> TARGET_PAGE_BITS,
static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
    AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
    MemoryRegionSection now = *section, remain = *section;

    if ((now.offset_within_address_space & ~TARGET_PAGE_MASK)
        || (now.size < TARGET_PAGE_SIZE)) {
        now.size = MIN(TARGET_PAGE_ALIGN(now.offset_within_address_space)
                       - now.offset_within_address_space,
        register_subpage(d, &now);
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
    while (remain.size >= TARGET_PAGE_SIZE) {
        if (remain.offset_within_region & ~TARGET_PAGE_MASK) {
            now.size = TARGET_PAGE_SIZE;
            register_subpage(d, &now);
            now.size &= TARGET_PAGE_MASK;
            register_multipage(d, &now);
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
        register_subpage(d, &now);

void qemu_flush_coalesced_mmio_buffer(void)
        kvm_flush_coalesced_mmio_buffer();
#if defined(__linux__) && !defined(TARGET_S390X)

#define HUGETLBFS_MAGIC       0x958458f6

static long gethugepagesize(const char *path)
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

static void *file_ram_alloc(RAMBlock *block,
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);

    if (memory < hpagesize) {

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");

    if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {

    fd = mkstemp(filename);
        perror("unable to create backing store for hugepages");

    memory = (memory+hpagesize-1) & ~(hpagesize-1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     */
    if (ftruncate(fd, memory))

    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
888 if (area
== MAP_FAILED
) {
889 perror("file_ram_alloc: can't mmap RAM pages");
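
/*
 * Find the smallest gap in ram_list large enough for a new block of the
 * requested size (a best-fit search over the existing block offsets).
 */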
static ram_addr_t find_ram_offset(ram_addr_t size)
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    if (QLIST_EMPTY(&ram_list.blocks))

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->length;

        QLIST_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
        if (next - end >= size && next - end < mingap) {
    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",

ram_addr_t last_ram_offset(void)
    QLIST_FOREACH(block, &ram_list.blocks, next)
        last = MAX(last, block->offset + block->length);

static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
    QemuOpts *machine_opts;

    /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
    machine_opts = qemu_opts_find(qemu_find_opts("machine"), 0);
        !qemu_opt_get_bool(machine_opts, "dump-guest-core", true)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
            perror("qemu_madvise");
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                            "but dump_guest_core=off specified\n");
void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
    RAMBlock *new_block, *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
    assert(!new_block->idstr[0]);
        char *id = qdev_get_dev_path(dev);
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",

static int memory_try_enable_merging(void *addr, size_t len)
    opts = qemu_opts_find(qemu_find_opts("machine"), 0);
    if (opts && !qemu_opt_get_bool(opts, "mem-merge", true)) {
        /* disabled by the user */

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
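
/*
 * Allocate a new RAMBlock, either wrapping caller-provided host memory or
 * allocating it here (hugetlbfs via -mem-path, Xen, KVM or plain
 * qemu_vmalloc), then grow the dirty bitmap to cover it.
 */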
ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
    RAMBlock *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));

    new_block->offset = find_ram_offset(size);
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
#if defined (__linux__) && !defined(TARGET_S390X)
            new_block->host = file_ram_alloc(new_block, size, mem_path);
            if (!new_block->host) {
                new_block->host = qemu_vmalloc(size);
                memory_try_enable_merging(new_block->host, size);
            fprintf(stderr, "-mem-path option unsupported\n");
            if (xen_enabled()) {
                xen_ram_alloc(new_block->offset, size, mr);
            } else if (kvm_enabled()) {
                /* some s390/kvm configurations have special constraints */
                new_block->host = kvm_vmalloc(size);
                new_block->host = qemu_vmalloc(size);
            memory_try_enable_merging(new_block->host, size);
    new_block->length = size;

    QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);

    ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
                                    last_ram_offset() >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0, size >> TARGET_PAGE_BITS);
    cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);

    qemu_ram_setup_dump(new_block->host, size);
    qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);

        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
    return qemu_ram_alloc_from_ptr(size, NULL, mr);

void qemu_ram_free_from_ptr(ram_addr_t addr)
    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE(block, next);

void qemu_ram_free(ram_addr_t addr)
    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE(block, next);
            if (block->flags & RAM_PREALLOC_MASK) {
            } else if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
                    munmap(block->host, block->length);
                    qemu_vfree(block->host);
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                munmap(block->host, block->length);
                if (xen_enabled()) {
                    xen_invalidate_map_cache_entry(block->host);
                    qemu_vfree(block->host);
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
    QLIST_FOREACH(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->length) {
            vaddr = block->host + offset;
            if (block->flags & RAM_PREALLOC_MASK) {
                munmap(vaddr, length);
#if defined(__linux__) && !defined(TARGET_S390X)
                    flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
                             MAP_PRIVATE;
                    flags |= MAP_PRIVATE;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, block->fd, offset);
                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                flags |= MAP_SHARED | MAP_ANONYMOUS;
                area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
                flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
            if (area != vaddr) {
                fprintf(stderr, "Could not remap addr: "
                        RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
            memory_try_enable_merging(vaddr, length);
            qemu_ram_setup_dump(vaddr, length);
#endif /* !_WIN32 */
/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            /* Move this entry to to start of the list.  */
            if (block != QLIST_FIRST(&ram_list.blocks)) {
                QLIST_REMOVE(block, next);
                QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
            if (xen_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                        xen_map_cache(block->offset, block->length, 1);
            return block->host + (addr - block->offset);

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
/* Return a host pointer to ram allocated with qemu_ram_alloc.
 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
 */
static void *qemu_safe_ram_ptr(ram_addr_t addr)
    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            if (xen_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                        xen_map_cache(block->offset, block->length, 1);
            return block->host + (addr - block->offset);

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
 * but takes a size argument */
static void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
    if (xen_enabled()) {
        return xen_map_cache(addr, *size, 1);

        QLIST_FOREACH(block, &ram_list.blocks, next) {
            if (addr - block->offset < block->length) {
                if (addr - block->offset + *size > block->length)
                    *size = block->length - addr + block->offset;
                return block->host + (addr - block->offset);

        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);

void qemu_put_ram_ptr(void *addr)
    trace_qemu_put_ram_ptr(addr);
int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
    uint8_t *host = ptr;

    if (xen_enabled()) {
        *ram_addr = xen_ram_addr_from_mapcache(ptr);

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        /* This case appears when the block is not mapped. */
        if (block->host == NULL) {
        if (host - block->host < block->length) {
            *ram_addr = block->offset + (host - block->host);

/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
    ram_addr_t ram_addr;

    if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);

static void unassigned_mem_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);

static const MemoryRegionOps unassigned_mem_ops = {
    .read = unassigned_mem_read,
    .write = unassigned_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,

static uint64_t error_mem_read(void *opaque, hwaddr addr,

static void error_mem_write(void *opaque, hwaddr addr,
                            uint64_t value, unsigned size)

static const MemoryRegionOps error_mem_ops = {
    .read = error_mem_read,
    .write = error_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,

static const MemoryRegionOps rom_mem_ops = {
    .read = error_mem_read,
    .write = unassigned_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
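
/*
 * Writes to pages whose code-dirty flag is clear land here so that any
 * translated code for the page can be invalidated before the write is
 * forwarded to RAM and the dirty flags are updated.
 */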
static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
                               uint64_t val, unsigned size)
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, size);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
        stb_p(qemu_get_ram_ptr(ram_addr), val);
        stw_p(qemu_get_ram_ptr(ram_addr), val);
        stl_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);

static const MemoryRegionOps notdirty_mem_ops = {
    .read = error_mem_read,
    .write = notdirty_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len_mask, int flags)
    CPUArchState *env = cpu_single_env;
    target_ulong pc, cs_base;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that is will trigger after the
         * current instruction. */
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb_check_watchpoint(env);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                    cpu_resume_from_signal(env, NULL);
            wp->flags &= ~BP_WATCHPOINT_HIT;
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint64_t watch_mem_read(void *opaque, hwaddr addr,
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
    case 1: return ldub_phys(addr);
    case 2: return lduw_phys(addr);
    case 4: return ldl_phys(addr);

static void watch_mem_write(void *opaque, hwaddr addr,
                            uint64_t val, unsigned size)
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
    case 1:
        stb_phys(addr, val);
    case 2:
        stw_phys(addr, val);
    case 4:
        stl_phys(addr, val);

static const MemoryRegionOps watch_mem_ops = {
    .read = watch_mem_read,
    .write = watch_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
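
/*
 * Subpage accesses look up the per-subpage section table and forward the
 * access to the underlying memory region at the adjusted offset.
 */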
static uint64_t subpage_read(void *opaque, hwaddr addr,
    subpage_t *mmio = opaque;
    unsigned int idx = SUBPAGE_IDX(addr);
    MemoryRegionSection *section;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);

    section = &phys_sections[mmio->sub_section[idx]];
    addr -= section->offset_within_address_space;
    addr += section->offset_within_region;
    return io_mem_read(section->mr, addr, len);

static void subpage_write(void *opaque, hwaddr addr,
                          uint64_t value, unsigned len)
    subpage_t *mmio = opaque;
    unsigned int idx = SUBPAGE_IDX(addr);
    MemoryRegionSection *section;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx
           " idx %d value %"PRIx64"\n",
           __func__, mmio, len, addr, idx, value);

    section = &phys_sections[mmio->sub_section[idx]];
    addr -= section->offset_within_address_space;
    addr += section->offset_within_region;
    io_mem_write(section->mr, addr, value, len);

static const MemoryRegionOps subpage_ops = {
    .read = subpage_read,
    .write = subpage_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
static uint64_t subpage_ram_read(void *opaque, hwaddr addr,
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    case 1: return ldub_p(ptr);
    case 2: return lduw_p(ptr);
    case 4: return ldl_p(ptr);

static void subpage_ram_write(void *opaque, hwaddr addr,
                              uint64_t value, unsigned size)
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    case 1: return stb_p(ptr, value);
    case 2: return stw_p(ptr, value);
    case 4: return stl_p(ptr, value);

static const MemoryRegionOps subpage_ram_ops = {
    .read = subpage_ram_read,
    .write = subpage_ram_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
           mmio, start, end, idx, eidx, memory);
    if (memory_region_is_ram(phys_sections[section].mr)) {
        MemoryRegionSection new_section = phys_sections[section];
        new_section.mr = &io_mem_subpage_ram;
        section = phys_section_add(&new_section);
    for (; idx <= eidx; idx++) {
        mmio->sub_section[idx] = section;

static subpage_t *subpage_init(hwaddr base)
    mmio = g_malloc0(sizeof(subpage_t));

    memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
                          "subpage", TARGET_PAGE_SIZE);
    mmio->iomem.subpage = true;
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned);

static uint16_t dummy_section(MemoryRegion *mr)
    MemoryRegionSection section = {
        .offset_within_address_space = 0,
        .offset_within_region = 0,

    return phys_section_add(&section);

MemoryRegion *iotlb_to_region(hwaddr index)
    return phys_sections[index & ~TARGET_PAGE_MASK].mr;
1625 memory_region_init_io(&io_mem_ram
, &error_mem_ops
, NULL
, "ram", UINT64_MAX
);
1626 memory_region_init_io(&io_mem_rom
, &rom_mem_ops
, NULL
, "rom", UINT64_MAX
);
1627 memory_region_init_io(&io_mem_unassigned
, &unassigned_mem_ops
, NULL
,
1628 "unassigned", UINT64_MAX
);
1629 memory_region_init_io(&io_mem_notdirty
, ¬dirty_mem_ops
, NULL
,
1630 "notdirty", UINT64_MAX
);
1631 memory_region_init_io(&io_mem_subpage_ram
, &subpage_ram_ops
, NULL
,
1632 "subpage-ram", UINT64_MAX
);
1633 memory_region_init_io(&io_mem_watch
, &watch_mem_ops
, NULL
,
1634 "watch", UINT64_MAX
);
1637 static void mem_begin(MemoryListener
*listener
)
1639 AddressSpaceDispatch
*d
= container_of(listener
, AddressSpaceDispatch
, listener
);
1641 destroy_all_mappings(d
);
1642 d
->phys_map
.ptr
= PHYS_MAP_NODE_NIL
;
1645 static void core_begin(MemoryListener
*listener
)
1647 phys_sections_clear();
1648 phys_section_unassigned
= dummy_section(&io_mem_unassigned
);
1649 phys_section_notdirty
= dummy_section(&io_mem_notdirty
);
1650 phys_section_rom
= dummy_section(&io_mem_rom
);
1651 phys_section_watch
= dummy_section(&io_mem_watch
);
1654 static void tcg_commit(MemoryListener
*listener
)
1658 /* since each CPU stores ram addresses in its TLB cache, we must
1659 reset the modified entries */
1661 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
1666 static void core_log_global_start(MemoryListener
*listener
)
1668 cpu_physical_memory_set_dirty_tracking(1);
1671 static void core_log_global_stop(MemoryListener
*listener
)
1673 cpu_physical_memory_set_dirty_tracking(0);
1676 static void io_region_add(MemoryListener
*listener
,
1677 MemoryRegionSection
*section
)
1679 MemoryRegionIORange
*mrio
= g_new(MemoryRegionIORange
, 1);
1681 mrio
->mr
= section
->mr
;
1682 mrio
->offset
= section
->offset_within_region
;
1683 iorange_init(&mrio
->iorange
, &memory_region_iorange_ops
,
1684 section
->offset_within_address_space
, section
->size
);
1685 ioport_register(&mrio
->iorange
);
1688 static void io_region_del(MemoryListener
*listener
,
1689 MemoryRegionSection
*section
)
1691 isa_unassign_ioport(section
->offset_within_address_space
, section
->size
);
1694 static MemoryListener core_memory_listener
= {
1695 .begin
= core_begin
,
1696 .log_global_start
= core_log_global_start
,
1697 .log_global_stop
= core_log_global_stop
,
1701 static MemoryListener io_memory_listener
= {
1702 .region_add
= io_region_add
,
1703 .region_del
= io_region_del
,
1707 static MemoryListener tcg_memory_listener
= {
1708 .commit
= tcg_commit
,
1711 void address_space_init_dispatch(AddressSpace
*as
)
1713 AddressSpaceDispatch
*d
= g_new(AddressSpaceDispatch
, 1);
1715 d
->phys_map
= (PhysPageEntry
) { .ptr
= PHYS_MAP_NODE_NIL
, .is_leaf
= 0 };
1716 d
->listener
= (MemoryListener
) {
1718 .region_add
= mem_add
,
1719 .region_nop
= mem_add
,
1723 memory_listener_register(&d
->listener
, as
);
1726 void address_space_destroy_dispatch(AddressSpace
*as
)
1728 AddressSpaceDispatch
*d
= as
->dispatch
;
1730 memory_listener_unregister(&d
->listener
);
1731 destroy_l2_mapping(&d
->phys_map
, P_L2_LEVELS
- 1);
1733 as
->dispatch
= NULL
;
1736 static void memory_map_init(void)
1738 system_memory
= g_malloc(sizeof(*system_memory
));
1739 memory_region_init(system_memory
, "system", INT64_MAX
);
1740 address_space_init(&address_space_memory
, system_memory
);
1741 address_space_memory
.name
= "memory";
1743 system_io
= g_malloc(sizeof(*system_io
));
1744 memory_region_init(system_io
, "io", 65536);
1745 address_space_init(&address_space_io
, system_io
);
1746 address_space_io
.name
= "I/O";
1748 memory_listener_register(&core_memory_listener
, &address_space_memory
);
1749 memory_listener_register(&io_memory_listener
, &address_space_io
);
1750 memory_listener_register(&tcg_memory_listener
, &address_space_memory
);
1752 dma_context_init(&dma_context_memory
, &address_space_memory
,
1756 MemoryRegion
*get_system_memory(void)
1758 return system_memory
;
1761 MemoryRegion
*get_system_io(void)
1766 #endif /* !defined(CONFIG_USER_ONLY) */
1768 /* physical memory access (slow version, mainly for debug) */
1769 #if defined(CONFIG_USER_ONLY)
1770 int cpu_memory_rw_debug(CPUArchState
*env
, target_ulong addr
,
1771 uint8_t *buf
, int len
, int is_write
)
1778 page
= addr
& TARGET_PAGE_MASK
;
1779 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
1782 flags
= page_get_flags(page
);
1783 if (!(flags
& PAGE_VALID
))
1786 if (!(flags
& PAGE_WRITE
))
1788 /* XXX: this code should not depend on lock_user */
1789 if (!(p
= lock_user(VERIFY_WRITE
, addr
, l
, 0)))
1792 unlock_user(p
, addr
, l
);
1794 if (!(flags
& PAGE_READ
))
1796 /* XXX: this code should not depend on lock_user */
1797 if (!(p
= lock_user(VERIFY_READ
, addr
, l
, 1)))
1800 unlock_user(p
, addr
, 0);
1811 static void invalidate_and_set_dirty(hwaddr addr
,
1814 if (!cpu_physical_memory_is_dirty(addr
)) {
1815 /* invalidate code */
1816 tb_invalidate_phys_page_range(addr
, addr
+ length
, 0);
1818 cpu_physical_memory_set_dirty_flags(addr
, (0xff & ~CODE_DIRTY_FLAG
));
1820 xen_modified_memory(addr
, length
);
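
/*
 * Core read/write loop: for each page, either forward the access to an
 * I/O memory region in 4/2/1-byte chunks or memcpy directly to/from the
 * host RAM pointer, marking RAM writes dirty.
 */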
void address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
                      int len, bool is_write)
    AddressSpaceDispatch *d = as->dispatch;
    MemoryRegionSection *section;

        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        section = phys_page_find(d, page >> TARGET_PAGE_BITS);

            if (!memory_region_is_ram(section->mr)) {
                addr1 = memory_region_section_addr(section, addr);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    io_mem_write(section->mr, addr1, val, 4);
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    io_mem_write(section->mr, addr1, val, 2);
                    /* 8 bit write access */
                    io_mem_write(section->mr, addr1, val, 1);
            } else if (!section->readonly) {
                addr1 = memory_region_get_ram_addr(section->mr)
                    + memory_region_section_addr(section, addr);
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(addr1, l);
                qemu_put_ram_ptr(ptr);
            if (!(memory_region_is_ram(section->mr) ||
                  memory_region_is_romd(section->mr))) {
                addr1 = memory_region_section_addr(section, addr);
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read(section->mr, addr1, 4);
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read(section->mr, addr1, 2);
                    /* 8 bit read access */
                    val = io_mem_read(section->mr, addr1, 1);
                ptr = qemu_get_ram_ptr(section->mr->ram_addr
                                       + memory_region_section_addr(section,
                memcpy(buf, ptr, l);
1900 qemu_put_ram_ptr(ptr
);
1909 void address_space_write(AddressSpace
*as
, hwaddr addr
,
1910 const uint8_t *buf
, int len
)
1912 address_space_rw(as
, addr
, (uint8_t *)buf
, len
, true);
1916 * address_space_read: read from an address space.
1918 * @as: #AddressSpace to be accessed
1919 * @addr: address within that address space
1920 * @buf: buffer with the data transferred
1922 void address_space_read(AddressSpace
*as
, hwaddr addr
, uint8_t *buf
, int len
)
1924 address_space_rw(as
, addr
, buf
, len
, false);
1928 void cpu_physical_memory_rw(hwaddr addr
, uint8_t *buf
,
1929 int len
, int is_write
)
1931 return address_space_rw(&address_space_memory
, addr
, buf
, len
, is_write
);
1934 /* used for ROM loading : can write in RAM and ROM */
1935 void cpu_physical_memory_write_rom(hwaddr addr
,
1936 const uint8_t *buf
, int len
)
1938 AddressSpaceDispatch
*d
= address_space_memory
.dispatch
;
1942 MemoryRegionSection
*section
;
1945 page
= addr
& TARGET_PAGE_MASK
;
1946 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
1949 section
= phys_page_find(d
, page
>> TARGET_PAGE_BITS
);
1951 if (!(memory_region_is_ram(section
->mr
) ||
1952 memory_region_is_romd(section
->mr
))) {
1955 unsigned long addr1
;
1956 addr1
= memory_region_get_ram_addr(section
->mr
)
1957 + memory_region_section_addr(section
, addr
);
1959 ptr
= qemu_get_ram_ptr(addr1
);
1960 memcpy(ptr
, buf
, l
);
1961 invalidate_and_set_dirty(addr1
, l
);
1962 qemu_put_ram_ptr(ptr
);
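
/*
 * Bounce-buffer machinery for cpu_physical_memory_map(): when a mapping
 * cannot be serviced directly from RAM, a single temporary buffer is used
 * and registered map clients are notified once it becomes free again.
 */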
static BounceBuffer bounce;

typedef struct MapClient {
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
    MapClient *client = g_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    QLIST_INSERT_HEAD(&map_client_list, client, link);

static void cpu_unregister_map_client(void *_client)
    MapClient *client = (MapClient *)_client;

    QLIST_REMOVE(client, link);

static void cpu_notify_map_clients(void)
    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *address_space_map(AddressSpace *as,
    AddressSpaceDispatch *d = as->dispatch;
    MemoryRegionSection *section;
    ram_addr_t raddr = RAM_ADDR_MAX;

        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        section = phys_page_find(d, page >> TARGET_PAGE_BITS);

        if (!(memory_region_is_ram(section->mr) && !section->readonly)) {
            if (todo || bounce.buffer) {
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
                address_space_read(as, addr, bounce.buffer, l);
            return bounce.buffer;
            raddr = memory_region_get_ram_addr(section->mr)
                + memory_region_section_addr(section, addr);
    ret = qemu_ram_ptr_length(raddr, &rlen);

/* Unmaps a memory region previously mapped by address_space_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         int is_write, hwaddr access_len)
    if (buffer != bounce.buffer) {
            ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
            while (access_len) {
                l = TARGET_PAGE_SIZE;
                invalidate_and_set_dirty(addr1, l);
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        address_space_write(as, bounce.addr, bounce.buffer, access_len);
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();

void *cpu_physical_memory_map(hwaddr addr,
    return address_space_map(&address_space_memory, addr, plen, is_write);

void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                               int is_write, hwaddr access_len)
    return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2120 /* warning: addr must be aligned */
2121 static inline uint32_t ldl_phys_internal(hwaddr addr
,
2122 enum device_endian endian
)
2126 MemoryRegionSection
*section
;
2128 section
= phys_page_find(address_space_memory
.dispatch
, addr
>> TARGET_PAGE_BITS
);
2130 if (!(memory_region_is_ram(section
->mr
) ||
2131 memory_region_is_romd(section
->mr
))) {
2133 addr
= memory_region_section_addr(section
, addr
);
2134 val
= io_mem_read(section
->mr
, addr
, 4);
2135 #if defined(TARGET_WORDS_BIGENDIAN)
2136 if (endian
== DEVICE_LITTLE_ENDIAN
) {
2140 if (endian
== DEVICE_BIG_ENDIAN
) {
2146 ptr
= qemu_get_ram_ptr((memory_region_get_ram_addr(section
->mr
)
2148 + memory_region_section_addr(section
, addr
));
2150 case DEVICE_LITTLE_ENDIAN
:
2151 val
= ldl_le_p(ptr
);
2153 case DEVICE_BIG_ENDIAN
:
2154 val
= ldl_be_p(ptr
);
2164 uint32_t ldl_phys(hwaddr addr
)
2166 return ldl_phys_internal(addr
, DEVICE_NATIVE_ENDIAN
);
2169 uint32_t ldl_le_phys(hwaddr addr
)
2171 return ldl_phys_internal(addr
, DEVICE_LITTLE_ENDIAN
);
2174 uint32_t ldl_be_phys(hwaddr addr
)
2176 return ldl_phys_internal(addr
, DEVICE_BIG_ENDIAN
);
2179 /* warning: addr must be aligned */
2180 static inline uint64_t ldq_phys_internal(hwaddr addr
,
2181 enum device_endian endian
)
2185 MemoryRegionSection
*section
;
2187 section
= phys_page_find(address_space_memory
.dispatch
, addr
>> TARGET_PAGE_BITS
);
2189 if (!(memory_region_is_ram(section
->mr
) ||
2190 memory_region_is_romd(section
->mr
))) {
2192 addr
= memory_region_section_addr(section
, addr
);
2194 /* XXX This is broken when device endian != cpu endian.
2195 Fix and add "endian" variable check */
2196 #ifdef TARGET_WORDS_BIGENDIAN
2197 val
= io_mem_read(section
->mr
, addr
, 4) << 32;
2198 val
|= io_mem_read(section
->mr
, addr
+ 4, 4);
2200 val
= io_mem_read(section
->mr
, addr
, 4);
2201 val
|= io_mem_read(section
->mr
, addr
+ 4, 4) << 32;
2205 ptr
= qemu_get_ram_ptr((memory_region_get_ram_addr(section
->mr
)
2207 + memory_region_section_addr(section
, addr
));
2209 case DEVICE_LITTLE_ENDIAN
:
2210 val
= ldq_le_p(ptr
);
2212 case DEVICE_BIG_ENDIAN
:
2213 val
= ldq_be_p(ptr
);
2223 uint64_t ldq_phys(hwaddr addr
)
2225 return ldq_phys_internal(addr
, DEVICE_NATIVE_ENDIAN
);
2228 uint64_t ldq_le_phys(hwaddr addr
)
2230 return ldq_phys_internal(addr
, DEVICE_LITTLE_ENDIAN
);
2233 uint64_t ldq_be_phys(hwaddr addr
)
2235 return ldq_phys_internal(addr
, DEVICE_BIG_ENDIAN
);
2239 uint32_t ldub_phys(hwaddr addr
)
2242 cpu_physical_memory_read(addr
, &val
, 1);
2246 /* warning: addr must be aligned */
2247 static inline uint32_t lduw_phys_internal(hwaddr addr
,
2248 enum device_endian endian
)
2252 MemoryRegionSection
*section
;
2254 section
= phys_page_find(address_space_memory
.dispatch
, addr
>> TARGET_PAGE_BITS
);
2256 if (!(memory_region_is_ram(section
->mr
) ||
2257 memory_region_is_romd(section
->mr
))) {
2259 addr
= memory_region_section_addr(section
, addr
);
2260 val
= io_mem_read(section
->mr
, addr
, 2);
2261 #if defined(TARGET_WORDS_BIGENDIAN)
2262 if (endian
== DEVICE_LITTLE_ENDIAN
) {
2266 if (endian
== DEVICE_BIG_ENDIAN
) {
2272 ptr
= qemu_get_ram_ptr((memory_region_get_ram_addr(section
->mr
)
2274 + memory_region_section_addr(section
, addr
));
2276 case DEVICE_LITTLE_ENDIAN
:
2277 val
= lduw_le_p(ptr
);
2279 case DEVICE_BIG_ENDIAN
:
2280 val
= lduw_be_p(ptr
);
2290 uint32_t lduw_phys(hwaddr addr
)
2292 return lduw_phys_internal(addr
, DEVICE_NATIVE_ENDIAN
);
2295 uint32_t lduw_le_phys(hwaddr addr
)
2297 return lduw_phys_internal(addr
, DEVICE_LITTLE_ENDIAN
);
2300 uint32_t lduw_be_phys(hwaddr addr
)
2302 return lduw_phys_internal(addr
, DEVICE_BIG_ENDIAN
);
/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(hwaddr addr, uint32_t val)
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        io_mem_write(section->mr, addr, val, 4);
        unsigned long addr1 = (memory_region_get_ram_addr(section->mr)
            + memory_region_section_addr(section, addr);
        ptr = qemu_get_ram_ptr(addr1);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));

void stq_phys_notdirty(hwaddr addr, uint64_t val)
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write(section->mr, addr, val >> 32, 4);
        io_mem_write(section->mr, addr + 4, (uint32_t)val, 4);
        io_mem_write(section->mr, addr, (uint32_t)val, 4);
        io_mem_write(section->mr, addr + 4, val >> 32, 4);
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
2362 + memory_region_section_addr(section
, addr
));
/* warning: addr must be aligned */
static inline void stl_phys_internal(hwaddr addr, uint32_t val,
                                     enum device_endian endian)
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
        if (endian == DEVICE_BIG_ENDIAN) {
        io_mem_write(section->mr, addr, val, 4);
        unsigned long addr1;
        addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, addr);
        ptr = qemu_get_ram_ptr(addr1);
        case DEVICE_LITTLE_ENDIAN:
        case DEVICE_BIG_ENDIAN:
        invalidate_and_set_dirty(addr1, 4);

void stl_phys(hwaddr addr, uint32_t val)
    stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);

void stl_le_phys(hwaddr addr, uint32_t val)
    stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);

void stl_be_phys(hwaddr addr, uint32_t val)
    stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);

void stb_phys(hwaddr addr, uint32_t val)
2431 cpu_physical_memory_write(addr
, &v
, 1);
/* warning: addr must be aligned */
static inline void stw_phys_internal(hwaddr addr, uint32_t val,
                                     enum device_endian endian)
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
        if (endian == DEVICE_BIG_ENDIAN) {
        io_mem_write(section->mr, addr, val, 2);
        unsigned long addr1;
        addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, addr);
        ptr = qemu_get_ram_ptr(addr1);
        case DEVICE_LITTLE_ENDIAN:
        case DEVICE_BIG_ENDIAN:
        invalidate_and_set_dirty(addr1, 2);

void stw_phys(hwaddr addr, uint32_t val)
    stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);

void stw_le_phys(hwaddr addr, uint32_t val)
    stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);

void stw_be_phys(hwaddr addr, uint32_t val)
    stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);

void stq_phys(hwaddr addr, uint64_t val)
    cpu_physical_memory_write(addr, &val, 8);

void stq_le_phys(hwaddr addr, uint64_t val)
    val = cpu_to_le64(val);
    cpu_physical_memory_write(addr, &val, 8);

void stq_be_phys(hwaddr addr, uint64_t val)
    val = cpu_to_be64(val);
2510 cpu_physical_memory_write(addr
, &val
, 8);
/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
        l = (page + TARGET_PAGE_SIZE) - addr;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
            cpu_physical_memory_write_rom(phys_addr, buf, l);
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);

#if !defined(CONFIG_USER_ONLY)

/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool virtio_is_big_endian(void);
bool virtio_is_big_endian(void)
#if defined(TARGET_WORDS_BIGENDIAN)

#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch,
                             phys_addr >> TARGET_PAGE_BITS);

    return !(memory_region_is_ram(section->mr) ||
             memory_region_is_romd(section->mr));