[qemu.git] / exec.c
/*
 *  Virtual page mapping
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifndef _WIN32
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/boards.h"
#endif
#include "hw/qdev.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"
#include "qemu/rcu_queue.h"
#include "exec/cputlb.h"
#include "translate-all.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"

#include "qemu/range.h"

//#define DEBUG_SUBPAGE
#if !defined(CONFIG_USER_ONLY)
static bool in_migration;

/* ram_list is read under rcu_read_lock()/rcu_read_unlock().  Writes
 * are protected by the ramlist lock.
 */
RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC   (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED     (1 << 1)

/* Only a portion of RAM (used_length) is actually used, and migrated.
 * This used_length size can change across reboots.
 */
#define RAM_RESIZEABLE (1 << 2)

#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUState *, current_cpu);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;
#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables.  */
#define ADDR_SPACE_BITS 64

#define P_L2_BITS 9
#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
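
/* For illustration, assuming 4 KiB target pages (TARGET_PAGE_BITS == 12):
 *
 *     P_L2_LEVELS = ((64 - 12 - 1) / 9) + 1 = 6
 *
 * i.e. a physical address is resolved in at most six 9-bit radix steps, each
 * step indexing one Node of P_L2_SIZE (512) entries.
 */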
typedef PhysPageEntry Node[P_L2_SIZE];

typedef struct PhysPageMap {
    struct rcu_head rcu;

    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    struct rcu_head rcu;

    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3
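
/* These four indexes are fixed by construction: mem_begin() below adds a dummy
 * section for io_mem_unassigned, io_mem_notdirty, io_mem_rom and io_mem_watch
 * in exactly this order and asserts that phys_section_add() hands back these
 * numbers.  memory_region_section_get_iotlb() then relies on the values being
 * small enough to be ORed into a page-aligned RAM address.
 */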
static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;
#endif
#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
    }
}

static uint32_t phys_map_node_alloc(PhysPageMap *map)
{
    unsigned i;
    uint32_t ret;

    ret = map->nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);
    for (i = 0; i < P_L2_SIZE; ++i) {
        map->nodes[ret][i].skip = 1;
        map->nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}
static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map);
        p = map->nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < P_L2_SIZE; i++) {
                p[i].skip = 0;
                p[i].ptr = PHYS_SECTION_UNASSIGNED;
            }
        }
    } else {
        p = map->nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}
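
/* Rough worked example, assuming 4 KiB target pages: mapping a 2 MiB-aligned,
 * 2 MiB-long region passes nb == 512 pages.  At level 1 the step is 1 << 9
 * pages, so the whole range is covered by a single level-1 entry with
 * skip == 0 pointing straight at the leaf section, and lookups for those pages
 * stop one radix level early.  Smaller or unaligned ranges recurse down to
 * level 0 and fill in individual leaf entries instead.
 */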
/* Compact a non leaf page entry. Simply detect that the entry has a single child,
 * and update our entry so we can skip it and go directly to the destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes, compacted);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}

static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    DECLARE_BITMAP(compacted, nodes_nb);

    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes, compacted);
    }
}
static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (sections[lp.ptr].size.hi ||
        range_covers_byte(sections[lp.ptr].offset_within_address_space,
                          sections[lp.ptr].size.lo, addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}
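
/* Walk sketch: i starts at P_L2_LEVELS and drops by lp.skip on each step, so a
 * chain of single-child nodes collapsed by phys_page_compact() is crossed in
 * one iteration.  The final range_covers_byte() check re-validates the hit,
 * because with skip levels a lookup can land on a leaf whose section does not
 * actually cover addr; such addresses fall back to PHYS_SECTION_UNASSIGNED.
 */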
bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}
328 /* Called from RCU critical section */
329 static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
330 hwaddr addr,
331 bool resolve_subpage)
333 MemoryRegionSection *section;
334 subpage_t *subpage;
336 section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
337 if (resolve_subpage && section->mr->subpage) {
338 subpage = container_of(section->mr, subpage_t, iomem);
339 section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
341 return section;
344 /* Called from RCU critical section */
345 static MemoryRegionSection *
346 address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
347 hwaddr *plen, bool resolve_subpage)
349 MemoryRegionSection *section;
350 Int128 diff;
352 section = address_space_lookup_region(d, addr, resolve_subpage);
353 /* Compute offset within MemoryRegionSection */
354 addr -= section->offset_within_address_space;
356 /* Compute offset within MemoryRegion */
357 *xlat = addr + section->offset_within_region;
359 diff = int128_sub(section->mr->size, int128_make64(addr));
360 *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
361 return section;
364 static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
366 if (memory_region_is_ram(mr)) {
367 return !(is_write && mr->readonly);
369 if (memory_region_is_romd(mr)) {
370 return !is_write;
373 return false;
376 MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
377 hwaddr *xlat, hwaddr *plen,
378 bool is_write)
380 IOMMUTLBEntry iotlb;
381 MemoryRegionSection *section;
382 MemoryRegion *mr;
384 rcu_read_lock();
385 for (;;) {
386 AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
387 section = address_space_translate_internal(d, addr, &addr, plen, true);
388 mr = section->mr;
390 if (!mr->iommu_ops) {
391 break;
394 iotlb = mr->iommu_ops->translate(mr, addr, is_write);
395 addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
396 | (addr & iotlb.addr_mask));
397 *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
398 if (!(iotlb.perm & (1 << is_write))) {
399 mr = &io_mem_unassigned;
400 break;
403 as = iotlb.target_as;
406 if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
407 hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
408 *plen = MIN(page, *plen);
411 *xlat = addr;
412 rcu_read_unlock();
413 return mr;
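/* A rough usage sketch (this mirrors what address_space_rw() does further down
 * in this file; the names and widths here are illustrative only):
 *
 *     hwaddr xlat, l = len;
 *     MemoryRegion *mr = address_space_translate(as, addr, &xlat, &l, true);
 *     if (memory_access_is_direct(mr, true)) {
 *         void *ptr = qemu_get_ram_ptr(memory_region_get_ram_addr(mr) + xlat);
 *         memcpy(ptr, buf, l);
 *         invalidate_and_set_dirty(memory_region_get_ram_addr(mr) + xlat, l);
 *     } else {
 *         // MMIO: dispatch through mr->ops, width chosen by memory_access_size()
 *     }
 *
 * Note that *plen may come back smaller than the requested length, so callers
 * loop until the whole buffer has been transferred.
 */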
416 /* Called from RCU critical section */
417 MemoryRegionSection *
418 address_space_translate_for_iotlb(CPUState *cpu, hwaddr addr,
419 hwaddr *xlat, hwaddr *plen)
421 MemoryRegionSection *section;
422 section = address_space_translate_internal(cpu->memory_dispatch,
423 addr, xlat, plen, false);
425 assert(!section->mr->iommu_ops);
426 return section;
428 #endif
430 void cpu_exec_init_all(void)
432 #if !defined(CONFIG_USER_ONLY)
433 qemu_mutex_init(&ram_list.mutex);
434 memory_map_init();
435 io_mem_init();
436 #endif
439 #if !defined(CONFIG_USER_ONLY)
441 static int cpu_common_post_load(void *opaque, int version_id)
443 CPUState *cpu = opaque;
445 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
446 version_id is increased. */
447 cpu->interrupt_request &= ~0x01;
448 tlb_flush(cpu, 1);
450 return 0;
453 static int cpu_common_pre_load(void *opaque)
455 CPUState *cpu = opaque;
457 cpu->exception_index = -1;
459 return 0;
462 static bool cpu_common_exception_index_needed(void *opaque)
464 CPUState *cpu = opaque;
466 return tcg_enabled() && cpu->exception_index != -1;
469 static const VMStateDescription vmstate_cpu_common_exception_index = {
470 .name = "cpu_common/exception_index",
471 .version_id = 1,
472 .minimum_version_id = 1,
473 .fields = (VMStateField[]) {
474 VMSTATE_INT32(exception_index, CPUState),
475 VMSTATE_END_OF_LIST()
479 const VMStateDescription vmstate_cpu_common = {
480 .name = "cpu_common",
481 .version_id = 1,
482 .minimum_version_id = 1,
483 .pre_load = cpu_common_pre_load,
484 .post_load = cpu_common_post_load,
485 .fields = (VMStateField[]) {
486 VMSTATE_UINT32(halted, CPUState),
487 VMSTATE_UINT32(interrupt_request, CPUState),
488 VMSTATE_END_OF_LIST()
490 .subsections = (VMStateSubsection[]) {
492 .vmsd = &vmstate_cpu_common_exception_index,
493 .needed = cpu_common_exception_index_needed,
494 } , {
495 /* empty */
500 #endif
502 CPUState *qemu_get_cpu(int index)
504 CPUState *cpu;
506 CPU_FOREACH(cpu) {
507 if (cpu->cpu_index == index) {
508 return cpu;
512 return NULL;
515 #if !defined(CONFIG_USER_ONLY)
516 void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as)
518 /* We only support one address space per cpu at the moment. */
519 assert(cpu->as == as);
521 if (cpu->tcg_as_listener) {
522 memory_listener_unregister(cpu->tcg_as_listener);
523 } else {
524 cpu->tcg_as_listener = g_new0(MemoryListener, 1);
526 cpu->tcg_as_listener->commit = tcg_commit;
527 memory_listener_register(cpu->tcg_as_listener, as);
529 #endif
531 void cpu_exec_init(CPUArchState *env)
533 CPUState *cpu = ENV_GET_CPU(env);
534 CPUClass *cc = CPU_GET_CLASS(cpu);
535 CPUState *some_cpu;
536 int cpu_index;
538 #if defined(CONFIG_USER_ONLY)
539 cpu_list_lock();
540 #endif
541 cpu_index = 0;
542 CPU_FOREACH(some_cpu) {
543 cpu_index++;
545 cpu->cpu_index = cpu_index;
546 cpu->numa_node = 0;
547 QTAILQ_INIT(&cpu->breakpoints);
548 QTAILQ_INIT(&cpu->watchpoints);
549 #ifndef CONFIG_USER_ONLY
550 cpu->as = &address_space_memory;
551 cpu->thread_id = qemu_get_thread_id();
552 cpu_reload_memory_map(cpu);
553 #endif
554 QTAILQ_INSERT_TAIL(&cpus, cpu, node);
555 #if defined(CONFIG_USER_ONLY)
556 cpu_list_unlock();
557 #endif
558 if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
559 vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
561 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
562 register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
563 cpu_save, cpu_load, env);
564 assert(cc->vmsd == NULL);
565 assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
566 #endif
567 if (cc->vmsd != NULL) {
568 vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
572 #if defined(CONFIG_USER_ONLY)
573 static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
575 tb_invalidate_phys_page_range(pc, pc + 1, 0);
577 #else
578 static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
580 hwaddr phys = cpu_get_phys_page_debug(cpu, pc);
581 if (phys != -1) {
582 tb_invalidate_phys_addr(cpu->as,
583 phys | (pc & ~TARGET_PAGE_MASK));
586 #endif
588 #if defined(CONFIG_USER_ONLY)
589 void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
594 int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
595 int flags)
597 return -ENOSYS;
600 void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
604 int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
605 int flags, CPUWatchpoint **watchpoint)
607 return -ENOSYS;
609 #else
610 /* Add a watchpoint. */
611 int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
612 int flags, CPUWatchpoint **watchpoint)
614 CPUWatchpoint *wp;
616 /* forbid ranges which are empty or run off the end of the address space */
617 if (len == 0 || (addr + len - 1) < addr) {
618 error_report("tried to set invalid watchpoint at %"
619 VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
620 return -EINVAL;
622 wp = g_malloc(sizeof(*wp));
624 wp->vaddr = addr;
625 wp->len = len;
626 wp->flags = flags;
628 /* keep all GDB-injected watchpoints in front */
629 if (flags & BP_GDB) {
630 QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
631 } else {
632 QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
635 tlb_flush_page(cpu, addr);
637 if (watchpoint)
638 *watchpoint = wp;
639 return 0;
642 /* Remove a specific watchpoint. */
643 int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
644 int flags)
646 CPUWatchpoint *wp;
648 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
649 if (addr == wp->vaddr && len == wp->len
650 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
651 cpu_watchpoint_remove_by_ref(cpu, wp);
652 return 0;
655 return -ENOENT;
658 /* Remove a specific watchpoint by reference. */
659 void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
661 QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);
663 tlb_flush_page(cpu, watchpoint->vaddr);
665 g_free(watchpoint);
668 /* Remove all matching watchpoints. */
669 void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
671 CPUWatchpoint *wp, *next;
673 QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
674 if (wp->flags & mask) {
675 cpu_watchpoint_remove_by_ref(cpu, wp);
680 /* Return true if this watchpoint address matches the specified
681 * access (ie the address range covered by the watchpoint overlaps
682 * partially or completely with the address range covered by the
683 * access).
685 static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
686 vaddr addr,
687 vaddr len)
689 /* We know the lengths are non-zero, but a little caution is
690 * required to avoid errors in the case where the range ends
691 * exactly at the top of the address space and so addr + len
692 * wraps round to zero.
694 vaddr wpend = wp->vaddr + wp->len - 1;
695 vaddr addrend = addr + len - 1;
697 return !(addr > wpend || wp->vaddr > addrend);
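/* Example: a 4-byte watchpoint at 0x1000 covers [0x1000, 0x1003]; a 2-byte
 * access at 0x1002 gives addrend == 0x1003, so neither "addr > wpend" nor
 * "wp->vaddr > addrend" holds and the ranges overlap.  Working with inclusive
 * end points keeps the test correct even when wp->vaddr + wp->len or
 * addr + len would wrap to zero at the very top of the address space.
 */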
700 #endif
702 /* Add a breakpoint. */
703 int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
704 CPUBreakpoint **breakpoint)
706 CPUBreakpoint *bp;
708 bp = g_malloc(sizeof(*bp));
710 bp->pc = pc;
711 bp->flags = flags;
713 /* keep all GDB-injected breakpoints in front */
714 if (flags & BP_GDB) {
715 QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
716 } else {
717 QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
720 breakpoint_invalidate(cpu, pc);
722 if (breakpoint) {
723 *breakpoint = bp;
725 return 0;
728 /* Remove a specific breakpoint. */
729 int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
731 CPUBreakpoint *bp;
733 QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
734 if (bp->pc == pc && bp->flags == flags) {
735 cpu_breakpoint_remove_by_ref(cpu, bp);
736 return 0;
739 return -ENOENT;
742 /* Remove a specific breakpoint by reference. */
743 void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
745 QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);
747 breakpoint_invalidate(cpu, breakpoint->pc);
749 g_free(breakpoint);
752 /* Remove all matching breakpoints. */
753 void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
755 CPUBreakpoint *bp, *next;
757 QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
758 if (bp->flags & mask) {
759 cpu_breakpoint_remove_by_ref(cpu, bp);
764 /* enable or disable single step mode. EXCP_DEBUG is returned by the
765 CPU loop after each instruction */
766 void cpu_single_step(CPUState *cpu, int enabled)
768 if (cpu->singlestep_enabled != enabled) {
769 cpu->singlestep_enabled = enabled;
770 if (kvm_enabled()) {
771 kvm_update_guest_debug(cpu, 0);
772 } else {
773 /* must flush all the translated code to avoid inconsistencies */
774 /* XXX: only flush what is necessary */
775 CPUArchState *env = cpu->env_ptr;
776 tb_flush(env);
781 void cpu_abort(CPUState *cpu, const char *fmt, ...)
783 va_list ap;
784 va_list ap2;
786 va_start(ap, fmt);
787 va_copy(ap2, ap);
788 fprintf(stderr, "qemu: fatal: ");
789 vfprintf(stderr, fmt, ap);
790 fprintf(stderr, "\n");
791 cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
792 if (qemu_log_enabled()) {
793 qemu_log("qemu: fatal: ");
794 qemu_log_vprintf(fmt, ap2);
795 qemu_log("\n");
796 log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
797 qemu_log_flush();
798 qemu_log_close();
800 va_end(ap2);
801 va_end(ap);
802 #if defined(CONFIG_USER_ONLY)
804 struct sigaction act;
805 sigfillset(&act.sa_mask);
806 act.sa_handler = SIG_DFL;
807 sigaction(SIGABRT, &act, NULL);
809 #endif
810 abort();
813 #if !defined(CONFIG_USER_ONLY)
814 /* Called from RCU critical section */
815 static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
817 RAMBlock *block;
819 block = atomic_rcu_read(&ram_list.mru_block);
820 if (block && addr - block->offset < block->max_length) {
821 goto found;
823 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
824 if (addr - block->offset < block->max_length) {
825 goto found;
829 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
830 abort();
832 found:
833 /* It is safe to write mru_block outside the iothread lock. This
834 * is what happens:
836 * mru_block = xxx
837 * rcu_read_unlock()
838 * xxx removed from list
839 * rcu_read_lock()
840 * read mru_block
841 * mru_block = NULL;
842 * call_rcu(reclaim_ramblock, xxx);
843 * rcu_read_unlock()
845 * atomic_rcu_set is not needed here. The block was already published
846 * when it was placed into the list. Here we're just making an extra
847 * copy of the pointer.
849 ram_list.mru_block = block;
850 return block;
853 static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
855 ram_addr_t start1;
856 RAMBlock *block;
857 ram_addr_t end;
859 end = TARGET_PAGE_ALIGN(start + length);
860 start &= TARGET_PAGE_MASK;
862 rcu_read_lock();
863 block = qemu_get_ram_block(start);
864 assert(block == qemu_get_ram_block(end - 1));
865 start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
866 cpu_tlb_reset_dirty_all(start1, length);
867 rcu_read_unlock();
870 /* Note: start and end must be within the same ram block. */
871 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t length,
872 unsigned client)
874 if (length == 0)
875 return;
876 cpu_physical_memory_clear_dirty_range_type(start, length, client);
878 if (tcg_enabled()) {
879 tlb_reset_dirty_range_all(start, length);
883 static void cpu_physical_memory_set_dirty_tracking(bool enable)
885 in_migration = enable;
888 /* Called from RCU critical section */
889 hwaddr memory_region_section_get_iotlb(CPUState *cpu,
890 MemoryRegionSection *section,
891 target_ulong vaddr,
892 hwaddr paddr, hwaddr xlat,
893 int prot,
894 target_ulong *address)
896 hwaddr iotlb;
897 CPUWatchpoint *wp;
899 if (memory_region_is_ram(section->mr)) {
900 /* Normal RAM. */
901 iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
902 + xlat;
903 if (!section->readonly) {
904 iotlb |= PHYS_SECTION_NOTDIRTY;
905 } else {
906 iotlb |= PHYS_SECTION_ROM;
908 } else {
909 iotlb = section - section->address_space->dispatch->map.sections;
910 iotlb += xlat;
913 /* Make accesses to pages with watchpoints go via the
914 watchpoint trap routines. */
915 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
916 if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
917 /* Avoid trapping reads of pages with a write breakpoint. */
918 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
919 iotlb = PHYS_SECTION_WATCH + paddr;
920 *address |= TLB_MMIO;
921 break;
926 return iotlb;
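/* So the returned iotlb value is either a page-aligned ram_addr with one of the
 * small PHYS_SECTION_* indexes ORed into the low bits (RAM that must still go
 * through the notdirty or ROM slow path), or a section index plus offset for
 * MMIO, with PHYS_SECTION_WATCH + paddr overriding both when a watchpoint
 * covers the page.  iotlb_to_region() below undoes this encoding for the I/O
 * case.
 */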
928 #endif /* defined(CONFIG_USER_ONLY) */
930 #if !defined(CONFIG_USER_ONLY)
932 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
933 uint16_t section);
934 static subpage_t *subpage_init(AddressSpace *as, hwaddr base);
936 static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
937 qemu_anon_ram_alloc;
 940  * Set a custom physical guest memory allocator.
941 * Accelerators with unusual needs may need this. Hopefully, we can
942 * get rid of it eventually.
944 void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
946 phys_mem_alloc = alloc;
949 static uint16_t phys_section_add(PhysPageMap *map,
950 MemoryRegionSection *section)
952 /* The physical section number is ORed with a page-aligned
953 * pointer to produce the iotlb entries. Thus it should
954 * never overflow into the page-aligned value.
956 assert(map->sections_nb < TARGET_PAGE_SIZE);
958 if (map->sections_nb == map->sections_nb_alloc) {
959 map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
960 map->sections = g_renew(MemoryRegionSection, map->sections,
961 map->sections_nb_alloc);
963 map->sections[map->sections_nb] = *section;
964 memory_region_ref(section->mr);
965 return map->sections_nb++;
968 static void phys_section_destroy(MemoryRegion *mr)
970 memory_region_unref(mr);
972 if (mr->subpage) {
973 subpage_t *subpage = container_of(mr, subpage_t, iomem);
974 object_unref(OBJECT(&subpage->iomem));
975 g_free(subpage);
979 static void phys_sections_free(PhysPageMap *map)
981 while (map->sections_nb > 0) {
982 MemoryRegionSection *section = &map->sections[--map->sections_nb];
983 phys_section_destroy(section->mr);
985 g_free(map->sections);
986 g_free(map->nodes);
989 static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
991 subpage_t *subpage;
992 hwaddr base = section->offset_within_address_space
993 & TARGET_PAGE_MASK;
994 MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
995 d->map.nodes, d->map.sections);
996 MemoryRegionSection subsection = {
997 .offset_within_address_space = base,
998 .size = int128_make64(TARGET_PAGE_SIZE),
1000 hwaddr start, end;
1002 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
1004 if (!(existing->mr->subpage)) {
1005 subpage = subpage_init(d->as, base);
1006 subsection.address_space = d->as;
1007 subsection.mr = &subpage->iomem;
1008 phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
1009 phys_section_add(&d->map, &subsection));
1010 } else {
1011 subpage = container_of(existing->mr, subpage_t, iomem);
1013 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
1014 end = start + int128_get64(section->size) - 1;
1015 subpage_register(subpage, start, end,
1016 phys_section_add(&d->map, section));
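/* In short: the first sub-page-sized section on a given target page allocates
 * that page's subpage_t and installs it in the radix tree; later sections on
 * the same page only update the per-byte sub_section[] table, so a single page
 * can be shared between several MemoryRegions at byte granularity.
 */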
1020 static void register_multipage(AddressSpaceDispatch *d,
1021 MemoryRegionSection *section)
1023 hwaddr start_addr = section->offset_within_address_space;
1024 uint16_t section_index = phys_section_add(&d->map, section);
1025 uint64_t num_pages = int128_get64(int128_rshift(section->size,
1026 TARGET_PAGE_BITS));
1028 assert(num_pages);
1029 phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
1032 static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
1034 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
1035 AddressSpaceDispatch *d = as->next_dispatch;
1036 MemoryRegionSection now = *section, remain = *section;
1037 Int128 page_size = int128_make64(TARGET_PAGE_SIZE);
1039 if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
1040 uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
1041 - now.offset_within_address_space;
1043 now.size = int128_min(int128_make64(left), now.size);
1044 register_subpage(d, &now);
1045 } else {
1046 now.size = int128_zero();
1048 while (int128_ne(remain.size, now.size)) {
1049 remain.size = int128_sub(remain.size, now.size);
1050 remain.offset_within_address_space += int128_get64(now.size);
1051 remain.offset_within_region += int128_get64(now.size);
1052 now = remain;
1053 if (int128_lt(remain.size, page_size)) {
1054 register_subpage(d, &now);
1055 } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
1056 now.size = page_size;
1057 register_subpage(d, &now);
1058 } else {
1059 now.size = int128_and(now.size, int128_neg(page_size));
1060 register_multipage(d, &now);
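/* Rough example, assuming 4 KiB pages: a section covering [0x1800, 0x4800) is
 * split into a head subpage [0x1800, 0x2000), a page-aligned middle
 * [0x2000, 0x4000) registered via register_multipage(), and a tail subpage
 * [0x4000, 0x4800).  Sections that are already page-aligned and a multiple of
 * the page size in length never take the subpage path at all.
 */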
1065 void qemu_flush_coalesced_mmio_buffer(void)
1067 if (kvm_enabled())
1068 kvm_flush_coalesced_mmio_buffer();
1071 void qemu_mutex_lock_ramlist(void)
1073 qemu_mutex_lock(&ram_list.mutex);
1076 void qemu_mutex_unlock_ramlist(void)
1078 qemu_mutex_unlock(&ram_list.mutex);
1081 #ifdef __linux__
1083 #include <sys/vfs.h>
1085 #define HUGETLBFS_MAGIC 0x958458f6
1087 static long gethugepagesize(const char *path, Error **errp)
1089 struct statfs fs;
1090 int ret;
1092 do {
1093 ret = statfs(path, &fs);
1094 } while (ret != 0 && errno == EINTR);
1096 if (ret != 0) {
1097 error_setg_errno(errp, errno, "failed to get page size of file %s",
1098 path);
1099 return 0;
1102 if (fs.f_type != HUGETLBFS_MAGIC)
1103 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
1105 return fs.f_bsize;
1108 static void *file_ram_alloc(RAMBlock *block,
1109 ram_addr_t memory,
1110 const char *path,
1111 Error **errp)
1113 char *filename;
1114 char *sanitized_name;
1115 char *c;
1116 void *area = NULL;
1117 int fd;
1118 uint64_t hpagesize;
1119 Error *local_err = NULL;
1121 hpagesize = gethugepagesize(path, &local_err);
1122 if (local_err) {
1123 error_propagate(errp, local_err);
1124 goto error;
1126 block->mr->align = hpagesize;
1128 if (memory < hpagesize) {
1129 error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
1130 "or larger than huge page size 0x%" PRIx64,
1131 memory, hpagesize);
1132 goto error;
1135 if (kvm_enabled() && !kvm_has_sync_mmu()) {
1136 error_setg(errp,
1137 "host lacks kvm mmu notifiers, -mem-path unsupported");
1138 goto error;
1141 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
1142 sanitized_name = g_strdup(memory_region_name(block->mr));
1143 for (c = sanitized_name; *c != '\0'; c++) {
1144 if (*c == '/')
1145 *c = '_';
1148 filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
1149 sanitized_name);
1150 g_free(sanitized_name);
1152 fd = mkstemp(filename);
1153 if (fd < 0) {
1154 error_setg_errno(errp, errno,
1155 "unable to create backing store for hugepages");
1156 g_free(filename);
1157 goto error;
1159 unlink(filename);
1160 g_free(filename);
1162 memory = (memory+hpagesize-1) & ~(hpagesize-1);
1165 * ftruncate is not supported by hugetlbfs in older
1166 * hosts, so don't bother bailing out on errors.
1167 * If anything goes wrong with it under other filesystems,
1168 * mmap will fail.
1170 if (ftruncate(fd, memory)) {
1171 perror("ftruncate");
1174 area = mmap(0, memory, PROT_READ | PROT_WRITE,
1175 (block->flags & RAM_SHARED ? MAP_SHARED : MAP_PRIVATE),
1176 fd, 0);
1177 if (area == MAP_FAILED) {
1178 error_setg_errno(errp, errno,
1179 "unable to map backing store for hugepages");
1180 close(fd);
1181 goto error;
1184 if (mem_prealloc) {
1185 os_mem_prealloc(fd, area, memory);
1188 block->fd = fd;
1189 return area;
1191 error:
1192 if (mem_prealloc) {
1193 error_report("%s", error_get_pretty(*errp));
1194 exit(1);
1196 return NULL;
1198 #endif
1200 /* Called with the ramlist lock held. */
1201 static ram_addr_t find_ram_offset(ram_addr_t size)
1203 RAMBlock *block, *next_block;
1204 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
1206 assert(size != 0); /* it would hand out same offset multiple times */
1208 if (QLIST_EMPTY_RCU(&ram_list.blocks)) {
1209 return 0;
1212 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1213 ram_addr_t end, next = RAM_ADDR_MAX;
1215 end = block->offset + block->max_length;
1217 QLIST_FOREACH_RCU(next_block, &ram_list.blocks, next) {
1218 if (next_block->offset >= end) {
1219 next = MIN(next, next_block->offset);
1222 if (next - end >= size && next - end < mingap) {
1223 offset = end;
1224 mingap = next - end;
1228 if (offset == RAM_ADDR_MAX) {
1229 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
1230 (uint64_t)size);
1231 abort();
1234 return offset;
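/* Best-fit illustration: with blocks at [0, 0x100000) and [0x400000, 0x500000),
 * a request for 0x80000 bytes sees the gaps [0x100000, 0x400000) and
 * [0x500000, RAM_ADDR_MAX); the first is the smallest gap that still fits, so
 * the new block lands at offset 0x100000.
 */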
1237 ram_addr_t last_ram_offset(void)
1239 RAMBlock *block;
1240 ram_addr_t last = 0;
1242 rcu_read_lock();
1243 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1244 last = MAX(last, block->offset + block->max_length);
1246 rcu_read_unlock();
1247 return last;
1250 static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1252 int ret;
1254 /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
1255 if (!machine_dump_guest_core(current_machine)) {
1256 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1257 if (ret) {
1258 perror("qemu_madvise");
1259 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1260 "but dump_guest_core=off specified\n");
1265 /* Called within an RCU critical section, or while the ramlist lock
1266 * is held.
1268 static RAMBlock *find_ram_block(ram_addr_t addr)
1270 RAMBlock *block;
1272 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1273 if (block->offset == addr) {
1274 return block;
1278 return NULL;
1281 /* Called with iothread lock held. */
1282 void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
1284 RAMBlock *new_block, *block;
1286 rcu_read_lock();
1287 new_block = find_ram_block(addr);
1288 assert(new_block);
1289 assert(!new_block->idstr[0]);
1291 if (dev) {
1292 char *id = qdev_get_dev_path(dev);
1293 if (id) {
1294 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
1295 g_free(id);
1298 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1300 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1301 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
1302 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1303 new_block->idstr);
1304 abort();
1307 rcu_read_unlock();
1310 /* Called with iothread lock held. */
1311 void qemu_ram_unset_idstr(ram_addr_t addr)
1313 RAMBlock *block;
1315 /* FIXME: arch_init.c assumes that this is not called throughout
1316 * migration. Ignore the problem since hot-unplug during migration
1317 * does not work anyway.
1320 rcu_read_lock();
1321 block = find_ram_block(addr);
1322 if (block) {
1323 memset(block->idstr, 0, sizeof(block->idstr));
1325 rcu_read_unlock();
1328 static int memory_try_enable_merging(void *addr, size_t len)
1330 if (!machine_mem_merge(current_machine)) {
1331 /* disabled by the user */
1332 return 0;
1335 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1338 /* Only legal before guest might have detected the memory size: e.g. on
1339 * incoming migration, or right after reset.
1341 * As memory core doesn't know how is memory accessed, it is up to
1342 * resize callback to update device state and/or add assertions to detect
1343 * misuse, if necessary.
1345 int qemu_ram_resize(ram_addr_t base, ram_addr_t newsize, Error **errp)
1347 RAMBlock *block = find_ram_block(base);
1349 assert(block);
1351 newsize = TARGET_PAGE_ALIGN(newsize);
1353 if (block->used_length == newsize) {
1354 return 0;
1357 if (!(block->flags & RAM_RESIZEABLE)) {
1358 error_setg_errno(errp, EINVAL,
1359 "Length mismatch: %s: 0x" RAM_ADDR_FMT
1360 " in != 0x" RAM_ADDR_FMT, block->idstr,
1361 newsize, block->used_length);
1362 return -EINVAL;
1365 if (block->max_length < newsize) {
1366 error_setg_errno(errp, EINVAL,
1367 "Length too large: %s: 0x" RAM_ADDR_FMT
1368 " > 0x" RAM_ADDR_FMT, block->idstr,
1369 newsize, block->max_length);
1370 return -EINVAL;
1373 cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
1374 block->used_length = newsize;
1375 cpu_physical_memory_set_dirty_range(block->offset, block->used_length);
1376 memory_region_set_size(block->mr, newsize);
1377 if (block->resized) {
1378 block->resized(block->idstr, newsize, block->host);
1380 return 0;
1383 static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp)
1385 RAMBlock *block;
1386 RAMBlock *last_block = NULL;
1387 ram_addr_t old_ram_size, new_ram_size;
1389 old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
1391 qemu_mutex_lock_ramlist();
1392 new_block->offset = find_ram_offset(new_block->max_length);
1394 if (!new_block->host) {
1395 if (xen_enabled()) {
1396 xen_ram_alloc(new_block->offset, new_block->max_length,
1397 new_block->mr);
1398 } else {
1399 new_block->host = phys_mem_alloc(new_block->max_length,
1400 &new_block->mr->align);
1401 if (!new_block->host) {
1402 error_setg_errno(errp, errno,
1403 "cannot set up guest memory '%s'",
1404 memory_region_name(new_block->mr));
1405 qemu_mutex_unlock_ramlist();
1406 return -1;
1408 memory_try_enable_merging(new_block->host, new_block->max_length);
1412 /* Keep the list sorted from biggest to smallest block. Unlike QTAILQ,
1413 * QLIST (which has an RCU-friendly variant) does not have insertion at
1414 * tail, so save the last element in last_block.
1416 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1417 last_block = block;
1418 if (block->max_length < new_block->max_length) {
1419 break;
1422 if (block) {
1423 QLIST_INSERT_BEFORE_RCU(block, new_block, next);
1424 } else if (last_block) {
1425 QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
1426 } else { /* list is empty */
1427 QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
1429 ram_list.mru_block = NULL;
1431 /* Write list before version */
1432 smp_wmb();
1433 ram_list.version++;
1434 qemu_mutex_unlock_ramlist();
1436 new_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
1438 if (new_ram_size > old_ram_size) {
1439 int i;
1441 /* ram_list.dirty_memory[] is protected by the iothread lock. */
1442 for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
1443 ram_list.dirty_memory[i] =
1444 bitmap_zero_extend(ram_list.dirty_memory[i],
1445 old_ram_size, new_ram_size);
1448 cpu_physical_memory_set_dirty_range(new_block->offset,
1449 new_block->used_length);
1451 if (new_block->host) {
1452 qemu_ram_setup_dump(new_block->host, new_block->max_length);
1453 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
1454 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
1455 if (kvm_enabled()) {
1456 kvm_setup_guest_memory(new_block->host, new_block->max_length);
1460 return new_block->offset;
1463 #ifdef __linux__
1464 ram_addr_t qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
1465 bool share, const char *mem_path,
1466 Error **errp)
1468 RAMBlock *new_block;
1469 ram_addr_t addr;
1470 Error *local_err = NULL;
1472 if (xen_enabled()) {
1473 error_setg(errp, "-mem-path not supported with Xen");
1474 return -1;
1477 if (phys_mem_alloc != qemu_anon_ram_alloc) {
1479 * file_ram_alloc() needs to allocate just like
1480 * phys_mem_alloc, but we haven't bothered to provide
1481 * a hook there.
1483 error_setg(errp,
1484 "-mem-path not supported with this accelerator");
1485 return -1;
1488 size = TARGET_PAGE_ALIGN(size);
1489 new_block = g_malloc0(sizeof(*new_block));
1490 new_block->mr = mr;
1491 new_block->used_length = size;
1492 new_block->max_length = size;
1493 new_block->flags = share ? RAM_SHARED : 0;
1494 new_block->host = file_ram_alloc(new_block, size,
1495 mem_path, errp);
1496 if (!new_block->host) {
1497 g_free(new_block);
1498 return -1;
1501 addr = ram_block_add(new_block, &local_err);
1502 if (local_err) {
1503 g_free(new_block);
1504 error_propagate(errp, local_err);
1505 return -1;
1507 return addr;
1509 #endif
1511 static
1512 ram_addr_t qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
1513 void (*resized)(const char*,
1514 uint64_t length,
1515 void *host),
1516 void *host, bool resizeable,
1517 MemoryRegion *mr, Error **errp)
1519 RAMBlock *new_block;
1520 ram_addr_t addr;
1521 Error *local_err = NULL;
1523 size = TARGET_PAGE_ALIGN(size);
1524 max_size = TARGET_PAGE_ALIGN(max_size);
1525 new_block = g_malloc0(sizeof(*new_block));
1526 new_block->mr = mr;
1527 new_block->resized = resized;
1528 new_block->used_length = size;
1529 new_block->max_length = max_size;
1530 assert(max_size >= size);
1531 new_block->fd = -1;
1532 new_block->host = host;
1533 if (host) {
1534 new_block->flags |= RAM_PREALLOC;
1536 if (resizeable) {
1537 new_block->flags |= RAM_RESIZEABLE;
1539 addr = ram_block_add(new_block, &local_err);
1540 if (local_err) {
1541 g_free(new_block);
1542 error_propagate(errp, local_err);
1543 return -1;
1545 return addr;
1548 ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
1549 MemoryRegion *mr, Error **errp)
1551 return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
1554 ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
1556 return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
1559 ram_addr_t qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
1560 void (*resized)(const char*,
1561 uint64_t length,
1562 void *host),
1563 MemoryRegion *mr, Error **errp)
1565 return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
1568 void qemu_ram_free_from_ptr(ram_addr_t addr)
1570 RAMBlock *block;
1572 qemu_mutex_lock_ramlist();
1573 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1574 if (addr == block->offset) {
1575 QLIST_REMOVE_RCU(block, next);
1576 ram_list.mru_block = NULL;
1577 /* Write list before version */
1578 smp_wmb();
1579 ram_list.version++;
1580 g_free_rcu(block, rcu);
1581 break;
1584 qemu_mutex_unlock_ramlist();
1587 static void reclaim_ramblock(RAMBlock *block)
1589 if (block->flags & RAM_PREALLOC) {
1591 } else if (xen_enabled()) {
1592 xen_invalidate_map_cache_entry(block->host);
1593 #ifndef _WIN32
1594 } else if (block->fd >= 0) {
1595 munmap(block->host, block->max_length);
1596 close(block->fd);
1597 #endif
1598 } else {
1599 qemu_anon_ram_free(block->host, block->max_length);
1601 g_free(block);
1604 void qemu_ram_free(ram_addr_t addr)
1606 RAMBlock *block;
1608 qemu_mutex_lock_ramlist();
1609 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1610 if (addr == block->offset) {
1611 QLIST_REMOVE_RCU(block, next);
1612 ram_list.mru_block = NULL;
1613 /* Write list before version */
1614 smp_wmb();
1615 ram_list.version++;
1616 call_rcu(block, reclaim_ramblock, rcu);
1617 break;
1620 qemu_mutex_unlock_ramlist();
1623 #ifndef _WIN32
1624 void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1626 RAMBlock *block;
1627 ram_addr_t offset;
1628 int flags;
1629 void *area, *vaddr;
1631 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1632 offset = addr - block->offset;
1633 if (offset < block->max_length) {
1634 vaddr = ramblock_ptr(block, offset);
1635 if (block->flags & RAM_PREALLOC) {
1637 } else if (xen_enabled()) {
1638 abort();
1639 } else {
1640 flags = MAP_FIXED;
1641 if (block->fd >= 0) {
1642 flags |= (block->flags & RAM_SHARED ?
1643 MAP_SHARED : MAP_PRIVATE);
1644 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1645 flags, block->fd, offset);
1646 } else {
1648 * Remap needs to match alloc. Accelerators that
1649 * set phys_mem_alloc never remap. If they did,
1650 * we'd need a remap hook here.
1652 assert(phys_mem_alloc == qemu_anon_ram_alloc);
1654 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1655 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1656 flags, -1, 0);
1658 if (area != vaddr) {
1659 fprintf(stderr, "Could not remap addr: "
1660 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
1661 length, addr);
1662 exit(1);
1664 memory_try_enable_merging(vaddr, length);
1665 qemu_ram_setup_dump(vaddr, length);
1670 #endif /* !_WIN32 */
1672 int qemu_get_ram_fd(ram_addr_t addr)
1674 RAMBlock *block;
1675 int fd;
1677 rcu_read_lock();
1678 block = qemu_get_ram_block(addr);
1679 fd = block->fd;
1680 rcu_read_unlock();
1681 return fd;
1684 void *qemu_get_ram_block_host_ptr(ram_addr_t addr)
1686 RAMBlock *block;
1687 void *ptr;
1689 rcu_read_lock();
1690 block = qemu_get_ram_block(addr);
1691 ptr = ramblock_ptr(block, 0);
1692 rcu_read_unlock();
1693 return ptr;
1696 /* Return a host pointer to ram allocated with qemu_ram_alloc.
1697 * This should not be used for general purpose DMA. Use address_space_map
1698 * or address_space_rw instead. For local memory (e.g. video ram) that the
1699 * device owns, use memory_region_get_ram_ptr.
1701 * By the time this function returns, the returned pointer is not protected
1702 * by RCU anymore. If the caller is not within an RCU critical section and
1703 * does not hold the iothread lock, it must have other means of protecting the
1704 * pointer, such as a reference to the region that includes the incoming
1705 * ram_addr_t.
1707 void *qemu_get_ram_ptr(ram_addr_t addr)
1709 RAMBlock *block;
1710 void *ptr;
1712 rcu_read_lock();
1713 block = qemu_get_ram_block(addr);
1715 if (xen_enabled() && block->host == NULL) {
1716 /* We need to check if the requested address is in the RAM
1717 * because we don't want to map the entire memory in QEMU.
1718 * In that case just map until the end of the page.
1720 if (block->offset == 0) {
1721 ptr = xen_map_cache(addr, 0, 0);
1722 goto unlock;
1725 block->host = xen_map_cache(block->offset, block->max_length, 1);
1727 ptr = ramblock_ptr(block, addr - block->offset);
1729 unlock:
1730 rcu_read_unlock();
1731 return ptr;
1734 /* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
1735 * but takes a size argument.
1737 * By the time this function returns, the returned pointer is not protected
1738 * by RCU anymore. If the caller is not within an RCU critical section and
1739 * does not hold the iothread lock, it must have other means of protecting the
1740 * pointer, such as a reference to the region that includes the incoming
1741 * ram_addr_t.
1743 static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
1745 void *ptr;
1746 if (*size == 0) {
1747 return NULL;
1749 if (xen_enabled()) {
1750 return xen_map_cache(addr, *size, 1);
1751 } else {
1752 RAMBlock *block;
1753 rcu_read_lock();
1754 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1755 if (addr - block->offset < block->max_length) {
1756 if (addr - block->offset + *size > block->max_length)
1757 *size = block->max_length - addr + block->offset;
1758 ptr = ramblock_ptr(block, addr - block->offset);
1759 rcu_read_unlock();
1760 return ptr;
1764 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1765 abort();
1769 /* Some of the softmmu routines need to translate from a host pointer
1770 * (typically a TLB entry) back to a ram offset.
1772 * By the time this function returns, the returned pointer is not protected
1773 * by RCU anymore. If the caller is not within an RCU critical section and
1774 * does not hold the iothread lock, it must have other means of protecting the
1775 * pointer, such as a reference to the region that includes the incoming
1776 * ram_addr_t.
1778 MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
1780 RAMBlock *block;
1781 uint8_t *host = ptr;
1782 MemoryRegion *mr;
1784 if (xen_enabled()) {
1785 rcu_read_lock();
1786 *ram_addr = xen_ram_addr_from_mapcache(ptr);
1787 mr = qemu_get_ram_block(*ram_addr)->mr;
1788 rcu_read_unlock();
1789 return mr;
1792 rcu_read_lock();
1793 block = atomic_rcu_read(&ram_list.mru_block);
1794 if (block && block->host && host - block->host < block->max_length) {
1795 goto found;
1798 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1799         /* This case happens when the block is not mapped. */
1800 if (block->host == NULL) {
1801 continue;
1803 if (host - block->host < block->max_length) {
1804 goto found;
1808 rcu_read_unlock();
1809 return NULL;
1811 found:
1812 *ram_addr = block->offset + (host - block->host);
1813 mr = block->mr;
1814 rcu_read_unlock();
1815 return mr;
1818 static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
1819 uint64_t val, unsigned size)
1821 if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
1822 tb_invalidate_phys_page_fast(ram_addr, size);
1824 switch (size) {
1825 case 1:
1826 stb_p(qemu_get_ram_ptr(ram_addr), val);
1827 break;
1828 case 2:
1829 stw_p(qemu_get_ram_ptr(ram_addr), val);
1830 break;
1831 case 4:
1832 stl_p(qemu_get_ram_ptr(ram_addr), val);
1833 break;
1834 default:
1835 abort();
1837 cpu_physical_memory_set_dirty_range_nocode(ram_addr, size);
1838 /* we remove the notdirty callback only if the code has been
1839 flushed */
1840 if (!cpu_physical_memory_is_clean(ram_addr)) {
1841 CPUArchState *env = current_cpu->env_ptr;
1842 tlb_set_dirty(env, current_cpu->mem_io_vaddr);
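/* Once the page stops being "clean" (no translated code left that depends on
 * it), tlb_set_dirty() switches the virtual page back to a plain RAM mapping,
 * so only the first write after code was translated from this page pays for
 * the trip through notdirty_mem_write().
 */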
1846 static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
1847 unsigned size, bool is_write)
1849 return is_write;
1852 static const MemoryRegionOps notdirty_mem_ops = {
1853 .write = notdirty_mem_write,
1854 .valid.accepts = notdirty_mem_accepts,
1855 .endianness = DEVICE_NATIVE_ENDIAN,
1858 /* Generate a debug exception if a watchpoint has been hit. */
1859 static void check_watchpoint(int offset, int len, int flags)
1861 CPUState *cpu = current_cpu;
1862 CPUArchState *env = cpu->env_ptr;
1863 target_ulong pc, cs_base;
1864 target_ulong vaddr;
1865 CPUWatchpoint *wp;
1866 int cpu_flags;
1868 if (cpu->watchpoint_hit) {
1869         /* We re-entered the check after replacing the TB. Now raise
1870          * the debug interrupt so that it will trigger after the
1871          * current instruction. */
1872 cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
1873 return;
1875 vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
1876 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
1877 if (cpu_watchpoint_address_matches(wp, vaddr, len)
1878 && (wp->flags & flags)) {
1879 if (flags == BP_MEM_READ) {
1880 wp->flags |= BP_WATCHPOINT_HIT_READ;
1881 } else {
1882 wp->flags |= BP_WATCHPOINT_HIT_WRITE;
1884 wp->hitaddr = vaddr;
1885 if (!cpu->watchpoint_hit) {
1886 cpu->watchpoint_hit = wp;
1887 tb_check_watchpoint(cpu);
1888 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
1889 cpu->exception_index = EXCP_DEBUG;
1890 cpu_loop_exit(cpu);
1891 } else {
1892 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
1893 tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
1894 cpu_resume_from_signal(cpu, NULL);
1897 } else {
1898 wp->flags &= ~BP_WATCHPOINT_HIT;
1903 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
1904 so these check for a hit then pass through to the normal out-of-line
1905 phys routines. */
1906 static uint64_t watch_mem_read(void *opaque, hwaddr addr,
1907 unsigned size)
1909 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, BP_MEM_READ);
1910 switch (size) {
1911 case 1: return ldub_phys(&address_space_memory, addr);
1912 case 2: return lduw_phys(&address_space_memory, addr);
1913 case 4: return ldl_phys(&address_space_memory, addr);
1914 default: abort();
1918 static void watch_mem_write(void *opaque, hwaddr addr,
1919 uint64_t val, unsigned size)
1921 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, BP_MEM_WRITE);
1922 switch (size) {
1923 case 1:
1924 stb_phys(&address_space_memory, addr, val);
1925 break;
1926 case 2:
1927 stw_phys(&address_space_memory, addr, val);
1928 break;
1929 case 4:
1930 stl_phys(&address_space_memory, addr, val);
1931 break;
1932 default: abort();
1936 static const MemoryRegionOps watch_mem_ops = {
1937 .read = watch_mem_read,
1938 .write = watch_mem_write,
1939 .endianness = DEVICE_NATIVE_ENDIAN,
1942 static uint64_t subpage_read(void *opaque, hwaddr addr,
1943 unsigned len)
1945 subpage_t *subpage = opaque;
1946 uint8_t buf[8];
1948 #if defined(DEBUG_SUBPAGE)
1949 printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
1950 subpage, len, addr);
1951 #endif
1952 address_space_read(subpage->as, addr + subpage->base, buf, len);
1953 switch (len) {
1954 case 1:
1955 return ldub_p(buf);
1956 case 2:
1957 return lduw_p(buf);
1958 case 4:
1959 return ldl_p(buf);
1960 case 8:
1961 return ldq_p(buf);
1962 default:
1963 abort();
1967 static void subpage_write(void *opaque, hwaddr addr,
1968 uint64_t value, unsigned len)
1970 subpage_t *subpage = opaque;
1971 uint8_t buf[8];
1973 #if defined(DEBUG_SUBPAGE)
1974 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
1975 " value %"PRIx64"\n",
1976 __func__, subpage, len, addr, value);
1977 #endif
1978 switch (len) {
1979 case 1:
1980 stb_p(buf, value);
1981 break;
1982 case 2:
1983 stw_p(buf, value);
1984 break;
1985 case 4:
1986 stl_p(buf, value);
1987 break;
1988 case 8:
1989 stq_p(buf, value);
1990 break;
1991 default:
1992 abort();
1994 address_space_write(subpage->as, addr + subpage->base, buf, len);
1997 static bool subpage_accepts(void *opaque, hwaddr addr,
1998 unsigned len, bool is_write)
2000 subpage_t *subpage = opaque;
2001 #if defined(DEBUG_SUBPAGE)
2002 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
2003 __func__, subpage, is_write ? 'w' : 'r', len, addr);
2004 #endif
2006 return address_space_access_valid(subpage->as, addr + subpage->base,
2007 len, is_write);
2010 static const MemoryRegionOps subpage_ops = {
2011 .read = subpage_read,
2012 .write = subpage_write,
2013 .impl.min_access_size = 1,
2014 .impl.max_access_size = 8,
2015 .valid.min_access_size = 1,
2016 .valid.max_access_size = 8,
2017 .valid.accepts = subpage_accepts,
2018 .endianness = DEVICE_NATIVE_ENDIAN,
2021 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2022 uint16_t section)
2024 int idx, eidx;
2026 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2027 return -1;
2028 idx = SUBPAGE_IDX(start);
2029 eidx = SUBPAGE_IDX(end);
2030 #if defined(DEBUG_SUBPAGE)
2031 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
2032 __func__, mmio, start, end, idx, eidx, section);
2033 #endif
2034 for (; idx <= eidx; idx++) {
2035 mmio->sub_section[idx] = section;
2038 return 0;
2041 static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
2043 subpage_t *mmio;
2045 mmio = g_malloc0(sizeof(subpage_t));
2047 mmio->as = as;
2048 mmio->base = base;
2049 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
2050 NULL, TARGET_PAGE_SIZE);
2051 mmio->iomem.subpage = true;
2052 #if defined(DEBUG_SUBPAGE)
2053 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
2054 mmio, base, TARGET_PAGE_SIZE);
2055 #endif
2056 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
2058 return mmio;
2061 static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
2062 MemoryRegion *mr)
2064 assert(as);
2065 MemoryRegionSection section = {
2066 .address_space = as,
2067 .mr = mr,
2068 .offset_within_address_space = 0,
2069 .offset_within_region = 0,
2070 .size = int128_2_64(),
2073 return phys_section_add(map, &section);
2076 MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index)
2078 AddressSpaceDispatch *d = atomic_rcu_read(&cpu->memory_dispatch);
2079 MemoryRegionSection *sections = d->map.sections;
2081 return sections[index & ~TARGET_PAGE_MASK].mr;
2084 static void io_mem_init(void)
2086 memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
2087 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
2088 NULL, UINT64_MAX);
2089 memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
2090 NULL, UINT64_MAX);
2091 memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
2092 NULL, UINT64_MAX);
2095 static void mem_begin(MemoryListener *listener)
2097 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
2098 AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
2099 uint16_t n;
2101 n = dummy_section(&d->map, as, &io_mem_unassigned);
2102 assert(n == PHYS_SECTION_UNASSIGNED);
2103 n = dummy_section(&d->map, as, &io_mem_notdirty);
2104 assert(n == PHYS_SECTION_NOTDIRTY);
2105 n = dummy_section(&d->map, as, &io_mem_rom);
2106 assert(n == PHYS_SECTION_ROM);
2107 n = dummy_section(&d->map, as, &io_mem_watch);
2108 assert(n == PHYS_SECTION_WATCH);
2110 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
2111 d->as = as;
2112 as->next_dispatch = d;
2115 static void address_space_dispatch_free(AddressSpaceDispatch *d)
2117 phys_sections_free(&d->map);
2118 g_free(d);
2121 static void mem_commit(MemoryListener *listener)
2123 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
2124 AddressSpaceDispatch *cur = as->dispatch;
2125 AddressSpaceDispatch *next = as->next_dispatch;
2127 phys_page_compact_all(next, next->map.nodes_nb);
2129 atomic_rcu_set(&as->dispatch, next);
2130 if (cur) {
2131 call_rcu(cur, address_space_dispatch_free, rcu);
2135 static void tcg_commit(MemoryListener *listener)
2137 CPUState *cpu;
2139 /* since each CPU stores ram addresses in its TLB cache, we must
2140 reset the modified entries */
2141 /* XXX: slow ! */
2142 CPU_FOREACH(cpu) {
2143 /* FIXME: Disentangle the cpu.h circular files deps so we can
2144 directly get the right CPU from listener. */
2145 if (cpu->tcg_as_listener != listener) {
2146 continue;
2148 cpu_reload_memory_map(cpu);
2152 static void core_log_global_start(MemoryListener *listener)
2154 cpu_physical_memory_set_dirty_tracking(true);
2157 static void core_log_global_stop(MemoryListener *listener)
2159 cpu_physical_memory_set_dirty_tracking(false);
2162 static MemoryListener core_memory_listener = {
2163 .log_global_start = core_log_global_start,
2164 .log_global_stop = core_log_global_stop,
2165 .priority = 1,
2168 void address_space_init_dispatch(AddressSpace *as)
2170 as->dispatch = NULL;
2171 as->dispatch_listener = (MemoryListener) {
2172 .begin = mem_begin,
2173 .commit = mem_commit,
2174 .region_add = mem_add,
2175 .region_nop = mem_add,
2176 .priority = 0,
2178 memory_listener_register(&as->dispatch_listener, as);
2181 void address_space_unregister(AddressSpace *as)
2183 memory_listener_unregister(&as->dispatch_listener);
2186 void address_space_destroy_dispatch(AddressSpace *as)
2188 AddressSpaceDispatch *d = as->dispatch;
2190 atomic_rcu_set(&as->dispatch, NULL);
2191 if (d) {
2192 call_rcu(d, address_space_dispatch_free, rcu);
2196 static void memory_map_init(void)
2198 system_memory = g_malloc(sizeof(*system_memory));
2200 memory_region_init(system_memory, NULL, "system", UINT64_MAX);
2201 address_space_init(&address_space_memory, system_memory, "memory");
2203 system_io = g_malloc(sizeof(*system_io));
2204 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
2205 65536);
2206 address_space_init(&address_space_io, system_io, "I/O");
2208 memory_listener_register(&core_memory_listener, &address_space_memory);
2211 MemoryRegion *get_system_memory(void)
2213 return system_memory;
2216 MemoryRegion *get_system_io(void)
2218 return system_io;
2221 #endif /* !defined(CONFIG_USER_ONLY) */
2223 /* physical memory access (slow version, mainly for debug) */
2224 #if defined(CONFIG_USER_ONLY)
2225 int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
2226 uint8_t *buf, int len, int is_write)
2228 int l, flags;
2229 target_ulong page;
2230 void *p;
2232 while (len > 0) {
2233 page = addr & TARGET_PAGE_MASK;
2234 l = (page + TARGET_PAGE_SIZE) - addr;
2235 if (l > len)
2236 l = len;
2237 flags = page_get_flags(page);
2238 if (!(flags & PAGE_VALID))
2239 return -1;
2240 if (is_write) {
2241 if (!(flags & PAGE_WRITE))
2242 return -1;
2243 /* XXX: this code should not depend on lock_user */
2244 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
2245 return -1;
2246 memcpy(p, buf, l);
2247 unlock_user(p, addr, l);
2248 } else {
2249 if (!(flags & PAGE_READ))
2250 return -1;
2251 /* XXX: this code should not depend on lock_user */
2252 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
2253 return -1;
2254 memcpy(buf, p, l);
2255 unlock_user(p, addr, 0);
2257 len -= l;
2258 buf += l;
2259 addr += l;
2261 return 0;
2264 #else
2266 static void invalidate_and_set_dirty(hwaddr addr,
2267 hwaddr length)
2269 if (cpu_physical_memory_range_includes_clean(addr, length)) {
2270 tb_invalidate_phys_range(addr, addr + length, 0);
2271 cpu_physical_memory_set_dirty_range_nocode(addr, length);
2273 xen_modified_memory(addr, length);
2276 static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
2278 unsigned access_size_max = mr->ops->valid.max_access_size;
2280 /* Regions are assumed to support 1-4 byte accesses unless
2281 otherwise specified. */
2282 if (access_size_max == 0) {
2283 access_size_max = 4;
2286 /* Bound the maximum access by the alignment of the address. */
2287 if (!mr->ops->impl.unaligned) {
2288 unsigned align_size_max = addr & -addr;
2289 if (align_size_max != 0 && align_size_max < access_size_max) {
2290 access_size_max = align_size_max;
2294 /* Don't attempt accesses larger than the maximum. */
2295 if (l > access_size_max) {
2296 l = access_size_max;
2298 if (l & (l - 1)) {
2299 l = 1 << (qemu_fls(l) - 1);
2302 return l;
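/* A worked example of the clamping above, under assumed values that are not
 * taken from this file: for a region declaring valid.max_access_size = 4 and
 * no unaligned support, an 8-byte request at addr = 0x1006 has its maximum
 * reduced to the alignment bound (0x1006 & -0x1006 == 2), the length is
 * clamped to that, and 2 is already a power of two, so a 2-byte access is
 * issued and the caller's loop retries the remainder.  The helper below is a
 * hypothetical sketch, not part of the original file.
 */
static void example_access_size_check(MemoryRegion *mr)
{
    unsigned l = memory_access_size(mr, 8, 0x1006);

    /* always a power of two, and never larger than the request */
    assert(l != 0 && (l & (l - 1)) == 0 && l <= 8);
}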
2305 bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
2306 int len, bool is_write)
2308 hwaddr l;
2309 uint8_t *ptr;
2310 uint64_t val;
2311 hwaddr addr1;
2312 MemoryRegion *mr;
2313 bool error = false;
2315 while (len > 0) {
2316 l = len;
2317 mr = address_space_translate(as, addr, &addr1, &l, is_write);
2319 if (is_write) {
2320 if (!memory_access_is_direct(mr, is_write)) {
2321 l = memory_access_size(mr, l, addr1);
2322 /* XXX: could force current_cpu to NULL to avoid
2323 potential bugs */
2324 switch (l) {
2325 case 8:
2326 /* 64 bit write access */
2327 val = ldq_p(buf);
2328 error |= io_mem_write(mr, addr1, val, 8);
2329 break;
2330 case 4:
2331 /* 32 bit write access */
2332 val = ldl_p(buf);
2333 error |= io_mem_write(mr, addr1, val, 4);
2334 break;
2335 case 2:
2336 /* 16 bit write access */
2337 val = lduw_p(buf);
2338 error |= io_mem_write(mr, addr1, val, 2);
2339 break;
2340 case 1:
2341 /* 8 bit write access */
2342 val = ldub_p(buf);
2343 error |= io_mem_write(mr, addr1, val, 1);
2344 break;
2345 default:
2346 abort();
2348 } else {
2349 addr1 += memory_region_get_ram_addr(mr);
2350 /* RAM case */
2351 ptr = qemu_get_ram_ptr(addr1);
2352 memcpy(ptr, buf, l);
2353 invalidate_and_set_dirty(addr1, l);
2355 } else {
2356 if (!memory_access_is_direct(mr, is_write)) {
2357 /* I/O case */
2358 l = memory_access_size(mr, l, addr1);
2359 switch (l) {
2360 case 8:
2361 /* 64 bit read access */
2362 error |= io_mem_read(mr, addr1, &val, 8);
2363 stq_p(buf, val);
2364 break;
2365 case 4:
2366 /* 32 bit read access */
2367 error |= io_mem_read(mr, addr1, &val, 4);
2368 stl_p(buf, val);
2369 break;
2370 case 2:
2371 /* 16 bit read access */
2372 error |= io_mem_read(mr, addr1, &val, 2);
2373 stw_p(buf, val);
2374 break;
2375 case 1:
2376 /* 8 bit read access */
2377 error |= io_mem_read(mr, addr1, &val, 1);
2378 stb_p(buf, val);
2379 break;
2380 default:
2381 abort();
2383 } else {
2384 /* RAM case */
2385 ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
2386 memcpy(buf, ptr, l);
2389 len -= l;
2390 buf += l;
2391 addr += l;
2394 return error;
2397 bool address_space_write(AddressSpace *as, hwaddr addr,
2398 const uint8_t *buf, int len)
2400 return address_space_rw(as, addr, (uint8_t *)buf, len, true);
2403 bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
2405 return address_space_rw(as, addr, buf, len, false);
2409 void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
2410 int len, int is_write)
2412 address_space_rw(&address_space_memory, addr, buf, len, is_write);
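/* A minimal usage sketch (not part of the original file): copying a few
 * guest-physical bytes through the accessors above.  The address 0x1000,
 * the buffer contents and the helper name are illustrative assumptions.
 */
static void example_phys_copy(void)
{
    uint8_t buf[4] = { 0xde, 0xad, 0xbe, 0xef };

    /* write four bytes to guest-physical address 0x1000 ... */
    address_space_rw(&address_space_memory, 0x1000, buf, sizeof(buf), true);

    /* ... and read them back; the wrapper above defaults to
     * address_space_memory, so this is equivalent to an
     * address_space_rw() call with is_write == false. */
    cpu_physical_memory_rw(0x1000, buf, sizeof(buf), 0);
}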
2415 enum write_rom_type {
2416 WRITE_DATA,
2417 FLUSH_CACHE,
2420 static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
2421 hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
2423 hwaddr l;
2424 uint8_t *ptr;
2425 hwaddr addr1;
2426 MemoryRegion *mr;
2428 while (len > 0) {
2429 l = len;
2430 mr = address_space_translate(as, addr, &addr1, &l, true);
2432 if (!(memory_region_is_ram(mr) ||
2433 memory_region_is_romd(mr))) {
2434 /* do nothing */
2435 } else {
2436 addr1 += memory_region_get_ram_addr(mr);
2437 /* ROM/RAM case */
2438 ptr = qemu_get_ram_ptr(addr1);
2439 switch (type) {
2440 case WRITE_DATA:
2441 memcpy(ptr, buf, l);
2442 invalidate_and_set_dirty(addr1, l);
2443 break;
2444 case FLUSH_CACHE:
2445 flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
2446 break;
2449 len -= l;
2450 buf += l;
2451 addr += l;
2455 /* used for ROM loading: can write to both RAM and ROM */
2456 void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
2457 const uint8_t *buf, int len)
2459 cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
2462 void cpu_flush_icache_range(hwaddr start, int len)
2465 * This function should do the same thing as an icache flush triggered
2466 * from within the guest. For TCG we are always cache coherent, so there
2467 * is no need to flush anything. For KVM/Xen we must at least flush the
2468 * host's instruction cache.
2470 if (tcg_enabled()) {
2471 return;
2474 cpu_physical_memory_write_rom_internal(&address_space_memory,
2475 start, NULL, len, FLUSH_CACHE);
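/* A minimal sketch of the intended loader pattern (hypothetical helper, not
 * part of the original file): write the image with
 * cpu_physical_memory_write_rom() and then flush the host instruction cache,
 * which is a no-op under TCG but required for KVM/Xen as explained above.
 */
static void example_load_blob(AddressSpace *as, hwaddr dest,
                              const uint8_t *blob, int size)
{
    cpu_physical_memory_write_rom(as, dest, blob, size);
    cpu_flush_icache_range(dest, size);
}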
2478 typedef struct {
2479 MemoryRegion *mr;
2480 void *buffer;
2481 hwaddr addr;
2482 hwaddr len;
2483 } BounceBuffer;
2485 static BounceBuffer bounce;
2487 typedef struct MapClient {
2488 void *opaque;
2489 void (*callback)(void *opaque);
2490 QLIST_ENTRY(MapClient) link;
2491 } MapClient;
2493 static QLIST_HEAD(map_client_list, MapClient) map_client_list
2494 = QLIST_HEAD_INITIALIZER(map_client_list);
2496 void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
2498 MapClient *client = g_malloc(sizeof(*client));
2500 client->opaque = opaque;
2501 client->callback = callback;
2502 QLIST_INSERT_HEAD(&map_client_list, client, link);
2503 return client;
2506 static void cpu_unregister_map_client(void *_client)
2508 MapClient *client = (MapClient *)_client;
2510 QLIST_REMOVE(client, link);
2511 g_free(client);
2514 static void cpu_notify_map_clients(void)
2516 MapClient *client;
2518 while (!QLIST_EMPTY(&map_client_list)) {
2519 client = QLIST_FIRST(&map_client_list);
2520 client->callback(client->opaque);
2521 cpu_unregister_map_client(client);
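/* A minimal sketch of how a device model is expected to use the map-client
 * list above (the helper names are hypothetical, not from this file).  When
 * address_space_map() returns NULL because the single bounce buffer is busy,
 * the caller registers a callback; cpu_notify_map_clients() invokes it once
 * the bounce buffer is released in address_space_unmap(), at which point the
 * transfer can be retried.
 */
static void example_retry_dma(void *opaque)
{
    /* re-issue the DMA transfer that previously failed to map */
}

static void example_start_dma(void)
{
    cpu_register_map_client(NULL, example_retry_dma);
}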
2525 bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2527 MemoryRegion *mr;
2528 hwaddr l, xlat;
2530 while (len > 0) {
2531 l = len;
2532 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2533 if (!memory_access_is_direct(mr, is_write)) {
2534 l = memory_access_size(mr, l, addr);
2535 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
2536 return false;
2540 len -= l;
2541 addr += l;
2543 return true;
2546 /* Map a physical memory region into a host virtual address.
2547 * May map a subset of the requested range, given by and returned in *plen.
2548 * May return NULL if resources needed to perform the mapping are exhausted.
2549 * Use only for reads OR writes - not for read-modify-write operations.
2550 * Use cpu_register_map_client() to know when retrying the map operation is
2551 * likely to succeed.
2553 void *address_space_map(AddressSpace *as,
2554 hwaddr addr,
2555 hwaddr *plen,
2556 bool is_write)
2558 hwaddr len = *plen;
2559 hwaddr done = 0;
2560 hwaddr l, xlat, base;
2561 MemoryRegion *mr, *this_mr;
2562 ram_addr_t raddr;
2564 if (len == 0) {
2565 return NULL;
2568 l = len;
2569 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2570 if (!memory_access_is_direct(mr, is_write)) {
2571 if (bounce.buffer) {
2572 return NULL;
2574 /* Avoid unbounded allocations */
2575 l = MIN(l, TARGET_PAGE_SIZE);
2576 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
2577 bounce.addr = addr;
2578 bounce.len = l;
2580 memory_region_ref(mr);
2581 bounce.mr = mr;
2582 if (!is_write) {
2583 address_space_read(as, addr, bounce.buffer, l);
2586 *plen = l;
2587 return bounce.buffer;
2590 base = xlat;
2591 raddr = memory_region_get_ram_addr(mr);
2593 for (;;) {
2594 len -= l;
2595 addr += l;
2596 done += l;
2597 if (len == 0) {
2598 break;
2601 l = len;
2602 this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
2603 if (this_mr != mr || xlat != base + done) {
2604 break;
2608 memory_region_ref(mr);
2609 *plen = done;
2610 return qemu_ram_ptr_length(raddr + base, plen);
2613 /* Unmaps a memory region previously mapped by address_space_map().
2614 * Will also mark the memory as dirty if is_write == 1. access_len gives
2615 * the amount of memory that was actually read or written by the caller.
2617 void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2618 int is_write, hwaddr access_len)
2620 if (buffer != bounce.buffer) {
2621 MemoryRegion *mr;
2622 ram_addr_t addr1;
2624 mr = qemu_ram_addr_from_host(buffer, &addr1);
2625 assert(mr != NULL);
2626 if (is_write) {
2627 invalidate_and_set_dirty(addr1, access_len);
2629 if (xen_enabled()) {
2630 xen_invalidate_map_cache_entry(buffer);
2632 memory_region_unref(mr);
2633 return;
2635 if (is_write) {
2636 address_space_write(as, bounce.addr, bounce.buffer, access_len);
2638 qemu_vfree(bounce.buffer);
2639 bounce.buffer = NULL;
2640 memory_region_unref(bounce.mr);
2641 cpu_notify_map_clients();
2644 void *cpu_physical_memory_map(hwaddr addr,
2645 hwaddr *plen,
2646 int is_write)
2648 return address_space_map(&address_space_memory, addr, plen, is_write);
2651 void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2652 int is_write, hwaddr access_len)
2654 address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
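/* A minimal sketch of the map/modify/unmap pattern described above
 * (hypothetical helper, not part of the original file).  For direct RAM this
 * yields a zero-copy host pointer; for MMIO it falls back to the single
 * bounce buffer and may return NULL, in which case the caller should retry
 * later, e.g. via cpu_register_map_client().
 */
static bool example_fill_region(AddressSpace *as, hwaddr addr, hwaddr len)
{
    hwaddr plen = len;
    void *p = address_space_map(as, addr, &plen, true);

    if (!p) {
        return false;               /* bounce buffer busy: retry later */
    }
    memset(p, 0, plen);             /* may cover less than 'len' bytes */
    address_space_unmap(as, p, plen, true, plen);
    return plen == len;
}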
2657 /* warning: addr must be aligned */
2658 static inline uint32_t ldl_phys_internal(AddressSpace *as, hwaddr addr,
2659 enum device_endian endian)
2661 uint8_t *ptr;
2662 uint64_t val;
2663 MemoryRegion *mr;
2664 hwaddr l = 4;
2665 hwaddr addr1;
2667 mr = address_space_translate(as, addr, &addr1, &l, false);
2668 if (l < 4 || !memory_access_is_direct(mr, false)) {
2669 /* I/O case */
2670 io_mem_read(mr, addr1, &val, 4);
2671 #if defined(TARGET_WORDS_BIGENDIAN)
2672 if (endian == DEVICE_LITTLE_ENDIAN) {
2673 val = bswap32(val);
2675 #else
2676 if (endian == DEVICE_BIG_ENDIAN) {
2677 val = bswap32(val);
2679 #endif
2680 } else {
2681 /* RAM case */
2682 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
2683 & TARGET_PAGE_MASK)
2684 + addr1);
2685 switch (endian) {
2686 case DEVICE_LITTLE_ENDIAN:
2687 val = ldl_le_p(ptr);
2688 break;
2689 case DEVICE_BIG_ENDIAN:
2690 val = ldl_be_p(ptr);
2691 break;
2692 default:
2693 val = ldl_p(ptr);
2694 break;
2697 return val;
2700 uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
2702 return ldl_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
2705 uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
2707 return ldl_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
2710 uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
2712 return ldl_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
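/* A minimal sketch showing why the fixed-endian variants above exist: device
 * emulation reads guest structures with a defined byte order regardless of
 * the target's native endianness.  The descriptor layout and helper name are
 * hypothetical, not from this file; ldq_le_phys() would read the same value
 * in one call.
 */
static uint64_t example_read_le_descriptor(AddressSpace *as, hwaddr desc)
{
    uint32_t lo = ldl_le_phys(as, desc);        /* little-endian low word */
    uint32_t hi = ldl_le_phys(as, desc + 4);    /* little-endian high word */

    return ((uint64_t)hi << 32) | lo;
}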
2715 /* warning: addr must be aligned */
2716 static inline uint64_t ldq_phys_internal(AddressSpace *as, hwaddr addr,
2717 enum device_endian endian)
2719 uint8_t *ptr;
2720 uint64_t val;
2721 MemoryRegion *mr;
2722 hwaddr l = 8;
2723 hwaddr addr1;
2725 mr = address_space_translate(as, addr, &addr1, &l,
2726 false);
2727 if (l < 8 || !memory_access_is_direct(mr, false)) {
2728 /* I/O case */
2729 io_mem_read(mr, addr1, &val, 8);
2730 #if defined(TARGET_WORDS_BIGENDIAN)
2731 if (endian == DEVICE_LITTLE_ENDIAN) {
2732 val = bswap64(val);
2734 #else
2735 if (endian == DEVICE_BIG_ENDIAN) {
2736 val = bswap64(val);
2738 #endif
2739 } else {
2740 /* RAM case */
2741 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
2742 & TARGET_PAGE_MASK)
2743 + addr1);
2744 switch (endian) {
2745 case DEVICE_LITTLE_ENDIAN:
2746 val = ldq_le_p(ptr);
2747 break;
2748 case DEVICE_BIG_ENDIAN:
2749 val = ldq_be_p(ptr);
2750 break;
2751 default:
2752 val = ldq_p(ptr);
2753 break;
2756 return val;
2759 uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
2761 return ldq_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
2764 uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
2766 return ldq_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
2769 uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
2771 return ldq_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
2774 /* XXX: optimize */
2775 uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
2777 uint8_t val;
2778 address_space_rw(as, addr, &val, 1, 0);
2779 return val;
2782 /* warning: addr must be aligned */
2783 static inline uint32_t lduw_phys_internal(AddressSpace *as, hwaddr addr,
2784 enum device_endian endian)
2786 uint8_t *ptr;
2787 uint64_t val;
2788 MemoryRegion *mr;
2789 hwaddr l = 2;
2790 hwaddr addr1;
2792 mr = address_space_translate(as, addr, &addr1, &l,
2793 false);
2794 if (l < 2 || !memory_access_is_direct(mr, false)) {
2795 /* I/O case */
2796 io_mem_read(mr, addr1, &val, 2);
2797 #if defined(TARGET_WORDS_BIGENDIAN)
2798 if (endian == DEVICE_LITTLE_ENDIAN) {
2799 val = bswap16(val);
2801 #else
2802 if (endian == DEVICE_BIG_ENDIAN) {
2803 val = bswap16(val);
2805 #endif
2806 } else {
2807 /* RAM case */
2808 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
2809 & TARGET_PAGE_MASK)
2810 + addr1);
2811 switch (endian) {
2812 case DEVICE_LITTLE_ENDIAN:
2813 val = lduw_le_p(ptr);
2814 break;
2815 case DEVICE_BIG_ENDIAN:
2816 val = lduw_be_p(ptr);
2817 break;
2818 default:
2819 val = lduw_p(ptr);
2820 break;
2823 return val;
2826 uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
2828 return lduw_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
2831 uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
2833 return lduw_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
2836 uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
2838 return lduw_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
2841 /* warning: addr must be aligned. The RAM page is not marked as dirty
2842 and the code inside is not invalidated. This is useful if the dirty
2843 bits are used to track modified PTEs */
2844 void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
2846 uint8_t *ptr;
2847 MemoryRegion *mr;
2848 hwaddr l = 4;
2849 hwaddr addr1;
2851 mr = address_space_translate(as, addr, &addr1, &l,
2852 true);
2853 if (l < 4 || !memory_access_is_direct(mr, true)) {
2854 io_mem_write(mr, addr1, val, 4);
2855 } else {
2856 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
2857 ptr = qemu_get_ram_ptr(addr1);
2858 stl_p(ptr, val);
2860 if (unlikely(in_migration)) {
2861 if (cpu_physical_memory_is_clean(addr1)) {
2862 /* invalidate code */
2863 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2864 /* set dirty bit */
2865 cpu_physical_memory_set_dirty_range_nocode(addr1, 4);
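/* A minimal sketch of the use case named in the comment above (hypothetical
 * helper and PTE layout, not from this file): when a target MMU helper
 * updates accessed/dirty bits of a guest PTE in place, it must not itself
 * mark the page-table page dirty or invalidate translated code, so it uses
 * stl_phys_notdirty() instead of stl_phys().
 */
static void example_set_pte_bit(AddressSpace *as, hwaddr pte_addr,
                                uint32_t bit)
{
    uint32_t pte = ldl_phys(as, pte_addr);

    stl_phys_notdirty(as, pte_addr, pte | bit);
}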
2871 /* warning: addr must be aligned */
2872 static inline void stl_phys_internal(AddressSpace *as,
2873 hwaddr addr, uint32_t val,
2874 enum device_endian endian)
2876 uint8_t *ptr;
2877 MemoryRegion *mr;
2878 hwaddr l = 4;
2879 hwaddr addr1;
2881 mr = address_space_translate(as, addr, &addr1, &l,
2882 true);
2883 if (l < 4 || !memory_access_is_direct(mr, true)) {
2884 #if defined(TARGET_WORDS_BIGENDIAN)
2885 if (endian == DEVICE_LITTLE_ENDIAN) {
2886 val = bswap32(val);
2888 #else
2889 if (endian == DEVICE_BIG_ENDIAN) {
2890 val = bswap32(val);
2892 #endif
2893 io_mem_write(mr, addr1, val, 4);
2894 } else {
2895 /* RAM case */
2896 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
2897 ptr = qemu_get_ram_ptr(addr1);
2898 switch (endian) {
2899 case DEVICE_LITTLE_ENDIAN:
2900 stl_le_p(ptr, val);
2901 break;
2902 case DEVICE_BIG_ENDIAN:
2903 stl_be_p(ptr, val);
2904 break;
2905 default:
2906 stl_p(ptr, val);
2907 break;
2909 invalidate_and_set_dirty(addr1, 4);
2913 void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
2915 stl_phys_internal(as, addr, val, DEVICE_NATIVE_ENDIAN);
2918 void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
2920 stl_phys_internal(as, addr, val, DEVICE_LITTLE_ENDIAN);
2923 void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
2925 stl_phys_internal(as, addr, val, DEVICE_BIG_ENDIAN);
2928 /* XXX: optimize */
2929 void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
2931 uint8_t v = val;
2932 address_space_rw(as, addr, &v, 1, 1);
2935 /* warning: addr must be aligned */
2936 static inline void stw_phys_internal(AddressSpace *as,
2937 hwaddr addr, uint32_t val,
2938 enum device_endian endian)
2940 uint8_t *ptr;
2941 MemoryRegion *mr;
2942 hwaddr l = 2;
2943 hwaddr addr1;
2945 mr = address_space_translate(as, addr, &addr1, &l, true);
2946 if (l < 2 || !memory_access_is_direct(mr, true)) {
2947 #if defined(TARGET_WORDS_BIGENDIAN)
2948 if (endian == DEVICE_LITTLE_ENDIAN) {
2949 val = bswap16(val);
2951 #else
2952 if (endian == DEVICE_BIG_ENDIAN) {
2953 val = bswap16(val);
2955 #endif
2956 io_mem_write(mr, addr1, val, 2);
2957 } else {
2958 /* RAM case */
2959 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
2960 ptr = qemu_get_ram_ptr(addr1);
2961 switch (endian) {
2962 case DEVICE_LITTLE_ENDIAN:
2963 stw_le_p(ptr, val);
2964 break;
2965 case DEVICE_BIG_ENDIAN:
2966 stw_be_p(ptr, val);
2967 break;
2968 default:
2969 stw_p(ptr, val);
2970 break;
2972 invalidate_and_set_dirty(addr1, 2);
2976 void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
2978 stw_phys_internal(as, addr, val, DEVICE_NATIVE_ENDIAN);
2981 void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
2983 stw_phys_internal(as, addr, val, DEVICE_LITTLE_ENDIAN);
2986 void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
2988 stw_phys_internal(as, addr, val, DEVICE_BIG_ENDIAN);
2991 /* XXX: optimize */
2992 void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
2994 val = tswap64(val);
2995 address_space_rw(as, addr, (void *) &val, 8, 1);
2998 void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
3000 val = cpu_to_le64(val);
3001 address_space_rw(as, addr, (void *) &val, 8, 1);
3004 void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
3006 val = cpu_to_be64(val);
3007 address_space_rw(as, addr, (void *) &val, 8, 1);
3010 /* virtual memory access for debug (includes writing to ROM) */
3011 int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
3012 uint8_t *buf, int len, int is_write)
3014 int l;
3015 hwaddr phys_addr;
3016 target_ulong page;
3018 while (len > 0) {
3019 page = addr & TARGET_PAGE_MASK;
3020 phys_addr = cpu_get_phys_page_debug(cpu, page);
3021 /* if no physical page is mapped, return an error */
3022 if (phys_addr == -1)
3023 return -1;
3024 l = (page + TARGET_PAGE_SIZE) - addr;
3025 if (l > len)
3026 l = len;
3027 phys_addr += (addr & ~TARGET_PAGE_MASK);
3028 if (is_write) {
3029 cpu_physical_memory_write_rom(cpu->as, phys_addr, buf, l);
3030 } else {
3031 address_space_rw(cpu->as, phys_addr, buf, l, 0);
3033 len -= l;
3034 buf += l;
3035 addr += l;
3037 return 0;
3039 #endif
3042 * A helper function for the _utterly broken_ virtio device model to find out if
3043 * it's running on a big endian machine. Don't do this at home kids!
3045 bool target_words_bigendian(void);
3046 bool target_words_bigendian(void)
3048 #if defined(TARGET_WORDS_BIGENDIAN)
3049 return true;
3050 #else
3051 return false;
3052 #endif
3055 #ifndef CONFIG_USER_ONLY
3056 bool cpu_physical_memory_is_io(hwaddr phys_addr)
3058 MemoryRegion *mr;
3059 hwaddr l = 1;
3061 mr = address_space_translate(&address_space_memory,
3062 phys_addr, &phys_addr, &l, false);
3064 return !(memory_region_is_ram(mr) ||
3065 memory_region_is_romd(mr));
3068 void qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
3070 RAMBlock *block;
3072 rcu_read_lock();
3073 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
3074 func(block->host, block->offset, block->used_length, opaque);
3076 rcu_read_unlock();
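/* A minimal sketch of a callback matching the iteration above; the signature
 * is inferred from the func(block->host, block->offset, block->used_length,
 * opaque) call, and the helper name is hypothetical.  Usage:
 *     ram_addr_t total = 0;
 *     qemu_ram_foreach_block(example_count_ram, &total);
 */
static void example_count_ram(void *host, ram_addr_t offset,
                              ram_addr_t length, void *opaque)
{
    ram_addr_t *total = opaque;

    *total += length;     /* accumulate the used length of each RAMBlock */
}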
3078 #endif