Merge remote-tracking branch 'qemu/master'
[qemu/ar7.git] / exec.c
blob 1885611333a61c6fdc8fbf10a6c2ac0de02b69fa
1 /*
2 * Virtual page mapping
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 #include "config.h"
20 #ifndef _WIN32
21 #include <sys/types.h>
22 #include <sys/mman.h>
23 #endif
25 #include "qemu-common.h"
26 #include "cpu.h"
27 #include "tcg.h"
28 #include "hw/hw.h"
29 #if !defined(CONFIG_USER_ONLY)
30 #include "hw/boards.h"
31 #endif
32 #include "hw/qdev.h"
33 #include "qemu/osdep.h"
34 #include "sysemu/kvm.h"
35 #include "sysemu/sysemu.h"
36 #include "hw/xen/xen.h"
37 #include "qemu/timer.h"
38 #include "qemu/config-file.h"
39 #include "qemu/error-report.h"
40 #include "exec/memory.h"
41 #include "sysemu/dma.h"
42 #include "exec/address-spaces.h"
43 #if defined(CONFIG_USER_ONLY)
44 #include <qemu.h>
45 #else /* !CONFIG_USER_ONLY */
46 #include "sysemu/xen-mapcache.h"
47 #include "trace.h"
48 #endif
49 #include "exec/cpu-all.h"
50 #include "qemu/rcu_queue.h"
51 #include "exec/cputlb.h"
52 #include "translate-all.h"
54 #include "exec/memory-internal.h"
55 #include "exec/ram_addr.h"
57 #include "qemu/range.h"
59 //#define DEBUG_SUBPAGE
61 #if !defined(CONFIG_USER_ONLY)
62 static bool in_migration;
64 /* ram_list is read under rcu_read_lock()/rcu_read_unlock(). Writes
65 * are protected by the ramlist lock.
67 RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };
69 static MemoryRegion *system_memory;
70 static MemoryRegion *system_io;
72 AddressSpace address_space_io;
73 AddressSpace address_space_memory;
75 MemoryRegion io_mem_rom, io_mem_notdirty;
76 static MemoryRegion io_mem_unassigned;
78 /* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
79 #define RAM_PREALLOC (1 << 0)
81 /* RAM is mmap-ed with MAP_SHARED */
82 #define RAM_SHARED (1 << 1)
84 /* Only a portion of RAM (used_length) is actually used, and migrated.
85 * This used_length size can change across reboots.
87 #define RAM_RESIZEABLE (1 << 2)
89 #endif
91 struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
92 /* current CPU in the current thread. It is only valid inside
93 cpu_exec() */
94 DEFINE_TLS(CPUState *, current_cpu);
95 /* 0 = Do not count executed instructions.
96 1 = Precise instruction counting.
97 2 = Adaptive rate instruction counting. */
98 int use_icount;
100 #if !defined(CONFIG_USER_ONLY)
102 typedef struct PhysPageEntry PhysPageEntry;
104 struct PhysPageEntry {
105 /* How many bits to skip to the next level (in units of P_L2_SIZE). 0 for a leaf. */
106 uint32_t skip : 6;
107 /* index into phys_sections (!skip) or phys_map_nodes (skip) */
108 uint32_t ptr : 26;
111 #define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)
113 /* Size of the L2 (and L3, etc) page tables. */
114 #define ADDR_SPACE_BITS 64
116 #define P_L2_BITS 9
117 #define P_L2_SIZE (1 << P_L2_BITS)
119 #define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
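/* Worked example (assuming the common case of 4 KiB target pages, i.e.
 * TARGET_PAGE_BITS == 12): the page-frame index has 64 - 12 = 52 significant
 * bits, each level resolves P_L2_BITS == 9 of them, so P_L2_LEVELS evaluates
 * to (64 - 12 - 1) / 9 + 1 == 6 levels. */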
121 typedef PhysPageEntry Node[P_L2_SIZE];
123 typedef struct PhysPageMap {
124 struct rcu_head rcu;
126 unsigned sections_nb;
127 unsigned sections_nb_alloc;
128 unsigned nodes_nb;
129 unsigned nodes_nb_alloc;
130 Node *nodes;
131 MemoryRegionSection *sections;
132 } PhysPageMap;
134 struct AddressSpaceDispatch {
135 struct rcu_head rcu;
137 /* This is a multi-level map on the physical address space.
138 * The bottom level has pointers to MemoryRegionSections.
140 PhysPageEntry phys_map;
141 PhysPageMap map;
142 AddressSpace *as;
145 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
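/* A subpage covers one target page that is shared by more than one
 * MemoryRegionSection.  sub_section[] maps each byte offset within the page
 * (SUBPAGE_IDX) to an index into the dispatch's sections[] array;
 * subpage_register() below fills it in. */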
146 typedef struct subpage_t {
147 MemoryRegion iomem;
148 AddressSpace *as;
149 hwaddr base;
150 uint16_t sub_section[TARGET_PAGE_SIZE];
151 } subpage_t;
153 #define PHYS_SECTION_UNASSIGNED 0
154 #define PHYS_SECTION_NOTDIRTY 1
155 #define PHYS_SECTION_ROM 2
156 #define PHYS_SECTION_WATCH 3
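/* These four constants are the reserved first entries of every dispatch's
 * sections[] array; mem_begin() below adds dummy sections in exactly this
 * order and asserts the resulting indices. */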
158 static void io_mem_init(void);
159 static void memory_map_init(void);
160 static void tcg_commit(MemoryListener *listener);
162 static MemoryRegion io_mem_watch;
163 #endif
165 #if !defined(CONFIG_USER_ONLY)
167 static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
169 if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
170 map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
171 map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
172 map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
176 static uint32_t phys_map_node_alloc(PhysPageMap *map)
178 unsigned i;
179 uint32_t ret;
181 ret = map->nodes_nb++;
182 assert(ret != PHYS_MAP_NODE_NIL);
183 assert(ret != map->nodes_nb_alloc);
184 for (i = 0; i < P_L2_SIZE; ++i) {
185 map->nodes[ret][i].skip = 1;
186 map->nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
188 return ret;
191 static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
192 hwaddr *index, hwaddr *nb, uint16_t leaf,
193 int level)
195 PhysPageEntry *p;
196 int i;
197 hwaddr step = (hwaddr)1 << (level * P_L2_BITS);
199 if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
200 lp->ptr = phys_map_node_alloc(map);
201 p = map->nodes[lp->ptr];
202 if (level == 0) {
203 for (i = 0; i < P_L2_SIZE; i++) {
204 p[i].skip = 0;
205 p[i].ptr = PHYS_SECTION_UNASSIGNED;
208 } else {
209 p = map->nodes[lp->ptr];
211 lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];
213 while (*nb && lp < &p[P_L2_SIZE]) {
214 if ((*index & (step - 1)) == 0 && *nb >= step) {
215 lp->skip = 0;
216 lp->ptr = leaf;
217 *index += step;
218 *nb -= step;
219 } else {
220 phys_page_set_level(map, lp, index, nb, leaf, level - 1);
222 ++lp;
226 static void phys_page_set(AddressSpaceDispatch *d,
227 hwaddr index, hwaddr nb,
228 uint16_t leaf)
230 /* Wildly overreserve - it doesn't matter much. */
231 phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);
233 phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
236 /* Compact a non-leaf page entry. Simply detect that the entry has a single child,
237 * and update our entry so we can skip it and go directly to the destination.
239 static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
241 unsigned valid_ptr = P_L2_SIZE;
242 int valid = 0;
243 PhysPageEntry *p;
244 int i;
246 if (lp->ptr == PHYS_MAP_NODE_NIL) {
247 return;
250 p = nodes[lp->ptr];
251 for (i = 0; i < P_L2_SIZE; i++) {
252 if (p[i].ptr == PHYS_MAP_NODE_NIL) {
253 continue;
256 valid_ptr = i;
257 valid++;
258 if (p[i].skip) {
259 phys_page_compact(&p[i], nodes, compacted);
263 /* We can only compress if there's only one child. */
264 if (valid != 1) {
265 return;
268 assert(valid_ptr < P_L2_SIZE);
270 /* Don't compress if it won't fit in the # of bits we have. */
271 if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
272 return;
275 lp->ptr = p[valid_ptr].ptr;
276 if (!p[valid_ptr].skip) {
277 /* If our only child is a leaf, make this a leaf. */
278 /* By design, we should have made this node a leaf to begin with so we
279 * should never reach here.
280 * But since it's so simple to handle this, let's do it just in case we
281 * change this rule.
283 lp->skip = 0;
284 } else {
285 lp->skip += p[valid_ptr].skip;
289 static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
291 DECLARE_BITMAP(compacted, nodes_nb);
293 if (d->phys_map.skip) {
294 phys_page_compact(&d->phys_map, d->map.nodes, compacted);
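/* Look up the section covering addr.  The walk starts at level P_L2_LEVELS
 * and subtracts lp.skip at each step, so a compacted entry with skip > 1
 * jumps over the levels it swallowed; a leaf has skip == 0 and ends the
 * loop.  The final range check returns the unassigned section for addresses
 * that a compacted leaf nominally reaches but the section does not cover. */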
298 static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
299 Node *nodes, MemoryRegionSection *sections)
301 PhysPageEntry *p;
302 hwaddr index = addr >> TARGET_PAGE_BITS;
303 int i;
305 for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
306 if (lp.ptr == PHYS_MAP_NODE_NIL) {
307 return &sections[PHYS_SECTION_UNASSIGNED];
309 p = nodes[lp.ptr];
310 lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
313 if (sections[lp.ptr].size.hi ||
314 range_covers_byte(sections[lp.ptr].offset_within_address_space,
315 sections[lp.ptr].size.lo, addr)) {
316 return &sections[lp.ptr];
317 } else {
318 return &sections[PHYS_SECTION_UNASSIGNED];
322 bool memory_region_is_unassigned(MemoryRegion *mr)
324 return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
325 && mr != &io_mem_watch;
328 /* Called from RCU critical section */
329 static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
330 hwaddr addr,
331 bool resolve_subpage)
333 MemoryRegionSection *section;
334 subpage_t *subpage;
336 section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
337 if (resolve_subpage && section->mr->subpage) {
338 subpage = container_of(section->mr, subpage_t, iomem);
339 section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
341 return section;
344 /* Called from RCU critical section */
345 static MemoryRegionSection *
346 address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
347 hwaddr *plen, bool resolve_subpage)
349 MemoryRegionSection *section;
350 Int128 diff;
352 section = address_space_lookup_region(d, addr, resolve_subpage);
353 /* Compute offset within MemoryRegionSection */
354 addr -= section->offset_within_address_space;
356 /* Compute offset within MemoryRegion */
357 *xlat = addr + section->offset_within_region;
359 diff = int128_sub(section->mr->size, int128_make64(addr));
360 *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
361 return section;
364 static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
366 if (memory_region_is_ram(mr)) {
367 return !(is_write && mr->readonly);
369 if (memory_region_is_romd(mr)) {
370 return !is_write;
373 return false;
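/* Translate addr in as down to a terminal MemoryRegion, following IOMMUs:
 * each iteration consults the current dispatch map and, if the region has
 * iommu_ops, applies the returned IOTLB entry and restarts the lookup in the
 * target address space.  *plen is shrunk so the result never crosses an
 * IOMMU page, and under Xen never crosses a target page for direct RAM. */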
376 MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
377 hwaddr *xlat, hwaddr *plen,
378 bool is_write)
380 IOMMUTLBEntry iotlb;
381 MemoryRegionSection *section;
382 MemoryRegion *mr;
384 rcu_read_lock();
385 for (;;) {
386 AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
387 section = address_space_translate_internal(d, addr, &addr, plen, true);
388 mr = section->mr;
390 if (!mr->iommu_ops) {
391 break;
394 iotlb = mr->iommu_ops->translate(mr, addr, is_write);
395 addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
396 | (addr & iotlb.addr_mask));
397 *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
398 if (!(iotlb.perm & (1 << is_write))) {
399 mr = &io_mem_unassigned;
400 break;
403 as = iotlb.target_as;
406 if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
407 hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
408 *plen = MIN(page, *plen);
411 *xlat = addr;
412 rcu_read_unlock();
413 return mr;
416 /* Called from RCU critical section */
417 MemoryRegionSection *
418 address_space_translate_for_iotlb(CPUState *cpu, hwaddr addr,
419 hwaddr *xlat, hwaddr *plen)
421 MemoryRegionSection *section;
422 section = address_space_translate_internal(cpu->memory_dispatch,
423 addr, xlat, plen, false);
425 assert(!section->mr->iommu_ops);
426 return section;
428 #endif
430 void cpu_exec_init_all(void)
432 #if !defined(CONFIG_USER_ONLY)
433 qemu_mutex_init(&ram_list.mutex);
434 memory_map_init();
435 io_mem_init();
436 #endif
439 #if !defined(CONFIG_USER_ONLY)
441 static int cpu_common_post_load(void *opaque, int version_id)
443 CPUState *cpu = opaque;
445 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
446 version_id is increased. */
447 cpu->interrupt_request &= ~0x01;
448 tlb_flush(cpu, 1);
450 return 0;
453 static int cpu_common_pre_load(void *opaque)
455 CPUState *cpu = opaque;
457 cpu->exception_index = -1;
459 return 0;
462 static bool cpu_common_exception_index_needed(void *opaque)
464 CPUState *cpu = opaque;
466 return tcg_enabled() && cpu->exception_index != -1;
469 static const VMStateDescription vmstate_cpu_common_exception_index = {
470 .name = "cpu_common/exception_index",
471 .version_id = 1,
472 .minimum_version_id = 1,
473 .fields = (VMStateField[]) {
474 VMSTATE_INT32(exception_index, CPUState),
475 VMSTATE_END_OF_LIST()
479 const VMStateDescription vmstate_cpu_common = {
480 .name = "cpu_common",
481 .version_id = 1,
482 .minimum_version_id = 1,
483 .pre_load = cpu_common_pre_load,
484 .post_load = cpu_common_post_load,
485 .fields = (VMStateField[]) {
486 VMSTATE_UINT32(halted, CPUState),
487 VMSTATE_UINT32(interrupt_request, CPUState),
488 VMSTATE_END_OF_LIST()
490 .subsections = (VMStateSubsection[]) {
492 .vmsd = &vmstate_cpu_common_exception_index,
493 .needed = cpu_common_exception_index_needed,
494 } , {
495 /* empty */
500 #endif
502 CPUState *qemu_get_cpu(int index)
504 CPUState *cpu;
506 CPU_FOREACH(cpu) {
507 if (cpu->cpu_index == index) {
508 return cpu;
512 return NULL;
515 #if !defined(CONFIG_USER_ONLY)
516 void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as)
518 /* We only support one address space per cpu at the moment. */
519 assert(cpu->as == as);
521 if (cpu->tcg_as_listener) {
522 memory_listener_unregister(cpu->tcg_as_listener);
523 } else {
524 cpu->tcg_as_listener = g_new0(MemoryListener, 1);
526 cpu->tcg_as_listener->commit = tcg_commit;
527 memory_listener_register(cpu->tcg_as_listener, as);
529 #endif
531 void cpu_exec_init(CPUArchState *env)
533 CPUState *cpu = ENV_GET_CPU(env);
534 CPUClass *cc = CPU_GET_CLASS(cpu);
535 CPUState *some_cpu;
536 int cpu_index;
538 #ifdef TARGET_WORDS_BIGENDIAN
539 cpu->bigendian = true;
540 #else
541 cpu->bigendian = false;
542 #endif
544 #if defined(CONFIG_USER_ONLY)
545 cpu_list_lock();
546 #endif
547 cpu_index = 0;
548 CPU_FOREACH(some_cpu) {
549 cpu_index++;
551 cpu->cpu_index = cpu_index;
552 cpu->numa_node = 0;
553 QTAILQ_INIT(&cpu->breakpoints);
554 QTAILQ_INIT(&cpu->watchpoints);
555 #ifndef CONFIG_USER_ONLY
556 cpu->as = &address_space_memory;
557 cpu->thread_id = qemu_get_thread_id();
558 cpu_reload_memory_map(cpu);
559 #endif
560 QTAILQ_INSERT_TAIL(&cpus, cpu, node);
561 #if defined(CONFIG_USER_ONLY)
562 cpu_list_unlock();
563 #endif
564 if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
565 vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
567 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
568 register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
569 cpu_save, cpu_load, env);
570 assert(cc->vmsd == NULL);
571 assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
572 #endif
573 if (cc->vmsd != NULL) {
574 vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
578 #if defined(CONFIG_USER_ONLY)
579 static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
581 tb_invalidate_phys_page_range(pc, pc + 1, 0);
583 #else
584 static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
586 hwaddr phys = cpu_get_phys_page_debug(cpu, pc);
587 if (phys != -1) {
588 tb_invalidate_phys_addr(cpu->as,
589 phys | (pc & ~TARGET_PAGE_MASK));
592 #endif
594 #if defined(CONFIG_USER_ONLY)
595 void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
600 int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
601 int flags)
603 return -ENOSYS;
606 void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
610 int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
611 int flags, CPUWatchpoint **watchpoint)
613 return -ENOSYS;
615 #else
616 /* Add a watchpoint. */
617 int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
618 int flags, CPUWatchpoint **watchpoint)
620 CPUWatchpoint *wp;
622 /* forbid ranges which are empty or run off the end of the address space */
623 if (len == 0 || (addr + len - 1) < addr) {
624 error_report("tried to set invalid watchpoint at %"
625 VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
626 return -EINVAL;
628 wp = g_malloc(sizeof(*wp));
630 wp->vaddr = addr;
631 wp->len = len;
632 wp->flags = flags;
634 /* keep all GDB-injected watchpoints in front */
635 if (flags & BP_GDB) {
636 QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
637 } else {
638 QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
641 tlb_flush_page(cpu, addr);
643 if (watchpoint)
644 *watchpoint = wp;
645 return 0;
648 /* Remove a specific watchpoint. */
649 int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
650 int flags)
652 CPUWatchpoint *wp;
654 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
655 if (addr == wp->vaddr && len == wp->len
656 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
657 cpu_watchpoint_remove_by_ref(cpu, wp);
658 return 0;
661 return -ENOENT;
664 /* Remove a specific watchpoint by reference. */
665 void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
667 QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);
669 tlb_flush_page(cpu, watchpoint->vaddr);
671 g_free(watchpoint);
674 /* Remove all matching watchpoints. */
675 void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
677 CPUWatchpoint *wp, *next;
679 QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
680 if (wp->flags & mask) {
681 cpu_watchpoint_remove_by_ref(cpu, wp);
686 /* Return true if this watchpoint address matches the specified
687 * access (ie the address range covered by the watchpoint overlaps
688 * partially or completely with the address range covered by the
689 * access).
691 static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
692 vaddr addr,
693 vaddr len)
695 /* We know the lengths are non-zero, but a little caution is
696 * required to avoid errors in the case where the range ends
697 * exactly at the top of the address space and so addr + len
698 * wraps round to zero.
700 vaddr wpend = wp->vaddr + wp->len - 1;
701 vaddr addrend = addr + len - 1;
703 return !(addr > wpend || wp->vaddr > addrend);
706 #endif
708 /* Add a breakpoint. */
709 int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
710 CPUBreakpoint **breakpoint)
712 CPUBreakpoint *bp;
714 bp = g_malloc(sizeof(*bp));
716 bp->pc = pc;
717 bp->flags = flags;
719 /* keep all GDB-injected breakpoints in front */
720 if (flags & BP_GDB) {
721 QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
722 } else {
723 QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
726 breakpoint_invalidate(cpu, pc);
728 if (breakpoint) {
729 *breakpoint = bp;
731 return 0;
734 /* Remove a specific breakpoint. */
735 int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
737 CPUBreakpoint *bp;
739 QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
740 if (bp->pc == pc && bp->flags == flags) {
741 cpu_breakpoint_remove_by_ref(cpu, bp);
742 return 0;
745 return -ENOENT;
748 /* Remove a specific breakpoint by reference. */
749 void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
751 QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);
753 breakpoint_invalidate(cpu, breakpoint->pc);
755 g_free(breakpoint);
758 /* Remove all matching breakpoints. */
759 void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
761 CPUBreakpoint *bp, *next;
763 QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
764 if (bp->flags & mask) {
765 cpu_breakpoint_remove_by_ref(cpu, bp);
770 /* enable or disable single step mode. EXCP_DEBUG is returned by the
771 CPU loop after each instruction */
772 void cpu_single_step(CPUState *cpu, int enabled)
774 if (cpu->singlestep_enabled != enabled) {
775 cpu->singlestep_enabled = enabled;
776 if (kvm_enabled()) {
777 kvm_update_guest_debug(cpu, 0);
778 } else {
779 /* must flush all the translated code to avoid inconsistencies */
780 /* XXX: only flush what is necessary */
781 CPUArchState *env = cpu->env_ptr;
782 tb_flush(env);
787 void QEMU_NORETURN cpu_abort(CPUState *cpu, const char *fmt, ...)
789 va_list ap;
790 va_list ap2;
792 va_start(ap, fmt);
793 va_copy(ap2, ap);
794 fprintf(stderr, "qemu: fatal: ");
795 vfprintf(stderr, fmt, ap);
796 fprintf(stderr, "\n");
797 cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
798 if (qemu_log_enabled()) {
799 qemu_log("qemu: fatal: ");
800 qemu_log_vprintf(fmt, ap2);
801 qemu_log("\n");
802 log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
803 qemu_log_flush();
804 qemu_log_close();
806 va_end(ap2);
807 va_end(ap);
808 #if defined(CONFIG_USER_ONLY)
810 struct sigaction act;
811 sigfillset(&act.sa_mask);
812 act.sa_handler = SIG_DFL;
813 sigaction(SIGABRT, &act, NULL);
815 #endif
816 abort();
819 #if !defined(CONFIG_USER_ONLY)
820 /* Called from RCU critical section */
821 static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
823 RAMBlock *block;
825 block = atomic_rcu_read(&ram_list.mru_block);
826 if (block && addr - block->offset < block->max_length) {
827 goto found;
829 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
830 if (addr - block->offset < block->max_length) {
831 goto found;
835 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
836 abort();
838 found:
839 /* It is safe to write mru_block outside the iothread lock. This
840 * is what happens:
842 * mru_block = xxx
843 * rcu_read_unlock()
844 * xxx removed from list
845 * rcu_read_lock()
846 * read mru_block
847 * mru_block = NULL;
848 * call_rcu(reclaim_ramblock, xxx);
849 * rcu_read_unlock()
851 * atomic_rcu_set is not needed here. The block was already published
852 * when it was placed into the list. Here we're just making an extra
853 * copy of the pointer.
855 ram_list.mru_block = block;
856 return block;
859 static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
861 ram_addr_t start1;
862 RAMBlock *block;
863 ram_addr_t end;
865 end = TARGET_PAGE_ALIGN(start + length);
866 start &= TARGET_PAGE_MASK;
868 rcu_read_lock();
869 block = qemu_get_ram_block(start);
870 assert(block == qemu_get_ram_block(end - 1));
871 start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
872 cpu_tlb_reset_dirty_all(start1, length);
873 rcu_read_unlock();
876 /* Note: start and end must be within the same ram block. */
877 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t length,
878 unsigned client)
880 if (length == 0)
881 return;
882 cpu_physical_memory_clear_dirty_range_type(start, length, client);
884 if (tcg_enabled()) {
885 tlb_reset_dirty_range_all(start, length);
889 static void cpu_physical_memory_set_dirty_tracking(bool enable)
891 in_migration = enable;
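/* The iotlb value encodes both a location and a section index.  For RAM the
 * page-aligned ram address leaves the low TARGET_PAGE_BITS free, so
 * PHYS_SECTION_NOTDIRTY or PHYS_SECTION_ROM can simply be ORed in; for MMIO
 * the value is the section's index in the dispatch map added to xlat.
 * iotlb_to_region() later recovers the MemoryRegion from the low bits
 * (index & ~TARGET_PAGE_MASK). */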
894 /* Called from RCU critical section */
895 hwaddr memory_region_section_get_iotlb(CPUState *cpu,
896 MemoryRegionSection *section,
897 target_ulong vaddr,
898 hwaddr paddr, hwaddr xlat,
899 int prot,
900 target_ulong *address)
902 hwaddr iotlb;
903 CPUWatchpoint *wp;
905 if (memory_region_is_ram(section->mr)) {
906 /* Normal RAM. */
907 iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
908 + xlat;
909 if (!section->readonly) {
910 iotlb |= PHYS_SECTION_NOTDIRTY;
911 } else {
912 iotlb |= PHYS_SECTION_ROM;
914 } else {
915 iotlb = section - section->address_space->dispatch->map.sections;
916 iotlb += xlat;
919 /* Make accesses to pages with watchpoints go via the
920 watchpoint trap routines. */
921 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
922 if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
923 /* Avoid trapping reads of pages with a write breakpoint. */
924 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
925 iotlb = PHYS_SECTION_WATCH + paddr;
926 *address |= TLB_MMIO;
927 break;
932 return iotlb;
934 #endif /* defined(CONFIG_USER_ONLY) */
936 #if !defined(CONFIG_USER_ONLY)
938 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
939 uint16_t section);
940 static subpage_t *subpage_init(AddressSpace *as, hwaddr base);
942 static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
943 qemu_anon_ram_alloc;
946 * Set a custom physical guest memory allocator.
947 * Accelerators with unusual needs may need this. Hopefully, we can
948 * get rid of it eventually.
950 void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
952 phys_mem_alloc = alloc;
955 static uint16_t phys_section_add(PhysPageMap *map,
956 MemoryRegionSection *section)
958 /* The physical section number is ORed with a page-aligned
959 * pointer to produce the iotlb entries. Thus it should
960 * never overflow into the page-aligned value.
962 assert(map->sections_nb < TARGET_PAGE_SIZE);
964 if (map->sections_nb == map->sections_nb_alloc) {
965 map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
966 map->sections = g_renew(MemoryRegionSection, map->sections,
967 map->sections_nb_alloc);
969 map->sections[map->sections_nb] = *section;
970 memory_region_ref(section->mr);
971 return map->sections_nb++;
974 static void phys_section_destroy(MemoryRegion *mr)
976 memory_region_unref(mr);
978 if (mr->subpage) {
979 subpage_t *subpage = container_of(mr, subpage_t, iomem);
980 object_unref(OBJECT(&subpage->iomem));
981 g_free(subpage);
985 static void phys_sections_free(PhysPageMap *map)
987 while (map->sections_nb > 0) {
988 MemoryRegionSection *section = &map->sections[--map->sections_nb];
989 phys_section_destroy(section->mr);
991 g_free(map->sections);
992 g_free(map->nodes);
995 static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
997 subpage_t *subpage;
998 hwaddr base = section->offset_within_address_space
999 & TARGET_PAGE_MASK;
1000 MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
1001 d->map.nodes, d->map.sections);
1002 MemoryRegionSection subsection = {
1003 .offset_within_address_space = base,
1004 .size = int128_make64(TARGET_PAGE_SIZE),
1006 hwaddr start, end;
1008 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
1010 if (!(existing->mr->subpage)) {
1011 subpage = subpage_init(d->as, base);
1012 subsection.address_space = d->as;
1013 subsection.mr = &subpage->iomem;
1014 phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
1015 phys_section_add(&d->map, &subsection));
1016 } else {
1017 subpage = container_of(existing->mr, subpage_t, iomem);
1019 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
1020 end = start + int128_get64(section->size) - 1;
1021 subpage_register(subpage, start, end,
1022 phys_section_add(&d->map, section));
1026 static void register_multipage(AddressSpaceDispatch *d,
1027 MemoryRegionSection *section)
1029 hwaddr start_addr = section->offset_within_address_space;
1030 uint16_t section_index = phys_section_add(&d->map, section);
1031 uint64_t num_pages = int128_get64(int128_rshift(section->size,
1032 TARGET_PAGE_BITS));
1034 assert(num_pages);
1035 phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
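/* mem_add() splits the incoming section into up to three pieces: an
 * unaligned head registered as a subpage, a page-aligned middle registered
 * via register_multipage(), and an unaligned (or smaller than one page)
 * tail registered as a subpage again. */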
1038 static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
1040 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
1041 AddressSpaceDispatch *d = as->next_dispatch;
1042 MemoryRegionSection now = *section, remain = *section;
1043 Int128 page_size = int128_make64(TARGET_PAGE_SIZE);
1045 if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
1046 uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
1047 - now.offset_within_address_space;
1049 now.size = int128_min(int128_make64(left), now.size);
1050 register_subpage(d, &now);
1051 } else {
1052 now.size = int128_zero();
1054 while (int128_ne(remain.size, now.size)) {
1055 remain.size = int128_sub(remain.size, now.size);
1056 remain.offset_within_address_space += int128_get64(now.size);
1057 remain.offset_within_region += int128_get64(now.size);
1058 now = remain;
1059 if (int128_lt(remain.size, page_size)) {
1060 register_subpage(d, &now);
1061 } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
1062 now.size = page_size;
1063 register_subpage(d, &now);
1064 } else {
1065 now.size = int128_and(now.size, int128_neg(page_size));
1066 register_multipage(d, &now);
1071 void qemu_flush_coalesced_mmio_buffer(void)
1073 if (kvm_enabled())
1074 kvm_flush_coalesced_mmio_buffer();
1077 void qemu_mutex_lock_ramlist(void)
1079 qemu_mutex_lock(&ram_list.mutex);
1082 void qemu_mutex_unlock_ramlist(void)
1084 qemu_mutex_unlock(&ram_list.mutex);
1087 #ifdef __linux__
1089 #include <sys/vfs.h>
1091 #define HUGETLBFS_MAGIC 0x958458f6
1093 static long gethugepagesize(const char *path, Error **errp)
1095 struct statfs fs;
1096 int ret;
1098 do {
1099 ret = statfs(path, &fs);
1100 } while (ret != 0 && errno == EINTR);
1102 if (ret != 0) {
1103 error_setg_errno(errp, errno, "failed to get page size of file %s",
1104 path);
1105 return 0;
1108 if (fs.f_type != HUGETLBFS_MAGIC)
1109 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
1111 return fs.f_bsize;
1114 static void *file_ram_alloc(RAMBlock *block,
1115 ram_addr_t memory,
1116 const char *path,
1117 Error **errp)
1119 char *filename;
1120 char *sanitized_name;
1121 char *c;
1122 void * volatile area = NULL;
1123 int fd;
1124 uint64_t hpagesize;
1125 Error *local_err = NULL;
1127 hpagesize = gethugepagesize(path, &local_err);
1128 if (local_err) {
1129 error_propagate(errp, local_err);
1130 goto error;
1132 block->mr->align = hpagesize;
1134 if (memory < hpagesize) {
1135 error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
1136 "or larger than huge page size 0x%" PRIx64,
1137 memory, hpagesize);
1138 goto error;
1141 if (kvm_enabled() && !kvm_has_sync_mmu()) {
1142 error_setg(errp,
1143 "host lacks kvm mmu notifiers, -mem-path unsupported");
1144 goto error;
1147 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
1148 sanitized_name = g_strdup(memory_region_name(block->mr));
1149 for (c = sanitized_name; *c != '\0'; c++) {
1150 if (*c == '/')
1151 *c = '_';
1154 filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
1155 sanitized_name);
1156 g_free(sanitized_name);
1158 fd = mkstemp(filename);
1159 if (fd < 0) {
1160 error_setg_errno(errp, errno,
1161 "unable to create backing store for hugepages");
1162 g_free(filename);
1163 goto error;
1165 unlink(filename);
1166 g_free(filename);
1168 memory = (memory+hpagesize-1) & ~(hpagesize-1);
1171 * ftruncate is not supported by hugetlbfs in older
1172 * hosts, so don't bother bailing out on errors.
1173 * If anything goes wrong with it under other filesystems,
1174 * mmap will fail.
1176 if (ftruncate(fd, memory)) {
1177 perror("ftruncate");
1180 area = mmap(0, memory, PROT_READ | PROT_WRITE,
1181 (block->flags & RAM_SHARED ? MAP_SHARED : MAP_PRIVATE),
1182 fd, 0);
1183 if (area == MAP_FAILED) {
1184 error_setg_errno(errp, errno,
1185 "unable to map backing store for hugepages");
1186 close(fd);
1187 goto error;
1190 if (mem_prealloc) {
1191 os_mem_prealloc(fd, area, memory);
1194 block->fd = fd;
1195 return area;
1197 error:
1198 if (mem_prealloc) {
1199 error_report("%s", error_get_pretty(*errp));
1200 exit(1);
1202 return NULL;
1204 #endif
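/* find_ram_offset() does a best-fit search: for every block it computes the
 * gap up to the next-higher block and keeps the smallest gap that still fits
 * the requested size, aborting if no gap is large enough. */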
1206 /* Called with the ramlist lock held. */
1207 static ram_addr_t find_ram_offset(ram_addr_t size)
1209 RAMBlock *block, *next_block;
1210 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
1212 assert(size != 0); /* it would hand out same offset multiple times */
1214 if (QLIST_EMPTY_RCU(&ram_list.blocks)) {
1215 return 0;
1218 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1219 ram_addr_t end, next = RAM_ADDR_MAX;
1221 end = block->offset + block->max_length;
1223 QLIST_FOREACH_RCU(next_block, &ram_list.blocks, next) {
1224 if (next_block->offset >= end) {
1225 next = MIN(next, next_block->offset);
1228 if (next - end >= size && next - end < mingap) {
1229 offset = end;
1230 mingap = next - end;
1234 if (offset == RAM_ADDR_MAX) {
1235 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
1236 (uint64_t)size);
1237 abort();
1240 return offset;
1243 ram_addr_t last_ram_offset(void)
1245 RAMBlock *block;
1246 ram_addr_t last = 0;
1248 rcu_read_lock();
1249 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1250 last = MAX(last, block->offset + block->max_length);
1252 rcu_read_unlock();
1253 return last;
1256 static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1258 int ret;
1260 /* Use MADV_DONTDUMP if the user doesn't want the guest memory in the core dump */
1261 if (!machine_dump_guest_core(current_machine)) {
1262 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1263 if (ret) {
1264 perror("qemu_madvise");
1265 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1266 "but dump_guest_core=off specified\n");
1271 /* Called within an RCU critical section, or while the ramlist lock
1272 * is held.
1274 static RAMBlock *find_ram_block(ram_addr_t addr)
1276 RAMBlock *block;
1278 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1279 if (block->offset == addr) {
1280 return block;
1284 return NULL;
1287 /* Called with iothread lock held. */
1288 void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
1290 RAMBlock *new_block, *block;
1292 rcu_read_lock();
1293 new_block = find_ram_block(addr);
1294 assert(new_block);
1295 assert(!new_block->idstr[0]);
1297 if (dev) {
1298 char *id = qdev_get_dev_path(dev);
1299 if (id) {
1300 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
1301 g_free(id);
1304 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1306 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1307 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
1308 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1309 new_block->idstr);
1310 abort();
1313 rcu_read_unlock();
1316 /* Called with iothread lock held. */
1317 void qemu_ram_unset_idstr(ram_addr_t addr)
1319 RAMBlock *block;
1321 /* FIXME: arch_init.c assumes that this is not called throughout
1322 * migration. Ignore the problem since hot-unplug during migration
1323 * does not work anyway.
1326 rcu_read_lock();
1327 block = find_ram_block(addr);
1328 if (block) {
1329 memset(block->idstr, 0, sizeof(block->idstr));
1331 rcu_read_unlock();
1334 static int memory_try_enable_merging(void *addr, size_t len)
1336 if (!machine_mem_merge(current_machine)) {
1337 /* disabled by the user */
1338 return 0;
1341 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1344 /* Only legal before guest might have detected the memory size: e.g. on
1345 * incoming migration, or right after reset.
1347 * As the memory core doesn't know how memory is accessed, it is up to
1348 * the resize callback to update device state and/or add assertions to detect
1349 * misuse, if necessary.
1351 int qemu_ram_resize(ram_addr_t base, ram_addr_t newsize, Error **errp)
1353 RAMBlock *block = find_ram_block(base);
1355 assert(block);
1357 newsize = TARGET_PAGE_ALIGN(newsize);
1359 if (block->used_length == newsize) {
1360 return 0;
1363 if (!(block->flags & RAM_RESIZEABLE)) {
1364 error_setg_errno(errp, EINVAL,
1365 "Length mismatch: %s: 0x" RAM_ADDR_FMT
1366 " in != 0x" RAM_ADDR_FMT, block->idstr,
1367 newsize, block->used_length);
1368 return -EINVAL;
1371 if (block->max_length < newsize) {
1372 error_setg_errno(errp, EINVAL,
1373 "Length too large: %s: 0x" RAM_ADDR_FMT
1374 " > 0x" RAM_ADDR_FMT, block->idstr,
1375 newsize, block->max_length);
1376 return -EINVAL;
1379 cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
1380 block->used_length = newsize;
1381 cpu_physical_memory_set_dirty_range(block->offset, block->used_length);
1382 memory_region_set_size(block->mr, newsize);
1383 if (block->resized) {
1384 block->resized(block->idstr, newsize, block->host);
1386 return 0;
1389 static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp)
1391 RAMBlock *block;
1392 RAMBlock *last_block = NULL;
1393 ram_addr_t old_ram_size, new_ram_size;
1395 old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
1397 qemu_mutex_lock_ramlist();
1398 new_block->offset = find_ram_offset(new_block->max_length);
1400 if (!new_block->host) {
1401 if (xen_enabled()) {
1402 xen_ram_alloc(new_block->offset, new_block->max_length,
1403 new_block->mr);
1404 } else {
1405 new_block->host = phys_mem_alloc(new_block->max_length,
1406 &new_block->mr->align);
1407 if (!new_block->host) {
1408 error_setg_errno(errp, errno,
1409 "cannot set up guest memory '%s'",
1410 memory_region_name(new_block->mr));
1411 qemu_mutex_unlock_ramlist();
1412 return -1;
1414 memory_try_enable_merging(new_block->host, new_block->max_length);
1418 /* Keep the list sorted from biggest to smallest block. Unlike QTAILQ,
1419 * QLIST (which has an RCU-friendly variant) does not have insertion at
1420 * tail, so save the last element in last_block.
1422 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1423 last_block = block;
1424 if (block->max_length < new_block->max_length) {
1425 break;
1428 if (block) {
1429 QLIST_INSERT_BEFORE_RCU(block, new_block, next);
1430 } else if (last_block) {
1431 QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
1432 } else { /* list is empty */
1433 QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
1435 ram_list.mru_block = NULL;
1437 /* Write list before version */
1438 smp_wmb();
1439 ram_list.version++;
1440 qemu_mutex_unlock_ramlist();
1442 new_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
1444 if (new_ram_size > old_ram_size) {
1445 int i;
1447 /* ram_list.dirty_memory[] is protected by the iothread lock. */
1448 for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
1449 ram_list.dirty_memory[i] =
1450 bitmap_zero_extend(ram_list.dirty_memory[i],
1451 old_ram_size, new_ram_size);
1454 cpu_physical_memory_set_dirty_range(new_block->offset,
1455 new_block->used_length);
1457 if (new_block->host) {
1458 qemu_ram_setup_dump(new_block->host, new_block->max_length);
1459 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
1460 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
1461 if (kvm_enabled()) {
1462 kvm_setup_guest_memory(new_block->host, new_block->max_length);
1466 return new_block->offset;
1469 #ifdef __linux__
1470 ram_addr_t qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
1471 bool share, const char *mem_path,
1472 Error **errp)
1474 RAMBlock *new_block;
1475 ram_addr_t addr;
1476 Error *local_err = NULL;
1478 if (xen_enabled()) {
1479 error_setg(errp, "-mem-path not supported with Xen");
1480 return -1;
1483 if (phys_mem_alloc != qemu_anon_ram_alloc) {
1485 * file_ram_alloc() needs to allocate just like
1486 * phys_mem_alloc, but we haven't bothered to provide
1487 * a hook there.
1489 error_setg(errp,
1490 "-mem-path not supported with this accelerator");
1491 return -1;
1494 size = TARGET_PAGE_ALIGN(size);
1495 new_block = g_malloc0(sizeof(*new_block));
1496 new_block->mr = mr;
1497 new_block->used_length = size;
1498 new_block->max_length = size;
1499 new_block->flags = share ? RAM_SHARED : 0;
1500 new_block->host = file_ram_alloc(new_block, size,
1501 mem_path, errp);
1502 if (!new_block->host) {
1503 g_free(new_block);
1504 return -1;
1507 addr = ram_block_add(new_block, &local_err);
1508 if (local_err) {
1509 g_free(new_block);
1510 error_propagate(errp, local_err);
1511 return -1;
1513 return addr;
1515 #endif
1517 static
1518 ram_addr_t qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
1519 void (*resized)(const char*,
1520 uint64_t length,
1521 void *host),
1522 void *host, bool resizeable,
1523 MemoryRegion *mr, Error **errp)
1525 RAMBlock *new_block;
1526 ram_addr_t addr;
1527 Error *local_err = NULL;
1529 size = TARGET_PAGE_ALIGN(size);
1530 max_size = TARGET_PAGE_ALIGN(max_size);
1531 new_block = g_malloc0(sizeof(*new_block));
1532 new_block->mr = mr;
1533 new_block->resized = resized;
1534 new_block->used_length = size;
1535 new_block->max_length = max_size;
1536 assert(max_size >= size);
1537 new_block->fd = -1;
1538 new_block->host = host;
1539 if (host) {
1540 new_block->flags |= RAM_PREALLOC;
1542 if (resizeable) {
1543 new_block->flags |= RAM_RESIZEABLE;
1545 addr = ram_block_add(new_block, &local_err);
1546 if (local_err) {
1547 g_free(new_block);
1548 error_propagate(errp, local_err);
1549 return -1;
1551 return addr;
1554 ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
1555 MemoryRegion *mr, Error **errp)
1557 return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
1560 ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
1562 return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
1565 ram_addr_t qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
1566 void (*resized)(const char*,
1567 uint64_t length,
1568 void *host),
1569 MemoryRegion *mr, Error **errp)
1571 return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
1574 void qemu_ram_free_from_ptr(ram_addr_t addr)
1576 RAMBlock *block;
1578 qemu_mutex_lock_ramlist();
1579 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1580 if (addr == block->offset) {
1581 QLIST_REMOVE_RCU(block, next);
1582 ram_list.mru_block = NULL;
1583 /* Write list before version */
1584 smp_wmb();
1585 ram_list.version++;
1586 g_free_rcu(block, rcu);
1587 break;
1590 qemu_mutex_unlock_ramlist();
1593 static void reclaim_ramblock(RAMBlock *block)
1595 if (block->flags & RAM_PREALLOC) {
1597 } else if (xen_enabled()) {
1598 xen_invalidate_map_cache_entry(block->host);
1599 #ifndef _WIN32
1600 } else if (block->fd >= 0) {
1601 munmap(block->host, block->max_length);
1602 close(block->fd);
1603 #endif
1604 } else {
1605 qemu_anon_ram_free(block->host, block->max_length);
1607 g_free(block);
1610 void qemu_ram_free(ram_addr_t addr)
1612 RAMBlock *block;
1614 qemu_mutex_lock_ramlist();
1615 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1616 if (addr == block->offset) {
1617 QLIST_REMOVE_RCU(block, next);
1618 ram_list.mru_block = NULL;
1619 /* Write list before version */
1620 smp_wmb();
1621 ram_list.version++;
1622 call_rcu(block, reclaim_ramblock, rcu);
1623 break;
1626 qemu_mutex_unlock_ramlist();
1629 #ifndef _WIN32
1630 void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1632 RAMBlock *block;
1633 ram_addr_t offset;
1634 int flags;
1635 void *area, *vaddr;
1637 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1638 offset = addr - block->offset;
1639 if (offset < block->max_length) {
1640 vaddr = ramblock_ptr(block, offset);
1641 if (block->flags & RAM_PREALLOC) {
1643 } else if (xen_enabled()) {
1644 abort();
1645 } else {
1646 flags = MAP_FIXED;
1647 munmap(vaddr, length);
1648 if (block->fd >= 0) {
1649 flags |= (block->flags & RAM_SHARED ?
1650 MAP_SHARED : MAP_PRIVATE);
1651 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1652 flags, block->fd, offset);
1653 } else {
1655 * Remap needs to match alloc. Accelerators that
1656 * set phys_mem_alloc never remap. If they did,
1657 * we'd need a remap hook here.
1659 assert(phys_mem_alloc == qemu_anon_ram_alloc);
1661 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1662 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1663 flags, -1, 0);
1665 if (area != vaddr) {
1666 fprintf(stderr, "Could not remap addr: "
1667 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
1668 length, addr);
1669 exit(1);
1671 memory_try_enable_merging(vaddr, length);
1672 qemu_ram_setup_dump(vaddr, length);
1677 #endif /* !_WIN32 */
1679 int qemu_get_ram_fd(ram_addr_t addr)
1681 RAMBlock *block;
1682 int fd;
1684 rcu_read_lock();
1685 block = qemu_get_ram_block(addr);
1686 fd = block->fd;
1687 rcu_read_unlock();
1688 return fd;
1691 void *qemu_get_ram_block_host_ptr(ram_addr_t addr)
1693 RAMBlock *block;
1694 void *ptr;
1696 rcu_read_lock();
1697 block = qemu_get_ram_block(addr);
1698 ptr = ramblock_ptr(block, 0);
1699 rcu_read_unlock();
1700 return ptr;
1703 /* Return a host pointer to ram allocated with qemu_ram_alloc.
1704 * This should not be used for general purpose DMA. Use address_space_map
1705 * or address_space_rw instead. For local memory (e.g. video ram) that the
1706 * device owns, use memory_region_get_ram_ptr.
1708 * By the time this function returns, the returned pointer is not protected
1709 * by RCU anymore. If the caller is not within an RCU critical section and
1710 * does not hold the iothread lock, it must have other means of protecting the
1711 * pointer, such as a reference to the region that includes the incoming
1712 * ram_addr_t.
1714 void *qemu_get_ram_ptr(ram_addr_t addr)
1716 RAMBlock *block;
1717 void *ptr;
1719 rcu_read_lock();
1720 block = qemu_get_ram_block(addr);
1722 if (xen_enabled() && block->host == NULL) {
1723 /* We need to check if the requested address is in the RAM
1724 * because we don't want to map the entire memory in QEMU.
1725 * In that case just map until the end of the page.
1727 if (block->offset == 0) {
1728 ptr = xen_map_cache(addr, 0, 0);
1729 goto unlock;
1732 block->host = xen_map_cache(block->offset, block->max_length, 1);
1734 ptr = ramblock_ptr(block, addr - block->offset);
1736 unlock:
1737 rcu_read_unlock();
1738 return ptr;
1741 /* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
1742 * but takes a size argument.
1744 * By the time this function returns, the returned pointer is not protected
1745 * by RCU anymore. If the caller is not within an RCU critical section and
1746 * does not hold the iothread lock, it must have other means of protecting the
1747 * pointer, such as a reference to the region that includes the incoming
1748 * ram_addr_t.
1750 static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
1752 void *ptr;
1753 if (*size == 0) {
1754 return NULL;
1756 if (xen_enabled()) {
1757 return xen_map_cache(addr, *size, 1);
1758 } else {
1759 RAMBlock *block;
1760 rcu_read_lock();
1761 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1762 if (addr - block->offset < block->max_length) {
1763 if (addr - block->offset + *size > block->max_length)
1764 *size = block->max_length - addr + block->offset;
1765 ptr = ramblock_ptr(block, addr - block->offset);
1766 rcu_read_unlock();
1767 return ptr;
1771 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1772 abort();
1776 /* Some of the softmmu routines need to translate from a host pointer
1777 * (typically a TLB entry) back to a ram offset.
1779 * By the time this function returns, the returned pointer is not protected
1780 * by RCU anymore. If the caller is not within an RCU critical section and
1781 * does not hold the iothread lock, it must have other means of protecting the
1782 * pointer, such as a reference to the region that includes the incoming
1783 * ram_addr_t.
1785 MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
1787 RAMBlock *block;
1788 uint8_t *host = ptr;
1789 MemoryRegion *mr;
1791 if (xen_enabled()) {
1792 rcu_read_lock();
1793 *ram_addr = xen_ram_addr_from_mapcache(ptr);
1794 mr = qemu_get_ram_block(*ram_addr)->mr;
1795 rcu_read_unlock();
1796 return mr;
1799 rcu_read_lock();
1800 block = atomic_rcu_read(&ram_list.mru_block);
1801 if (block && block->host && host - block->host < block->max_length) {
1802 goto found;
1805 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1806 /* This case appears when the block is not mapped. */
1807 if (block->host == NULL) {
1808 continue;
1810 if (host - block->host < block->max_length) {
1811 goto found;
1815 rcu_read_unlock();
1816 return NULL;
1818 found:
1819 *ram_addr = block->offset + (host - block->host);
1820 mr = block->mr;
1821 rcu_read_unlock();
1822 return mr;
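/* Writes that hit a page whose DIRTY_MEMORY_CODE bit is still clean are
 * routed through this region so any TBs generated from the page can be
 * invalidated first; the write is then applied to the RAM backing and the
 * dirty bits are set.  Once the page is no longer clean, the TLB entry is
 * re-dirtied so later writes go straight to RAM. */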
1825 static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
1826 uint64_t val, unsigned size)
1828 if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
1829 tb_invalidate_phys_page_fast(ram_addr, size);
1831 switch (size) {
1832 case 1:
1833 stb_p(qemu_get_ram_ptr(ram_addr), val);
1834 break;
1835 case 2:
1836 stw_p(qemu_get_ram_ptr(ram_addr), val);
1837 break;
1838 case 4:
1839 stl_p(qemu_get_ram_ptr(ram_addr), val);
1840 break;
1841 default:
1842 abort();
1844 cpu_physical_memory_set_dirty_range_nocode(ram_addr, size);
1845 /* we remove the notdirty callback only if the code has been
1846 flushed */
1847 if (!cpu_physical_memory_is_clean(ram_addr)) {
1848 CPUArchState *env = current_cpu->env_ptr;
1849 tlb_set_dirty(env, current_cpu->mem_io_vaddr);
1853 static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
1854 unsigned size, bool is_write)
1856 return is_write;
1859 static const MemoryRegionOps notdirty_mem_ops = {
1860 .write = notdirty_mem_write,
1861 .valid.accepts = notdirty_mem_accepts,
1862 .endianness = DEVICE_NATIVE_ENDIAN,
1865 /* Generate a debug exception if a watchpoint has been hit. */
1866 static void check_watchpoint(int offset, int len, int flags)
1868 CPUState *cpu = current_cpu;
1869 CPUArchState *env = cpu->env_ptr;
1870 target_ulong pc, cs_base;
1871 target_ulong vaddr;
1872 CPUWatchpoint *wp;
1873 int cpu_flags;
1875 if (cpu->watchpoint_hit) {
1876 /* We re-entered the check after replacing the TB. Now raise
1877 * the debug interrupt so that it will trigger after the
1878 * current instruction. */
1879 cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
1880 return;
1882 vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
1883 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
1884 if (cpu_watchpoint_address_matches(wp, vaddr, len)
1885 && (wp->flags & flags)) {
1886 if (flags == BP_MEM_READ) {
1887 wp->flags |= BP_WATCHPOINT_HIT_READ;
1888 } else {
1889 wp->flags |= BP_WATCHPOINT_HIT_WRITE;
1891 wp->hitaddr = vaddr;
1892 if (!cpu->watchpoint_hit) {
1893 cpu->watchpoint_hit = wp;
1894 tb_check_watchpoint(cpu);
1895 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
1896 cpu->exception_index = EXCP_DEBUG;
1897 cpu_loop_exit(cpu);
1898 } else {
1899 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
1900 tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
1901 cpu_resume_from_signal(cpu, NULL);
1904 } else {
1905 wp->flags &= ~BP_WATCHPOINT_HIT;
1910 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
1911 so these check for a hit then pass through to the normal out-of-line
1912 phys routines. */
1913 static uint64_t watch_mem_read(void *opaque, hwaddr addr,
1914 unsigned size)
1916 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, BP_MEM_READ);
1917 switch (size) {
1918 case 1: return ldub_phys(&address_space_memory, addr);
1919 case 2: return lduw_phys(&address_space_memory, addr);
1920 case 4: return ldl_phys(&address_space_memory, addr);
1921 default: abort();
1925 static void watch_mem_write(void *opaque, hwaddr addr,
1926 uint64_t val, unsigned size)
1928 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, BP_MEM_WRITE);
1929 switch (size) {
1930 case 1:
1931 stb_phys(&address_space_memory, addr, val);
1932 break;
1933 case 2:
1934 stw_phys(&address_space_memory, addr, val);
1935 break;
1936 case 4:
1937 stl_phys(&address_space_memory, addr, val);
1938 break;
1939 default: abort();
1943 static const MemoryRegionOps watch_mem_ops = {
1944 .read = watch_mem_read,
1945 .write = watch_mem_write,
1946 .endianness = DEVICE_NATIVE_ENDIAN,
1949 static uint64_t subpage_read(void *opaque, hwaddr addr,
1950 unsigned len)
1952 subpage_t *subpage = opaque;
1953 uint8_t buf[8];
1955 #if defined(DEBUG_SUBPAGE)
1956 printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
1957 subpage, len, addr);
1958 #endif
1959 address_space_read(subpage->as, addr + subpage->base, buf, len);
1960 switch (len) {
1961 case 1:
1962 return ldub_p(buf);
1963 case 2:
1964 return lduw_p(buf);
1965 case 4:
1966 return ldl_p(buf);
1967 case 8:
1968 return ldq_p(buf);
1969 default:
1970 abort();
1974 static void subpage_write(void *opaque, hwaddr addr,
1975 uint64_t value, unsigned len)
1977 subpage_t *subpage = opaque;
1978 uint8_t buf[8];
1980 #if defined(DEBUG_SUBPAGE)
1981 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
1982 " value %"PRIx64"\n",
1983 __func__, subpage, len, addr, value);
1984 #endif
1985 switch (len) {
1986 case 1:
1987 stb_p(buf, value);
1988 break;
1989 case 2:
1990 stw_p(buf, value);
1991 break;
1992 case 4:
1993 stl_p(buf, value);
1994 break;
1995 case 8:
1996 stq_p(buf, value);
1997 break;
1998 default:
1999 abort();
2001 address_space_write(subpage->as, addr + subpage->base, buf, len);
2004 static bool subpage_accepts(void *opaque, hwaddr addr,
2005 unsigned len, bool is_write)
2007 subpage_t *subpage = opaque;
2008 #if defined(DEBUG_SUBPAGE)
2009 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
2010 __func__, subpage, is_write ? 'w' : 'r', len, addr);
2011 #endif
2013 return address_space_access_valid(subpage->as, addr + subpage->base,
2014 len, is_write);
2017 static const MemoryRegionOps subpage_ops = {
2018 .read = subpage_read,
2019 .write = subpage_write,
2020 .impl.min_access_size = 1,
2021 .impl.max_access_size = 8,
2022 .valid.min_access_size = 1,
2023 .valid.max_access_size = 8,
2024 .valid.accepts = subpage_accepts,
2025 .endianness = DEVICE_NATIVE_ENDIAN,
2028 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2029 uint16_t section)
2031 int idx, eidx;
2033 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2034 return -1;
2035 idx = SUBPAGE_IDX(start);
2036 eidx = SUBPAGE_IDX(end);
2037 #if defined(DEBUG_SUBPAGE)
2038 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
2039 __func__, mmio, start, end, idx, eidx, section);
2040 #endif
2041 for (; idx <= eidx; idx++) {
2042 mmio->sub_section[idx] = section;
2045 return 0;
2048 static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
2050 subpage_t *mmio;
2052 mmio = g_malloc0(sizeof(subpage_t));
2054 mmio->as = as;
2055 mmio->base = base;
2056 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
2057 NULL, TARGET_PAGE_SIZE);
2058 mmio->iomem.subpage = true;
2059 #if defined(DEBUG_SUBPAGE)
2060 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
2061 mmio, base, TARGET_PAGE_SIZE);
2062 #endif
2063 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
2065 return mmio;
2068 static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
2069 MemoryRegion *mr)
2071 assert(as);
2072 MemoryRegionSection section = {
2073 .address_space = as,
2074 .mr = mr,
2075 .offset_within_address_space = 0,
2076 .offset_within_region = 0,
2077 .size = int128_2_64(),
2080 return phys_section_add(map, &section);
2083 MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index)
2085 AddressSpaceDispatch *d = atomic_rcu_read(&cpu->memory_dispatch);
2086 MemoryRegionSection *sections = d->map.sections;
2088 return sections[index & ~TARGET_PAGE_MASK].mr;
2091 static void io_mem_init(void)
2093 memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
2094 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
2095 NULL, UINT64_MAX);
2096 memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
2097 NULL, UINT64_MAX);
2098 memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
2099 NULL, UINT64_MAX);
2102 static void mem_begin(MemoryListener *listener)
2104 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
2105 AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
2106 uint16_t n;
2108 n = dummy_section(&d->map, as, &io_mem_unassigned);
2109 assert(n == PHYS_SECTION_UNASSIGNED);
2110 n = dummy_section(&d->map, as, &io_mem_notdirty);
2111 assert(n == PHYS_SECTION_NOTDIRTY);
2112 n = dummy_section(&d->map, as, &io_mem_rom);
2113 assert(n == PHYS_SECTION_ROM);
2114 n = dummy_section(&d->map, as, &io_mem_watch);
2115 assert(n == PHYS_SECTION_WATCH);
2117 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
2118 d->as = as;
2119 as->next_dispatch = d;
2122 static void address_space_dispatch_free(AddressSpaceDispatch *d)
2124 phys_sections_free(&d->map);
2125 g_free(d);
2128 static void mem_commit(MemoryListener *listener)
2130 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
2131 AddressSpaceDispatch *cur = as->dispatch;
2132 AddressSpaceDispatch *next = as->next_dispatch;
2134 phys_page_compact_all(next, next->map.nodes_nb);
2136 atomic_rcu_set(&as->dispatch, next);
2137 if (cur) {
2138 call_rcu(cur, address_space_dispatch_free, rcu);
2142 static void tcg_commit(MemoryListener *listener)
2144 CPUState *cpu;
2146 /* since each CPU stores ram addresses in its TLB cache, we must
2147 reset the modified entries */
2148 /* XXX: slow ! */
2149 CPU_FOREACH(cpu) {
2150 /* FIXME: Disentangle the cpu.h circular file deps so we can
2151 directly get the right CPU from listener. */
2152 if (cpu->tcg_as_listener != listener) {
2153 continue;
2155 cpu_reload_memory_map(cpu);
2159 static void core_log_global_start(MemoryListener *listener)
2161 cpu_physical_memory_set_dirty_tracking(true);
2164 static void core_log_global_stop(MemoryListener *listener)
2166 cpu_physical_memory_set_dirty_tracking(false);
2169 static MemoryListener core_memory_listener = {
2170 .log_global_start = core_log_global_start,
2171 .log_global_stop = core_log_global_stop,
2172 .priority = 1,
2175 void address_space_init_dispatch(AddressSpace *as)
2177 as->dispatch = NULL;
2178 as->dispatch_listener = (MemoryListener) {
2179 .begin = mem_begin,
2180 .commit = mem_commit,
2181 .region_add = mem_add,
2182 .region_nop = mem_add,
2183 .priority = 0,
2185 memory_listener_register(&as->dispatch_listener, as);
2188 void address_space_unregister(AddressSpace *as)
2190 memory_listener_unregister(&as->dispatch_listener);
2193 void address_space_destroy_dispatch(AddressSpace *as)
2195 AddressSpaceDispatch *d = as->dispatch;
2197 atomic_rcu_set(&as->dispatch, NULL);
2198 if (d) {
2199 call_rcu(d, address_space_dispatch_free, rcu);
2203 static void memory_map_init(void)
2205 system_memory = g_malloc(sizeof(*system_memory));
2207 memory_region_init(system_memory, NULL, "system", UINT64_MAX);
2208 address_space_init(&address_space_memory, system_memory, "memory");
2210 system_io = g_malloc(sizeof(*system_io));
2211 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
2212 65536);
2213 address_space_init(&address_space_io, system_io, "I/O");
2215 memory_listener_register(&core_memory_listener, &address_space_memory);
2218 MemoryRegion *get_system_memory(void)
2220 return system_memory;
2223 MemoryRegion *get_system_io(void)
2225 return system_io;
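/*
 * Hedged usage sketch (assumption, not from this file): board code
 * typically takes the root region returned above and maps RAM or device
 * regions into it, for example:
 *
 *     MemoryRegion *ram = g_new(MemoryRegion, 1);
 *     memory_region_init_ram(ram, NULL, "my-board.ram", ram_size, &error_abort);
 *     vmstate_register_ram_global(ram);
 *     memory_region_add_subregion(get_system_memory(), 0, ram);
 *
 * "my-board.ram" and ram_size are placeholders, and the exact signature of
 * the RAM init helper varies between QEMU versions.
 */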
2228 #endif /* !defined(CONFIG_USER_ONLY) */
2230 /* physical memory access (slow version, mainly for debug) */
2231 #if defined(CONFIG_USER_ONLY)
2232 int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
2233 uint8_t *buf, int len, int is_write)
2235 int l, flags;
2236 target_ulong page;
2237 void *p;
2239 while (len > 0) {
2240 page = addr & TARGET_PAGE_MASK;
2241 l = (page + TARGET_PAGE_SIZE) - addr;
2242 if (l > len)
2243 l = len;
2244 flags = page_get_flags(page);
2245 if (!(flags & PAGE_VALID))
2246 return -1;
2247 if (is_write) {
2248 if (!(flags & PAGE_WRITE))
2249 return -1;
2250 /* XXX: this code should not depend on lock_user */
2251 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
2252 return -1;
2253 memcpy(p, buf, l);
2254 unlock_user(p, addr, l);
2255 } else {
2256 if (!(flags & PAGE_READ))
2257 return -1;
2258 /* XXX: this code should not depend on lock_user */
2259 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
2260 return -1;
2261 memcpy(buf, p, l);
2262 unlock_user(p, addr, 0);
2264 len -= l;
2265 buf += l;
2266 addr += l;
2268 return 0;
2271 #else
2273 static void invalidate_and_set_dirty(hwaddr addr,
2274 hwaddr length)
2276 if (cpu_physical_memory_range_includes_clean(addr, length)) {
2277 tb_invalidate_phys_range(addr, addr + length, 0);
2278 cpu_physical_memory_set_dirty_range_nocode(addr, length);
2280 xen_modified_memory(addr, length);
2283 static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
2285 unsigned access_size_max = mr->ops->valid.max_access_size;
2287 /* Regions are assumed to support 1-4 byte accesses unless
2288 otherwise specified. */
2289 if (access_size_max == 0) {
2290 access_size_max = 4;
2293 /* Bound the maximum access by the alignment of the address. */
2294 if (!mr->ops->impl.unaligned) {
2295 unsigned align_size_max = addr & -addr;
2296 if (align_size_max != 0 && align_size_max < access_size_max) {
2297 access_size_max = align_size_max;
2301 /* Don't attempt accesses larger than the maximum. */
2302 if (l > access_size_max) {
2303 l = access_size_max;
2305 if (l & (l - 1)) {
2306 l = 1 << (qemu_fls(l) - 1);
2309 return l;
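/*
 * Worked example (illustrative only): for a region that does not allow
 * unaligned accesses and advertises max_access_size == 4, a request of
 * l == 8 at addr == 0x1006 is clamped as follows: the alignment bound is
 * addr & -addr == 2, so access_size_max drops to 2 and the caller's loop
 * issues 2-byte pieces.  Similarly, l == 3 with a 4-byte bound is rounded
 * down to the next power of two, 2, by the qemu_fls() step.
 */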
2312 bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
2313 int len, bool is_write)
2315 hwaddr l;
2316 uint8_t *ptr;
2317 uint64_t val;
2318 hwaddr addr1;
2319 MemoryRegion *mr;
2320 bool error = false;
2322 while (len > 0) {
2323 l = len;
2324 mr = address_space_translate(as, addr, &addr1, &l, is_write);
2326 if (is_write) {
2327 if (!memory_access_is_direct(mr, is_write)) {
2328 l = memory_access_size(mr, l, addr1);
2329 /* XXX: could force current_cpu to NULL to avoid
2330 potential bugs */
2331 switch (l) {
2332 case 8:
2333 /* 64 bit write access */
2334 val = ldq_p(buf);
2335 error |= io_mem_write(mr, addr1, val, 8);
2336 break;
2337 case 4:
2338 /* 32 bit write access */
2339 val = ldl_p(buf);
2340 error |= io_mem_write(mr, addr1, val, 4);
2341 break;
2342 case 2:
2343 /* 16 bit write access */
2344 val = lduw_p(buf);
2345 error |= io_mem_write(mr, addr1, val, 2);
2346 break;
2347 case 1:
2348 /* 8 bit write access */
2349 val = ldub_p(buf);
2350 error |= io_mem_write(mr, addr1, val, 1);
2351 break;
2352 default:
2353 abort();
2355 } else {
2356 addr1 += memory_region_get_ram_addr(mr);
2357 /* RAM case */
2358 ptr = qemu_get_ram_ptr(addr1);
2359 memcpy(ptr, buf, l);
2360 invalidate_and_set_dirty(addr1, l);
2362 } else {
2363 if (!memory_access_is_direct(mr, is_write)) {
2364 /* I/O case */
2365 l = memory_access_size(mr, l, addr1);
2366 switch (l) {
2367 case 8:
2368 /* 64 bit read access */
2369 error |= io_mem_read(mr, addr1, &val, 8);
2370 stq_p(buf, val);
2371 break;
2372 case 4:
2373 /* 32 bit read access */
2374 error |= io_mem_read(mr, addr1, &val, 4);
2375 stl_p(buf, val);
2376 break;
2377 case 2:
2378 /* 16 bit read access */
2379 error |= io_mem_read(mr, addr1, &val, 2);
2380 stw_p(buf, val);
2381 break;
2382 case 1:
2383 /* 8 bit read access */
2384 error |= io_mem_read(mr, addr1, &val, 1);
2385 stb_p(buf, val);
2386 break;
2387 default:
2388 abort();
2390 } else {
2391 /* RAM case */
2392 ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
2393 memcpy(buf, ptr, l);
2396 len -= l;
2397 buf += l;
2398 addr += l;
2401 return error;
2404 bool address_space_write(AddressSpace *as, hwaddr addr,
2405 const uint8_t *buf, int len)
2407 return address_space_rw(as, addr, (uint8_t *)buf, len, true);
2410 bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
2412 return address_space_rw(as, addr, buf, len, false);
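/*
 * Hedged usage sketch (not from this file): writing a few bytes into guest
 * physical memory through the generic path above.  For direct RAM the
 * buffer is copied byte-for-byte; accesses that hit MMIO are broken into
 * device-sized pieces by memory_access_size().  The call returns true if a
 * device access reported an error:
 *
 *     uint8_t buf[4] = { 0x12, 0x34, 0x56, 0x78 };
 *     if (address_space_write(&address_space_memory, gpa, buf, sizeof(buf))) {
 *         ... handle the I/O error ...
 *     }
 *
 * 'gpa' is a placeholder guest physical address.
 */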
2416 void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
2417 int len, int is_write)
2419 address_space_rw(&address_space_memory, addr, buf, len, is_write);
2422 enum write_rom_type {
2423 WRITE_DATA,
2424 FLUSH_CACHE,
2427 static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
2428 hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
2430 hwaddr l;
2431 uint8_t *ptr;
2432 hwaddr addr1;
2433 MemoryRegion *mr;
2435 while (len > 0) {
2436 l = len;
2437 mr = address_space_translate(as, addr, &addr1, &l, true);
2439 if (!(memory_region_is_ram(mr) ||
2440 memory_region_is_romd(mr))) {
2441 /* do nothing */
2442 } else {
2443 addr1 += memory_region_get_ram_addr(mr);
2444 /* ROM/RAM case */
2445 ptr = qemu_get_ram_ptr(addr1);
2446 switch (type) {
2447 case WRITE_DATA:
2448 memcpy(ptr, buf, l);
2449 invalidate_and_set_dirty(addr1, l);
2450 break;
2451 case FLUSH_CACHE:
2452 flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
2453 break;
2456 len -= l;
2457 buf += l;
2458 addr += l;
2462 /* used for ROM loading: can write to both RAM and ROM */
2463 void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
2464 const uint8_t *buf, int len)
2466 cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
2469 void cpu_flush_icache_range(hwaddr start, int len)
2472 * This function should do the same thing as an icache flush that was
2473 * triggered from within the guest. For TCG we are always cache coherent,
2474 * so there is no need to flush anything. For KVM / Xen we need to flush
2475 * the host's instruction cache at least.
2477 if (tcg_enabled()) {
2478 return;
2481 cpu_physical_memory_write_rom_internal(&address_space_memory,
2482 start, NULL, len, FLUSH_CACHE);
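/*
 * Hedged usage sketch (not from this file): a firmware or kernel loader
 * writes executable code with cpu_physical_memory_write_rom() and then
 * flushes the host instruction cache for the benefit of KVM/Xen; under TCG
 * the flush is a no-op, as explained above:
 *
 *     cpu_physical_memory_write_rom(&address_space_memory, load_addr,
 *                                   code, code_size);
 *     cpu_flush_icache_range(load_addr, code_size);
 *
 * 'load_addr', 'code' and 'code_size' are placeholders.
 */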
2485 typedef struct {
2486 MemoryRegion *mr;
2487 void *buffer;
2488 hwaddr addr;
2489 hwaddr len;
2490 } BounceBuffer;
2492 static BounceBuffer bounce;
2494 typedef struct MapClient {
2495 void *opaque;
2496 void (*callback)(void *opaque);
2497 QLIST_ENTRY(MapClient) link;
2498 } MapClient;
2500 static QLIST_HEAD(map_client_list, MapClient) map_client_list
2501 = QLIST_HEAD_INITIALIZER(map_client_list);
2503 void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
2505 MapClient *client = g_malloc(sizeof(*client));
2507 client->opaque = opaque;
2508 client->callback = callback;
2509 QLIST_INSERT_HEAD(&map_client_list, client, link);
2510 return client;
2513 static void cpu_unregister_map_client(void *_client)
2515 MapClient *client = (MapClient *)_client;
2517 QLIST_REMOVE(client, link);
2518 g_free(client);
2521 static void cpu_notify_map_clients(void)
2523 MapClient *client;
2525 while (!QLIST_EMPTY(&map_client_list)) {
2526 client = QLIST_FIRST(&map_client_list);
2527 client->callback(client->opaque);
2528 cpu_unregister_map_client(client);
2532 bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2534 MemoryRegion *mr;
2535 hwaddr l, xlat;
2537 while (len > 0) {
2538 l = len;
2539 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2540 if (!memory_access_is_direct(mr, is_write)) {
2541 l = memory_access_size(mr, l, addr);
2542 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
2543 return false;
2547 len -= l;
2548 addr += l;
2550 return true;
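/*
 * Hedged usage sketch (not from this file): a device model can probe
 * whether a guest-supplied DMA window is backed by something that will
 * accept the access before committing to it:
 *
 *     if (!address_space_access_valid(&address_space_memory,
 *                                     dma_addr, dma_len, true)) {
 *         return;   (reject the request instead of writing)
 *     }
 *
 * 'dma_addr' and 'dma_len' are placeholders.
 */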
2553 /* Map a physical memory region into a host virtual address.
2554 * May map a subset of the requested range, given by and returned in *plen.
2555 * May return NULL if resources needed to perform the mapping are exhausted.
2556 * Use only for reads OR writes - not for read-modify-write operations.
2557 * Use cpu_register_map_client() to know when retrying the map operation is
2558 * likely to succeed.
2560 void *address_space_map(AddressSpace *as,
2561 hwaddr addr,
2562 hwaddr *plen,
2563 bool is_write)
2565 hwaddr len = *plen;
2566 hwaddr done = 0;
2567 hwaddr l, xlat, base;
2568 MemoryRegion *mr, *this_mr;
2569 ram_addr_t raddr;
2571 if (len == 0) {
2572 return NULL;
2575 l = len;
2576 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2577 if (!memory_access_is_direct(mr, is_write)) {
2578 if (bounce.buffer) {
2579 return NULL;
2581 /* Avoid unbounded allocations */
2582 l = MIN(l, TARGET_PAGE_SIZE);
2583 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
2584 bounce.addr = addr;
2585 bounce.len = l;
2587 memory_region_ref(mr);
2588 bounce.mr = mr;
2589 if (!is_write) {
2590 address_space_read(as, addr, bounce.buffer, l);
2593 *plen = l;
2594 return bounce.buffer;
2597 base = xlat;
2598 raddr = memory_region_get_ram_addr(mr);
2600 for (;;) {
2601 len -= l;
2602 addr += l;
2603 done += l;
2604 if (len == 0) {
2605 break;
2608 l = len;
2609 this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
2610 if (this_mr != mr || xlat != base + done) {
2611 break;
2615 memory_region_ref(mr);
2616 *plen = done;
2617 return qemu_ram_ptr_length(raddr + base, plen);
2620 /* Unmaps a memory region previously mapped by address_space_map().
2621 * Will also mark the memory as dirty if is_write == 1. access_len gives
2622 * the amount of memory that was actually read or written by the caller.
2624 void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2625 int is_write, hwaddr access_len)
2627 if (buffer != bounce.buffer) {
2628 MemoryRegion *mr;
2629 ram_addr_t addr1;
2631 mr = qemu_ram_addr_from_host(buffer, &addr1);
2632 assert(mr != NULL);
2633 if (is_write) {
2634 invalidate_and_set_dirty(addr1, access_len);
2636 if (xen_enabled()) {
2637 xen_invalidate_map_cache_entry(buffer);
2639 memory_region_unref(mr);
2640 return;
2642 if (is_write) {
2643 address_space_write(as, bounce.addr, bounce.buffer, access_len);
2645 qemu_vfree(bounce.buffer);
2646 bounce.buffer = NULL;
2647 memory_region_unref(bounce.mr);
2648 cpu_notify_map_clients();
2651 void *cpu_physical_memory_map(hwaddr addr,
2652 hwaddr *plen,
2653 int is_write)
2655 return address_space_map(&address_space_memory, addr, plen, is_write);
2658 void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2659 int is_write, hwaddr access_len)
2661 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
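/*
 * Hedged usage sketch (not from this file): the typical zero-copy DMA
 * pattern built on the map/unmap pair above.  When the target is not
 * directly accessible, address_space_map() falls back to the single bounce
 * buffer and may return NULL while that buffer is busy; callers can use
 * cpu_register_map_client() to learn when a retry is likely to succeed:
 *
 *     hwaddr len = size;
 *     void *host = cpu_physical_memory_map(gpa, &len, 1);
 *     if (!host) {
 *         cpu_register_map_client(dev, my_retry_cb);   (hypothetical callback)
 *         return;
 *     }
 *     ... read or write host[0 .. len) ...
 *     cpu_physical_memory_unmap(host, len, 1, len);
 *
 * 'size', 'gpa', 'dev' and 'my_retry_cb' are placeholders.
 */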
2664 /* warning: addr must be aligned */
2665 static inline uint32_t ldl_phys_internal(AddressSpace *as, hwaddr addr,
2666 enum device_endian endian)
2668 uint8_t *ptr;
2669 uint64_t val;
2670 MemoryRegion *mr;
2671 hwaddr l = 4;
2672 hwaddr addr1;
2674 mr = address_space_translate(as, addr, &addr1, &l, false);
2675 if (l < 4 || !memory_access_is_direct(mr, false)) {
2676 /* I/O case */
2677 io_mem_read(mr, addr1, &val, 4);
2678 #if defined(TARGET_WORDS_BIGENDIAN)
2679 if (endian == DEVICE_LITTLE_ENDIAN) {
2680 val = bswap32(val);
2682 #else
2683 if (endian == DEVICE_BIG_ENDIAN) {
2684 val = bswap32(val);
2686 #endif
2687 } else {
2688 /* RAM case */
2689 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
2690 & TARGET_PAGE_MASK)
2691 + addr1);
2692 switch (endian) {
2693 case DEVICE_LITTLE_ENDIAN:
2694 val = ldl_le_p(ptr);
2695 break;
2696 case DEVICE_BIG_ENDIAN:
2697 val = ldl_be_p(ptr);
2698 break;
2699 default:
2700 val = ldl_p(ptr);
2701 break;
2704 return val;
2707 uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
2709 return ldl_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
2712 uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
2714 return ldl_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
2717 uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
2719 return ldl_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
2722 /* warning: addr must be aligned */
2723 static inline uint64_t ldq_phys_internal(AddressSpace *as, hwaddr addr,
2724 enum device_endian endian)
2726 uint8_t *ptr;
2727 uint64_t val;
2728 MemoryRegion *mr;
2729 hwaddr l = 8;
2730 hwaddr addr1;
2732 mr = address_space_translate(as, addr, &addr1, &l,
2733 false);
2734 if (l < 8 || !memory_access_is_direct(mr, false)) {
2735 /* I/O case */
2736 io_mem_read(mr, addr1, &val, 8);
2737 #if defined(TARGET_WORDS_BIGENDIAN)
2738 if (endian == DEVICE_LITTLE_ENDIAN) {
2739 val = bswap64(val);
2741 #else
2742 if (endian == DEVICE_BIG_ENDIAN) {
2743 val = bswap64(val);
2745 #endif
2746 } else {
2747 /* RAM case */
2748 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
2749 & TARGET_PAGE_MASK)
2750 + addr1);
2751 switch (endian) {
2752 case DEVICE_LITTLE_ENDIAN:
2753 val = ldq_le_p(ptr);
2754 break;
2755 case DEVICE_BIG_ENDIAN:
2756 val = ldq_be_p(ptr);
2757 break;
2758 default:
2759 val = ldq_p(ptr);
2760 break;
2763 return val;
2766 uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
2768 return ldq_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
2771 uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
2773 return ldq_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
2776 uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
2778 return ldq_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
2781 /* XXX: optimize */
2782 uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
2784 uint8_t val;
2785 address_space_rw(as, addr, &val, 1, 0);
2786 return val;
2789 /* warning: addr must be aligned */
2790 static inline uint32_t lduw_phys_internal(AddressSpace *as, hwaddr addr,
2791 enum device_endian endian)
2793 uint8_t *ptr;
2794 uint64_t val;
2795 MemoryRegion *mr;
2796 hwaddr l = 2;
2797 hwaddr addr1;
2799 mr = address_space_translate(as, addr, &addr1, &l,
2800 false);
2801 if (l < 2 || !memory_access_is_direct(mr, false)) {
2802 /* I/O case */
2803 io_mem_read(mr, addr1, &val, 2);
2804 #if defined(TARGET_WORDS_BIGENDIAN)
2805 if (endian == DEVICE_LITTLE_ENDIAN) {
2806 val = bswap16(val);
2808 #else
2809 if (endian == DEVICE_BIG_ENDIAN) {
2810 val = bswap16(val);
2812 #endif
2813 } else {
2814 /* RAM case */
2815 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
2816 & TARGET_PAGE_MASK)
2817 + addr1);
2818 switch (endian) {
2819 case DEVICE_LITTLE_ENDIAN:
2820 val = lduw_le_p(ptr);
2821 break;
2822 case DEVICE_BIG_ENDIAN:
2823 val = lduw_be_p(ptr);
2824 break;
2825 default:
2826 val = lduw_p(ptr);
2827 break;
2830 return val;
2833 uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
2835 return lduw_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
2838 uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
2840 return lduw_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
2843 uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
2845 return lduw_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
2848 /* warning: addr must be aligned. The RAM page is not marked as dirty
2849 and the code inside is not invalidated. This is useful when the dirty
2850 bits are used to track modified PTEs. */
2851 void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
2853 uint8_t *ptr;
2854 MemoryRegion *mr;
2855 hwaddr l = 4;
2856 hwaddr addr1;
2858 mr = address_space_translate(as, addr, &addr1, &l,
2859 true);
2860 if (l < 4 || !memory_access_is_direct(mr, true)) {
2861 io_mem_write(mr, addr1, val, 4);
2862 } else {
2863 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
2864 ptr = qemu_get_ram_ptr(addr1);
2865 stl_p(ptr, val);
2867 if (unlikely(in_migration)) {
2868 if (cpu_physical_memory_is_clean(addr1)) {
2869 /* invalidate code */
2870 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2871 /* set dirty bit */
2872 cpu_physical_memory_set_dirty_range_nocode(addr1, 4);
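/*
 * Hedged usage sketch (not from this file): target MMU code that updates a
 * guest page-table entry can use the helper above so its own store does not
 * disturb the dirty tracking described in the comment, for example:
 *
 *     pte |= PTE_ACCESSED;                      (hypothetical flag)
 *     stl_phys_notdirty(cpu->as, pte_addr, pte);
 *
 * 'PTE_ACCESSED', 'pte' and 'pte_addr' stand in for target-specific values.
 */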
2878 /* warning: addr must be aligned */
2879 static inline void stl_phys_internal(AddressSpace *as,
2880 hwaddr addr, uint32_t val,
2881 enum device_endian endian)
2883 uint8_t *ptr;
2884 MemoryRegion *mr;
2885 hwaddr l = 4;
2886 hwaddr addr1;
2888 mr = address_space_translate(as, addr, &addr1, &l,
2889 true);
2890 if (l < 4 || !memory_access_is_direct(mr, true)) {
2891 #if defined(TARGET_WORDS_BIGENDIAN)
2892 if (endian == DEVICE_LITTLE_ENDIAN) {
2893 val = bswap32(val);
2895 #else
2896 if (endian == DEVICE_BIG_ENDIAN) {
2897 val = bswap32(val);
2899 #endif
2900 io_mem_write(mr, addr1, val, 4);
2901 } else {
2902 /* RAM case */
2903 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
2904 ptr = qemu_get_ram_ptr(addr1);
2905 switch (endian) {
2906 case DEVICE_LITTLE_ENDIAN:
2907 stl_le_p(ptr, val);
2908 break;
2909 case DEVICE_BIG_ENDIAN:
2910 stl_be_p(ptr, val);
2911 break;
2912 default:
2913 stl_p(ptr, val);
2914 break;
2916 invalidate_and_set_dirty(addr1, 4);
2920 void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
2922 stl_phys_internal(as, addr, val, DEVICE_NATIVE_ENDIAN);
2925 void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
2927 stl_phys_internal(as, addr, val, DEVICE_LITTLE_ENDIAN);
2930 void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
2932 stl_phys_internal(as, addr, val, DEVICE_BIG_ENDIAN);
2935 /* XXX: optimize */
2936 void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
2938 uint8_t v = val;
2939 address_space_rw(as, addr, &v, 1, 1);
2942 /* warning: addr must be aligned */
2943 static inline void stw_phys_internal(AddressSpace *as,
2944 hwaddr addr, uint32_t val,
2945 enum device_endian endian)
2947 uint8_t *ptr;
2948 MemoryRegion *mr;
2949 hwaddr l = 2;
2950 hwaddr addr1;
2952 mr = address_space_translate(as, addr, &addr1, &l, true);
2953 if (l < 2 || !memory_access_is_direct(mr, true)) {
2954 #if defined(TARGET_WORDS_BIGENDIAN)
2955 if (endian == DEVICE_LITTLE_ENDIAN) {
2956 val = bswap16(val);
2958 #else
2959 if (endian == DEVICE_BIG_ENDIAN) {
2960 val = bswap16(val);
2962 #endif
2963 io_mem_write(mr, addr1, val, 2);
2964 } else {
2965 /* RAM case */
2966 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
2967 ptr = qemu_get_ram_ptr(addr1);
2968 switch (endian) {
2969 case DEVICE_LITTLE_ENDIAN:
2970 stw_le_p(ptr, val);
2971 break;
2972 case DEVICE_BIG_ENDIAN:
2973 stw_be_p(ptr, val);
2974 break;
2975 default:
2976 stw_p(ptr, val);
2977 break;
2979 invalidate_and_set_dirty(addr1, 2);
2983 void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
2985 stw_phys_internal(as, addr, val, DEVICE_NATIVE_ENDIAN);
2988 void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
2990 stw_phys_internal(as, addr, val, DEVICE_LITTLE_ENDIAN);
2993 void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
2995 stw_phys_internal(as, addr, val, DEVICE_BIG_ENDIAN);
2998 /* XXX: optimize */
2999 void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
3001 val = tswap64(val);
3002 address_space_rw(as, addr, (void *) &val, 8, 1);
3005 void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
3007 val = cpu_to_le64(val);
3008 address_space_rw(as, addr, (void *) &val, 8, 1);
3011 void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
3013 val = cpu_to_be64(val);
3014 address_space_rw(as, addr, (void *) &val, 8, 1);
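/*
 * Hedged usage sketch (not from this file): the _le/_be variants above
 * store and load in a fixed byte order regardless of host or target
 * endianness, so this round trip yields 0x11223344 on any build:
 *
 *     stl_le_phys(&address_space_memory, gpa, 0x11223344);
 *     uint32_t v = ldl_le_phys(&address_space_memory, gpa);
 *
 * whereas stl_phys()/ldl_phys() use the target's native byte order.
 * 'gpa' is a placeholder guest physical address backed by RAM.
 */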
3017 /* virtual memory access for debug (includes writing to ROM) */
3018 int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
3019 uint8_t *buf, int len, int is_write)
3021 int l;
3022 hwaddr phys_addr;
3023 target_ulong page;
3025 while (len > 0) {
3026 page = addr & TARGET_PAGE_MASK;
3027 phys_addr = cpu_get_phys_page_debug(cpu, page);
3028 /* if no physical page mapped, return an error */
3029 if (phys_addr == -1)
3030 return -1;
3031 l = (page + TARGET_PAGE_SIZE) - addr;
3032 if (l > len)
3033 l = len;
3034 phys_addr += (addr & ~TARGET_PAGE_MASK);
3035 if (is_write) {
3036 cpu_physical_memory_write_rom(cpu->as, phys_addr, buf, l);
3037 } else {
3038 address_space_rw(cpu->as, phys_addr, buf, l, 0);
3040 len -= l;
3041 buf += l;
3042 addr += l;
3044 return 0;
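/*
 * Hedged usage sketch (not from this file): this is the path used by the
 * gdb stub and monitor-style debug accesses to read or patch guest memory
 * through the CPU's virtual address space, e.g. fetching the bytes at a
 * guest virtual address:
 *
 *     uint8_t insn[4];
 *     if (cpu_memory_rw_debug(cpu, vaddr, insn, sizeof(insn), 0) < 0) {
 *         ... the page is not mapped ...
 *     }
 *
 * 'vaddr' is a placeholder guest virtual address.
 */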
3046 #endif
3049 * A helper function for the _utterly broken_ virtio device model to find out if
3050 * it's running on a big endian machine. Don't do this at home kids!
3052 bool target_words_bigendian(void);
3053 bool target_words_bigendian(void)
3055 #if defined(TARGET_WORDS_BIGENDIAN)
3056 return true;
3057 #else
3058 return false;
3059 #endif
3062 #ifndef CONFIG_USER_ONLY
3063 bool cpu_physical_memory_is_io(hwaddr phys_addr)
3065 MemoryRegion *mr;
3066 hwaddr l = 1;
3068 mr = address_space_translate(&address_space_memory,
3069 phys_addr, &phys_addr, &l, false);
3071 return !(memory_region_is_ram(mr) ||
3072 memory_region_is_romd(mr));
3075 void qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
3077 RAMBlock *block;
3079 rcu_read_lock();
3080 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
3081 func(block->host, block->offset, block->used_length, opaque);
3083 rcu_read_unlock();
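/*
 * Hedged usage sketch (not from this file): a callback matching the
 * iterator above receives each block's host pointer, ram_addr_t offset and
 * used length, e.g. summing up all guest RAM:
 *
 *     static void count_ram(void *host, ram_addr_t offset,
 *                           ram_addr_t length, void *opaque)
 *     {
 *         *(uint64_t *)opaque += length;
 *     }
 *
 *     uint64_t total = 0;
 *     qemu_ram_foreach_block(count_ram, &total);
 *
 * 'count_ram' is a hypothetical callback; its parameter order mirrors the
 * call made in the loop above.
 */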
3085 #endif