[qemu/ar7.git] / exec.c
1 /*
2 * Virtual page mapping
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 #include "config.h"
20 #ifndef _WIN32
21 #include <sys/types.h>
22 #include <sys/mman.h>
23 #endif
25 #include "qemu-common.h"
26 #include "cpu.h"
27 #include "tcg.h"
28 #include "hw/hw.h"
29 #if !defined(CONFIG_USER_ONLY)
30 #include "hw/boards.h"
31 #endif
32 #include "hw/qdev.h"
33 #include "qemu/osdep.h"
34 #include "sysemu/kvm.h"
35 #include "sysemu/sysemu.h"
36 #include "hw/xen/xen.h"
37 #include "qemu/timer.h"
38 #include "qemu/config-file.h"
39 #include "qemu/error-report.h"
40 #include "exec/memory.h"
41 #include "sysemu/dma.h"
42 #include "exec/address-spaces.h"
43 #if defined(CONFIG_USER_ONLY)
44 #include <qemu.h>
45 #else /* !CONFIG_USER_ONLY */
46 #include "sysemu/xen-mapcache.h"
47 #include "trace.h"
48 #endif
49 #include "exec/cpu-all.h"
50 #include "qemu/rcu_queue.h"
51 #include "exec/cputlb.h"
52 #include "translate-all.h"
54 #include "exec/memory-internal.h"
55 #include "exec/ram_addr.h"
57 #include "qemu/range.h"
59 //#define DEBUG_SUBPAGE
61 #if !defined(CONFIG_USER_ONLY)
62 static bool in_migration;
64 /* ram_list is read under rcu_read_lock()/rcu_read_unlock(). Writes
65 * are protected by the ramlist lock.
67 RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };
69 static MemoryRegion *system_memory;
70 static MemoryRegion *system_io;
72 AddressSpace address_space_io;
73 AddressSpace address_space_memory;
75 MemoryRegion io_mem_rom, io_mem_notdirty;
76 static MemoryRegion io_mem_unassigned;
78 /* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
79 #define RAM_PREALLOC (1 << 0)
81 /* RAM is mmap-ed with MAP_SHARED */
82 #define RAM_SHARED (1 << 1)
 84 /* Only a portion of RAM (used_length) is actually used and migrated.
 85  * This used_length can change across reboots.
87 #define RAM_RESIZEABLE (1 << 2)
89 #endif
91 struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
92 /* current CPU in the current thread. It is only valid inside
93 cpu_exec() */
94 DEFINE_TLS(CPUState *, current_cpu);
95 /* 0 = Do not count executed instructions.
96 1 = Precise instruction counting.
97 2 = Adaptive rate instruction counting. */
98 int use_icount;
100 #if !defined(CONFIG_USER_ONLY)
102 typedef struct PhysPageEntry PhysPageEntry;
104 struct PhysPageEntry {
 105     /* How many bits to skip to the next level (in units of P_L2_SIZE). 0 for a leaf. */
106 uint32_t skip : 6;
107 /* index into phys_sections (!skip) or phys_map_nodes (skip) */
108 uint32_t ptr : 26;
111 #define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)
113 /* Size of the L2 (and L3, etc) page tables. */
114 #define ADDR_SPACE_BITS 64
116 #define P_L2_BITS 9
117 #define P_L2_SIZE (1 << P_L2_BITS)
119 #define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
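/* Worked example (assuming 4 KiB target pages, i.e. TARGET_PAGE_BITS == 12):
 * P_L2_LEVELS = ((64 - 12 - 1) / 9) + 1 = (51 / 9) + 1 = 5 + 1 = 6,
 * so the 64-bit physical address space is covered by six levels of
 * 512-entry (P_L2_SIZE) tables above the leaf sections.
 */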
121 typedef PhysPageEntry Node[P_L2_SIZE];
123 typedef struct PhysPageMap {
124 struct rcu_head rcu;
126 unsigned sections_nb;
127 unsigned sections_nb_alloc;
128 unsigned nodes_nb;
129 unsigned nodes_nb_alloc;
130 Node *nodes;
131 MemoryRegionSection *sections;
132 } PhysPageMap;
134 struct AddressSpaceDispatch {
135 struct rcu_head rcu;
137 /* This is a multi-level map on the physical address space.
138 * The bottom level has pointers to MemoryRegionSections.
140 PhysPageEntry phys_map;
141 PhysPageMap map;
142 AddressSpace *as;
145 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
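/* SUBPAGE_IDX() keeps only the offset within one target page: assuming
 * 4 KiB pages, SUBPAGE_IDX(0x12345) == 0x345, which is the index used
 * into sub_section[] below.
 */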
146 typedef struct subpage_t {
147 MemoryRegion iomem;
148 AddressSpace *as;
149 hwaddr base;
150 uint16_t sub_section[TARGET_PAGE_SIZE];
151 } subpage_t;
153 #define PHYS_SECTION_UNASSIGNED 0
154 #define PHYS_SECTION_NOTDIRTY 1
155 #define PHYS_SECTION_ROM 2
156 #define PHYS_SECTION_WATCH 3
158 static void io_mem_init(void);
159 static void memory_map_init(void);
160 static void tcg_commit(MemoryListener *listener);
162 static MemoryRegion io_mem_watch;
163 #endif
165 #if !defined(CONFIG_USER_ONLY)
167 static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
169 if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
170 map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
171 map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
172 map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
176 static uint32_t phys_map_node_alloc(PhysPageMap *map)
178 unsigned i;
179 uint32_t ret;
181 ret = map->nodes_nb++;
182 assert(ret != PHYS_MAP_NODE_NIL);
183 assert(ret != map->nodes_nb_alloc);
184 for (i = 0; i < P_L2_SIZE; ++i) {
185 map->nodes[ret][i].skip = 1;
186 map->nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
188 return ret;
191 static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
192 hwaddr *index, hwaddr *nb, uint16_t leaf,
193 int level)
195 PhysPageEntry *p;
196 int i;
197 hwaddr step = (hwaddr)1 << (level * P_L2_BITS);
199 if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
200 lp->ptr = phys_map_node_alloc(map);
201 p = map->nodes[lp->ptr];
202 if (level == 0) {
203 for (i = 0; i < P_L2_SIZE; i++) {
204 p[i].skip = 0;
205 p[i].ptr = PHYS_SECTION_UNASSIGNED;
208 } else {
209 p = map->nodes[lp->ptr];
211 lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];
213 while (*nb && lp < &p[P_L2_SIZE]) {
214 if ((*index & (step - 1)) == 0 && *nb >= step) {
215 lp->skip = 0;
216 lp->ptr = leaf;
217 *index += step;
218 *nb -= step;
219 } else {
220 phys_page_set_level(map, lp, index, nb, leaf, level - 1);
222 ++lp;
226 static void phys_page_set(AddressSpaceDispatch *d,
227 hwaddr index, hwaddr nb,
228 uint16_t leaf)
230 /* Wildly overreserve - it doesn't matter much. */
231 phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);
233 phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
 236 /* Compact a non-leaf page entry. Simply detect that the entry has a single child,
237 * and update our entry so we can skip it and go directly to the destination.
239 static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
241 unsigned valid_ptr = P_L2_SIZE;
242 int valid = 0;
243 PhysPageEntry *p;
244 int i;
246 if (lp->ptr == PHYS_MAP_NODE_NIL) {
247 return;
250 p = nodes[lp->ptr];
251 for (i = 0; i < P_L2_SIZE; i++) {
252 if (p[i].ptr == PHYS_MAP_NODE_NIL) {
253 continue;
256 valid_ptr = i;
257 valid++;
258 if (p[i].skip) {
259 phys_page_compact(&p[i], nodes, compacted);
263 /* We can only compress if there's only one child. */
264 if (valid != 1) {
265 return;
268 assert(valid_ptr < P_L2_SIZE);
270 /* Don't compress if it won't fit in the # of bits we have. */
271 if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
272 return;
275 lp->ptr = p[valid_ptr].ptr;
276 if (!p[valid_ptr].skip) {
277 /* If our only child is a leaf, make this a leaf. */
278 /* By design, we should have made this node a leaf to begin with so we
279 * should never reach here.
280 * But since it's so simple to handle this, let's do it just in case we
281 * change this rule.
283 lp->skip = 0;
284 } else {
285 lp->skip += p[valid_ptr].skip;
289 static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
291 DECLARE_BITMAP(compacted, nodes_nb);
293 if (d->phys_map.skip) {
294 phys_page_compact(&d->phys_map, d->map.nodes, compacted);
298 static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
299 Node *nodes, MemoryRegionSection *sections)
301 PhysPageEntry *p;
302 hwaddr index = addr >> TARGET_PAGE_BITS;
303 int i;
305 for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
306 if (lp.ptr == PHYS_MAP_NODE_NIL) {
307 return &sections[PHYS_SECTION_UNASSIGNED];
309 p = nodes[lp.ptr];
310 lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
313 if (sections[lp.ptr].size.hi ||
314 range_covers_byte(sections[lp.ptr].offset_within_address_space,
315 sections[lp.ptr].size.lo, addr)) {
316 return &sections[lp.ptr];
317 } else {
318 return &sections[PHYS_SECTION_UNASSIGNED];
322 bool memory_region_is_unassigned(MemoryRegion *mr)
324 return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
325 && mr != &io_mem_watch;
328 /* Called from RCU critical section */
329 static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
330 hwaddr addr,
331 bool resolve_subpage)
333 MemoryRegionSection *section;
334 subpage_t *subpage;
336 section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
337 if (resolve_subpage && section->mr->subpage) {
338 subpage = container_of(section->mr, subpage_t, iomem);
339 section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
341 return section;
344 /* Called from RCU critical section */
345 static MemoryRegionSection *
346 address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
347 hwaddr *plen, bool resolve_subpage)
349 MemoryRegionSection *section;
350 Int128 diff;
352 section = address_space_lookup_region(d, addr, resolve_subpage);
353 /* Compute offset within MemoryRegionSection */
354 addr -= section->offset_within_address_space;
356 /* Compute offset within MemoryRegion */
357 *xlat = addr + section->offset_within_region;
359 diff = int128_sub(section->mr->size, int128_make64(addr));
360 *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
361 return section;
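/* Example of the translation above: for a section whose
 * offset_within_address_space is 0x2000 and offset_within_region is 0x1000,
 * translating addr 0x2345 yields *xlat == 0x1345, and *plen is reduced if
 * the requested length would run past the remaining size.
 */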
364 static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
366 if (memory_region_is_ram(mr)) {
367 return !(is_write && mr->readonly);
369 if (memory_region_is_romd(mr)) {
370 return !is_write;
373 return false;
376 MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
377 hwaddr *xlat, hwaddr *plen,
378 bool is_write)
380 IOMMUTLBEntry iotlb;
381 MemoryRegionSection *section;
382 MemoryRegion *mr;
383 hwaddr len = *plen;
385 rcu_read_lock();
386 for (;;) {
387 AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
388 section = address_space_translate_internal(d, addr, &addr, plen, true);
389 mr = section->mr;
391 if (!mr->iommu_ops) {
392 break;
395 iotlb = mr->iommu_ops->translate(mr, addr, is_write);
396 addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
397 | (addr & iotlb.addr_mask));
398 len = MIN(len, (addr | iotlb.addr_mask) - addr + 1);
399 if (!(iotlb.perm & (1 << is_write))) {
400 mr = &io_mem_unassigned;
401 break;
404 as = iotlb.target_as;
407 if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
408 hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
409 len = MIN(page, len);
412 *plen = len;
413 *xlat = addr;
414 rcu_read_unlock();
415 return mr;
418 /* Called from RCU critical section */
419 MemoryRegionSection *
420 address_space_translate_for_iotlb(CPUState *cpu, hwaddr addr,
421 hwaddr *xlat, hwaddr *plen)
423 MemoryRegionSection *section;
424 section = address_space_translate_internal(cpu->memory_dispatch,
425 addr, xlat, plen, false);
427 assert(!section->mr->iommu_ops);
428 return section;
430 #endif
432 void cpu_exec_init_all(void)
434 #if !defined(CONFIG_USER_ONLY)
435 qemu_mutex_init(&ram_list.mutex);
436 memory_map_init();
437 io_mem_init();
438 #endif
441 #if !defined(CONFIG_USER_ONLY)
443 static int cpu_common_post_load(void *opaque, int version_id)
445 CPUState *cpu = opaque;
447 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
448 version_id is increased. */
449 cpu->interrupt_request &= ~0x01;
450 tlb_flush(cpu, 1);
452 return 0;
455 static int cpu_common_pre_load(void *opaque)
457 CPUState *cpu = opaque;
459 cpu->exception_index = -1;
461 return 0;
464 static bool cpu_common_exception_index_needed(void *opaque)
466 CPUState *cpu = opaque;
468 return tcg_enabled() && cpu->exception_index != -1;
471 static const VMStateDescription vmstate_cpu_common_exception_index = {
472 .name = "cpu_common/exception_index",
473 .version_id = 1,
474 .minimum_version_id = 1,
475 .fields = (VMStateField[]) {
476 VMSTATE_INT32(exception_index, CPUState),
477 VMSTATE_END_OF_LIST()
481 const VMStateDescription vmstate_cpu_common = {
482 .name = "cpu_common",
483 .version_id = 1,
484 .minimum_version_id = 1,
485 .pre_load = cpu_common_pre_load,
486 .post_load = cpu_common_post_load,
487 .fields = (VMStateField[]) {
488 VMSTATE_UINT32(halted, CPUState),
489 VMSTATE_UINT32(interrupt_request, CPUState),
490 VMSTATE_END_OF_LIST()
492 .subsections = (VMStateSubsection[]) {
494 .vmsd = &vmstate_cpu_common_exception_index,
495 .needed = cpu_common_exception_index_needed,
496 } , {
497 /* empty */
502 #endif
504 CPUState *qemu_get_cpu(int index)
506 CPUState *cpu;
508 CPU_FOREACH(cpu) {
509 if (cpu->cpu_index == index) {
510 return cpu;
514 return NULL;
517 #if !defined(CONFIG_USER_ONLY)
518 void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as)
520 /* We only support one address space per cpu at the moment. */
521 assert(cpu->as == as);
523 if (cpu->tcg_as_listener) {
524 memory_listener_unregister(cpu->tcg_as_listener);
525 } else {
526 cpu->tcg_as_listener = g_new0(MemoryListener, 1);
528 cpu->tcg_as_listener->commit = tcg_commit;
529 memory_listener_register(cpu->tcg_as_listener, as);
531 #endif
533 void cpu_exec_init(CPUArchState *env)
535 CPUState *cpu = ENV_GET_CPU(env);
536 CPUClass *cc = CPU_GET_CLASS(cpu);
537 CPUState *some_cpu;
538 int cpu_index;
540 #ifdef TARGET_WORDS_BIGENDIAN
541 cpu->bigendian = true;
542 #else
543 cpu->bigendian = false;
544 #endif
546 #if defined(CONFIG_USER_ONLY)
547 cpu_list_lock();
548 #endif
549 cpu_index = 0;
550 CPU_FOREACH(some_cpu) {
551 cpu_index++;
553 cpu->cpu_index = cpu_index;
554 cpu->numa_node = 0;
555 QTAILQ_INIT(&cpu->breakpoints);
556 QTAILQ_INIT(&cpu->watchpoints);
557 #ifndef CONFIG_USER_ONLY
558 cpu->as = &address_space_memory;
559 cpu->thread_id = qemu_get_thread_id();
560 cpu_reload_memory_map(cpu);
561 #endif
562 QTAILQ_INSERT_TAIL(&cpus, cpu, node);
563 #if defined(CONFIG_USER_ONLY)
564 cpu_list_unlock();
565 #endif
566 if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
567 vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
569 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
570 register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
571 cpu_save, cpu_load, env);
572 assert(cc->vmsd == NULL);
573 assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
574 #endif
575 if (cc->vmsd != NULL) {
576 vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
580 #if defined(CONFIG_USER_ONLY)
581 static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
583 tb_invalidate_phys_page_range(pc, pc + 1, 0);
585 #else
586 static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
588 hwaddr phys = cpu_get_phys_page_debug(cpu, pc);
589 if (phys != -1) {
590 tb_invalidate_phys_addr(cpu->as,
591 phys | (pc & ~TARGET_PAGE_MASK));
594 #endif
596 #if defined(CONFIG_USER_ONLY)
597 void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
602 int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
603 int flags)
605 return -ENOSYS;
608 void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
612 int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
613 int flags, CPUWatchpoint **watchpoint)
615 return -ENOSYS;
617 #else
618 /* Add a watchpoint. */
619 int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
620 int flags, CPUWatchpoint **watchpoint)
622 CPUWatchpoint *wp;
624 /* forbid ranges which are empty or run off the end of the address space */
625 if (len == 0 || (addr + len - 1) < addr) {
626 error_report("tried to set invalid watchpoint at %"
627 VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
628 return -EINVAL;
630 wp = g_malloc(sizeof(*wp));
632 wp->vaddr = addr;
633 wp->len = len;
634 wp->flags = flags;
636 /* keep all GDB-injected watchpoints in front */
637 if (flags & BP_GDB) {
638 QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
639 } else {
640 QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
643 tlb_flush_page(cpu, addr);
645 if (watchpoint)
646 *watchpoint = wp;
647 return 0;
650 /* Remove a specific watchpoint. */
651 int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
652 int flags)
654 CPUWatchpoint *wp;
656 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
657 if (addr == wp->vaddr && len == wp->len
658 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
659 cpu_watchpoint_remove_by_ref(cpu, wp);
660 return 0;
663 return -ENOENT;
666 /* Remove a specific watchpoint by reference. */
667 void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
669 QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);
671 tlb_flush_page(cpu, watchpoint->vaddr);
673 g_free(watchpoint);
676 /* Remove all matching watchpoints. */
677 void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
679 CPUWatchpoint *wp, *next;
681 QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
682 if (wp->flags & mask) {
683 cpu_watchpoint_remove_by_ref(cpu, wp);
688 /* Return true if this watchpoint address matches the specified
689 * access (ie the address range covered by the watchpoint overlaps
690 * partially or completely with the address range covered by the
691 * access).
693 static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
694 vaddr addr,
695 vaddr len)
697 /* We know the lengths are non-zero, but a little caution is
698 * required to avoid errors in the case where the range ends
699 * exactly at the top of the address space and so addr + len
700 * wraps round to zero.
702 vaddr wpend = wp->vaddr + wp->len - 1;
703 vaddr addrend = addr + len - 1;
705 return !(addr > wpend || wp->vaddr > addrend);
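/* Example: a watchpoint at vaddr 0x1000 with len 4 covers [0x1000, 0x1003].
 * An access at addr 0x1002 with len 8 covers [0x1002, 0x1009] and matches,
 * while an access starting at 0x1004 does not (0x1004 > wpend).
 */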
708 #endif
710 /* Add a breakpoint. */
711 int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
712 CPUBreakpoint **breakpoint)
714 CPUBreakpoint *bp;
716 bp = g_malloc(sizeof(*bp));
718 bp->pc = pc;
719 bp->flags = flags;
721 /* keep all GDB-injected breakpoints in front */
722 if (flags & BP_GDB) {
723 QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
724 } else {
725 QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
728 breakpoint_invalidate(cpu, pc);
730 if (breakpoint) {
731 *breakpoint = bp;
733 return 0;
736 /* Remove a specific breakpoint. */
737 int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
739 CPUBreakpoint *bp;
741 QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
742 if (bp->pc == pc && bp->flags == flags) {
743 cpu_breakpoint_remove_by_ref(cpu, bp);
744 return 0;
747 return -ENOENT;
750 /* Remove a specific breakpoint by reference. */
751 void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
753 QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);
755 breakpoint_invalidate(cpu, breakpoint->pc);
757 g_free(breakpoint);
760 /* Remove all matching breakpoints. */
761 void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
763 CPUBreakpoint *bp, *next;
765 QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
766 if (bp->flags & mask) {
767 cpu_breakpoint_remove_by_ref(cpu, bp);
772 /* enable or disable single step mode. EXCP_DEBUG is returned by the
773 CPU loop after each instruction */
774 void cpu_single_step(CPUState *cpu, int enabled)
776 if (cpu->singlestep_enabled != enabled) {
777 cpu->singlestep_enabled = enabled;
778 if (kvm_enabled()) {
779 kvm_update_guest_debug(cpu, 0);
780 } else {
781 /* must flush all the translated code to avoid inconsistencies */
782 /* XXX: only flush what is necessary */
783 CPUArchState *env = cpu->env_ptr;
784 tb_flush(env);
789 void QEMU_NORETURN cpu_abort(CPUState *cpu, const char *fmt, ...)
791 va_list ap;
792 va_list ap2;
794 va_start(ap, fmt);
795 va_copy(ap2, ap);
796 fprintf(stderr, "qemu: fatal: ");
797 vfprintf(stderr, fmt, ap);
798 fprintf(stderr, "\n");
799 cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
800 if (qemu_log_enabled()) {
801 qemu_log("qemu: fatal: ");
802 qemu_log_vprintf(fmt, ap2);
803 qemu_log("\n");
804 log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
805 qemu_log_flush();
806 qemu_log_close();
808 va_end(ap2);
809 va_end(ap);
810 #if defined(CONFIG_USER_ONLY)
812 struct sigaction act;
813 sigfillset(&act.sa_mask);
814 act.sa_handler = SIG_DFL;
815 sigaction(SIGABRT, &act, NULL);
817 #endif
818 abort();
821 #if !defined(CONFIG_USER_ONLY)
822 /* Called from RCU critical section */
823 static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
825 RAMBlock *block;
827 block = atomic_rcu_read(&ram_list.mru_block);
828 if (block && addr - block->offset < block->max_length) {
829 goto found;
831 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
832 if (addr - block->offset < block->max_length) {
833 goto found;
837 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
838 abort();
840 found:
841 /* It is safe to write mru_block outside the iothread lock. This
842 * is what happens:
844 * mru_block = xxx
845 * rcu_read_unlock()
846 * xxx removed from list
847 * rcu_read_lock()
848 * read mru_block
849 * mru_block = NULL;
850 * call_rcu(reclaim_ramblock, xxx);
851 * rcu_read_unlock()
853 * atomic_rcu_set is not needed here. The block was already published
854 * when it was placed into the list. Here we're just making an extra
855 * copy of the pointer.
857 ram_list.mru_block = block;
858 return block;
861 static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
863 ram_addr_t start1;
864 RAMBlock *block;
865 ram_addr_t end;
867 end = TARGET_PAGE_ALIGN(start + length);
868 start &= TARGET_PAGE_MASK;
870 rcu_read_lock();
871 block = qemu_get_ram_block(start);
872 assert(block == qemu_get_ram_block(end - 1));
873 start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
874 cpu_tlb_reset_dirty_all(start1, length);
875 rcu_read_unlock();
878 /* Note: start and end must be within the same ram block. */
879 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t length,
880 unsigned client)
882 if (length == 0)
883 return;
884 cpu_physical_memory_clear_dirty_range_type(start, length, client);
886 if (tcg_enabled()) {
887 tlb_reset_dirty_range_all(start, length);
891 static void cpu_physical_memory_set_dirty_tracking(bool enable)
893 in_migration = enable;
896 /* Called from RCU critical section */
897 hwaddr memory_region_section_get_iotlb(CPUState *cpu,
898 MemoryRegionSection *section,
899 target_ulong vaddr,
900 hwaddr paddr, hwaddr xlat,
901 int prot,
902 target_ulong *address)
904 hwaddr iotlb;
905 CPUWatchpoint *wp;
907 if (memory_region_is_ram(section->mr)) {
908 /* Normal RAM. */
909 iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
910 + xlat;
911 if (!section->readonly) {
912 iotlb |= PHYS_SECTION_NOTDIRTY;
913 } else {
914 iotlb |= PHYS_SECTION_ROM;
916 } else {
917 iotlb = section - section->address_space->dispatch->map.sections;
918 iotlb += xlat;
921 /* Make accesses to pages with watchpoints go via the
922 watchpoint trap routines. */
923 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
924 if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
925 /* Avoid trapping reads of pages with a write breakpoint. */
926 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
927 iotlb = PHYS_SECTION_WATCH + paddr;
928 *address |= TLB_MMIO;
929 break;
934 return iotlb;
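/* The returned value packs a phys_sections index into the low bits of a
 * page-aligned quantity: for dirty-tracked RAM, for example, the page bits
 * essentially carry the ram_addr while the low bits carry
 * PHYS_SECTION_NOTDIRTY, so iotlb_to_region() can later recover
 * io_mem_notdirty.  This is also why phys_section_add() below asserts
 * sections_nb < TARGET_PAGE_SIZE.
 */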
936 #endif /* defined(CONFIG_USER_ONLY) */
938 #if !defined(CONFIG_USER_ONLY)
940 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
941 uint16_t section);
942 static subpage_t *subpage_init(AddressSpace *as, hwaddr base);
944 static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
945 qemu_anon_ram_alloc;
 948  * Set a custom physical guest memory allocator.
949 * Accelerators with unusual needs may need this. Hopefully, we can
950 * get rid of it eventually.
952 void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
954 phys_mem_alloc = alloc;
957 static uint16_t phys_section_add(PhysPageMap *map,
958 MemoryRegionSection *section)
960 /* The physical section number is ORed with a page-aligned
961 * pointer to produce the iotlb entries. Thus it should
962 * never overflow into the page-aligned value.
964 assert(map->sections_nb < TARGET_PAGE_SIZE);
966 if (map->sections_nb == map->sections_nb_alloc) {
967 map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
968 map->sections = g_renew(MemoryRegionSection, map->sections,
969 map->sections_nb_alloc);
971 map->sections[map->sections_nb] = *section;
972 memory_region_ref(section->mr);
973 return map->sections_nb++;
976 static void phys_section_destroy(MemoryRegion *mr)
978 memory_region_unref(mr);
980 if (mr->subpage) {
981 subpage_t *subpage = container_of(mr, subpage_t, iomem);
982 object_unref(OBJECT(&subpage->iomem));
983 g_free(subpage);
987 static void phys_sections_free(PhysPageMap *map)
989 while (map->sections_nb > 0) {
990 MemoryRegionSection *section = &map->sections[--map->sections_nb];
991 phys_section_destroy(section->mr);
993 g_free(map->sections);
994 g_free(map->nodes);
997 static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
999 subpage_t *subpage;
1000 hwaddr base = section->offset_within_address_space
1001 & TARGET_PAGE_MASK;
1002 MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
1003 d->map.nodes, d->map.sections);
1004 MemoryRegionSection subsection = {
1005 .offset_within_address_space = base,
1006 .size = int128_make64(TARGET_PAGE_SIZE),
1008 hwaddr start, end;
1010 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
1012 if (!(existing->mr->subpage)) {
1013 subpage = subpage_init(d->as, base);
1014 subsection.address_space = d->as;
1015 subsection.mr = &subpage->iomem;
1016 phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
1017 phys_section_add(&d->map, &subsection));
1018 } else {
1019 subpage = container_of(existing->mr, subpage_t, iomem);
1021 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
1022 end = start + int128_get64(section->size) - 1;
1023 subpage_register(subpage, start, end,
1024 phys_section_add(&d->map, section));
1028 static void register_multipage(AddressSpaceDispatch *d,
1029 MemoryRegionSection *section)
1031 hwaddr start_addr = section->offset_within_address_space;
1032 uint16_t section_index = phys_section_add(&d->map, section);
1033 uint64_t num_pages = int128_get64(int128_rshift(section->size,
1034 TARGET_PAGE_BITS));
1036 assert(num_pages);
1037 phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
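/* mem_add() below splits each MemoryRegionSection on target-page boundaries:
 * for instance, assuming 4 KiB pages, a section at address-space offset
 * 0x1800 of size 0x3000 becomes a leading subpage [0x1800, 0x2000), a
 * two-page multipage range [0x2000, 0x4000), and a trailing subpage
 * [0x4000, 0x4800).
 */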
1040 static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
1042 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
1043 AddressSpaceDispatch *d = as->next_dispatch;
1044 MemoryRegionSection now = *section, remain = *section;
1045 Int128 page_size = int128_make64(TARGET_PAGE_SIZE);
1047 if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
1048 uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
1049 - now.offset_within_address_space;
1051 now.size = int128_min(int128_make64(left), now.size);
1052 register_subpage(d, &now);
1053 } else {
1054 now.size = int128_zero();
1056 while (int128_ne(remain.size, now.size)) {
1057 remain.size = int128_sub(remain.size, now.size);
1058 remain.offset_within_address_space += int128_get64(now.size);
1059 remain.offset_within_region += int128_get64(now.size);
1060 now = remain;
1061 if (int128_lt(remain.size, page_size)) {
1062 register_subpage(d, &now);
1063 } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
1064 now.size = page_size;
1065 register_subpage(d, &now);
1066 } else {
1067 now.size = int128_and(now.size, int128_neg(page_size));
1068 register_multipage(d, &now);
1073 void qemu_flush_coalesced_mmio_buffer(void)
1075 if (kvm_enabled())
1076 kvm_flush_coalesced_mmio_buffer();
1079 void qemu_mutex_lock_ramlist(void)
1081 qemu_mutex_lock(&ram_list.mutex);
1084 void qemu_mutex_unlock_ramlist(void)
1086 qemu_mutex_unlock(&ram_list.mutex);
1089 #ifdef __linux__
1091 #include <sys/vfs.h>
1093 #define HUGETLBFS_MAGIC 0x958458f6
1095 static long gethugepagesize(const char *path, Error **errp)
1097 struct statfs fs;
1098 int ret;
1100 do {
1101 ret = statfs(path, &fs);
1102 } while (ret != 0 && errno == EINTR);
1104 if (ret != 0) {
1105 error_setg_errno(errp, errno, "failed to get page size of file %s",
1106 path);
1107 return 0;
1110 if (fs.f_type != HUGETLBFS_MAGIC)
1111 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
1113 return fs.f_bsize;
1116 static void *file_ram_alloc(RAMBlock *block,
1117 ram_addr_t memory,
1118 const char *path,
1119 Error **errp)
1121 char *filename;
1122 char *sanitized_name;
1123 char *c;
1124 void * volatile area = NULL;
1125 int fd;
1126 uint64_t hpagesize;
1127 Error *local_err = NULL;
1129 hpagesize = gethugepagesize(path, &local_err);
1130 if (local_err) {
1131 error_propagate(errp, local_err);
1132 goto error;
1134 block->mr->align = hpagesize;
1136 if (memory < hpagesize) {
1137 error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
1138 "or larger than huge page size 0x%" PRIx64,
1139 memory, hpagesize);
1140 goto error;
1143 if (kvm_enabled() && !kvm_has_sync_mmu()) {
1144 error_setg(errp,
1145 "host lacks kvm mmu notifiers, -mem-path unsupported");
1146 goto error;
1149 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
1150 sanitized_name = g_strdup(memory_region_name(block->mr));
1151 for (c = sanitized_name; *c != '\0'; c++) {
1152 if (*c == '/')
1153 *c = '_';
1156 filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
1157 sanitized_name);
1158 g_free(sanitized_name);
1160 fd = mkstemp(filename);
1161 if (fd < 0) {
1162 error_setg_errno(errp, errno,
1163 "unable to create backing store for hugepages");
1164 g_free(filename);
1165 goto error;
1167 unlink(filename);
1168 g_free(filename);
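/* Round the mapping size up to a whole number of huge pages below;
 * e.g. with 2 MiB huge pages a 3 MiB request is backed by 4 MiB.
 */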
1170 memory = (memory+hpagesize-1) & ~(hpagesize-1);
1173 * ftruncate is not supported by hugetlbfs in older
1174 * hosts, so don't bother bailing out on errors.
1175 * If anything goes wrong with it under other filesystems,
1176 * mmap will fail.
1178 if (ftruncate(fd, memory)) {
1179 perror("ftruncate");
1182 area = mmap(0, memory, PROT_READ | PROT_WRITE,
1183 (block->flags & RAM_SHARED ? MAP_SHARED : MAP_PRIVATE),
1184 fd, 0);
1185 if (area == MAP_FAILED) {
1186 error_setg_errno(errp, errno,
1187 "unable to map backing store for hugepages");
1188 close(fd);
1189 goto error;
1192 if (mem_prealloc) {
1193 os_mem_prealloc(fd, area, memory);
1196 block->fd = fd;
1197 return area;
1199 error:
1200 if (mem_prealloc) {
1201 error_report("%s", error_get_pretty(*errp));
1202 exit(1);
1204 return NULL;
1206 #endif
1208 /* Called with the ramlist lock held. */
1209 static ram_addr_t find_ram_offset(ram_addr_t size)
1211 RAMBlock *block, *next_block;
1212 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
1214 assert(size != 0); /* it would hand out same offset multiple times */
1216 if (QLIST_EMPTY_RCU(&ram_list.blocks)) {
1217 return 0;
1220 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1221 ram_addr_t end, next = RAM_ADDR_MAX;
1223 end = block->offset + block->max_length;
1225 QLIST_FOREACH_RCU(next_block, &ram_list.blocks, next) {
1226 if (next_block->offset >= end) {
1227 next = MIN(next, next_block->offset);
1230 if (next - end >= size && next - end < mingap) {
1231 offset = end;
1232 mingap = next - end;
1236 if (offset == RAM_ADDR_MAX) {
1237 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
1238 (uint64_t)size);
1239 abort();
1242 return offset;
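/* For example, with blocks at [0, 0x100000) and [0x300000, 0x400000), a
 * request for 0x80000 bytes returns offset 0x100000: the gap between the
 * two blocks is the smallest gap that still fits the requested size.
 */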
1245 ram_addr_t last_ram_offset(void)
1247 RAMBlock *block;
1248 ram_addr_t last = 0;
1250 rcu_read_lock();
1251 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1252 last = MAX(last, block->offset + block->max_length);
1254 rcu_read_unlock();
1255 return last;
1258 static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1260 int ret;
 1262     /* Use MADV_DONTDUMP if the user doesn't want the guest memory in the core dump */
1263 if (!machine_dump_guest_core(current_machine)) {
1264 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1265 if (ret) {
1266 perror("qemu_madvise");
1267 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1268 "but dump_guest_core=off specified\n");
1273 /* Called within an RCU critical section, or while the ramlist lock
1274 * is held.
1276 static RAMBlock *find_ram_block(ram_addr_t addr)
1278 RAMBlock *block;
1280 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1281 if (block->offset == addr) {
1282 return block;
1286 return NULL;
1289 /* Called with iothread lock held. */
1290 void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
1292 RAMBlock *new_block, *block;
1294 rcu_read_lock();
1295 new_block = find_ram_block(addr);
1296 assert(new_block);
1297 assert(!new_block->idstr[0]);
1299 if (dev) {
1300 char *id = qdev_get_dev_path(dev);
1301 if (id) {
1302 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
1303 g_free(id);
1306 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1308 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1309 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
1310 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1311 new_block->idstr);
1312 abort();
1315 rcu_read_unlock();
1318 /* Called with iothread lock held. */
1319 void qemu_ram_unset_idstr(ram_addr_t addr)
1321 RAMBlock *block;
1323 /* FIXME: arch_init.c assumes that this is not called throughout
1324 * migration. Ignore the problem since hot-unplug during migration
1325 * does not work anyway.
1328 rcu_read_lock();
1329 block = find_ram_block(addr);
1330 if (block) {
1331 memset(block->idstr, 0, sizeof(block->idstr));
1333 rcu_read_unlock();
1336 static int memory_try_enable_merging(void *addr, size_t len)
1338 if (!machine_mem_merge(current_machine)) {
1339 /* disabled by the user */
1340 return 0;
1343 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1346 /* Only legal before guest might have detected the memory size: e.g. on
1347 * incoming migration, or right after reset.
 1349  * As the memory core doesn't know how memory is accessed, it is up to the
1350 * resize callback to update device state and/or add assertions to detect
1351 * misuse, if necessary.
1353 int qemu_ram_resize(ram_addr_t base, ram_addr_t newsize, Error **errp)
1355 RAMBlock *block = find_ram_block(base);
1357 assert(block);
1359 newsize = TARGET_PAGE_ALIGN(newsize);
1361 if (block->used_length == newsize) {
1362 return 0;
1365 if (!(block->flags & RAM_RESIZEABLE)) {
1366 error_setg_errno(errp, EINVAL,
1367 "Length mismatch: %s: 0x" RAM_ADDR_FMT
1368 " in != 0x" RAM_ADDR_FMT, block->idstr,
1369 newsize, block->used_length);
1370 return -EINVAL;
1373 if (block->max_length < newsize) {
1374 error_setg_errno(errp, EINVAL,
1375 "Length too large: %s: 0x" RAM_ADDR_FMT
1376 " > 0x" RAM_ADDR_FMT, block->idstr,
1377 newsize, block->max_length);
1378 return -EINVAL;
1381 cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
1382 block->used_length = newsize;
1383 cpu_physical_memory_set_dirty_range(block->offset, block->used_length);
1384 memory_region_set_size(block->mr, newsize);
1385 if (block->resized) {
1386 block->resized(block->idstr, newsize, block->host);
1388 return 0;
1391 static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp)
1393 RAMBlock *block;
1394 RAMBlock *last_block = NULL;
1395 ram_addr_t old_ram_size, new_ram_size;
1397 old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
1399 qemu_mutex_lock_ramlist();
1400 new_block->offset = find_ram_offset(new_block->max_length);
1402 if (!new_block->host) {
1403 if (xen_enabled()) {
1404 xen_ram_alloc(new_block->offset, new_block->max_length,
1405 new_block->mr);
1406 } else {
1407 new_block->host = phys_mem_alloc(new_block->max_length,
1408 &new_block->mr->align);
1409 if (!new_block->host) {
1410 error_setg_errno(errp, errno,
1411 "cannot set up guest memory '%s'",
1412 memory_region_name(new_block->mr));
1413 qemu_mutex_unlock_ramlist();
1414 return -1;
1416 memory_try_enable_merging(new_block->host, new_block->max_length);
1420 /* Keep the list sorted from biggest to smallest block. Unlike QTAILQ,
1421 * QLIST (which has an RCU-friendly variant) does not have insertion at
1422 * tail, so save the last element in last_block.
1424 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1425 last_block = block;
1426 if (block->max_length < new_block->max_length) {
1427 break;
1430 if (block) {
1431 QLIST_INSERT_BEFORE_RCU(block, new_block, next);
1432 } else if (last_block) {
1433 QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
1434 } else { /* list is empty */
1435 QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
1437 ram_list.mru_block = NULL;
1439 /* Write list before version */
1440 smp_wmb();
1441 ram_list.version++;
1442 qemu_mutex_unlock_ramlist();
1444 new_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
1446 if (new_ram_size > old_ram_size) {
1447 int i;
1449 /* ram_list.dirty_memory[] is protected by the iothread lock. */
1450 for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
1451 ram_list.dirty_memory[i] =
1452 bitmap_zero_extend(ram_list.dirty_memory[i],
1453 old_ram_size, new_ram_size);
1456 cpu_physical_memory_set_dirty_range(new_block->offset,
1457 new_block->used_length);
1459 if (new_block->host) {
1460 qemu_ram_setup_dump(new_block->host, new_block->max_length);
1461 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
1462 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
1463 if (kvm_enabled()) {
1464 kvm_setup_guest_memory(new_block->host, new_block->max_length);
1468 return new_block->offset;
1471 #ifdef __linux__
1472 ram_addr_t qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
1473 bool share, const char *mem_path,
1474 Error **errp)
1476 RAMBlock *new_block;
1477 ram_addr_t addr;
1478 Error *local_err = NULL;
1480 if (xen_enabled()) {
1481 error_setg(errp, "-mem-path not supported with Xen");
1482 return -1;
1485 if (phys_mem_alloc != qemu_anon_ram_alloc) {
1487 * file_ram_alloc() needs to allocate just like
1488 * phys_mem_alloc, but we haven't bothered to provide
1489 * a hook there.
1491 error_setg(errp,
1492 "-mem-path not supported with this accelerator");
1493 return -1;
1496 size = TARGET_PAGE_ALIGN(size);
1497 new_block = g_malloc0(sizeof(*new_block));
1498 new_block->mr = mr;
1499 new_block->used_length = size;
1500 new_block->max_length = size;
1501 new_block->flags = share ? RAM_SHARED : 0;
1502 new_block->host = file_ram_alloc(new_block, size,
1503 mem_path, errp);
1504 if (!new_block->host) {
1505 g_free(new_block);
1506 return -1;
1509 addr = ram_block_add(new_block, &local_err);
1510 if (local_err) {
1511 g_free(new_block);
1512 error_propagate(errp, local_err);
1513 return -1;
1515 return addr;
1517 #endif
1519 static
1520 ram_addr_t qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
1521 void (*resized)(const char*,
1522 uint64_t length,
1523 void *host),
1524 void *host, bool resizeable,
1525 MemoryRegion *mr, Error **errp)
1527 RAMBlock *new_block;
1528 ram_addr_t addr;
1529 Error *local_err = NULL;
1531 size = TARGET_PAGE_ALIGN(size);
1532 max_size = TARGET_PAGE_ALIGN(max_size);
1533 new_block = g_malloc0(sizeof(*new_block));
1534 new_block->mr = mr;
1535 new_block->resized = resized;
1536 new_block->used_length = size;
1537 new_block->max_length = max_size;
1538 assert(max_size >= size);
1539 new_block->fd = -1;
1540 new_block->host = host;
1541 if (host) {
1542 new_block->flags |= RAM_PREALLOC;
1544 if (resizeable) {
1545 new_block->flags |= RAM_RESIZEABLE;
1547 addr = ram_block_add(new_block, &local_err);
1548 if (local_err) {
1549 g_free(new_block);
1550 error_propagate(errp, local_err);
1551 return -1;
1553 return addr;
1556 ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
1557 MemoryRegion *mr, Error **errp)
1559 return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
1562 ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
1564 return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
1567 ram_addr_t qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
1568 void (*resized)(const char*,
1569 uint64_t length,
1570 void *host),
1571 MemoryRegion *mr, Error **errp)
1573 return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
1576 void qemu_ram_free_from_ptr(ram_addr_t addr)
1578 RAMBlock *block;
1580 qemu_mutex_lock_ramlist();
1581 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1582 if (addr == block->offset) {
1583 QLIST_REMOVE_RCU(block, next);
1584 ram_list.mru_block = NULL;
1585 /* Write list before version */
1586 smp_wmb();
1587 ram_list.version++;
1588 g_free_rcu(block, rcu);
1589 break;
1592 qemu_mutex_unlock_ramlist();
1595 static void reclaim_ramblock(RAMBlock *block)
1597 if (block->flags & RAM_PREALLOC) {
1599 } else if (xen_enabled()) {
1600 xen_invalidate_map_cache_entry(block->host);
1601 #ifndef _WIN32
1602 } else if (block->fd >= 0) {
1603 munmap(block->host, block->max_length);
1604 close(block->fd);
1605 #endif
1606 } else {
1607 qemu_anon_ram_free(block->host, block->max_length);
1609 g_free(block);
1612 void qemu_ram_free(ram_addr_t addr)
1614 RAMBlock *block;
1616 qemu_mutex_lock_ramlist();
1617 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1618 if (addr == block->offset) {
1619 QLIST_REMOVE_RCU(block, next);
1620 ram_list.mru_block = NULL;
1621 /* Write list before version */
1622 smp_wmb();
1623 ram_list.version++;
1624 call_rcu(block, reclaim_ramblock, rcu);
1625 break;
1628 qemu_mutex_unlock_ramlist();
1631 #ifndef _WIN32
1632 void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1634 RAMBlock *block;
1635 ram_addr_t offset;
1636 int flags;
1637 void *area, *vaddr;
1639 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1640 offset = addr - block->offset;
1641 if (offset < block->max_length) {
1642 vaddr = ramblock_ptr(block, offset);
1643 if (block->flags & RAM_PREALLOC) {
1645 } else if (xen_enabled()) {
1646 abort();
1647 } else {
1648 flags = MAP_FIXED;
1649 if (block->fd >= 0) {
1650 flags |= (block->flags & RAM_SHARED ?
1651 MAP_SHARED : MAP_PRIVATE);
1652 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1653 flags, block->fd, offset);
1654 } else {
1656 * Remap needs to match alloc. Accelerators that
1657 * set phys_mem_alloc never remap. If they did,
1658 * we'd need a remap hook here.
1660 assert(phys_mem_alloc == qemu_anon_ram_alloc);
1662 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1663 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1664 flags, -1, 0);
1666 if (area != vaddr) {
1667 fprintf(stderr, "Could not remap addr: "
1668 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
1669 length, addr);
1670 exit(1);
1672 memory_try_enable_merging(vaddr, length);
1673 qemu_ram_setup_dump(vaddr, length);
1678 #endif /* !_WIN32 */
1680 int qemu_get_ram_fd(ram_addr_t addr)
1682 RAMBlock *block;
1683 int fd;
1685 rcu_read_lock();
1686 block = qemu_get_ram_block(addr);
1687 fd = block->fd;
1688 rcu_read_unlock();
1689 return fd;
1692 void *qemu_get_ram_block_host_ptr(ram_addr_t addr)
1694 RAMBlock *block;
1695 void *ptr;
1697 rcu_read_lock();
1698 block = qemu_get_ram_block(addr);
1699 ptr = ramblock_ptr(block, 0);
1700 rcu_read_unlock();
1701 return ptr;
1704 /* Return a host pointer to ram allocated with qemu_ram_alloc.
1705 * This should not be used for general purpose DMA. Use address_space_map
1706 * or address_space_rw instead. For local memory (e.g. video ram) that the
1707 * device owns, use memory_region_get_ram_ptr.
1709 * By the time this function returns, the returned pointer is not protected
1710 * by RCU anymore. If the caller is not within an RCU critical section and
1711 * does not hold the iothread lock, it must have other means of protecting the
1712 * pointer, such as a reference to the region that includes the incoming
1713 * ram_addr_t.
1715 void *qemu_get_ram_ptr(ram_addr_t addr)
1717 RAMBlock *block;
1718 void *ptr;
1720 rcu_read_lock();
1721 block = qemu_get_ram_block(addr);
1723 if (xen_enabled() && block->host == NULL) {
 1724         /* We need to check if the requested address is in RAM
1725 * because we don't want to map the entire memory in QEMU.
1726 * In that case just map until the end of the page.
1728 if (block->offset == 0) {
1729 ptr = xen_map_cache(addr, 0, 0);
1730 goto unlock;
1733 block->host = xen_map_cache(block->offset, block->max_length, 1);
1735 ptr = ramblock_ptr(block, addr - block->offset);
1737 unlock:
1738 rcu_read_unlock();
1739 return ptr;
1742 /* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
1743 * but takes a size argument.
1745 * By the time this function returns, the returned pointer is not protected
1746 * by RCU anymore. If the caller is not within an RCU critical section and
1747 * does not hold the iothread lock, it must have other means of protecting the
1748 * pointer, such as a reference to the region that includes the incoming
1749 * ram_addr_t.
1751 static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
1753 void *ptr;
1754 if (*size == 0) {
1755 return NULL;
1757 if (xen_enabled()) {
1758 return xen_map_cache(addr, *size, 1);
1759 } else {
1760 RAMBlock *block;
1761 rcu_read_lock();
1762 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1763 if (addr - block->offset < block->max_length) {
1764 if (addr - block->offset + *size > block->max_length)
1765 *size = block->max_length - addr + block->offset;
1766 ptr = ramblock_ptr(block, addr - block->offset);
1767 rcu_read_unlock();
1768 return ptr;
1772 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1773 abort();
1777 /* Some of the softmmu routines need to translate from a host pointer
1778 * (typically a TLB entry) back to a ram offset.
1780 * By the time this function returns, the returned pointer is not protected
1781 * by RCU anymore. If the caller is not within an RCU critical section and
1782 * does not hold the iothread lock, it must have other means of protecting the
1783 * pointer, such as a reference to the region that includes the incoming
1784 * ram_addr_t.
1786 MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
1788 RAMBlock *block;
1789 uint8_t *host = ptr;
1790 MemoryRegion *mr;
1792 if (xen_enabled()) {
1793 rcu_read_lock();
1794 *ram_addr = xen_ram_addr_from_mapcache(ptr);
1795 mr = qemu_get_ram_block(*ram_addr)->mr;
1796 rcu_read_unlock();
1797 return mr;
1800 rcu_read_lock();
1801 block = atomic_rcu_read(&ram_list.mru_block);
1802 if (block && block->host && host - block->host < block->max_length) {
1803 goto found;
1806 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
 1807         /* This case happens when the block is not mapped. */
1808 if (block->host == NULL) {
1809 continue;
1811 if (host - block->host < block->max_length) {
1812 goto found;
1816 rcu_read_unlock();
1817 return NULL;
1819 found:
1820 *ram_addr = block->offset + (host - block->host);
1821 mr = block->mr;
1822 rcu_read_unlock();
1823 return mr;
1826 static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
1827 uint64_t val, unsigned size)
1829 if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
1830 tb_invalidate_phys_page_fast(ram_addr, size);
1832 switch (size) {
1833 case 1:
1834 stb_p(qemu_get_ram_ptr(ram_addr), val);
1835 break;
1836 case 2:
1837 stw_p(qemu_get_ram_ptr(ram_addr), val);
1838 break;
1839 case 4:
1840 stl_p(qemu_get_ram_ptr(ram_addr), val);
1841 break;
1842 default:
1843 abort();
1845 cpu_physical_memory_set_dirty_range_nocode(ram_addr, size);
1846 /* we remove the notdirty callback only if the code has been
1847 flushed */
1848 if (!cpu_physical_memory_is_clean(ram_addr)) {
1849 CPUArchState *env = current_cpu->env_ptr;
1850 tlb_set_dirty(env, current_cpu->mem_io_vaddr);
1854 static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
1855 unsigned size, bool is_write)
1857 return is_write;
1860 static const MemoryRegionOps notdirty_mem_ops = {
1861 .write = notdirty_mem_write,
1862 .valid.accepts = notdirty_mem_accepts,
1863 .endianness = DEVICE_NATIVE_ENDIAN,
1866 /* Generate a debug exception if a watchpoint has been hit. */
1867 static void check_watchpoint(int offset, int len, int flags)
1869 CPUState *cpu = current_cpu;
1870 CPUArchState *env = cpu->env_ptr;
1871 target_ulong pc, cs_base;
1872 target_ulong vaddr;
1873 CPUWatchpoint *wp;
1874 int cpu_flags;
1876 if (cpu->watchpoint_hit) {
1877 /* We re-entered the check after replacing the TB. Now raise
 1878          * the debug interrupt so that it will trigger after the
1879 * current instruction. */
1880 cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
1881 return;
1883 vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
1884 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
1885 if (cpu_watchpoint_address_matches(wp, vaddr, len)
1886 && (wp->flags & flags)) {
1887 if (flags == BP_MEM_READ) {
1888 wp->flags |= BP_WATCHPOINT_HIT_READ;
1889 } else {
1890 wp->flags |= BP_WATCHPOINT_HIT_WRITE;
1892 wp->hitaddr = vaddr;
1893 if (!cpu->watchpoint_hit) {
1894 cpu->watchpoint_hit = wp;
1895 tb_check_watchpoint(cpu);
1896 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
1897 cpu->exception_index = EXCP_DEBUG;
1898 cpu_loop_exit(cpu);
1899 } else {
1900 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
1901 tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
1902 cpu_resume_from_signal(cpu, NULL);
1905 } else {
1906 wp->flags &= ~BP_WATCHPOINT_HIT;
1911 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
1912 so these check for a hit then pass through to the normal out-of-line
1913 phys routines. */
1914 static uint64_t watch_mem_read(void *opaque, hwaddr addr,
1915 unsigned size)
1917 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, BP_MEM_READ);
1918 switch (size) {
1919 case 1: return ldub_phys(&address_space_memory, addr);
1920 case 2: return lduw_phys(&address_space_memory, addr);
1921 case 4: return ldl_phys(&address_space_memory, addr);
1922 default: abort();
1926 static void watch_mem_write(void *opaque, hwaddr addr,
1927 uint64_t val, unsigned size)
1929 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, BP_MEM_WRITE);
1930 switch (size) {
1931 case 1:
1932 stb_phys(&address_space_memory, addr, val);
1933 break;
1934 case 2:
1935 stw_phys(&address_space_memory, addr, val);
1936 break;
1937 case 4:
1938 stl_phys(&address_space_memory, addr, val);
1939 break;
1940 default: abort();
1944 static const MemoryRegionOps watch_mem_ops = {
1945 .read = watch_mem_read,
1946 .write = watch_mem_write,
1947 .endianness = DEVICE_NATIVE_ENDIAN,
1950 static uint64_t subpage_read(void *opaque, hwaddr addr,
1951 unsigned len)
1953 subpage_t *subpage = opaque;
1954 uint8_t buf[8];
1956 #if defined(DEBUG_SUBPAGE)
1957 printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
1958 subpage, len, addr);
1959 #endif
1960 address_space_read(subpage->as, addr + subpage->base, buf, len);
1961 switch (len) {
1962 case 1:
1963 return ldub_p(buf);
1964 case 2:
1965 return lduw_p(buf);
1966 case 4:
1967 return ldl_p(buf);
1968 case 8:
1969 return ldq_p(buf);
1970 default:
1971 abort();
1975 static void subpage_write(void *opaque, hwaddr addr,
1976 uint64_t value, unsigned len)
1978 subpage_t *subpage = opaque;
1979 uint8_t buf[8];
1981 #if defined(DEBUG_SUBPAGE)
1982 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
1983 " value %"PRIx64"\n",
1984 __func__, subpage, len, addr, value);
1985 #endif
1986 switch (len) {
1987 case 1:
1988 stb_p(buf, value);
1989 break;
1990 case 2:
1991 stw_p(buf, value);
1992 break;
1993 case 4:
1994 stl_p(buf, value);
1995 break;
1996 case 8:
1997 stq_p(buf, value);
1998 break;
1999 default:
2000 abort();
2002 address_space_write(subpage->as, addr + subpage->base, buf, len);
2005 static bool subpage_accepts(void *opaque, hwaddr addr,
2006 unsigned len, bool is_write)
2008 subpage_t *subpage = opaque;
2009 #if defined(DEBUG_SUBPAGE)
2010 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
2011 __func__, subpage, is_write ? 'w' : 'r', len, addr);
2012 #endif
2014 return address_space_access_valid(subpage->as, addr + subpage->base,
2015 len, is_write);
2018 static const MemoryRegionOps subpage_ops = {
2019 .read = subpage_read,
2020 .write = subpage_write,
2021 .impl.min_access_size = 1,
2022 .impl.max_access_size = 8,
2023 .valid.min_access_size = 1,
2024 .valid.max_access_size = 8,
2025 .valid.accepts = subpage_accepts,
2026 .endianness = DEVICE_NATIVE_ENDIAN,
2029 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2030 uint16_t section)
2032 int idx, eidx;
2034 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2035 return -1;
2036 idx = SUBPAGE_IDX(start);
2037 eidx = SUBPAGE_IDX(end);
2038 #if defined(DEBUG_SUBPAGE)
2039 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
2040 __func__, mmio, start, end, idx, eidx, section);
2041 #endif
2042 for (; idx <= eidx; idx++) {
2043 mmio->sub_section[idx] = section;
2046 return 0;
2049 static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
2051 subpage_t *mmio;
2053 mmio = g_malloc0(sizeof(subpage_t));
2055 mmio->as = as;
2056 mmio->base = base;
2057 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
2058 NULL, TARGET_PAGE_SIZE);
2059 mmio->iomem.subpage = true;
2060 #if defined(DEBUG_SUBPAGE)
2061 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
2062 mmio, base, TARGET_PAGE_SIZE);
2063 #endif
2064 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
2066 return mmio;
2069 static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
2070 MemoryRegion *mr)
2072 assert(as);
2073 MemoryRegionSection section = {
2074 .address_space = as,
2075 .mr = mr,
2076 .offset_within_address_space = 0,
2077 .offset_within_region = 0,
2078 .size = int128_2_64(),
2081 return phys_section_add(map, &section);
2084 MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index)
2086 AddressSpaceDispatch *d = atomic_rcu_read(&cpu->memory_dispatch);
2087 MemoryRegionSection *sections = d->map.sections;
2089 return sections[index & ~TARGET_PAGE_MASK].mr;
2092 static void io_mem_init(void)
2094 memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
2095 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
2096 NULL, UINT64_MAX);
2097 memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
2098 NULL, UINT64_MAX);
2099 memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
2100 NULL, UINT64_MAX);
2103 static void mem_begin(MemoryListener *listener)
2105 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
2106 AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
2107 uint16_t n;
2109 n = dummy_section(&d->map, as, &io_mem_unassigned);
2110 assert(n == PHYS_SECTION_UNASSIGNED);
2111 n = dummy_section(&d->map, as, &io_mem_notdirty);
2112 assert(n == PHYS_SECTION_NOTDIRTY);
2113 n = dummy_section(&d->map, as, &io_mem_rom);
2114 assert(n == PHYS_SECTION_ROM);
2115 n = dummy_section(&d->map, as, &io_mem_watch);
2116 assert(n == PHYS_SECTION_WATCH);
2118 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
2119 d->as = as;
2120 as->next_dispatch = d;
2123 static void address_space_dispatch_free(AddressSpaceDispatch *d)
2125 phys_sections_free(&d->map);
2126 g_free(d);
2129 static void mem_commit(MemoryListener *listener)
2131 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
2132 AddressSpaceDispatch *cur = as->dispatch;
2133 AddressSpaceDispatch *next = as->next_dispatch;
2135 phys_page_compact_all(next, next->map.nodes_nb);
2137 atomic_rcu_set(&as->dispatch, next);
2138 if (cur) {
2139 call_rcu(cur, address_space_dispatch_free, rcu);
2143 static void tcg_commit(MemoryListener *listener)
2145 CPUState *cpu;
2147 /* since each CPU stores ram addresses in its TLB cache, we must
2148 reset the modified entries */
2149 /* XXX: slow ! */
2150 CPU_FOREACH(cpu) {
 2151         /* FIXME: Disentangle the cpu.h circular file dependencies so we can
2152 directly get the right CPU from listener. */
2153 if (cpu->tcg_as_listener != listener) {
2154 continue;
2156 cpu_reload_memory_map(cpu);
2160 static void core_log_global_start(MemoryListener *listener)
2162 cpu_physical_memory_set_dirty_tracking(true);
2165 static void core_log_global_stop(MemoryListener *listener)
2167 cpu_physical_memory_set_dirty_tracking(false);
2170 static MemoryListener core_memory_listener = {
2171 .log_global_start = core_log_global_start,
2172 .log_global_stop = core_log_global_stop,
2173 .priority = 1,
2176 void address_space_init_dispatch(AddressSpace *as)
2178 as->dispatch = NULL;
2179 as->dispatch_listener = (MemoryListener) {
2180 .begin = mem_begin,
2181 .commit = mem_commit,
2182 .region_add = mem_add,
2183 .region_nop = mem_add,
2184 .priority = 0,
2186 memory_listener_register(&as->dispatch_listener, as);
2189 void address_space_unregister(AddressSpace *as)
2191 memory_listener_unregister(&as->dispatch_listener);
2194 void address_space_destroy_dispatch(AddressSpace *as)
2196 AddressSpaceDispatch *d = as->dispatch;
2198 atomic_rcu_set(&as->dispatch, NULL);
2199 if (d) {
2200 call_rcu(d, address_space_dispatch_free, rcu);
2204 static void memory_map_init(void)
2206 system_memory = g_malloc(sizeof(*system_memory));
2208 memory_region_init(system_memory, NULL, "system", UINT64_MAX);
2209 address_space_init(&address_space_memory, system_memory, "memory");
2211 system_io = g_malloc(sizeof(*system_io));
2212 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
2213 65536);
2214 address_space_init(&address_space_io, system_io, "I/O");
2216 memory_listener_register(&core_memory_listener, &address_space_memory);
2219 MemoryRegion *get_system_memory(void)
2221 return system_memory;
2224 MemoryRegion *get_system_io(void)
2226 return system_io;
2229 #endif /* !defined(CONFIG_USER_ONLY) */
2231 /* physical memory access (slow version, mainly for debug) */
2232 #if defined(CONFIG_USER_ONLY)
2233 int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
2234 uint8_t *buf, int len, int is_write)
2236 int l, flags;
2237 target_ulong page;
2238 void * p;
2240 while (len > 0) {
2241 page = addr & TARGET_PAGE_MASK;
2242 l = (page + TARGET_PAGE_SIZE) - addr;
2243 if (l > len)
2244 l = len;
2245 flags = page_get_flags(page);
2246 if (!(flags & PAGE_VALID))
2247 return -1;
2248 if (is_write) {
2249 if (!(flags & PAGE_WRITE))
2250 return -1;
2251 /* XXX: this code should not depend on lock_user */
2252 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
2253 return -1;
2254 memcpy(p, buf, l);
2255 unlock_user(p, addr, l);
2256 } else {
2257 if (!(flags & PAGE_READ))
2258 return -1;
2259 /* XXX: this code should not depend on lock_user */
2260 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
2261 return -1;
2262 memcpy(buf, p, l);
2263 unlock_user(p, addr, 0);
2265 len -= l;
2266 buf += l;
2267 addr += l;
2269 return 0;
2272 #else
2274 static void invalidate_and_set_dirty(hwaddr addr,
2275 hwaddr length)
2277 if (cpu_physical_memory_range_includes_clean(addr, length)) {
2278 tb_invalidate_phys_range(addr, addr + length, 0);
2279 cpu_physical_memory_set_dirty_range_nocode(addr, length);
2281 xen_modified_memory(addr, length);
2284 static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
2286 unsigned access_size_max = mr->ops->valid.max_access_size;
2288 /* Regions are assumed to support 1-4 byte accesses unless
2289 otherwise specified. */
2290 if (access_size_max == 0) {
2291 access_size_max = 4;
2294 /* Bound the maximum access by the alignment of the address. */
2295 if (!mr->ops->impl.unaligned) {
2296 unsigned align_size_max = addr & -addr;
2297 if (align_size_max != 0 && align_size_max < access_size_max) {
2298 access_size_max = align_size_max;
2302 /* Don't attempt accesses larger than the maximum. */
2303 if (l > access_size_max) {
2304 l = access_size_max;
2306 if (l & (l - 1)) {
2307 l = 1 << (qemu_fls(l) - 1);
2310 return l;
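/*
 * Editor's example (illustrative values, not from the original source):
 * for an MMIO region with valid.max_access_size == 0 (treated as 4) and
 * impl.unaligned == false, an 8-byte access at addr1 == 0x1006 is clamped
 * first to 4 by the default maximum and then to 2 by the address
 * alignment (0x1006 & -0x1006 == 2), so address_space_rw() below ends up
 * issuing 2-byte accesses for that region.
 */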
2313 bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
2314 int len, bool is_write)
2316 hwaddr l;
2317 uint8_t *ptr;
2318 uint64_t val;
2319 hwaddr addr1;
2320 MemoryRegion *mr;
2321 bool error = false;
2323 while (len > 0) {
2324 l = len;
2325 mr = address_space_translate(as, addr, &addr1, &l, is_write);
2327 if (is_write) {
2328 if (!memory_access_is_direct(mr, is_write)) {
2329 l = memory_access_size(mr, l, addr1);
2330 /* XXX: could force current_cpu to NULL to avoid
2331 potential bugs */
2332 switch (l) {
2333 case 8:
2334 /* 64 bit write access */
2335 val = ldq_p(buf);
2336 error |= io_mem_write(mr, addr1, val, 8);
2337 break;
2338 case 4:
2339 /* 32 bit write access */
2340 val = ldl_p(buf);
2341 error |= io_mem_write(mr, addr1, val, 4);
2342 break;
2343 case 2:
2344 /* 16 bit write access */
2345 val = lduw_p(buf);
2346 error |= io_mem_write(mr, addr1, val, 2);
2347 break;
2348 case 1:
2349 /* 8 bit write access */
2350 val = ldub_p(buf);
2351 error |= io_mem_write(mr, addr1, val, 1);
2352 break;
2353 default:
2354 abort();
2356 } else {
2357 addr1 += memory_region_get_ram_addr(mr);
2358 /* RAM case */
2359 ptr = qemu_get_ram_ptr(addr1);
2360 memcpy(ptr, buf, l);
2361 invalidate_and_set_dirty(addr1, l);
2363 } else {
2364 if (!memory_access_is_direct(mr, is_write)) {
2365 /* I/O case */
2366 l = memory_access_size(mr, l, addr1);
2367 switch (l) {
2368 case 8:
2369 /* 64 bit read access */
2370 error |= io_mem_read(mr, addr1, &val, 8);
2371 stq_p(buf, val);
2372 break;
2373 case 4:
2374 /* 32 bit read access */
2375 error |= io_mem_read(mr, addr1, &val, 4);
2376 stl_p(buf, val);
2377 break;
2378 case 2:
2379 /* 16 bit read access */
2380 error |= io_mem_read(mr, addr1, &val, 2);
2381 stw_p(buf, val);
2382 break;
2383 case 1:
2384 /* 8 bit read access */
2385 error |= io_mem_read(mr, addr1, &val, 1);
2386 stb_p(buf, val);
2387 break;
2388 default:
2389 abort();
2391 } else {
2392 /* RAM case */
2393 ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
2394 memcpy(buf, ptr, l);
2397 len -= l;
2398 buf += l;
2399 addr += l;
2402 return error;
2405 bool address_space_write(AddressSpace *as, hwaddr addr,
2406 const uint8_t *buf, int len)
2408 return address_space_rw(as, addr, (uint8_t *)buf, len, true);
2411 bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
2413 return address_space_rw(as, addr, buf, len, false);
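/*
 * Editor's usage sketch for the wrappers above (hypothetical guest
 * address 0x1000, not from the original file): address_space_rw() and
 * its read/write wrappers return true if any sub-access hit an I/O
 * region that reported an error; direct RAM accesses never fail.
 *
 *     uint32_t v;
 *     if (address_space_read(&address_space_memory, 0x1000,
 *                            (uint8_t *)&v, sizeof(v))) {
 *         // at least one sub-access failed
 *     }
 */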
2417 void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
2418 int len, int is_write)
2420 address_space_rw(&address_space_memory, addr, buf, len, is_write);
2423 enum write_rom_type {
2424 WRITE_DATA,
2425 FLUSH_CACHE,
2428 static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
2429 hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
2431 hwaddr l;
2432 uint8_t *ptr;
2433 hwaddr addr1;
2434 MemoryRegion *mr;
2436 while (len > 0) {
2437 l = len;
2438 mr = address_space_translate(as, addr, &addr1, &l, true);
2440 if (!(memory_region_is_ram(mr) ||
2441 memory_region_is_romd(mr))) {
2442 /* do nothing */
2443 } else {
2444 addr1 += memory_region_get_ram_addr(mr);
2445 /* ROM/RAM case */
2446 ptr = qemu_get_ram_ptr(addr1);
2447 switch (type) {
2448 case WRITE_DATA:
2449 memcpy(ptr, buf, l);
2450 invalidate_and_set_dirty(addr1, l);
2451 break;
2452 case FLUSH_CACHE:
2453 flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
2454 break;
2457 len -= l;
2458 buf += l;
2459 addr += l;
2463 /* used for ROM loading: can write to RAM and ROM */
2464 void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
2465 const uint8_t *buf, int len)
2467 cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
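/*
 * Editor's usage sketch (hypothetical firmware blob and address, not part
 * of the original code): ROM regions reject ordinary writes, so loaders
 * go through this helper instead of address_space_write():
 *
 *     cpu_physical_memory_write_rom(&address_space_memory, 0xfffc0000,
 *                                   blob, blob_size);
 */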
2470 void cpu_flush_icache_range(hwaddr start, int len)
2473 * This function should do the same thing as an icache flush that was
2474 * triggered from within the guest. For TCG we are always cache coherent,
2475 * so there is no need to flush anything. For KVM/Xen we need to flush
2476 * at least the host's instruction cache.
2478 if (tcg_enabled()) {
2479 return;
2482 cpu_physical_memory_write_rom_internal(&address_space_memory,
2483 start, NULL, len, FLUSH_CACHE);
2486 typedef struct {
2487 MemoryRegion *mr;
2488 void *buffer;
2489 hwaddr addr;
2490 hwaddr len;
2491 } BounceBuffer;
2493 static BounceBuffer bounce;
2495 typedef struct MapClient {
2496 void *opaque;
2497 void (*callback)(void *opaque);
2498 QLIST_ENTRY(MapClient) link;
2499 } MapClient;
2501 static QLIST_HEAD(map_client_list, MapClient) map_client_list
2502 = QLIST_HEAD_INITIALIZER(map_client_list);
2504 void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
2506 MapClient *client = g_malloc(sizeof(*client));
2508 client->opaque = opaque;
2509 client->callback = callback;
2510 QLIST_INSERT_HEAD(&map_client_list, client, link);
2511 return client;
2514 static void cpu_unregister_map_client(void *_client)
2516 MapClient *client = (MapClient *)_client;
2518 QLIST_REMOVE(client, link);
2519 g_free(client);
2522 static void cpu_notify_map_clients(void)
2524 MapClient *client;
2526 while (!QLIST_EMPTY(&map_client_list)) {
2527 client = QLIST_FIRST(&map_client_list);
2528 client->callback(client->opaque);
2529 cpu_unregister_map_client(client);
2533 bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2535 MemoryRegion *mr;
2536 hwaddr l, xlat;
2538 while (len > 0) {
2539 l = len;
2540 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2541 if (!memory_access_is_direct(mr, is_write)) {
2542 l = memory_access_size(mr, l, addr);
2543 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
2544 return false;
2548 len -= l;
2549 addr += l;
2551 return true;
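/*
 * Editor's usage sketch (hypothetical DMA descriptor fields, not from the
 * original source): address_space_access_valid() lets a device model
 * probe a transfer up front instead of failing half-way through:
 *
 *     if (!address_space_access_valid(as, desc_addr, desc_len, false)) {
 *         return;   // e.g. raise a device-specific DMA error
 *     }
 */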
2554 /* Map a physical memory region into a host virtual address.
2555 * May map a subset of the requested range, given by and returned in *plen.
2556 * May return NULL if resources needed to perform the mapping are exhausted.
2557 * Use only for reads OR writes - not for read-modify-write operations.
2558 * Use cpu_register_map_client() to know when retrying the map operation is
2559 * likely to succeed.
2561 void *address_space_map(AddressSpace *as,
2562 hwaddr addr,
2563 hwaddr *plen,
2564 bool is_write)
2566 hwaddr len = *plen;
2567 hwaddr done = 0;
2568 hwaddr l, xlat, base;
2569 MemoryRegion *mr, *this_mr;
2570 ram_addr_t raddr;
2572 if (len == 0) {
2573 return NULL;
2576 l = len;
2577 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2578 if (!memory_access_is_direct(mr, is_write)) {
2579 if (bounce.buffer) {
2580 return NULL;
2582 /* Avoid unbounded allocations */
2583 l = MIN(l, TARGET_PAGE_SIZE);
2584 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
2585 bounce.addr = addr;
2586 bounce.len = l;
2588 memory_region_ref(mr);
2589 bounce.mr = mr;
2590 if (!is_write) {
2591 address_space_read(as, addr, bounce.buffer, l);
2594 *plen = l;
2595 return bounce.buffer;
2598 base = xlat;
2599 raddr = memory_region_get_ram_addr(mr);
2601 for (;;) {
2602 len -= l;
2603 addr += l;
2604 done += l;
2605 if (len == 0) {
2606 break;
2609 l = len;
2610 this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
2611 if (this_mr != mr || xlat != base + done) {
2612 break;
2616 memory_region_ref(mr);
2617 *plen = done;
2618 return qemu_ram_ptr_length(raddr + base, plen);
2621 /* Unmaps a memory region previously mapped by address_space_map().
2622 * Will also mark the memory as dirty if is_write == 1. access_len gives
2623 * the amount of memory that was actually read or written by the caller.
2625 void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2626 int is_write, hwaddr access_len)
2628 if (buffer != bounce.buffer) {
2629 MemoryRegion *mr;
2630 ram_addr_t addr1;
2632 mr = qemu_ram_addr_from_host(buffer, &addr1);
2633 assert(mr != NULL);
2634 if (is_write) {
2635 invalidate_and_set_dirty(addr1, access_len);
2637 if (xen_enabled()) {
2638 xen_invalidate_map_cache_entry(buffer);
2640 memory_region_unref(mr);
2641 return;
2643 if (is_write) {
2644 address_space_write(as, bounce.addr, bounce.buffer, access_len);
2646 qemu_vfree(bounce.buffer);
2647 bounce.buffer = NULL;
2648 memory_region_unref(bounce.mr);
2649 cpu_notify_map_clients();
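/*
 * Editor's usage sketch (hypothetical buffer and guest-physical address
 * gpa, not part of the original file): the map/unmap pair above, which
 * falls back to the single shared bounce buffer for indirect regions.
 * If address_space_map() returns NULL, cpu_register_map_client() can be
 * used to learn when the bounce buffer is free and the map is worth
 * retrying.
 *
 *     hwaddr plen = size;
 *     void *p = address_space_map(as, gpa, &plen, true);
 *     if (p) {
 *         memcpy(p, data, plen);
 *         address_space_unmap(as, p, plen, true, plen);
 *     }
 */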
2652 void *cpu_physical_memory_map(hwaddr addr,
2653 hwaddr *plen,
2654 int is_write)
2656 return address_space_map(&address_space_memory, addr, plen, is_write);
2659 void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2660 int is_write, hwaddr access_len)
2662 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2665 /* warning: addr must be aligned */
2666 static inline uint32_t ldl_phys_internal(AddressSpace *as, hwaddr addr,
2667 enum device_endian endian)
2669 uint8_t *ptr;
2670 uint64_t val;
2671 MemoryRegion *mr;
2672 hwaddr l = 4;
2673 hwaddr addr1;
2675 mr = address_space_translate(as, addr, &addr1, &l, false);
2676 if (l < 4 || !memory_access_is_direct(mr, false)) {
2677 /* I/O case */
2678 io_mem_read(mr, addr1, &val, 4);
2679 #if defined(TARGET_WORDS_BIGENDIAN)
2680 if (endian == DEVICE_LITTLE_ENDIAN) {
2681 val = bswap32(val);
2683 #else
2684 if (endian == DEVICE_BIG_ENDIAN) {
2685 val = bswap32(val);
2687 #endif
2688 } else {
2689 /* RAM case */
2690 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
2691 & TARGET_PAGE_MASK)
2692 + addr1);
2693 switch (endian) {
2694 case DEVICE_LITTLE_ENDIAN:
2695 val = ldl_le_p(ptr);
2696 break;
2697 case DEVICE_BIG_ENDIAN:
2698 val = ldl_be_p(ptr);
2699 break;
2700 default:
2701 val = ldl_p(ptr);
2702 break;
2705 return val;
2708 uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
2710 return ldl_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
2713 uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
2715 return ldl_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
2718 uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
2720 return ldl_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
2723 /* warning: addr must be aligned */
2724 static inline uint64_t ldq_phys_internal(AddressSpace *as, hwaddr addr,
2725 enum device_endian endian)
2727 uint8_t *ptr;
2728 uint64_t val;
2729 MemoryRegion *mr;
2730 hwaddr l = 8;
2731 hwaddr addr1;
2733 mr = address_space_translate(as, addr, &addr1, &l,
2734 false);
2735 if (l < 8 || !memory_access_is_direct(mr, false)) {
2736 /* I/O case */
2737 io_mem_read(mr, addr1, &val, 8);
2738 #if defined(TARGET_WORDS_BIGENDIAN)
2739 if (endian == DEVICE_LITTLE_ENDIAN) {
2740 val = bswap64(val);
2742 #else
2743 if (endian == DEVICE_BIG_ENDIAN) {
2744 val = bswap64(val);
2746 #endif
2747 } else {
2748 /* RAM case */
2749 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
2750 & TARGET_PAGE_MASK)
2751 + addr1);
2752 switch (endian) {
2753 case DEVICE_LITTLE_ENDIAN:
2754 val = ldq_le_p(ptr);
2755 break;
2756 case DEVICE_BIG_ENDIAN:
2757 val = ldq_be_p(ptr);
2758 break;
2759 default:
2760 val = ldq_p(ptr);
2761 break;
2764 return val;
2767 uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
2769 return ldq_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
2772 uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
2774 return ldq_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
2777 uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
2779 return ldq_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
2782 /* XXX: optimize */
2783 uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
2785 uint8_t val;
2786 address_space_rw(as, addr, &val, 1, 0);
2787 return val;
2790 /* warning: addr must be aligned */
2791 static inline uint32_t lduw_phys_internal(AddressSpace *as, hwaddr addr,
2792 enum device_endian endian)
2794 uint8_t *ptr;
2795 uint64_t val;
2796 MemoryRegion *mr;
2797 hwaddr l = 2;
2798 hwaddr addr1;
2800 mr = address_space_translate(as, addr, &addr1, &l,
2801 false);
2802 if (l < 2 || !memory_access_is_direct(mr, false)) {
2803 /* I/O case */
2804 io_mem_read(mr, addr1, &val, 2);
2805 #if defined(TARGET_WORDS_BIGENDIAN)
2806 if (endian == DEVICE_LITTLE_ENDIAN) {
2807 val = bswap16(val);
2809 #else
2810 if (endian == DEVICE_BIG_ENDIAN) {
2811 val = bswap16(val);
2813 #endif
2814 } else {
2815 /* RAM case */
2816 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
2817 & TARGET_PAGE_MASK)
2818 + addr1);
2819 switch (endian) {
2820 case DEVICE_LITTLE_ENDIAN:
2821 val = lduw_le_p(ptr);
2822 break;
2823 case DEVICE_BIG_ENDIAN:
2824 val = lduw_be_p(ptr);
2825 break;
2826 default:
2827 val = lduw_p(ptr);
2828 break;
2831 return val;
2834 uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
2836 return lduw_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
2839 uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
2841 return lduw_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
2844 uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
2846 return lduw_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
2849 /* warning: addr must be aligned. The RAM page is not marked as dirty
2850 and the code inside is not invalidated. This is useful if the dirty
2851 bits are used to track modified PTEs */
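/*
 * Editor's note (added documentation, not upstream): stl_phys_notdirty()
 * is intended for target code that updates guest page-table entries (for
 * example setting accessed/dirty bits) while still relying on the dirty
 * bitmap to detect guest modifications of those PTEs, so the store skips
 * the usual invalidate_and_set_dirty() path; only while migration is in
 * progress does it invalidate TBs and mark the word dirty.
 */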
2852 void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
2854 uint8_t *ptr;
2855 MemoryRegion *mr;
2856 hwaddr l = 4;
2857 hwaddr addr1;
2859 mr = address_space_translate(as, addr, &addr1, &l,
2860 true);
2861 if (l < 4 || !memory_access_is_direct(mr, true)) {
2862 io_mem_write(mr, addr1, val, 4);
2863 } else {
2864 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
2865 ptr = qemu_get_ram_ptr(addr1);
2866 stl_p(ptr, val);
2868 if (unlikely(in_migration)) {
2869 if (cpu_physical_memory_is_clean(addr1)) {
2870 /* invalidate code */
2871 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2872 /* set dirty bit */
2873 cpu_physical_memory_set_dirty_range_nocode(addr1, 4);
2879 /* warning: addr must be aligned */
2880 static inline void stl_phys_internal(AddressSpace *as,
2881 hwaddr addr, uint32_t val,
2882 enum device_endian endian)
2884 uint8_t *ptr;
2885 MemoryRegion *mr;
2886 hwaddr l = 4;
2887 hwaddr addr1;
2889 mr = address_space_translate(as, addr, &addr1, &l,
2890 true);
2891 if (l < 4 || !memory_access_is_direct(mr, true)) {
2892 #if defined(TARGET_WORDS_BIGENDIAN)
2893 if (endian == DEVICE_LITTLE_ENDIAN) {
2894 val = bswap32(val);
2896 #else
2897 if (endian == DEVICE_BIG_ENDIAN) {
2898 val = bswap32(val);
2900 #endif
2901 io_mem_write(mr, addr1, val, 4);
2902 } else {
2903 /* RAM case */
2904 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
2905 ptr = qemu_get_ram_ptr(addr1);
2906 switch (endian) {
2907 case DEVICE_LITTLE_ENDIAN:
2908 stl_le_p(ptr, val);
2909 break;
2910 case DEVICE_BIG_ENDIAN:
2911 stl_be_p(ptr, val);
2912 break;
2913 default:
2914 stl_p(ptr, val);
2915 break;
2917 invalidate_and_set_dirty(addr1, 4);
2921 void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
2923 stl_phys_internal(as, addr, val, DEVICE_NATIVE_ENDIAN);
2926 void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
2928 stl_phys_internal(as, addr, val, DEVICE_LITTLE_ENDIAN);
2931 void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
2933 stl_phys_internal(as, addr, val, DEVICE_BIG_ENDIAN);
2936 /* XXX: optimize */
2937 void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
2939 uint8_t v = val;
2940 address_space_rw(as, addr, &v, 1, 1);
2943 /* warning: addr must be aligned */
2944 static inline void stw_phys_internal(AddressSpace *as,
2945 hwaddr addr, uint32_t val,
2946 enum device_endian endian)
2948 uint8_t *ptr;
2949 MemoryRegion *mr;
2950 hwaddr l = 2;
2951 hwaddr addr1;
2953 mr = address_space_translate(as, addr, &addr1, &l, true);
2954 if (l < 2 || !memory_access_is_direct(mr, true)) {
2955 #if defined(TARGET_WORDS_BIGENDIAN)
2956 if (endian == DEVICE_LITTLE_ENDIAN) {
2957 val = bswap16(val);
2959 #else
2960 if (endian == DEVICE_BIG_ENDIAN) {
2961 val = bswap16(val);
2963 #endif
2964 io_mem_write(mr, addr1, val, 2);
2965 } else {
2966 /* RAM case */
2967 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
2968 ptr = qemu_get_ram_ptr(addr1);
2969 switch (endian) {
2970 case DEVICE_LITTLE_ENDIAN:
2971 stw_le_p(ptr, val);
2972 break;
2973 case DEVICE_BIG_ENDIAN:
2974 stw_be_p(ptr, val);
2975 break;
2976 default:
2977 stw_p(ptr, val);
2978 break;
2980 invalidate_and_set_dirty(addr1, 2);
2984 void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
2986 stw_phys_internal(as, addr, val, DEVICE_NATIVE_ENDIAN);
2989 void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
2991 stw_phys_internal(as, addr, val, DEVICE_LITTLE_ENDIAN);
2994 void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
2996 stw_phys_internal(as, addr, val, DEVICE_BIG_ENDIAN);
2999 /* XXX: optimize */
3000 void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
3002 val = tswap64(val);
3003 address_space_rw(as, addr, (void *) &val, 8, 1);
3006 void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
3008 val = cpu_to_le64(val);
3009 address_space_rw(as, addr, (void *) &val, 8, 1);
3012 void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
3014 val = cpu_to_be64(val);
3015 address_space_rw(as, addr, (void *) &val, 8, 1);
3018 /* virtual memory access for debug (includes writing to ROM) */
3019 int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
3020 uint8_t *buf, int len, int is_write)
3022 int l;
3023 hwaddr phys_addr;
3024 target_ulong page;
3026 while (len > 0) {
3027 page = addr & TARGET_PAGE_MASK;
3028 phys_addr = cpu_get_phys_page_debug(cpu, page);
3029 /* if no physical page is mapped, return an error */
3030 if (phys_addr == -1)
3031 return -1;
3032 l = (page + TARGET_PAGE_SIZE) - addr;
3033 if (l > len)
3034 l = len;
3035 phys_addr += (addr & ~TARGET_PAGE_MASK);
3036 if (is_write) {
3037 cpu_physical_memory_write_rom(cpu->as, phys_addr, buf, l);
3038 } else {
3039 address_space_rw(cpu->as, phys_addr, buf, l, 0);
3041 len -= l;
3042 buf += l;
3043 addr += l;
3045 return 0;
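/*
 * Editor's usage sketch (hypothetical pc value, not from the original
 * file): cpu_memory_rw_debug() translates through the CPU's MMU page by
 * page and uses the ROM write path, so debuggers can plant breakpoints
 * even in ROM:
 *
 *     uint8_t insn[4];
 *     if (cpu_memory_rw_debug(cpu, pc, insn, sizeof(insn), 0) < 0) {
 *         // unmapped virtual address
 *     }
 */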
3047 #endif
3050 * A helper function for the _utterly broken_ virtio device model to find out if
3051 * it's running on a big-endian machine. Don't do this at home, kids!
3053 bool target_words_bigendian(void);
3054 bool target_words_bigendian(void)
3056 #if defined(TARGET_WORDS_BIGENDIAN)
3057 return true;
3058 #else
3059 return false;
3060 #endif
3063 #ifndef CONFIG_USER_ONLY
3064 bool cpu_physical_memory_is_io(hwaddr phys_addr)
3066 MemoryRegion *mr;
3067 hwaddr l = 1;
3069 mr = address_space_translate(&address_space_memory,
3070 phys_addr, &phys_addr, &l, false);
3072 return !(memory_region_is_ram(mr) ||
3073 memory_region_is_romd(mr));
3076 void qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
3078 RAMBlock *block;
3080 rcu_read_lock();
3081 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
3082 func(block->host, block->offset, block->used_length, opaque);
3084 rcu_read_unlock();
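/*
 * Editor's usage sketch (hypothetical callback, not part of the original
 * source): qemu_ram_foreach_block() walks every RAMBlock under the RCU
 * read lock and hands (host pointer, ram_addr offset, used length) to the
 * callback:
 *
 *     static void count_ram(void *host, ram_addr_t offset,
 *                           ram_addr_t length, void *opaque)
 *     {
 *         *(uint64_t *)opaque += length;
 *     }
 *
 *     uint64_t total = 0;
 *     qemu_ram_foreach_block(count_ram, &total);
 */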
3086 #endif