exec: Protect map_client_list with mutex
[qemu.git] / exec.c
blob 81666d3d6d815c115b56e698d4d42d0134bdd0e9
1 /*
2 * Virtual page mapping
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 #include "config.h"
20 #ifndef _WIN32
21 #include <sys/types.h>
22 #include <sys/mman.h>
23 #endif
25 #include "qemu-common.h"
26 #include "cpu.h"
27 #include "tcg.h"
28 #include "hw/hw.h"
29 #if !defined(CONFIG_USER_ONLY)
30 #include "hw/boards.h"
31 #endif
32 #include "hw/qdev.h"
33 #include "qemu/osdep.h"
34 #include "sysemu/kvm.h"
35 #include "sysemu/sysemu.h"
36 #include "hw/xen/xen.h"
37 #include "qemu/timer.h"
38 #include "qemu/config-file.h"
39 #include "qemu/error-report.h"
40 #include "exec/memory.h"
41 #include "sysemu/dma.h"
42 #include "exec/address-spaces.h"
43 #if defined(CONFIG_USER_ONLY)
44 #include <qemu.h>
45 #else /* !CONFIG_USER_ONLY */
46 #include "sysemu/xen-mapcache.h"
47 #include "trace.h"
48 #endif
49 #include "exec/cpu-all.h"
50 #include "qemu/rcu_queue.h"
51 #include "exec/cputlb.h"
52 #include "translate-all.h"
54 #include "exec/memory-internal.h"
55 #include "exec/ram_addr.h"
57 #include "qemu/range.h"
59 //#define DEBUG_SUBPAGE
61 #if !defined(CONFIG_USER_ONLY)
62 static bool in_migration;
64 /* ram_list is read under rcu_read_lock()/rcu_read_unlock(). Writes
65 * are protected by the ramlist lock.
67 RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };
69 static MemoryRegion *system_memory;
70 static MemoryRegion *system_io;
72 AddressSpace address_space_io;
73 AddressSpace address_space_memory;
75 MemoryRegion io_mem_rom, io_mem_notdirty;
76 static MemoryRegion io_mem_unassigned;
78 /* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
79 #define RAM_PREALLOC (1 << 0)
81 /* RAM is mmap-ed with MAP_SHARED */
82 #define RAM_SHARED (1 << 1)
84 /* Only a portion of RAM (used_length) is actually used, and migrated.
85 * This used_length size can change across reboots.
87 #define RAM_RESIZEABLE (1 << 2)
89 #endif
91 struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
92 /* current CPU in the current thread. It is only valid inside
93 cpu_exec() */
94 DEFINE_TLS(CPUState *, current_cpu);
95 /* 0 = Do not count executed instructions.
96 1 = Precise instruction counting.
97 2 = Adaptive rate instruction counting. */
98 int use_icount;
100 #if !defined(CONFIG_USER_ONLY)
102 typedef struct PhysPageEntry PhysPageEntry;
104 struct PhysPageEntry {
105 /* How many bits to skip to the next level (in units of L2_SIZE). 0 for a leaf. */
106 uint32_t skip : 6;
107 /* index into phys_sections (!skip) or phys_map_nodes (skip) */
108 uint32_t ptr : 26;
111 #define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)
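/* Note: ((uint32_t)~0) >> 6 is 0x3ffffff, the largest value the 26-bit 'ptr'
 * bit-field above can hold, so it doubles as the "no node" marker;
 * phys_map_node_alloc() asserts that a real node never receives this index.
 */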
113 /* Size of the L2 (and L3, etc) page tables. */
114 #define ADDR_SPACE_BITS 64
116 #define P_L2_BITS 9
117 #define P_L2_SIZE (1 << P_L2_BITS)
119 #define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
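/* Worked example (assuming a target with 4 KiB pages, TARGET_PAGE_BITS == 12):
 * P_L2_LEVELS = ((64 - 12 - 1) / 9) + 1 = 6, i.e. six table levels of 512
 * entries each, enough to cover the 52 page-number bits of a 64-bit address
 * space (6 * 9 = 54 >= 52).
 */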
121 typedef PhysPageEntry Node[P_L2_SIZE];
123 typedef struct PhysPageMap {
124 struct rcu_head rcu;
126 unsigned sections_nb;
127 unsigned sections_nb_alloc;
128 unsigned nodes_nb;
129 unsigned nodes_nb_alloc;
130 Node *nodes;
131 MemoryRegionSection *sections;
132 } PhysPageMap;
134 struct AddressSpaceDispatch {
135 struct rcu_head rcu;
137 /* This is a multi-level map on the physical address space.
138 * The bottom level has pointers to MemoryRegionSections.
140 PhysPageEntry phys_map;
141 PhysPageMap map;
142 AddressSpace *as;
145 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
146 typedef struct subpage_t {
147 MemoryRegion iomem;
148 AddressSpace *as;
149 hwaddr base;
150 uint16_t sub_section[TARGET_PAGE_SIZE];
151 } subpage_t;
153 #define PHYS_SECTION_UNASSIGNED 0
154 #define PHYS_SECTION_NOTDIRTY 1
155 #define PHYS_SECTION_ROM 2
156 #define PHYS_SECTION_WATCH 3
158 static void io_mem_init(void);
159 static void memory_map_init(void);
160 static void tcg_commit(MemoryListener *listener);
162 static MemoryRegion io_mem_watch;
163 #endif
165 #if !defined(CONFIG_USER_ONLY)
167 static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
169 if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
170 map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
171 map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
172 map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
176 static uint32_t phys_map_node_alloc(PhysPageMap *map)
178 unsigned i;
179 uint32_t ret;
181 ret = map->nodes_nb++;
182 assert(ret != PHYS_MAP_NODE_NIL);
183 assert(ret != map->nodes_nb_alloc);
184 for (i = 0; i < P_L2_SIZE; ++i) {
185 map->nodes[ret][i].skip = 1;
186 map->nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
188 return ret;
191 static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
192 hwaddr *index, hwaddr *nb, uint16_t leaf,
193 int level)
195 PhysPageEntry *p;
196 int i;
197 hwaddr step = (hwaddr)1 << (level * P_L2_BITS);
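/* step is how many pages one entry covers at this level: 1 at level 0, 512
 * (P_L2_SIZE) at level 1, 512^2 at level 2, and so on.  The loop below
 * installs a leaf directly only when the remaining range is step-aligned and
 * at least step pages long; otherwise it recurses one level down.
 */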
199 if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
200 lp->ptr = phys_map_node_alloc(map);
201 p = map->nodes[lp->ptr];
202 if (level == 0) {
203 for (i = 0; i < P_L2_SIZE; i++) {
204 p[i].skip = 0;
205 p[i].ptr = PHYS_SECTION_UNASSIGNED;
208 } else {
209 p = map->nodes[lp->ptr];
211 lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];
213 while (*nb && lp < &p[P_L2_SIZE]) {
214 if ((*index & (step - 1)) == 0 && *nb >= step) {
215 lp->skip = 0;
216 lp->ptr = leaf;
217 *index += step;
218 *nb -= step;
219 } else {
220 phys_page_set_level(map, lp, index, nb, leaf, level - 1);
222 ++lp;
226 static void phys_page_set(AddressSpaceDispatch *d,
227 hwaddr index, hwaddr nb,
228 uint16_t leaf)
230 /* Wildly overreserve - it doesn't matter much. */
231 phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);
233 phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
236 /* Compact a non-leaf page entry. Simply detect that the entry has a single child,
237 * and update our entry so we can skip it and go directly to the destination.
239 static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
241 unsigned valid_ptr = P_L2_SIZE;
242 int valid = 0;
243 PhysPageEntry *p;
244 int i;
246 if (lp->ptr == PHYS_MAP_NODE_NIL) {
247 return;
250 p = nodes[lp->ptr];
251 for (i = 0; i < P_L2_SIZE; i++) {
252 if (p[i].ptr == PHYS_MAP_NODE_NIL) {
253 continue;
256 valid_ptr = i;
257 valid++;
258 if (p[i].skip) {
259 phys_page_compact(&p[i], nodes, compacted);
263 /* We can only compress if there's only one child. */
264 if (valid != 1) {
265 return;
268 assert(valid_ptr < P_L2_SIZE);
270 /* Don't compress if it won't fit in the # of bits we have. */
271 if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
272 return;
275 lp->ptr = p[valid_ptr].ptr;
276 if (!p[valid_ptr].skip) {
277 /* If our only child is a leaf, make this a leaf. */
278 /* By design, we should have made this node a leaf to begin with so we
279 * should never reach here.
280 * But since it's so simple to handle this, let's do it just in case we
281 * change this rule.
283 lp->skip = 0;
284 } else {
285 lp->skip += p[valid_ptr].skip;
289 static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
291 DECLARE_BITMAP(compacted, nodes_nb);
293 if (d->phys_map.skip) {
294 phys_page_compact(&d->phys_map, d->map.nodes, compacted);
298 static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
299 Node *nodes, MemoryRegionSection *sections)
301 PhysPageEntry *p;
302 hwaddr index = addr >> TARGET_PAGE_BITS;
303 int i;
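/* Walk down from the top level; each entry's skip field says how many levels
 * it stands in for, so a chain collapsed by phys_page_compact() costs a
 * single dereference rather than one lookup per level.
 */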
305 for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
306 if (lp.ptr == PHYS_MAP_NODE_NIL) {
307 return &sections[PHYS_SECTION_UNASSIGNED];
309 p = nodes[lp.ptr];
310 lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
313 if (sections[lp.ptr].size.hi ||
314 range_covers_byte(sections[lp.ptr].offset_within_address_space,
315 sections[lp.ptr].size.lo, addr)) {
316 return &sections[lp.ptr];
317 } else {
318 return &sections[PHYS_SECTION_UNASSIGNED];
322 bool memory_region_is_unassigned(MemoryRegion *mr)
324 return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
325 && mr != &io_mem_watch;
328 /* Called from RCU critical section */
329 static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
330 hwaddr addr,
331 bool resolve_subpage)
333 MemoryRegionSection *section;
334 subpage_t *subpage;
336 section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
337 if (resolve_subpage && section->mr->subpage) {
338 subpage = container_of(section->mr, subpage_t, iomem);
339 section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
341 return section;
344 /* Called from RCU critical section */
345 static MemoryRegionSection *
346 address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
347 hwaddr *plen, bool resolve_subpage)
349 MemoryRegionSection *section;
350 Int128 diff;
352 section = address_space_lookup_region(d, addr, resolve_subpage);
353 /* Compute offset within MemoryRegionSection */
354 addr -= section->offset_within_address_space;
356 /* Compute offset within MemoryRegion */
357 *xlat = addr + section->offset_within_region;
359 diff = int128_sub(section->mr->size, int128_make64(addr));
360 *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
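/* Illustration: if the section's MemoryRegion is 64 KiB long and the access
 * lands 0xf000 bytes into it, diff is 0x1000, so a request of *plen = 0x4000
 * is clamped to 0x1000 and never reaches past the end of the region.
 */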
361 return section;
364 static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
366 if (memory_region_is_ram(mr)) {
367 return !(is_write && mr->readonly);
369 if (memory_region_is_romd(mr)) {
370 return !is_write;
373 return false;
376 MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
377 hwaddr *xlat, hwaddr *plen,
378 bool is_write)
380 IOMMUTLBEntry iotlb;
381 MemoryRegionSection *section;
382 MemoryRegion *mr;
383 hwaddr len = *plen;
385 rcu_read_lock();
386 for (;;) {
387 AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
388 section = address_space_translate_internal(d, addr, &addr, plen, true);
389 mr = section->mr;
391 if (!mr->iommu_ops) {
392 break;
395 iotlb = mr->iommu_ops->translate(mr, addr, is_write);
396 addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
397 | (addr & iotlb.addr_mask));
398 len = MIN(len, (addr | iotlb.addr_mask) - addr + 1);
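/* Example (assuming a 4 KiB IOMMU page, addr_mask == 0xfff): the translated
 * address takes its page bits from translated_addr and its offset bits from
 * addr, and (addr | addr_mask) - addr + 1 is how much of that IOMMU page
 * remains, e.g. an offset of 0xf40 leaves 0xc0 bytes before another
 * translation is needed.
 */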
399 if (!(iotlb.perm & (1 << is_write))) {
400 mr = &io_mem_unassigned;
401 break;
404 as = iotlb.target_as;
407 if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
408 hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
409 len = MIN(page, len);
412 *plen = len;
413 *xlat = addr;
414 rcu_read_unlock();
415 return mr;
418 /* Called from RCU critical section */
419 MemoryRegionSection *
420 address_space_translate_for_iotlb(CPUState *cpu, hwaddr addr,
421 hwaddr *xlat, hwaddr *plen)
423 MemoryRegionSection *section;
424 section = address_space_translate_internal(cpu->memory_dispatch,
425 addr, xlat, plen, false);
427 assert(!section->mr->iommu_ops);
428 return section;
430 #endif
432 #if !defined(CONFIG_USER_ONLY)
434 static int cpu_common_post_load(void *opaque, int version_id)
436 CPUState *cpu = opaque;
438 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
439 version_id is increased. */
440 cpu->interrupt_request &= ~0x01;
441 tlb_flush(cpu, 1);
443 return 0;
446 static int cpu_common_pre_load(void *opaque)
448 CPUState *cpu = opaque;
450 cpu->exception_index = -1;
452 return 0;
455 static bool cpu_common_exception_index_needed(void *opaque)
457 CPUState *cpu = opaque;
459 return tcg_enabled() && cpu->exception_index != -1;
462 static const VMStateDescription vmstate_cpu_common_exception_index = {
463 .name = "cpu_common/exception_index",
464 .version_id = 1,
465 .minimum_version_id = 1,
466 .fields = (VMStateField[]) {
467 VMSTATE_INT32(exception_index, CPUState),
468 VMSTATE_END_OF_LIST()
472 const VMStateDescription vmstate_cpu_common = {
473 .name = "cpu_common",
474 .version_id = 1,
475 .minimum_version_id = 1,
476 .pre_load = cpu_common_pre_load,
477 .post_load = cpu_common_post_load,
478 .fields = (VMStateField[]) {
479 VMSTATE_UINT32(halted, CPUState),
480 VMSTATE_UINT32(interrupt_request, CPUState),
481 VMSTATE_END_OF_LIST()
483 .subsections = (VMStateSubsection[]) {
485 .vmsd = &vmstate_cpu_common_exception_index,
486 .needed = cpu_common_exception_index_needed,
487 } , {
488 /* empty */
493 #endif
495 CPUState *qemu_get_cpu(int index)
497 CPUState *cpu;
499 CPU_FOREACH(cpu) {
500 if (cpu->cpu_index == index) {
501 return cpu;
505 return NULL;
508 #if !defined(CONFIG_USER_ONLY)
509 void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as)
511 /* We only support one address space per cpu at the moment. */
512 assert(cpu->as == as);
514 if (cpu->tcg_as_listener) {
515 memory_listener_unregister(cpu->tcg_as_listener);
516 } else {
517 cpu->tcg_as_listener = g_new0(MemoryListener, 1);
519 cpu->tcg_as_listener->commit = tcg_commit;
520 memory_listener_register(cpu->tcg_as_listener, as);
522 #endif
524 void cpu_exec_init(CPUArchState *env)
526 CPUState *cpu = ENV_GET_CPU(env);
527 CPUClass *cc = CPU_GET_CLASS(cpu);
528 CPUState *some_cpu;
529 int cpu_index;
531 #if defined(CONFIG_USER_ONLY)
532 cpu_list_lock();
533 #endif
534 cpu_index = 0;
535 CPU_FOREACH(some_cpu) {
536 cpu_index++;
538 cpu->cpu_index = cpu_index;
539 cpu->numa_node = 0;
540 QTAILQ_INIT(&cpu->breakpoints);
541 QTAILQ_INIT(&cpu->watchpoints);
542 #ifndef CONFIG_USER_ONLY
543 cpu->as = &address_space_memory;
544 cpu->thread_id = qemu_get_thread_id();
545 cpu_reload_memory_map(cpu);
546 #endif
547 QTAILQ_INSERT_TAIL(&cpus, cpu, node);
548 #if defined(CONFIG_USER_ONLY)
549 cpu_list_unlock();
550 #endif
551 if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
552 vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
554 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
555 register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
556 cpu_save, cpu_load, env);
557 assert(cc->vmsd == NULL);
558 assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
559 #endif
560 if (cc->vmsd != NULL) {
561 vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
565 #if defined(CONFIG_USER_ONLY)
566 static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
568 tb_invalidate_phys_page_range(pc, pc + 1, 0);
570 #else
571 static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
573 hwaddr phys = cpu_get_phys_page_debug(cpu, pc);
574 if (phys != -1) {
575 tb_invalidate_phys_addr(cpu->as,
576 phys | (pc & ~TARGET_PAGE_MASK));
579 #endif
581 #if defined(CONFIG_USER_ONLY)
582 void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
587 int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
588 int flags)
590 return -ENOSYS;
593 void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
597 int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
598 int flags, CPUWatchpoint **watchpoint)
600 return -ENOSYS;
602 #else
603 /* Add a watchpoint. */
604 int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
605 int flags, CPUWatchpoint **watchpoint)
607 CPUWatchpoint *wp;
609 /* forbid ranges which are empty or run off the end of the address space */
610 if (len == 0 || (addr + len - 1) < addr) {
611 error_report("tried to set invalid watchpoint at %"
612 VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
613 return -EINVAL;
615 wp = g_malloc(sizeof(*wp));
617 wp->vaddr = addr;
618 wp->len = len;
619 wp->flags = flags;
621 /* keep all GDB-injected watchpoints in front */
622 if (flags & BP_GDB) {
623 QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
624 } else {
625 QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
628 tlb_flush_page(cpu, addr);
630 if (watchpoint)
631 *watchpoint = wp;
632 return 0;
635 /* Remove a specific watchpoint. */
636 int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
637 int flags)
639 CPUWatchpoint *wp;
641 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
642 if (addr == wp->vaddr && len == wp->len
643 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
644 cpu_watchpoint_remove_by_ref(cpu, wp);
645 return 0;
648 return -ENOENT;
651 /* Remove a specific watchpoint by reference. */
652 void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
654 QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);
656 tlb_flush_page(cpu, watchpoint->vaddr);
658 g_free(watchpoint);
661 /* Remove all matching watchpoints. */
662 void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
664 CPUWatchpoint *wp, *next;
666 QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
667 if (wp->flags & mask) {
668 cpu_watchpoint_remove_by_ref(cpu, wp);
673 /* Return true if this watchpoint address matches the specified
674 * access (i.e. the address range covered by the watchpoint overlaps
675 * partially or completely with the address range covered by the
676 * access).
678 static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
679 vaddr addr,
680 vaddr len)
682 /* We know the lengths are non-zero, but a little caution is
683 * required to avoid errors in the case where the range ends
684 * exactly at the top of the address space and so addr + len
685 * wraps round to zero.
687 vaddr wpend = wp->vaddr + wp->len - 1;
688 vaddr addrend = addr + len - 1;
690 return !(addr > wpend || wp->vaddr > addrend);
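/* Example: a 4-byte watchpoint at 0x1000 gives wpend == 0x1003; an 8-byte
 * access at 0x1002 gives addrend == 0x1009.  Neither start is past the other
 * range's end, so the ranges overlap and the function returns true.  Using
 * "end = start + len - 1" keeps the comparison valid even when a range ends
 * at the very top of the address space.
 */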
693 #endif
695 /* Add a breakpoint. */
696 int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
697 CPUBreakpoint **breakpoint)
699 CPUBreakpoint *bp;
701 bp = g_malloc(sizeof(*bp));
703 bp->pc = pc;
704 bp->flags = flags;
706 /* keep all GDB-injected breakpoints in front */
707 if (flags & BP_GDB) {
708 QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
709 } else {
710 QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
713 breakpoint_invalidate(cpu, pc);
715 if (breakpoint) {
716 *breakpoint = bp;
718 return 0;
721 /* Remove a specific breakpoint. */
722 int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
724 CPUBreakpoint *bp;
726 QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
727 if (bp->pc == pc && bp->flags == flags) {
728 cpu_breakpoint_remove_by_ref(cpu, bp);
729 return 0;
732 return -ENOENT;
735 /* Remove a specific breakpoint by reference. */
736 void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
738 QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);
740 breakpoint_invalidate(cpu, breakpoint->pc);
742 g_free(breakpoint);
745 /* Remove all matching breakpoints. */
746 void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
748 CPUBreakpoint *bp, *next;
750 QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
751 if (bp->flags & mask) {
752 cpu_breakpoint_remove_by_ref(cpu, bp);
757 /* enable or disable single step mode. EXCP_DEBUG is returned by the
758 CPU loop after each instruction */
759 void cpu_single_step(CPUState *cpu, int enabled)
761 if (cpu->singlestep_enabled != enabled) {
762 cpu->singlestep_enabled = enabled;
763 if (kvm_enabled()) {
764 kvm_update_guest_debug(cpu, 0);
765 } else {
766 /* must flush all the translated code to avoid inconsistencies */
767 /* XXX: only flush what is necessary */
768 CPUArchState *env = cpu->env_ptr;
769 tb_flush(env);
774 void cpu_abort(CPUState *cpu, const char *fmt, ...)
776 va_list ap;
777 va_list ap2;
779 va_start(ap, fmt);
780 va_copy(ap2, ap);
781 fprintf(stderr, "qemu: fatal: ");
782 vfprintf(stderr, fmt, ap);
783 fprintf(stderr, "\n");
784 cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
785 if (qemu_log_enabled()) {
786 qemu_log("qemu: fatal: ");
787 qemu_log_vprintf(fmt, ap2);
788 qemu_log("\n");
789 log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
790 qemu_log_flush();
791 qemu_log_close();
793 va_end(ap2);
794 va_end(ap);
795 #if defined(CONFIG_USER_ONLY)
797 struct sigaction act;
798 sigfillset(&act.sa_mask);
799 act.sa_handler = SIG_DFL;
800 sigaction(SIGABRT, &act, NULL);
802 #endif
803 abort();
806 #if !defined(CONFIG_USER_ONLY)
807 /* Called from RCU critical section */
808 static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
810 RAMBlock *block;
812 block = atomic_rcu_read(&ram_list.mru_block);
813 if (block && addr - block->offset < block->max_length) {
814 goto found;
816 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
817 if (addr - block->offset < block->max_length) {
818 goto found;
822 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
823 abort();
825 found:
826 /* It is safe to write mru_block outside the iothread lock. This
827 * is what happens:
829 * mru_block = xxx
830 * rcu_read_unlock()
831 * xxx removed from list
832 * rcu_read_lock()
833 * read mru_block
834 * mru_block = NULL;
835 * call_rcu(reclaim_ramblock, xxx);
836 * rcu_read_unlock()
838 * atomic_rcu_set is not needed here. The block was already published
839 * when it was placed into the list. Here we're just making an extra
840 * copy of the pointer.
842 ram_list.mru_block = block;
843 return block;
846 static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
848 ram_addr_t start1;
849 RAMBlock *block;
850 ram_addr_t end;
852 end = TARGET_PAGE_ALIGN(start + length);
853 start &= TARGET_PAGE_MASK;
855 rcu_read_lock();
856 block = qemu_get_ram_block(start);
857 assert(block == qemu_get_ram_block(end - 1));
858 start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
859 cpu_tlb_reset_dirty_all(start1, length);
860 rcu_read_unlock();
863 /* Note: start and end must be within the same ram block. */
864 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t length,
865 unsigned client)
867 if (length == 0)
868 return;
869 cpu_physical_memory_clear_dirty_range_type(start, length, client);
871 if (tcg_enabled()) {
872 tlb_reset_dirty_range_all(start, length);
876 static void cpu_physical_memory_set_dirty_tracking(bool enable)
878 in_migration = enable;
881 /* Called from RCU critical section */
882 hwaddr memory_region_section_get_iotlb(CPUState *cpu,
883 MemoryRegionSection *section,
884 target_ulong vaddr,
885 hwaddr paddr, hwaddr xlat,
886 int prot,
887 target_ulong *address)
889 hwaddr iotlb;
890 CPUWatchpoint *wp;
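/* Roughly: for RAM the page-aligned ram_addr of the target has a small
 * pseudo-section index (PHYS_SECTION_NOTDIRTY or PHYS_SECTION_ROM) folded
 * into its low bits, while for MMIO the value is the section number plus the
 * in-page offset; phys_section_add() asserts that section numbers stay below
 * TARGET_PAGE_SIZE, so they never spill into the page-aligned part.
 */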
892 if (memory_region_is_ram(section->mr)) {
893 /* Normal RAM. */
894 iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
895 + xlat;
896 if (!section->readonly) {
897 iotlb |= PHYS_SECTION_NOTDIRTY;
898 } else {
899 iotlb |= PHYS_SECTION_ROM;
901 } else {
902 iotlb = section - section->address_space->dispatch->map.sections;
903 iotlb += xlat;
906 /* Make accesses to pages with watchpoints go via the
907 watchpoint trap routines. */
908 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
909 if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
910 /* Avoid trapping reads of pages with a write breakpoint. */
911 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
912 iotlb = PHYS_SECTION_WATCH + paddr;
913 *address |= TLB_MMIO;
914 break;
919 return iotlb;
921 #endif /* defined(CONFIG_USER_ONLY) */
923 #if !defined(CONFIG_USER_ONLY)
925 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
926 uint16_t section);
927 static subpage_t *subpage_init(AddressSpace *as, hwaddr base);
929 static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
930 qemu_anon_ram_alloc;
933 * Set a custom physical guest memory allocator.
934 * Accelerators with unusual needs may need this. Hopefully, we can
935 * get rid of it eventually.
937 void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
939 phys_mem_alloc = alloc;
942 static uint16_t phys_section_add(PhysPageMap *map,
943 MemoryRegionSection *section)
945 /* The physical section number is ORed with a page-aligned
946 * pointer to produce the iotlb entries. Thus it should
947 * never overflow into the page-aligned value.
949 assert(map->sections_nb < TARGET_PAGE_SIZE);
951 if (map->sections_nb == map->sections_nb_alloc) {
952 map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
953 map->sections = g_renew(MemoryRegionSection, map->sections,
954 map->sections_nb_alloc);
956 map->sections[map->sections_nb] = *section;
957 memory_region_ref(section->mr);
958 return map->sections_nb++;
961 static void phys_section_destroy(MemoryRegion *mr)
963 memory_region_unref(mr);
965 if (mr->subpage) {
966 subpage_t *subpage = container_of(mr, subpage_t, iomem);
967 object_unref(OBJECT(&subpage->iomem));
968 g_free(subpage);
972 static void phys_sections_free(PhysPageMap *map)
974 while (map->sections_nb > 0) {
975 MemoryRegionSection *section = &map->sections[--map->sections_nb];
976 phys_section_destroy(section->mr);
978 g_free(map->sections);
979 g_free(map->nodes);
982 static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
984 subpage_t *subpage;
985 hwaddr base = section->offset_within_address_space
986 & TARGET_PAGE_MASK;
987 MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
988 d->map.nodes, d->map.sections);
989 MemoryRegionSection subsection = {
990 .offset_within_address_space = base,
991 .size = int128_make64(TARGET_PAGE_SIZE),
993 hwaddr start, end;
995 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
997 if (!(existing->mr->subpage)) {
998 subpage = subpage_init(d->as, base);
999 subsection.address_space = d->as;
1000 subsection.mr = &subpage->iomem;
1001 phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
1002 phys_section_add(&d->map, &subsection));
1003 } else {
1004 subpage = container_of(existing->mr, subpage_t, iomem);
1006 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
1007 end = start + int128_get64(section->size) - 1;
1008 subpage_register(subpage, start, end,
1009 phys_section_add(&d->map, section));
1013 static void register_multipage(AddressSpaceDispatch *d,
1014 MemoryRegionSection *section)
1016 hwaddr start_addr = section->offset_within_address_space;
1017 uint16_t section_index = phys_section_add(&d->map, section);
1018 uint64_t num_pages = int128_get64(int128_rshift(section->size,
1019 TARGET_PAGE_BITS));
1021 assert(num_pages);
1022 phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
1025 static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
1027 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
1028 AddressSpaceDispatch *d = as->next_dispatch;
1029 MemoryRegionSection now = *section, remain = *section;
1030 Int128 page_size = int128_make64(TARGET_PAGE_SIZE);
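/* In short: a section whose start or end is not page-aligned gets its
 * partial pages registered as subpages, while the run of whole pages in
 * between goes through register_multipage().
 */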
1032 if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
1033 uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
1034 - now.offset_within_address_space;
1036 now.size = int128_min(int128_make64(left), now.size);
1037 register_subpage(d, &now);
1038 } else {
1039 now.size = int128_zero();
1041 while (int128_ne(remain.size, now.size)) {
1042 remain.size = int128_sub(remain.size, now.size);
1043 remain.offset_within_address_space += int128_get64(now.size);
1044 remain.offset_within_region += int128_get64(now.size);
1045 now = remain;
1046 if (int128_lt(remain.size, page_size)) {
1047 register_subpage(d, &now);
1048 } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
1049 now.size = page_size;
1050 register_subpage(d, &now);
1051 } else {
1052 now.size = int128_and(now.size, int128_neg(page_size));
1053 register_multipage(d, &now);
1058 void qemu_flush_coalesced_mmio_buffer(void)
1060 if (kvm_enabled())
1061 kvm_flush_coalesced_mmio_buffer();
1064 void qemu_mutex_lock_ramlist(void)
1066 qemu_mutex_lock(&ram_list.mutex);
1069 void qemu_mutex_unlock_ramlist(void)
1071 qemu_mutex_unlock(&ram_list.mutex);
1074 #ifdef __linux__
1076 #include <sys/vfs.h>
1078 #define HUGETLBFS_MAGIC 0x958458f6
1080 static long gethugepagesize(const char *path, Error **errp)
1082 struct statfs fs;
1083 int ret;
1085 do {
1086 ret = statfs(path, &fs);
1087 } while (ret != 0 && errno == EINTR);
1089 if (ret != 0) {
1090 error_setg_errno(errp, errno, "failed to get page size of file %s",
1091 path);
1092 return 0;
1095 if (fs.f_type != HUGETLBFS_MAGIC)
1096 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
1098 return fs.f_bsize;
1101 static void *file_ram_alloc(RAMBlock *block,
1102 ram_addr_t memory,
1103 const char *path,
1104 Error **errp)
1106 char *filename;
1107 char *sanitized_name;
1108 char *c;
1109 void *area = NULL;
1110 int fd;
1111 uint64_t hpagesize;
1112 Error *local_err = NULL;
1114 hpagesize = gethugepagesize(path, &local_err);
1115 if (local_err) {
1116 error_propagate(errp, local_err);
1117 goto error;
1119 block->mr->align = hpagesize;
1121 if (memory < hpagesize) {
1122 error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
1123 "or larger than huge page size 0x%" PRIx64,
1124 memory, hpagesize);
1125 goto error;
1128 if (kvm_enabled() && !kvm_has_sync_mmu()) {
1129 error_setg(errp,
1130 "host lacks kvm mmu notifiers, -mem-path unsupported");
1131 goto error;
1134 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
1135 sanitized_name = g_strdup(memory_region_name(block->mr));
1136 for (c = sanitized_name; *c != '\0'; c++) {
1137 if (*c == '/')
1138 *c = '_';
1141 filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
1142 sanitized_name);
1143 g_free(sanitized_name);
1145 fd = mkstemp(filename);
1146 if (fd < 0) {
1147 error_setg_errno(errp, errno,
1148 "unable to create backing store for hugepages");
1149 g_free(filename);
1150 goto error;
1152 unlink(filename);
1153 g_free(filename);
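/* Round the size up to a whole number of huge pages.  Illustration (assuming
 * 2 MiB huge pages): a 5 MiB request becomes
 * (0x500000 + 0x1fffff) & ~0x1fffff == 0x600000, i.e. 6 MiB.
 */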
1155 memory = (memory+hpagesize-1) & ~(hpagesize-1);
1158 * ftruncate is not supported by hugetlbfs in older
1159 * hosts, so don't bother bailing out on errors.
1160 * If anything goes wrong with it under other filesystems,
1161 * mmap will fail.
1163 if (ftruncate(fd, memory)) {
1164 perror("ftruncate");
1167 area = mmap(0, memory, PROT_READ | PROT_WRITE,
1168 (block->flags & RAM_SHARED ? MAP_SHARED : MAP_PRIVATE),
1169 fd, 0);
1170 if (area == MAP_FAILED) {
1171 error_setg_errno(errp, errno,
1172 "unable to map backing store for hugepages");
1173 close(fd);
1174 goto error;
1177 if (mem_prealloc) {
1178 os_mem_prealloc(fd, area, memory);
1181 block->fd = fd;
1182 return area;
1184 error:
1185 if (mem_prealloc) {
1186 error_report("%s", error_get_pretty(*errp));
1187 exit(1);
1189 return NULL;
1191 #endif
1193 /* Called with the ramlist lock held. */
1194 static ram_addr_t find_ram_offset(ram_addr_t size)
1196 RAMBlock *block, *next_block;
1197 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
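/* Best-fit search: for every existing block, measure the gap up to the
 * nearest following block (or to the end of the address space) and keep the
 * smallest gap that still fits the requested size.
 */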
1199 assert(size != 0); /* it would hand out the same offset multiple times */
1201 if (QLIST_EMPTY_RCU(&ram_list.blocks)) {
1202 return 0;
1205 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1206 ram_addr_t end, next = RAM_ADDR_MAX;
1208 end = block->offset + block->max_length;
1210 QLIST_FOREACH_RCU(next_block, &ram_list.blocks, next) {
1211 if (next_block->offset >= end) {
1212 next = MIN(next, next_block->offset);
1215 if (next - end >= size && next - end < mingap) {
1216 offset = end;
1217 mingap = next - end;
1221 if (offset == RAM_ADDR_MAX) {
1222 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
1223 (uint64_t)size);
1224 abort();
1227 return offset;
1230 ram_addr_t last_ram_offset(void)
1232 RAMBlock *block;
1233 ram_addr_t last = 0;
1235 rcu_read_lock();
1236 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1237 last = MAX(last, block->offset + block->max_length);
1239 rcu_read_unlock();
1240 return last;
1243 static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1245 int ret;
1247 /* Use MADV_DONTDUMP if the user doesn't want the guest memory in the core dump */
1248 if (!machine_dump_guest_core(current_machine)) {
1249 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1250 if (ret) {
1251 perror("qemu_madvise");
1252 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1253 "but dump_guest_core=off specified\n");
1258 /* Called within an RCU critical section, or while the ramlist lock
1259 * is held.
1261 static RAMBlock *find_ram_block(ram_addr_t addr)
1263 RAMBlock *block;
1265 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1266 if (block->offset == addr) {
1267 return block;
1271 return NULL;
1274 /* Called with iothread lock held. */
1275 void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
1277 RAMBlock *new_block, *block;
1279 rcu_read_lock();
1280 new_block = find_ram_block(addr);
1281 assert(new_block);
1282 assert(!new_block->idstr[0]);
1284 if (dev) {
1285 char *id = qdev_get_dev_path(dev);
1286 if (id) {
1287 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
1288 g_free(id);
1291 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1293 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1294 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
1295 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1296 new_block->idstr);
1297 abort();
1300 rcu_read_unlock();
1303 /* Called with iothread lock held. */
1304 void qemu_ram_unset_idstr(ram_addr_t addr)
1306 RAMBlock *block;
1308 /* FIXME: arch_init.c assumes that this is not called throughout
1309 * migration. Ignore the problem since hot-unplug during migration
1310 * does not work anyway.
1313 rcu_read_lock();
1314 block = find_ram_block(addr);
1315 if (block) {
1316 memset(block->idstr, 0, sizeof(block->idstr));
1318 rcu_read_unlock();
1321 static int memory_try_enable_merging(void *addr, size_t len)
1323 if (!machine_mem_merge(current_machine)) {
1324 /* disabled by the user */
1325 return 0;
1328 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1331 /* Only legal before the guest might have detected the memory size: e.g. on
1332 * incoming migration, or right after reset.
1334 * As the memory core doesn't know how memory is accessed, it is up to the
1335 * resize callback to update device state and/or add assertions to detect
1336 * misuse, if necessary.
1338 int qemu_ram_resize(ram_addr_t base, ram_addr_t newsize, Error **errp)
1340 RAMBlock *block = find_ram_block(base);
1342 assert(block);
1344 newsize = TARGET_PAGE_ALIGN(newsize);
1346 if (block->used_length == newsize) {
1347 return 0;
1350 if (!(block->flags & RAM_RESIZEABLE)) {
1351 error_setg_errno(errp, EINVAL,
1352 "Length mismatch: %s: 0x" RAM_ADDR_FMT
1353 " in != 0x" RAM_ADDR_FMT, block->idstr,
1354 newsize, block->used_length);
1355 return -EINVAL;
1358 if (block->max_length < newsize) {
1359 error_setg_errno(errp, EINVAL,
1360 "Length too large: %s: 0x" RAM_ADDR_FMT
1361 " > 0x" RAM_ADDR_FMT, block->idstr,
1362 newsize, block->max_length);
1363 return -EINVAL;
1366 cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
1367 block->used_length = newsize;
1368 cpu_physical_memory_set_dirty_range(block->offset, block->used_length);
1369 memory_region_set_size(block->mr, newsize);
1370 if (block->resized) {
1371 block->resized(block->idstr, newsize, block->host);
1373 return 0;
1376 static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp)
1378 RAMBlock *block;
1379 RAMBlock *last_block = NULL;
1380 ram_addr_t old_ram_size, new_ram_size;
1382 old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
1384 qemu_mutex_lock_ramlist();
1385 new_block->offset = find_ram_offset(new_block->max_length);
1387 if (!new_block->host) {
1388 if (xen_enabled()) {
1389 xen_ram_alloc(new_block->offset, new_block->max_length,
1390 new_block->mr);
1391 } else {
1392 new_block->host = phys_mem_alloc(new_block->max_length,
1393 &new_block->mr->align);
1394 if (!new_block->host) {
1395 error_setg_errno(errp, errno,
1396 "cannot set up guest memory '%s'",
1397 memory_region_name(new_block->mr));
1398 qemu_mutex_unlock_ramlist();
1399 return -1;
1401 memory_try_enable_merging(new_block->host, new_block->max_length);
1405 /* Keep the list sorted from biggest to smallest block. Unlike QTAILQ,
1406 * QLIST (which has an RCU-friendly variant) does not have insertion at
1407 * tail, so save the last element in last_block.
1409 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1410 last_block = block;
1411 if (block->max_length < new_block->max_length) {
1412 break;
1415 if (block) {
1416 QLIST_INSERT_BEFORE_RCU(block, new_block, next);
1417 } else if (last_block) {
1418 QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
1419 } else { /* list is empty */
1420 QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
1422 ram_list.mru_block = NULL;
1424 /* Write list before version */
1425 smp_wmb();
1426 ram_list.version++;
1427 qemu_mutex_unlock_ramlist();
1429 new_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
1431 if (new_ram_size > old_ram_size) {
1432 int i;
1434 /* ram_list.dirty_memory[] is protected by the iothread lock. */
1435 for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
1436 ram_list.dirty_memory[i] =
1437 bitmap_zero_extend(ram_list.dirty_memory[i],
1438 old_ram_size, new_ram_size);
1441 cpu_physical_memory_set_dirty_range(new_block->offset,
1442 new_block->used_length);
1444 if (new_block->host) {
1445 qemu_ram_setup_dump(new_block->host, new_block->max_length);
1446 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
1447 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
1448 if (kvm_enabled()) {
1449 kvm_setup_guest_memory(new_block->host, new_block->max_length);
1453 return new_block->offset;
1456 #ifdef __linux__
1457 ram_addr_t qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
1458 bool share, const char *mem_path,
1459 Error **errp)
1461 RAMBlock *new_block;
1462 ram_addr_t addr;
1463 Error *local_err = NULL;
1465 if (xen_enabled()) {
1466 error_setg(errp, "-mem-path not supported with Xen");
1467 return -1;
1470 if (phys_mem_alloc != qemu_anon_ram_alloc) {
1472 * file_ram_alloc() needs to allocate just like
1473 * phys_mem_alloc, but we haven't bothered to provide
1474 * a hook there.
1476 error_setg(errp,
1477 "-mem-path not supported with this accelerator");
1478 return -1;
1481 size = TARGET_PAGE_ALIGN(size);
1482 new_block = g_malloc0(sizeof(*new_block));
1483 new_block->mr = mr;
1484 new_block->used_length = size;
1485 new_block->max_length = size;
1486 new_block->flags = share ? RAM_SHARED : 0;
1487 new_block->host = file_ram_alloc(new_block, size,
1488 mem_path, errp);
1489 if (!new_block->host) {
1490 g_free(new_block);
1491 return -1;
1494 addr = ram_block_add(new_block, &local_err);
1495 if (local_err) {
1496 g_free(new_block);
1497 error_propagate(errp, local_err);
1498 return -1;
1500 return addr;
1502 #endif
1504 static
1505 ram_addr_t qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
1506 void (*resized)(const char*,
1507 uint64_t length,
1508 void *host),
1509 void *host, bool resizeable,
1510 MemoryRegion *mr, Error **errp)
1512 RAMBlock *new_block;
1513 ram_addr_t addr;
1514 Error *local_err = NULL;
1516 size = TARGET_PAGE_ALIGN(size);
1517 max_size = TARGET_PAGE_ALIGN(max_size);
1518 new_block = g_malloc0(sizeof(*new_block));
1519 new_block->mr = mr;
1520 new_block->resized = resized;
1521 new_block->used_length = size;
1522 new_block->max_length = max_size;
1523 assert(max_size >= size);
1524 new_block->fd = -1;
1525 new_block->host = host;
1526 if (host) {
1527 new_block->flags |= RAM_PREALLOC;
1529 if (resizeable) {
1530 new_block->flags |= RAM_RESIZEABLE;
1532 addr = ram_block_add(new_block, &local_err);
1533 if (local_err) {
1534 g_free(new_block);
1535 error_propagate(errp, local_err);
1536 return -1;
1538 return addr;
1541 ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
1542 MemoryRegion *mr, Error **errp)
1544 return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
1547 ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
1549 return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
1552 ram_addr_t qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
1553 void (*resized)(const char*,
1554 uint64_t length,
1555 void *host),
1556 MemoryRegion *mr, Error **errp)
1558 return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
1561 void qemu_ram_free_from_ptr(ram_addr_t addr)
1563 RAMBlock *block;
1565 qemu_mutex_lock_ramlist();
1566 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1567 if (addr == block->offset) {
1568 QLIST_REMOVE_RCU(block, next);
1569 ram_list.mru_block = NULL;
1570 /* Write list before version */
1571 smp_wmb();
1572 ram_list.version++;
1573 g_free_rcu(block, rcu);
1574 break;
1577 qemu_mutex_unlock_ramlist();
1580 static void reclaim_ramblock(RAMBlock *block)
1582 if (block->flags & RAM_PREALLOC) {
1584 } else if (xen_enabled()) {
1585 xen_invalidate_map_cache_entry(block->host);
1586 #ifndef _WIN32
1587 } else if (block->fd >= 0) {
1588 munmap(block->host, block->max_length);
1589 close(block->fd);
1590 #endif
1591 } else {
1592 qemu_anon_ram_free(block->host, block->max_length);
1594 g_free(block);
1597 void qemu_ram_free(ram_addr_t addr)
1599 RAMBlock *block;
1601 qemu_mutex_lock_ramlist();
1602 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1603 if (addr == block->offset) {
1604 QLIST_REMOVE_RCU(block, next);
1605 ram_list.mru_block = NULL;
1606 /* Write list before version */
1607 smp_wmb();
1608 ram_list.version++;
1609 call_rcu(block, reclaim_ramblock, rcu);
1610 break;
1613 qemu_mutex_unlock_ramlist();
1616 #ifndef _WIN32
1617 void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1619 RAMBlock *block;
1620 ram_addr_t offset;
1621 int flags;
1622 void *area, *vaddr;
1624 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1625 offset = addr - block->offset;
1626 if (offset < block->max_length) {
1627 vaddr = ramblock_ptr(block, offset);
1628 if (block->flags & RAM_PREALLOC) {
1630 } else if (xen_enabled()) {
1631 abort();
1632 } else {
1633 flags = MAP_FIXED;
1634 if (block->fd >= 0) {
1635 flags |= (block->flags & RAM_SHARED ?
1636 MAP_SHARED : MAP_PRIVATE);
1637 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1638 flags, block->fd, offset);
1639 } else {
1641 * Remap needs to match alloc. Accelerators that
1642 * set phys_mem_alloc never remap. If they did,
1643 * we'd need a remap hook here.
1645 assert(phys_mem_alloc == qemu_anon_ram_alloc);
1647 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1648 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1649 flags, -1, 0);
1651 if (area != vaddr) {
1652 fprintf(stderr, "Could not remap addr: "
1653 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
1654 length, addr);
1655 exit(1);
1657 memory_try_enable_merging(vaddr, length);
1658 qemu_ram_setup_dump(vaddr, length);
1663 #endif /* !_WIN32 */
1665 int qemu_get_ram_fd(ram_addr_t addr)
1667 RAMBlock *block;
1668 int fd;
1670 rcu_read_lock();
1671 block = qemu_get_ram_block(addr);
1672 fd = block->fd;
1673 rcu_read_unlock();
1674 return fd;
1677 void *qemu_get_ram_block_host_ptr(ram_addr_t addr)
1679 RAMBlock *block;
1680 void *ptr;
1682 rcu_read_lock();
1683 block = qemu_get_ram_block(addr);
1684 ptr = ramblock_ptr(block, 0);
1685 rcu_read_unlock();
1686 return ptr;
1689 /* Return a host pointer to ram allocated with qemu_ram_alloc.
1690 * This should not be used for general purpose DMA. Use address_space_map
1691 * or address_space_rw instead. For local memory (e.g. video ram) that the
1692 * device owns, use memory_region_get_ram_ptr.
1694 * By the time this function returns, the returned pointer is not protected
1695 * by RCU anymore. If the caller is not within an RCU critical section and
1696 * does not hold the iothread lock, it must have other means of protecting the
1697 * pointer, such as a reference to the region that includes the incoming
1698 * ram_addr_t.
1700 void *qemu_get_ram_ptr(ram_addr_t addr)
1702 RAMBlock *block;
1703 void *ptr;
1705 rcu_read_lock();
1706 block = qemu_get_ram_block(addr);
1708 if (xen_enabled() && block->host == NULL) {
1709 /* We need to check if the requested address is in the RAM
1710 * because we don't want to map the entire memory in QEMU.
1711 * In that case just map until the end of the page.
1713 if (block->offset == 0) {
1714 ptr = xen_map_cache(addr, 0, 0);
1715 goto unlock;
1718 block->host = xen_map_cache(block->offset, block->max_length, 1);
1720 ptr = ramblock_ptr(block, addr - block->offset);
1722 unlock:
1723 rcu_read_unlock();
1724 return ptr;
1727 /* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
1728 * but takes a size argument.
1730 * By the time this function returns, the returned pointer is not protected
1731 * by RCU anymore. If the caller is not within an RCU critical section and
1732 * does not hold the iothread lock, it must have other means of protecting the
1733 * pointer, such as a reference to the region that includes the incoming
1734 * ram_addr_t.
1736 static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
1738 void *ptr;
1739 if (*size == 0) {
1740 return NULL;
1742 if (xen_enabled()) {
1743 return xen_map_cache(addr, *size, 1);
1744 } else {
1745 RAMBlock *block;
1746 rcu_read_lock();
1747 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1748 if (addr - block->offset < block->max_length) {
1749 if (addr - block->offset + *size > block->max_length)
1750 *size = block->max_length - addr + block->offset;
1751 ptr = ramblock_ptr(block, addr - block->offset);
1752 rcu_read_unlock();
1753 return ptr;
1757 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1758 abort();
1762 /* Some of the softmmu routines need to translate from a host pointer
1763 * (typically a TLB entry) back to a ram offset.
1765 * By the time this function returns, the returned pointer is not protected
1766 * by RCU anymore. If the caller is not within an RCU critical section and
1767 * does not hold the iothread lock, it must have other means of protecting the
1768 * pointer, such as a reference to the region that includes the incoming
1769 * ram_addr_t.
1771 MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
1773 RAMBlock *block;
1774 uint8_t *host = ptr;
1775 MemoryRegion *mr;
1777 if (xen_enabled()) {
1778 rcu_read_lock();
1779 *ram_addr = xen_ram_addr_from_mapcache(ptr);
1780 mr = qemu_get_ram_block(*ram_addr)->mr;
1781 rcu_read_unlock();
1782 return mr;
1785 rcu_read_lock();
1786 block = atomic_rcu_read(&ram_list.mru_block);
1787 if (block && block->host && host - block->host < block->max_length) {
1788 goto found;
1791 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1792 /* This case happens when the block is not mapped. */
1793 if (block->host == NULL) {
1794 continue;
1796 if (host - block->host < block->max_length) {
1797 goto found;
1801 rcu_read_unlock();
1802 return NULL;
1804 found:
1805 *ram_addr = block->offset + (host - block->host);
1806 mr = block->mr;
1807 rcu_read_unlock();
1808 return mr;
1811 static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
1812 uint64_t val, unsigned size)
1814 if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
1815 tb_invalidate_phys_page_fast(ram_addr, size);
1817 switch (size) {
1818 case 1:
1819 stb_p(qemu_get_ram_ptr(ram_addr), val);
1820 break;
1821 case 2:
1822 stw_p(qemu_get_ram_ptr(ram_addr), val);
1823 break;
1824 case 4:
1825 stl_p(qemu_get_ram_ptr(ram_addr), val);
1826 break;
1827 default:
1828 abort();
1830 cpu_physical_memory_set_dirty_range_nocode(ram_addr, size);
1831 /* we remove the notdirty callback only if the code has been
1832 flushed */
1833 if (!cpu_physical_memory_is_clean(ram_addr)) {
1834 CPUArchState *env = current_cpu->env_ptr;
1835 tlb_set_dirty(env, current_cpu->mem_io_vaddr);
1839 static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
1840 unsigned size, bool is_write)
1842 return is_write;
1845 static const MemoryRegionOps notdirty_mem_ops = {
1846 .write = notdirty_mem_write,
1847 .valid.accepts = notdirty_mem_accepts,
1848 .endianness = DEVICE_NATIVE_ENDIAN,
1851 /* Generate a debug exception if a watchpoint has been hit. */
1852 static void check_watchpoint(int offset, int len, int flags)
1854 CPUState *cpu = current_cpu;
1855 CPUArchState *env = cpu->env_ptr;
1856 target_ulong pc, cs_base;
1857 target_ulong vaddr;
1858 CPUWatchpoint *wp;
1859 int cpu_flags;
1861 if (cpu->watchpoint_hit) {
1862 /* We re-entered the check after replacing the TB. Now raise
1863 * the debug interrupt so that it will trigger after the
1864 * current instruction. */
1865 cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
1866 return;
1868 vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
1869 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
1870 if (cpu_watchpoint_address_matches(wp, vaddr, len)
1871 && (wp->flags & flags)) {
1872 if (flags == BP_MEM_READ) {
1873 wp->flags |= BP_WATCHPOINT_HIT_READ;
1874 } else {
1875 wp->flags |= BP_WATCHPOINT_HIT_WRITE;
1877 wp->hitaddr = vaddr;
1878 if (!cpu->watchpoint_hit) {
1879 cpu->watchpoint_hit = wp;
1880 tb_check_watchpoint(cpu);
1881 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
1882 cpu->exception_index = EXCP_DEBUG;
1883 cpu_loop_exit(cpu);
1884 } else {
1885 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
1886 tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
1887 cpu_resume_from_signal(cpu, NULL);
1890 } else {
1891 wp->flags &= ~BP_WATCHPOINT_HIT;
1896 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
1897 so these check for a hit then pass through to the normal out-of-line
1898 phys routines. */
1899 static uint64_t watch_mem_read(void *opaque, hwaddr addr,
1900 unsigned size)
1902 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, BP_MEM_READ);
1903 switch (size) {
1904 case 1: return ldub_phys(&address_space_memory, addr);
1905 case 2: return lduw_phys(&address_space_memory, addr);
1906 case 4: return ldl_phys(&address_space_memory, addr);
1907 default: abort();
1911 static void watch_mem_write(void *opaque, hwaddr addr,
1912 uint64_t val, unsigned size)
1914 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, BP_MEM_WRITE);
1915 switch (size) {
1916 case 1:
1917 stb_phys(&address_space_memory, addr, val);
1918 break;
1919 case 2:
1920 stw_phys(&address_space_memory, addr, val);
1921 break;
1922 case 4:
1923 stl_phys(&address_space_memory, addr, val);
1924 break;
1925 default: abort();
1929 static const MemoryRegionOps watch_mem_ops = {
1930 .read = watch_mem_read,
1931 .write = watch_mem_write,
1932 .endianness = DEVICE_NATIVE_ENDIAN,
1935 static uint64_t subpage_read(void *opaque, hwaddr addr,
1936 unsigned len)
1938 subpage_t *subpage = opaque;
1939 uint8_t buf[8];
1941 #if defined(DEBUG_SUBPAGE)
1942 printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
1943 subpage, len, addr);
1944 #endif
1945 address_space_read(subpage->as, addr + subpage->base, buf, len);
1946 switch (len) {
1947 case 1:
1948 return ldub_p(buf);
1949 case 2:
1950 return lduw_p(buf);
1951 case 4:
1952 return ldl_p(buf);
1953 case 8:
1954 return ldq_p(buf);
1955 default:
1956 abort();
1960 static void subpage_write(void *opaque, hwaddr addr,
1961 uint64_t value, unsigned len)
1963 subpage_t *subpage = opaque;
1964 uint8_t buf[8];
1966 #if defined(DEBUG_SUBPAGE)
1967 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
1968 " value %"PRIx64"\n",
1969 __func__, subpage, len, addr, value);
1970 #endif
1971 switch (len) {
1972 case 1:
1973 stb_p(buf, value);
1974 break;
1975 case 2:
1976 stw_p(buf, value);
1977 break;
1978 case 4:
1979 stl_p(buf, value);
1980 break;
1981 case 8:
1982 stq_p(buf, value);
1983 break;
1984 default:
1985 abort();
1987 address_space_write(subpage->as, addr + subpage->base, buf, len);
1990 static bool subpage_accepts(void *opaque, hwaddr addr,
1991 unsigned len, bool is_write)
1993 subpage_t *subpage = opaque;
1994 #if defined(DEBUG_SUBPAGE)
1995 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
1996 __func__, subpage, is_write ? 'w' : 'r', len, addr);
1997 #endif
1999 return address_space_access_valid(subpage->as, addr + subpage->base,
2000 len, is_write);
2003 static const MemoryRegionOps subpage_ops = {
2004 .read = subpage_read,
2005 .write = subpage_write,
2006 .impl.min_access_size = 1,
2007 .impl.max_access_size = 8,
2008 .valid.min_access_size = 1,
2009 .valid.max_access_size = 8,
2010 .valid.accepts = subpage_accepts,
2011 .endianness = DEVICE_NATIVE_ENDIAN,
2014 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2015 uint16_t section)
2017 int idx, eidx;
2019 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2020 return -1;
2021 idx = SUBPAGE_IDX(start);
2022 eidx = SUBPAGE_IDX(end);
2023 #if defined(DEBUG_SUBPAGE)
2024 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
2025 __func__, mmio, start, end, idx, eidx, section);
2026 #endif
2027 for (; idx <= eidx; idx++) {
2028 mmio->sub_section[idx] = section;
2031 return 0;
2034 static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
2036 subpage_t *mmio;
2038 mmio = g_malloc0(sizeof(subpage_t));
2040 mmio->as = as;
2041 mmio->base = base;
2042 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
2043 NULL, TARGET_PAGE_SIZE);
2044 mmio->iomem.subpage = true;
2045 #if defined(DEBUG_SUBPAGE)
2046 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
2047 mmio, base, TARGET_PAGE_SIZE);
2048 #endif
2049 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
2051 return mmio;
2054 static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
2055 MemoryRegion *mr)
2057 assert(as);
2058 MemoryRegionSection section = {
2059 .address_space = as,
2060 .mr = mr,
2061 .offset_within_address_space = 0,
2062 .offset_within_region = 0,
2063 .size = int128_2_64(),
2066 return phys_section_add(map, &section);
2069 MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index)
2071 AddressSpaceDispatch *d = atomic_rcu_read(&cpu->memory_dispatch);
2072 MemoryRegionSection *sections = d->map.sections;
2074 return sections[index & ~TARGET_PAGE_MASK].mr;
2077 static void io_mem_init(void)
2079 memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
2080 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
2081 NULL, UINT64_MAX);
2082 memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
2083 NULL, UINT64_MAX);
2084 memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
2085 NULL, UINT64_MAX);
2088 static void mem_begin(MemoryListener *listener)
2090 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
2091 AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
2092 uint16_t n;
2094 n = dummy_section(&d->map, as, &io_mem_unassigned);
2095 assert(n == PHYS_SECTION_UNASSIGNED);
2096 n = dummy_section(&d->map, as, &io_mem_notdirty);
2097 assert(n == PHYS_SECTION_NOTDIRTY);
2098 n = dummy_section(&d->map, as, &io_mem_rom);
2099 assert(n == PHYS_SECTION_ROM);
2100 n = dummy_section(&d->map, as, &io_mem_watch);
2101 assert(n == PHYS_SECTION_WATCH);
2103 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
2104 d->as = as;
2105 as->next_dispatch = d;
2108 static void address_space_dispatch_free(AddressSpaceDispatch *d)
2110 phys_sections_free(&d->map);
2111 g_free(d);
2114 static void mem_commit(MemoryListener *listener)
2116 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
2117 AddressSpaceDispatch *cur = as->dispatch;
2118 AddressSpaceDispatch *next = as->next_dispatch;
2120 phys_page_compact_all(next, next->map.nodes_nb);
2122 atomic_rcu_set(&as->dispatch, next);
2123 if (cur) {
2124 call_rcu(cur, address_space_dispatch_free, rcu);
2128 static void tcg_commit(MemoryListener *listener)
2130 CPUState *cpu;
2132 /* since each CPU stores ram addresses in its TLB cache, we must
2133 reset the modified entries */
2134 /* XXX: slow ! */
2135 CPU_FOREACH(cpu) {
2136 /* FIXME: Disentangle the cpu.h circular file deps so we can
2137 directly get the right CPU from listener. */
2138 if (cpu->tcg_as_listener != listener) {
2139 continue;
2141 cpu_reload_memory_map(cpu);
2145 static void core_log_global_start(MemoryListener *listener)
2147 cpu_physical_memory_set_dirty_tracking(true);
2150 static void core_log_global_stop(MemoryListener *listener)
2152 cpu_physical_memory_set_dirty_tracking(false);
2155 static MemoryListener core_memory_listener = {
2156 .log_global_start = core_log_global_start,
2157 .log_global_stop = core_log_global_stop,
2158 .priority = 1,
2161 void address_space_init_dispatch(AddressSpace *as)
2163 as->dispatch = NULL;
2164 as->dispatch_listener = (MemoryListener) {
2165 .begin = mem_begin,
2166 .commit = mem_commit,
2167 .region_add = mem_add,
2168 .region_nop = mem_add,
2169 .priority = 0,
2171 memory_listener_register(&as->dispatch_listener, as);
2174 void address_space_unregister(AddressSpace *as)
2176 memory_listener_unregister(&as->dispatch_listener);
2179 void address_space_destroy_dispatch(AddressSpace *as)
2181 AddressSpaceDispatch *d = as->dispatch;
2183 atomic_rcu_set(&as->dispatch, NULL);
2184 if (d) {
2185 call_rcu(d, address_space_dispatch_free, rcu);
2189 static void memory_map_init(void)
2191 system_memory = g_malloc(sizeof(*system_memory));
2193 memory_region_init(system_memory, NULL, "system", UINT64_MAX);
2194 address_space_init(&address_space_memory, system_memory, "memory");
2196 system_io = g_malloc(sizeof(*system_io));
2197 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
2198 65536);
2199 address_space_init(&address_space_io, system_io, "I/O");
2201 memory_listener_register(&core_memory_listener, &address_space_memory);
2204 MemoryRegion *get_system_memory(void)
2206 return system_memory;
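/*
 * Illustrative sketch (not part of this file): board code typically takes the
 * root region returned by get_system_memory() and hangs its RAM and device
 * regions off it.  The base address and the 'dev_mmio' region are assumptions
 * made up for the example.
 */
#if 0
MemoryRegion *sysmem = get_system_memory();

/* 'dev_mmio' would be a MemoryRegion the device model already initialized */
memory_region_add_subregion(sysmem, 0x40000000, dev_mmio);
#endif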
2209 MemoryRegion *get_system_io(void)
2211 return system_io;
2214 #endif /* !defined(CONFIG_USER_ONLY) */
2216 /* physical memory access (slow version, mainly for debug) */
2217 #if defined(CONFIG_USER_ONLY)
2218 int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
2219 uint8_t *buf, int len, int is_write)
2221 int l, flags;
2222 target_ulong page;
2223 void * p;
2225 while (len > 0) {
2226 page = addr & TARGET_PAGE_MASK;
2227 l = (page + TARGET_PAGE_SIZE) - addr;
2228 if (l > len)
2229 l = len;
2230 flags = page_get_flags(page);
2231 if (!(flags & PAGE_VALID))
2232 return -1;
2233 if (is_write) {
2234 if (!(flags & PAGE_WRITE))
2235 return -1;
2236 /* XXX: this code should not depend on lock_user */
2237 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
2238 return -1;
2239 memcpy(p, buf, l);
2240 unlock_user(p, addr, l);
2241 } else {
2242 if (!(flags & PAGE_READ))
2243 return -1;
2244 /* XXX: this code should not depend on lock_user */
2245 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
2246 return -1;
2247 memcpy(buf, p, l);
2248 unlock_user(p, addr, 0);
2250 len -= l;
2251 buf += l;
2252 addr += l;
2254 return 0;
2257 #else
2259 static void invalidate_and_set_dirty(hwaddr addr,
2260 hwaddr length)
2262 if (cpu_physical_memory_range_includes_clean(addr, length)) {
2263 tb_invalidate_phys_range(addr, addr + length, 0);
2264 cpu_physical_memory_set_dirty_range_nocode(addr, length);
2266 xen_modified_memory(addr, length);
2269 static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
2271 unsigned access_size_max = mr->ops->valid.max_access_size;
2273 /* Regions are assumed to support 1-4 byte accesses unless
2274 otherwise specified. */
2275 if (access_size_max == 0) {
2276 access_size_max = 4;
2279 /* Bound the maximum access by the alignment of the address. */
2280 if (!mr->ops->impl.unaligned) {
2281 unsigned align_size_max = addr & -addr;
2282 if (align_size_max != 0 && align_size_max < access_size_max) {
2283 access_size_max = align_size_max;
2287 /* Don't attempt accesses larger than the maximum. */
2288 if (l > access_size_max) {
2289 l = access_size_max;
2291 if (l & (l - 1)) {
2292 l = 1 << (qemu_fls(l) - 1);
2295 return l;
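/*
 * Worked example: for a region that leaves valid.max_access_size at 0 and
 * does not set impl.unaligned, a 7-byte request at addr == 0x1006 is first
 * capped to the 4-byte default, then to 2, because 0x1006 & -0x1006 == 2
 * (the address is only 2-byte aligned).  2 is already a power of two, so
 * memory_access_size() returns 2 and address_space_rw() issues a 16-bit
 * access for that iteration of its loop.
 */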
2298 bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
2299 int len, bool is_write)
2301 hwaddr l;
2302 uint8_t *ptr;
2303 uint64_t val;
2304 hwaddr addr1;
2305 MemoryRegion *mr;
2306 bool error = false;
2308 while (len > 0) {
2309 l = len;
2310 mr = address_space_translate(as, addr, &addr1, &l, is_write);
2312 if (is_write) {
2313 if (!memory_access_is_direct(mr, is_write)) {
2314 l = memory_access_size(mr, l, addr1);
2315 /* XXX: could force current_cpu to NULL to avoid
2316 potential bugs */
2317 switch (l) {
2318 case 8:
2319 /* 64 bit write access */
2320 val = ldq_p(buf);
2321 error |= io_mem_write(mr, addr1, val, 8);
2322 break;
2323 case 4:
2324 /* 32 bit write access */
2325 val = ldl_p(buf);
2326 error |= io_mem_write(mr, addr1, val, 4);
2327 break;
2328 case 2:
2329 /* 16 bit write access */
2330 val = lduw_p(buf);
2331 error |= io_mem_write(mr, addr1, val, 2);
2332 break;
2333 case 1:
2334 /* 8 bit write access */
2335 val = ldub_p(buf);
2336 error |= io_mem_write(mr, addr1, val, 1);
2337 break;
2338 default:
2339 abort();
2341 } else {
2342 addr1 += memory_region_get_ram_addr(mr);
2343 /* RAM case */
2344 ptr = qemu_get_ram_ptr(addr1);
2345 memcpy(ptr, buf, l);
2346 invalidate_and_set_dirty(addr1, l);
2348 } else {
2349 if (!memory_access_is_direct(mr, is_write)) {
2350 /* I/O case */
2351 l = memory_access_size(mr, l, addr1);
2352 switch (l) {
2353 case 8:
2354 /* 64 bit read access */
2355 error |= io_mem_read(mr, addr1, &val, 8);
2356 stq_p(buf, val);
2357 break;
2358 case 4:
2359 /* 32 bit read access */
2360 error |= io_mem_read(mr, addr1, &val, 4);
2361 stl_p(buf, val);
2362 break;
2363 case 2:
2364 /* 16 bit read access */
2365 error |= io_mem_read(mr, addr1, &val, 2);
2366 stw_p(buf, val);
2367 break;
2368 case 1:
2369 /* 8 bit read access */
2370 error |= io_mem_read(mr, addr1, &val, 1);
2371 stb_p(buf, val);
2372 break;
2373 default:
2374 abort();
2376 } else {
2377 /* RAM case */
2378 ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
2379 memcpy(buf, ptr, l);
2382 len -= l;
2383 buf += l;
2384 addr += l;
2387 return error;
2390 bool address_space_write(AddressSpace *as, hwaddr addr,
2391 const uint8_t *buf, int len)
2393 return address_space_rw(as, addr, (uint8_t *)buf, len, true);
2396 bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
2398 return address_space_rw(as, addr, buf, len, false);
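/*
 * Illustrative sketch (the device, field names and address are assumptions):
 * a DMA-capable device model normally goes through address_space_write() /
 * address_space_read() rather than touching guest RAM pointers directly, so
 * that MMIO dispatch and dirty marking in address_space_rw() above are
 * applied for it.
 */
#if 0
static void example_post_status(AddressSpace *as, hwaddr status_pa)
{
    uint32_t status = cpu_to_le32(1);   /* device-defined completion code */

    address_space_write(as, status_pa, (const uint8_t *)&status,
                        sizeof(status));
}
#endif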
2402 void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
2403 int len, int is_write)
2405 address_space_rw(&address_space_memory, addr, buf, len, is_write);
2408 enum write_rom_type {
2409 WRITE_DATA,
2410 FLUSH_CACHE,
2413 static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
2414 hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
2416 hwaddr l;
2417 uint8_t *ptr;
2418 hwaddr addr1;
2419 MemoryRegion *mr;
2421 while (len > 0) {
2422 l = len;
2423 mr = address_space_translate(as, addr, &addr1, &l, true);
2425 if (!(memory_region_is_ram(mr) ||
2426 memory_region_is_romd(mr))) {
2427 /* do nothing */
2428 } else {
2429 addr1 += memory_region_get_ram_addr(mr);
2430 /* ROM/RAM case */
2431 ptr = qemu_get_ram_ptr(addr1);
2432 switch (type) {
2433 case WRITE_DATA:
2434 memcpy(ptr, buf, l);
2435 invalidate_and_set_dirty(addr1, l);
2436 break;
2437 case FLUSH_CACHE:
2438 flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
2439 break;
2442 len -= l;
2443 buf += l;
2444 addr += l;
2448 /* Used for ROM loading: can write to both RAM and ROM. */
2449 void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
2450 const uint8_t *buf, int len)
2452 cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
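/*
 * Illustrative sketch: a firmware loader (blob pointer, size and load address
 * are assumptions) must use this helper rather than address_space_write(),
 * because the destination may be a ROM region that rejects ordinary writes.
 */
#if 0
cpu_physical_memory_write_rom(&address_space_memory, 0xfffc0000,
                              firmware_blob, firmware_size);
#endif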
2455 void cpu_flush_icache_range(hwaddr start, int len)
2458 * This function should do the same thing as an icache flush that was
2459 * triggered from within the guest. For TCG we are always cache coherent,
2460 * so there is no need to flush anything. For KVM/Xen we need to flush
2461 * at least the host's instruction cache.
2463 if (tcg_enabled()) {
2464 return;
2467 cpu_physical_memory_write_rom_internal(&address_space_memory,
2468 start, NULL, len, FLUSH_CACHE);
2471 typedef struct {
2472 MemoryRegion *mr;
2473 void *buffer;
2474 hwaddr addr;
2475 hwaddr len;
2476 bool in_use;
2477 } BounceBuffer;
2479 static BounceBuffer bounce;
2481 typedef struct MapClient {
2482 void *opaque;
2483 void (*callback)(void *opaque);
2484 QLIST_ENTRY(MapClient) link;
2485 } MapClient;
2487 QemuMutex map_client_list_lock;
2488 static QLIST_HEAD(map_client_list, MapClient) map_client_list
2489 = QLIST_HEAD_INITIALIZER(map_client_list);
2491 void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
2493 MapClient *client = g_malloc(sizeof(*client));
2495 qemu_mutex_lock(&map_client_list_lock);
2496 client->opaque = opaque;
2497 client->callback = callback;
2498 QLIST_INSERT_HEAD(&map_client_list, client, link);
2499 qemu_mutex_unlock(&map_client_list_lock);
2500 return client;
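/*
 * Illustrative sketch of the callback contract (ExampleReq and its fields are
 * assumptions): when address_space_map() below returns NULL because the
 * single bounce buffer is in use, a caller can register itself here and retry
 * from the callback once the buffer is released.
 */
#if 0
static void example_submit(ExampleReq *req);        /* forward declaration */

static void example_map_retry(void *opaque)
{
    example_submit(opaque);      /* will call address_space_map() again */
}

static void example_submit(ExampleReq *req)
{
    hwaddr len = req->len;
    void *p = address_space_map(req->as, req->addr, &len, req->is_write);

    if (!p) {
        /* bounce buffer busy: retry once an unmap notifies the clients */
        cpu_register_map_client(req, example_map_retry);
        return;
    }
    /* ... use up to 'len' bytes at p, then address_space_unmap() ... */
}
#endif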
2503 void cpu_exec_init_all(void)
2505 qemu_mutex_init(&ram_list.mutex);
2506 memory_map_init();
2507 io_mem_init();
2508 qemu_mutex_init(&map_client_list_lock);
2511 static void cpu_unregister_map_client(void *_client)
2513 MapClient *client = (MapClient *)_client;
2515 QLIST_REMOVE(client, link);
2516 g_free(client);
2519 static void cpu_notify_map_clients(void)
2521 MapClient *client;
2523 qemu_mutex_lock(&map_client_list_lock);
2524 while (!QLIST_EMPTY(&map_client_list)) {
2525 client = QLIST_FIRST(&map_client_list);
2526 client->callback(client->opaque);
2527 cpu_unregister_map_client(client);
2529 qemu_mutex_unlock(&map_client_list_lock);
2532 bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2534 MemoryRegion *mr;
2535 hwaddr l, xlat;
2537 while (len > 0) {
2538 l = len;
2539 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2540 if (!memory_access_is_direct(mr, is_write)) {
2541 l = memory_access_size(mr, l, addr);
2542 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
2543 return false;
2547 len -= l;
2548 addr += l;
2550 return true;
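/*
 * Illustrative sketch (the scatter-gather element fields are assumptions):
 * a device model can use this check to refuse a guest request up front
 * instead of partially completing a DMA transfer.
 */
#if 0
if (!address_space_access_valid(&address_space_memory, elem->addr,
                                elem->len, true)) {
    return -EFAULT;   /* refuse before touching guest memory */
}
#endif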
2553 /* Map a physical memory region into a host virtual address.
2554 * May map a subset of the requested range, given by and returned in *plen.
2555 * May return NULL if resources needed to perform the mapping are exhausted.
2556 * Use only for reads OR writes - not for read-modify-write operations.
2557 * Use cpu_register_map_client() to know when retrying the map operation is
2558 * likely to succeed.
2560 void *address_space_map(AddressSpace *as,
2561 hwaddr addr,
2562 hwaddr *plen,
2563 bool is_write)
2565 hwaddr len = *plen;
2566 hwaddr done = 0;
2567 hwaddr l, xlat, base;
2568 MemoryRegion *mr, *this_mr;
2569 ram_addr_t raddr;
2571 if (len == 0) {
2572 return NULL;
2575 l = len;
2576 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2577 if (!memory_access_is_direct(mr, is_write)) {
2578 if (atomic_xchg(&bounce.in_use, true)) {
2579 return NULL;
2581 /* Avoid unbounded allocations */
2582 l = MIN(l, TARGET_PAGE_SIZE);
2583 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
2584 bounce.addr = addr;
2585 bounce.len = l;
2587 memory_region_ref(mr);
2588 bounce.mr = mr;
2589 if (!is_write) {
2590 address_space_read(as, addr, bounce.buffer, l);
2593 *plen = l;
2594 return bounce.buffer;
2597 base = xlat;
2598 raddr = memory_region_get_ram_addr(mr);
2600 for (;;) {
2601 len -= l;
2602 addr += l;
2603 done += l;
2604 if (len == 0) {
2605 break;
2608 l = len;
2609 this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
2610 if (this_mr != mr || xlat != base + done) {
2611 break;
2615 memory_region_ref(mr);
2616 *plen = done;
2617 return qemu_ram_ptr_length(raddr + base, plen);
2620 /* Unmaps a memory region previously mapped by address_space_map().
2621 * Will also mark the memory as dirty if is_write == 1. access_len gives
2622 * the amount of memory that was actually read or written by the caller.
2624 void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2625 int is_write, hwaddr access_len)
2627 if (buffer != bounce.buffer) {
2628 MemoryRegion *mr;
2629 ram_addr_t addr1;
2631 mr = qemu_ram_addr_from_host(buffer, &addr1);
2632 assert(mr != NULL);
2633 if (is_write) {
2634 invalidate_and_set_dirty(addr1, access_len);
2636 if (xen_enabled()) {
2637 xen_invalidate_map_cache_entry(buffer);
2639 memory_region_unref(mr);
2640 return;
2642 if (is_write) {
2643 address_space_write(as, bounce.addr, bounce.buffer, access_len);
2645 qemu_vfree(bounce.buffer);
2646 bounce.buffer = NULL;
2647 memory_region_unref(bounce.mr);
2648 atomic_mb_set(&bounce.in_use, false);
2649 cpu_notify_map_clients();
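/*
 * Illustrative sketch of the map/unmap contract documented above: map a
 * (possibly shortened) window, copy into it, then unmap with the number of
 * bytes actually written so only the touched range is marked dirty
 * (dma_addr, src and the length are assumptions).
 */
#if 0
hwaddr want = 4096, got = want;
void *p = address_space_map(&address_space_memory, dma_addr, &got, true);

if (p) {
    /* 'got' may be smaller than 'want' if the region ended early */
    memcpy(p, src, got);
    address_space_unmap(&address_space_memory, p, got, true, got);
}
#endif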
2652 void *cpu_physical_memory_map(hwaddr addr,
2653 hwaddr *plen,
2654 int is_write)
2656 return address_space_map(&address_space_memory, addr, plen, is_write);
2659 void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2660 int is_write, hwaddr access_len)
2662 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2665 /* warning: addr must be aligned */
2666 static inline uint32_t ldl_phys_internal(AddressSpace *as, hwaddr addr,
2667 enum device_endian endian)
2669 uint8_t *ptr;
2670 uint64_t val;
2671 MemoryRegion *mr;
2672 hwaddr l = 4;
2673 hwaddr addr1;
2675 mr = address_space_translate(as, addr, &addr1, &l, false);
2676 if (l < 4 || !memory_access_is_direct(mr, false)) {
2677 /* I/O case */
2678 io_mem_read(mr, addr1, &val, 4);
2679 #if defined(TARGET_WORDS_BIGENDIAN)
2680 if (endian == DEVICE_LITTLE_ENDIAN) {
2681 val = bswap32(val);
2683 #else
2684 if (endian == DEVICE_BIG_ENDIAN) {
2685 val = bswap32(val);
2687 #endif
2688 } else {
2689 /* RAM case */
2690 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
2691 & TARGET_PAGE_MASK)
2692 + addr1);
2693 switch (endian) {
2694 case DEVICE_LITTLE_ENDIAN:
2695 val = ldl_le_p(ptr);
2696 break;
2697 case DEVICE_BIG_ENDIAN:
2698 val = ldl_be_p(ptr);
2699 break;
2700 default:
2701 val = ldl_p(ptr);
2702 break;
2705 return val;
2708 uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
2710 return ldl_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
2713 uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
2715 return ldl_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
2718 uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
2720 return ldl_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
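/*
 * Illustrative sketch: the fixed-endian variants let device code read a
 * descriptor field in its architected byte order regardless of the target's
 * native endianness (the descriptor address 'desc_pa' is an assumption).
 */
#if 0
uint32_t ctrl = ldl_le_phys(&address_space_memory, desc_pa + 8);
#endif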
2723 /* warning: addr must be aligned */
2724 static inline uint64_t ldq_phys_internal(AddressSpace *as, hwaddr addr,
2725 enum device_endian endian)
2727 uint8_t *ptr;
2728 uint64_t val;
2729 MemoryRegion *mr;
2730 hwaddr l = 8;
2731 hwaddr addr1;
2733 mr = address_space_translate(as, addr, &addr1, &l,
2734 false);
2735 if (l < 8 || !memory_access_is_direct(mr, false)) {
2736 /* I/O case */
2737 io_mem_read(mr, addr1, &val, 8);
2738 #if defined(TARGET_WORDS_BIGENDIAN)
2739 if (endian == DEVICE_LITTLE_ENDIAN) {
2740 val = bswap64(val);
2742 #else
2743 if (endian == DEVICE_BIG_ENDIAN) {
2744 val = bswap64(val);
2746 #endif
2747 } else {
2748 /* RAM case */
2749 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
2750 & TARGET_PAGE_MASK)
2751 + addr1);
2752 switch (endian) {
2753 case DEVICE_LITTLE_ENDIAN:
2754 val = ldq_le_p(ptr);
2755 break;
2756 case DEVICE_BIG_ENDIAN:
2757 val = ldq_be_p(ptr);
2758 break;
2759 default:
2760 val = ldq_p(ptr);
2761 break;
2764 return val;
2767 uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
2769 return ldq_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
2772 uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
2774 return ldq_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
2777 uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
2779 return ldq_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
2782 /* XXX: optimize */
2783 uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
2785 uint8_t val;
2786 address_space_rw(as, addr, &val, 1, 0);
2787 return val;
2790 /* warning: addr must be aligned */
2791 static inline uint32_t lduw_phys_internal(AddressSpace *as, hwaddr addr,
2792 enum device_endian endian)
2794 uint8_t *ptr;
2795 uint64_t val;
2796 MemoryRegion *mr;
2797 hwaddr l = 2;
2798 hwaddr addr1;
2800 mr = address_space_translate(as, addr, &addr1, &l,
2801 false);
2802 if (l < 2 || !memory_access_is_direct(mr, false)) {
2803 /* I/O case */
2804 io_mem_read(mr, addr1, &val, 2);
2805 #if defined(TARGET_WORDS_BIGENDIAN)
2806 if (endian == DEVICE_LITTLE_ENDIAN) {
2807 val = bswap16(val);
2809 #else
2810 if (endian == DEVICE_BIG_ENDIAN) {
2811 val = bswap16(val);
2813 #endif
2814 } else {
2815 /* RAM case */
2816 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
2817 & TARGET_PAGE_MASK)
2818 + addr1);
2819 switch (endian) {
2820 case DEVICE_LITTLE_ENDIAN:
2821 val = lduw_le_p(ptr);
2822 break;
2823 case DEVICE_BIG_ENDIAN:
2824 val = lduw_be_p(ptr);
2825 break;
2826 default:
2827 val = lduw_p(ptr);
2828 break;
2831 return val;
2834 uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
2836 return lduw_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
2839 uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
2841 return lduw_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
2844 uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
2846 return lduw_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
2849 /* warning: addr must be aligned. The RAM page is not marked as dirty
2850 and the code inside it is not invalidated. This is useful when the dirty
2851 bits are used to track modified PTEs. */
2852 void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
2854 uint8_t *ptr;
2855 MemoryRegion *mr;
2856 hwaddr l = 4;
2857 hwaddr addr1;
2859 mr = address_space_translate(as, addr, &addr1, &l,
2860 true);
2861 if (l < 4 || !memory_access_is_direct(mr, true)) {
2862 io_mem_write(mr, addr1, val, 4);
2863 } else {
2864 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
2865 ptr = qemu_get_ram_ptr(addr1);
2866 stl_p(ptr, val);
2868 if (unlikely(in_migration)) {
2869 if (cpu_physical_memory_is_clean(addr1)) {
2870 /* invalidate code */
2871 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2872 /* set dirty bit */
2873 cpu_physical_memory_set_dirty_range_nocode(addr1, 4);
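/*
 * Illustrative sketch: MMU helpers use the _notdirty variant when updating
 * accessed/dirty bits in a guest page-table entry, so the write does not
 * force retranslation of code on that page (the PTE address, value and bit
 * mask are assumptions made up for the example).
 */
#if 0
stl_phys_notdirty(cpu->as, pte_addr, pte_val | EXAMPLE_PTE_ACCESSED);
#endif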
2879 /* warning: addr must be aligned */
2880 static inline void stl_phys_internal(AddressSpace *as,
2881 hwaddr addr, uint32_t val,
2882 enum device_endian endian)
2884 uint8_t *ptr;
2885 MemoryRegion *mr;
2886 hwaddr l = 4;
2887 hwaddr addr1;
2889 mr = address_space_translate(as, addr, &addr1, &l,
2890 true);
2891 if (l < 4 || !memory_access_is_direct(mr, true)) {
2892 #if defined(TARGET_WORDS_BIGENDIAN)
2893 if (endian == DEVICE_LITTLE_ENDIAN) {
2894 val = bswap32(val);
2896 #else
2897 if (endian == DEVICE_BIG_ENDIAN) {
2898 val = bswap32(val);
2900 #endif
2901 io_mem_write(mr, addr1, val, 4);
2902 } else {
2903 /* RAM case */
2904 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
2905 ptr = qemu_get_ram_ptr(addr1);
2906 switch (endian) {
2907 case DEVICE_LITTLE_ENDIAN:
2908 stl_le_p(ptr, val);
2909 break;
2910 case DEVICE_BIG_ENDIAN:
2911 stl_be_p(ptr, val);
2912 break;
2913 default:
2914 stl_p(ptr, val);
2915 break;
2917 invalidate_and_set_dirty(addr1, 4);
2921 void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
2923 stl_phys_internal(as, addr, val, DEVICE_NATIVE_ENDIAN);
2926 void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
2928 stl_phys_internal(as, addr, val, DEVICE_LITTLE_ENDIAN);
2931 void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
2933 stl_phys_internal(as, addr, val, DEVICE_BIG_ENDIAN);
2936 /* XXX: optimize */
2937 void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
2939 uint8_t v = val;
2940 address_space_rw(as, addr, &v, 1, 1);
2943 /* warning: addr must be aligned */
2944 static inline void stw_phys_internal(AddressSpace *as,
2945 hwaddr addr, uint32_t val,
2946 enum device_endian endian)
2948 uint8_t *ptr;
2949 MemoryRegion *mr;
2950 hwaddr l = 2;
2951 hwaddr addr1;
2953 mr = address_space_translate(as, addr, &addr1, &l, true);
2954 if (l < 2 || !memory_access_is_direct(mr, true)) {
2955 #if defined(TARGET_WORDS_BIGENDIAN)
2956 if (endian == DEVICE_LITTLE_ENDIAN) {
2957 val = bswap16(val);
2959 #else
2960 if (endian == DEVICE_BIG_ENDIAN) {
2961 val = bswap16(val);
2963 #endif
2964 io_mem_write(mr, addr1, val, 2);
2965 } else {
2966 /* RAM case */
2967 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
2968 ptr = qemu_get_ram_ptr(addr1);
2969 switch (endian) {
2970 case DEVICE_LITTLE_ENDIAN:
2971 stw_le_p(ptr, val);
2972 break;
2973 case DEVICE_BIG_ENDIAN:
2974 stw_be_p(ptr, val);
2975 break;
2976 default:
2977 stw_p(ptr, val);
2978 break;
2980 invalidate_and_set_dirty(addr1, 2);
2984 void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
2986 stw_phys_internal(as, addr, val, DEVICE_NATIVE_ENDIAN);
2989 void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
2991 stw_phys_internal(as, addr, val, DEVICE_LITTLE_ENDIAN);
2994 void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
2996 stw_phys_internal(as, addr, val, DEVICE_BIG_ENDIAN);
2999 /* XXX: optimize */
3000 void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
3002 val = tswap64(val);
3003 address_space_rw(as, addr, (void *) &val, 8, 1);
3006 void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
3008 val = cpu_to_le64(val);
3009 address_space_rw(as, addr, (void *) &val, 8, 1);
3012 void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
3014 val = cpu_to_be64(val);
3015 address_space_rw(as, addr, (void *) &val, 8, 1);
3018 /* virtual memory access for debug (includes writing to ROM) */
3019 int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
3020 uint8_t *buf, int len, int is_write)
3022 int l;
3023 hwaddr phys_addr;
3024 target_ulong page;
3026 while (len > 0) {
3027 page = addr & TARGET_PAGE_MASK;
3028 phys_addr = cpu_get_phys_page_debug(cpu, page);
3029 /* if no physical page mapped, return an error */
3030 if (phys_addr == -1)
3031 return -1;
3032 l = (page + TARGET_PAGE_SIZE) - addr;
3033 if (l > len)
3034 l = len;
3035 phys_addr += (addr & ~TARGET_PAGE_MASK);
3036 if (is_write) {
3037 cpu_physical_memory_write_rom(cpu->as, phys_addr, buf, l);
3038 } else {
3039 address_space_rw(cpu->as, phys_addr, buf, l, 0);
3041 len -= l;
3042 buf += l;
3043 addr += l;
3045 return 0;
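/*
 * Illustrative sketch: a gdbstub-style reader pulls bytes through the CPU's
 * MMU with this helper, so accesses to unmapped pages fail cleanly instead
 * of faulting (the 'cpu' and 'env_pc' variables are assumptions).
 */
#if 0
uint8_t insn[4];

if (cpu_memory_rw_debug(cpu, env_pc, insn, sizeof(insn), 0) < 0) {
    /* page not mapped: report the error back to the debugger */
}
#endif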
3047 #endif
3050 * A helper function for the _utterly broken_ virtio device model to find out if
3051 * it's running on a big-endian machine. Don't do this at home, kids!
3053 bool target_words_bigendian(void);
3054 bool target_words_bigendian(void)
3056 #if defined(TARGET_WORDS_BIGENDIAN)
3057 return true;
3058 #else
3059 return false;
3060 #endif
3063 #ifndef CONFIG_USER_ONLY
3064 bool cpu_physical_memory_is_io(hwaddr phys_addr)
3066 MemoryRegion *mr;
3067 hwaddr l = 1;
3069 mr = address_space_translate(&address_space_memory,
3070 phys_addr, &phys_addr, &l, false);
3072 return !(memory_region_is_ram(mr) ||
3073 memory_region_is_romd(mr));
3076 void qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
3078 RAMBlock *block;
3080 rcu_read_lock();
3081 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
3082 func(block->host, block->offset, block->used_length, opaque);
3084 rcu_read_unlock();
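/*
 * Illustrative sketch of a RAMBlockIterFunc matching the call above (the
 * callback's parameter types are inferred from the loop): sum the used
 * length of every RAM block.
 */
#if 0
static void example_count_ram(void *host, ram_addr_t offset,
                              ram_addr_t length, void *opaque)
{
    *(uint64_t *)opaque += length;
}

/* from some init or monitor path: */
uint64_t total = 0;
qemu_ram_foreach_block(example_count_ram, &total);
#endif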
3086 #endif