/*
 * Virtual page mapping
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#ifndef _WIN32
#endif

#include "qemu/cutils.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/target_page.h"
#include "tcg.h"
#include "hw/qdev-core.h"
#include "hw/qdev-properties.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/boards.h"
#include "hw/xen/xen.h"
#endif
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#if defined(CONFIG_USER_ONLY)
#include "qemu.h"
#else /* !CONFIG_USER_ONLY */
#include "hw/hw.h"
#include "exec/memory.h"
#include "exec/ioport.h"
#include "sysemu/dma.h"
#include "sysemu/numa.h"
#include "sysemu/hw_accel.h"
#include "exec/address-spaces.h"
#include "sysemu/xen-mapcache.h"
#include "trace-root.h"

#ifdef CONFIG_FALLOCATE_PUNCH_HOLE
#include <fcntl.h>
#include <linux/falloc.h>
#endif

#endif
#include "exec/cpu-all.h"
#include "qemu/rcu_queue.h"
#include "qemu/main-loop.h"
#include "translate-all.h"
#include "sysemu/replay.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "exec/log.h"

#include "migration/vmstate.h"

#include "qemu/range.h"
#ifndef _WIN32
#include "qemu/mmap-alloc.h"
#endif

#include "monitor/monitor.h"

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* ram_list is read under rcu_read_lock()/rcu_read_unlock().  Writes
 * are protected by the ramlist lock.
 */
RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC   (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED     (1 << 1)

/* Only a portion of RAM (used_length) is actually used, and migrated.
 * This used_length size can change across reboots.
 */
#define RAM_RESIZEABLE (1 << 2)

#endif

#ifdef TARGET_PAGE_BITS_VARY
int target_page_bits;
bool target_page_bits_decided;
#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
__thread CPUState *current_cpu;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

uintptr_t qemu_host_page_size;
intptr_t qemu_host_page_mask;
uintptr_t qemu_real_host_page_size;
intptr_t qemu_real_host_page_mask;

bool set_preferred_target_page_bits(int bits)
{
    /* The target page size is the lowest common denominator for all
     * the CPUs in the system, so we can only make it smaller, never
     * larger.  And we can't make it smaller once we've committed to
     * a particular size.
     */
#ifdef TARGET_PAGE_BITS_VARY
    assert(bits >= TARGET_PAGE_BITS_MIN);
    if (target_page_bits == 0 || target_page_bits > bits) {
        if (target_page_bits_decided) {
            return false;
        }
        target_page_bits = bits;
    }
#endif
    return true;
}

#if !defined(CONFIG_USER_ONLY)

static void finalize_target_page_bits(void)
{
#ifdef TARGET_PAGE_BITS_VARY
    if (target_page_bits == 0) {
        target_page_bits = TARGET_PAGE_BITS_MIN;
    }
    target_page_bits_decided = true;
#endif
}

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */
    uint32_t skip : 6;
     /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)
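/* i.e. 0x03ffffff, the largest value representable in the 26-bit ptr field */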

/* Size of the L2 (and L3, etc) page tables.  */
#define ADDR_SPACE_BITS 64

#define P_L2_BITS 9
#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
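/* e.g. with the common TARGET_PAGE_BITS of 12 this works out to
 * ((64 - 12 - 1) / 9) + 1 = 6 levels of 512-entry tables.
 */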

typedef PhysPageEntry Node[P_L2_SIZE];

typedef struct PhysPageMap {
    struct rcu_head rcu;

    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    struct rcu_head rcu;

    MemoryRegionSection *mru_section;
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
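/* i.e. the offset of addr within its target page; used below to index
 * subpage_t::sub_section[] */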
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3
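/* Well-known section indices.  They are ORed into iotlb values in
 * memory_region_section_get_iotlb() below, which is why section numbers
 * must stay below TARGET_PAGE_SIZE (see the assert in phys_section_add).
 */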

static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;

/**
 * CPUAddressSpace: all the information a CPU needs about an AddressSpace
 * @cpu: the CPU whose AddressSpace this is
 * @as: the AddressSpace itself
 * @memory_dispatch: its dispatch pointer (cached, RCU protected)
 * @tcg_as_listener: listener for tracking changes to the AddressSpace
 */
struct CPUAddressSpace {
    CPUState *cpu;
    AddressSpace *as;
    struct AddressSpaceDispatch *memory_dispatch;
    MemoryListener tcg_as_listener;
};

struct DirtyBitmapSnapshot {
    ram_addr_t start;
    ram_addr_t end;
    unsigned long dirty[];
};

#endif

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    static unsigned alloc_hint = 16;
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, alloc_hint);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
        alloc_hint = map->nodes_nb_alloc;
    }
}

static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
{
    unsigned i;
    uint32_t ret;
    PhysPageEntry e;
    PhysPageEntry *p;

    ret = map->nodes_nb++;
    p = map->nodes[ret];
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);

    e.skip = leaf ? 0 : 1;
    e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
    for (i = 0; i < P_L2_SIZE; ++i) {
        memcpy(&p[i], &e, sizeof(e));
    }
    return ret;
}

static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map, level == 0);
    }
    p = map->nodes[lp->ptr];
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

/* Compact a non leaf page entry. Simply detect that the entry has a single child,
 * and update our entry so we can skip it and go directly to the destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}

static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes);
    }
}

static inline bool section_covers_addr(const MemoryRegionSection *section,
                                       hwaddr addr)
{
    /* Memory topology clips a memory region to [0, 2^64); size.hi > 0 means
     * the section must cover the entire address space.
     */
    return int128_gethi(section->size) ||
           range_covers_byte(section->offset_within_address_space,
                             int128_getlo(section->size), addr);
}
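
/* For instance, a section of size 2^64 has size.hi == 1 and size.lo == 0,
 * so the first operand alone already makes it cover any addr. */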

static MemoryRegionSection *phys_page_find(AddressSpaceDispatch *d, hwaddr addr)
{
    PhysPageEntry lp = d->phys_map, *p;
    Node *nodes = d->map.nodes;
    MemoryRegionSection *sections = d->map.sections;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (section_covers_addr(&sections[lp.ptr], addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

/* Called from RCU critical section */
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section = atomic_read(&d->mru_section);
    subpage_t *subpage;
    bool update;

    if (section && section != &d->map.sections[PHYS_SECTION_UNASSIGNED] &&
        section_covers_addr(section, addr)) {
        update = false;
    } else {
        section = phys_page_find(d, addr);
        update = true;
    }
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    if (update) {
        atomic_set(&d->mru_section, section);
    }
    return section;
}

/* Called from RCU critical section */
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    MemoryRegion *mr;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    mr = section->mr;

    /* MMIO registers can be expected to perform full-width accesses based only
     * on their address, without considering adjacent registers that could
     * decode to completely different MemoryRegions.  When such registers
     * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
     * regions overlap wildly.  For this reason we cannot clamp the accesses
     * here.
     *
     * If the length is small (as is the case for address_space_ldl/stl),
     * everything works fine.  If the incoming length is large, however,
     * the caller really has to do the clamping through memory_access_size.
     */
    if (memory_region_is_ram(mr)) {
        diff = int128_sub(section->size, int128_make64(addr));
        *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    }
    return section;
}

/* Called from RCU critical section */
static MemoryRegionSection address_space_do_translate(AddressSpace *as,
                                                      hwaddr addr,
                                                      hwaddr *xlat,
                                                      hwaddr *plen,
                                                      bool is_write,
                                                      bool is_mmio)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    IOMMUMemoryRegion *iommu_mr;
    IOMMUMemoryRegionClass *imrc;

    for (;;) {
        AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
        section = address_space_translate_internal(d, addr, &addr, plen, is_mmio);

        iommu_mr = memory_region_get_iommu(section->mr);
        if (!iommu_mr) {
            break;
        }
        imrc = memory_region_get_iommu_class_nocheck(iommu_mr);

        iotlb = imrc->translate(iommu_mr, addr, is_write ?
                                IOMMU_WO : IOMMU_RO);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
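        /* e.g. for a 4K IOMMU mapping addr_mask is 0xfff: addr is now the
         * translated page base plus the original low 12 bits, and *plen is
         * clipped so the access does not cross the end of that page. */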
        if (!(iotlb.perm & (1 << is_write))) {
            goto translate_fail;
        }

        as = iotlb.target_as;
    }

    *xlat = addr;

    return *section;

translate_fail:
    return (MemoryRegionSection) { .mr = &io_mem_unassigned };
}

/* Called from RCU critical section */
IOMMUTLBEntry address_space_get_iotlb_entry(AddressSpace *as, hwaddr addr,
                                            bool is_write)
{
    MemoryRegionSection section;
    hwaddr xlat, plen;

    /* Try to get maximum page mask during translation. */
    plen = (hwaddr)-1;

    /* This can never be MMIO. */
    section = address_space_do_translate(as, addr, &xlat, &plen,
                                         is_write, false);

    /* Illegal translation */
    if (section.mr == &io_mem_unassigned) {
        goto iotlb_fail;
    }

    /* Convert memory region offset into address space offset */
    xlat += section.offset_within_address_space -
            section.offset_within_region;

    if (plen == (hwaddr)-1) {
        /*
         * We use the default page size here. Logically it only happens
         * for identity mappings.
         */
        plen = TARGET_PAGE_SIZE;
    }

    /* Convert to address mask */
    plen -= 1;
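    /* e.g. plen == TARGET_PAGE_SIZE (0x1000) becomes the mask 0xfff, so
     * .iova and .translated_addr below are page-aligned base addresses. */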

    return (IOMMUTLBEntry) {
        .target_as = section.address_space,
        .iova = addr & ~plen,
        .translated_addr = xlat & ~plen,
        .addr_mask = plen,
        /* IOTLBs are for DMAs, and DMA is only allowed on RAM. */
        .perm = IOMMU_RW,
    };

iotlb_fail:
    return (IOMMUTLBEntry) {0};
}

/* Called from RCU critical section */
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    MemoryRegion *mr;
    MemoryRegionSection section;

    /* This can be MMIO, so setup MMIO bit. */
    section = address_space_do_translate(as, addr, xlat, plen, is_write, true);
    mr = section.mr;

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        *plen = MIN(page, *plen);
    }

    return mr;
}

/* Called from RCU critical section */
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen)
{
    MemoryRegionSection *section;
    AddressSpaceDispatch *d = atomic_rcu_read(&cpu->cpu_ases[asidx].memory_dispatch);

    section = address_space_translate_internal(d, addr, xlat, plen, false);

    assert(!memory_region_is_iommu(section->mr));
    return section;
}
#endif

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu);

    return 0;
}

static int cpu_common_pre_load(void *opaque)
{
    CPUState *cpu = opaque;

    cpu->exception_index = -1;

    return 0;
}

static bool cpu_common_exception_index_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return tcg_enabled() && cpu->exception_index != -1;
}

static const VMStateDescription vmstate_cpu_common_exception_index = {
    .name = "cpu_common/exception_index",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_exception_index_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(exception_index, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

static bool cpu_common_crash_occurred_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return cpu->crash_occurred;
}

static const VMStateDescription vmstate_cpu_common_crash_occurred = {
    .name = "cpu_common/crash_occurred",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_crash_occurred_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(crash_occurred, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_cpu_common_exception_index,
        &vmstate_cpu_common_crash_occurred,
        NULL
    }
};

#endif

CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}

#if !defined(CONFIG_USER_ONLY)
void cpu_address_space_init(CPUState *cpu, AddressSpace *as, int asidx)
{
    CPUAddressSpace *newas;

    /* Target code should have set num_ases before calling us */
    assert(asidx < cpu->num_ases);

    if (asidx == 0) {
        /* address space 0 gets the convenience alias */
        cpu->as = as;
    }

    /* KVM cannot currently support multiple address spaces. */
    assert(asidx == 0 || !kvm_enabled());

    if (!cpu->cpu_ases) {
        cpu->cpu_ases = g_new0(CPUAddressSpace, cpu->num_ases);
    }

    newas = &cpu->cpu_ases[asidx];
    newas->cpu = cpu;
    newas->as = as;
    if (tcg_enabled()) {
        newas->tcg_as_listener.commit = tcg_commit;
        memory_listener_register(&newas->tcg_as_listener, as);
    }
}

AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx)
{
    /* Return the AddressSpace corresponding to the specified index */
    return cpu->cpu_ases[asidx].as;
}
#endif

void cpu_exec_unrealizefn(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    cpu_list_remove(cpu);

    if (cc->vmsd != NULL) {
        vmstate_unregister(NULL, cc->vmsd, cpu);
    }
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_unregister(NULL, &vmstate_cpu_common, cpu);
    }
}

Property cpu_common_props[] = {
#ifndef CONFIG_USER_ONLY
    /* Create a memory property for softmmu CPU object,
     * so users can wire up its memory. (This can't go in qom/cpu.c
     * because that file is compiled only once for both user-mode
     * and system builds.) The default if no link is set up is to use
     * the system address space.
     */
    DEFINE_PROP_LINK("memory", CPUState, memory, TYPE_MEMORY_REGION,
                     MemoryRegion *),
#endif
    DEFINE_PROP_END_OF_LIST(),
};

void cpu_exec_initfn(CPUState *cpu)
{
    cpu->as = NULL;
    cpu->num_ases = 0;

#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();
    cpu->memory = system_memory;
    object_ref(OBJECT(cpu->memory));
#endif
}

void cpu_exec_realizefn(CPUState *cpu, Error **errp)
{
    CPUClass *cc ATTRIBUTE_UNUSED = CPU_GET_CLASS(cpu);

    cpu_list_add(cpu);

#ifndef CONFIG_USER_ONLY
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu->cpu_index, &vmstate_cpu_common, cpu);
    }
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu->cpu_index, cc->vmsd, cpu);
    }
#endif
}

#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    mmap_lock();
    tb_lock();
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
    tb_unlock();
    mmap_unlock();
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    MemTxAttrs attrs;
    hwaddr phys = cpu_get_phys_page_attrs_debug(cpu, pc, &attrs);
    int asidx = cpu_asidx_from_attrs(cpu, attrs);
    if (phys != -1) {
        /* Locks grabbed by tb_invalidate_phys_addr */
        tb_invalidate_phys_addr(cpu->cpu_ases[asidx].as,
                                phys | (pc & ~TARGET_PAGE_MASK));
    }
}
#endif

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
}

int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    return -ENOSYS;
}

void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
}

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    CPUWatchpoint *wp;

    /* forbid ranges which are empty or run off the end of the address space */
    if (len == 0 || (addr + len - 1) < addr) {
        error_report("tried to set invalid watchpoint at %"
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len = len;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
    }

    tlb_flush_page(cpu, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (addr == wp->vaddr && len == wp->len
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);

    tlb_flush_page(cpu, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
        }
    }
}

/* Return true if this watchpoint address matches the specified
 * access (ie the address range covered by the watchpoint overlaps
 * partially or completely with the address range covered by the
 * access).
 */
static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
                                                  vaddr addr,
                                                  vaddr len)
{
    /* We know the lengths are non-zero, but a little caution is
     * required to avoid errors in the case where the range ends
     * exactly at the top of the address space and so addr + len
     * wraps round to zero.
     */
    vaddr wpend = wp->vaddr + wp->len - 1;
    vaddr addrend = addr + len - 1;

    return !(addr > wpend || wp->vaddr > addrend);
}
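
/* For example, a 4-byte watchpoint whose last byte is the top of the
 * address space: wp->vaddr + wp->len would wrap to zero, but wpend is
 * computed as the all-ones address, so a 4-byte access at the same
 * address still compares as overlapping. */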
#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
{
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
    }

    breakpoint_invalidate(cpu, pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
{
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
{
    QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);

    breakpoint_invalidate(cpu, breakpoint->pc);

    g_free(breakpoint);
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
{
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
        }
    }
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(cpu);
        }
    }
}

void cpu_abort(CPUState *cpu, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_separate()) {
        qemu_log_lock();
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_unlock();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
    replay_finish();
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

#if !defined(CONFIG_USER_ONLY)
/* Called from RCU critical section */
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && addr - block->offset < block->max_length) {
        return block;
    }
    RAMBLOCK_FOREACH(block) {
        if (addr - block->offset < block->max_length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    /* It is safe to write mru_block outside the iothread lock.  This
     * is what happens:
     *
     *     mru_block = xxx
     *     rcu_read_unlock()
     *                         xxx removed from list
     *                  rcu_read_lock()
     *                  read mru_block
     *                                mru_block = NULL;
     *                                call_rcu(reclaim_ramblock, xxx);
     *                  rcu_read_unlock()
     *
     * atomic_rcu_set is not needed here.  The block was already published
     * when it was placed into the list.  Here we're just making an extra
     * copy of the pointer.
     */
    ram_list.mru_block = block;
    return block;
}

static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
    CPUState *cpu;
    ram_addr_t start1;
    RAMBlock *block;
    ram_addr_t end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    rcu_read_lock();
    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
    CPU_FOREACH(cpu) {
        tlb_reset_dirty(cpu, start1, length);
    }
    rcu_read_unlock();
}

/* Note: start and end must be within the same ram block.  */
bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
                                              ram_addr_t length,
                                              unsigned client)
{
    DirtyMemoryBlocks *blocks;
    unsigned long end, page;
    bool dirty = false;

    if (length == 0) {
        return false;
    }

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    rcu_read_lock();

    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);

    while (page < end) {
        unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long num = MIN(end - page, DIRTY_MEMORY_BLOCK_SIZE - offset);

        dirty |= bitmap_test_and_clear_atomic(blocks->blocks[idx],
                                              offset, num);
        page += num;
    }

    rcu_read_unlock();

    if (dirty && tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
    }

    return dirty;
}

DirtyBitmapSnapshot *cpu_physical_memory_snapshot_and_clear_dirty
    (ram_addr_t start, ram_addr_t length, unsigned client)
{
    DirtyMemoryBlocks *blocks;
    unsigned long align = 1UL << (TARGET_PAGE_BITS + BITS_PER_LEVEL);
    ram_addr_t first = QEMU_ALIGN_DOWN(start, align);
    ram_addr_t last  = QEMU_ALIGN_UP(start + length, align);
    DirtyBitmapSnapshot *snap;
    unsigned long page, end, dest;

    snap = g_malloc0(sizeof(*snap) +
                     ((last - first) >> (TARGET_PAGE_BITS + 3)));
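    /* (last - first) >> TARGET_PAGE_BITS is the number of pages, i.e. of
     * dirty bits; the further >> 3 converts bits to bytes. */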
    snap->start = first;
    snap->end   = last;

    page = first >> TARGET_PAGE_BITS;
    end  = last  >> TARGET_PAGE_BITS;
    dest = 0;

    rcu_read_lock();

    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);

    while (page < end) {
        unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long num = MIN(end - page, DIRTY_MEMORY_BLOCK_SIZE - offset);

        assert(QEMU_IS_ALIGNED(offset, (1 << BITS_PER_LEVEL)));
        assert(QEMU_IS_ALIGNED(num, (1 << BITS_PER_LEVEL)));
        offset >>= BITS_PER_LEVEL;

        bitmap_copy_and_clear_atomic(snap->dirty + dest,
                                     blocks->blocks[idx] + offset,
                                     num);
        page += num;
        dest += num >> BITS_PER_LEVEL;
    }

    rcu_read_unlock();

    if (tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
    }

    return snap;
}

bool cpu_physical_memory_snapshot_get_dirty(DirtyBitmapSnapshot *snap,
                                            ram_addr_t start,
                                            ram_addr_t length)
{
    unsigned long page, end;

    assert(start >= snap->start);
    assert(start + length <= snap->end);

    end = TARGET_PAGE_ALIGN(start + length - snap->start) >> TARGET_PAGE_BITS;
    page = (start - snap->start) >> TARGET_PAGE_BITS;

    while (page < end) {
        if (test_bit(page, snap->dirty)) {
            return true;
        }
        page++;
    }
    return false;
}

/* Called from RCU critical section */
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = memory_region_get_ram_addr(section->mr) + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        AddressSpaceDispatch *d;

        d = atomic_rcu_read(&section->address_space->dispatch);
        iotlb = section - d->map.sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
                               qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this.  Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
{
    phys_mem_alloc = alloc;
}

static uint16_t phys_section_add(PhysPageMap *map,
                                 MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(map->sections_nb < TARGET_PAGE_SIZE);

    if (map->sections_nb == map->sections_nb_alloc) {
        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
        map->sections = g_renew(MemoryRegionSection, map->sections,
                                map->sections_nb_alloc);
    }
    map->sections[map->sections_nb] = *section;
    memory_region_ref(section->mr);
    return map->sections_nb++;
}

static void phys_section_destroy(MemoryRegion *mr)
{
    bool have_sub_page = mr->subpage;

    memory_region_unref(mr);

    if (have_sub_page) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        object_unref(OBJECT(&subpage->iomem));
        g_free(subpage);
    }
}

static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->sections);
    g_free(map->nodes);
}

static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d, base);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.address_space = d->as;
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&d->map, &subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end,
                     phys_section_add(&d->map, section));
}

static void register_multipage(AddressSpaceDispatch *d,
                               MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(&d->map, section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,
                                                    TARGET_PAGE_BITS));

    assert(num_pages);
    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
}

static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = as->next_dispatch;
    MemoryRegionSection now = *section, remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
                        - now.offset_within_address_space;

        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(d, &now);
    } else {
        now.size = int128_zero();
    }
    while (int128_ne(remain.size, now.size)) {
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);
        now = remain;
        if (int128_lt(remain.size, page_size)) {
            register_subpage(d, &now);
        } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
            now.size = page_size;
            register_subpage(d, &now);
        } else {
            now.size = int128_and(now.size, int128_neg(page_size));
            register_multipage(d, &now);
        }
    }
}
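
/* For instance, with 4K target pages a section covering 0x0800..0x27ff is
 * registered as a subpage piece for 0x0800..0x0fff, a full page for
 * 0x1000..0x1fff, and another subpage piece for 0x2000..0x27ff. */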

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}

void ram_block_dump(Monitor *mon)
{
    RAMBlock *block;
    char *psize;

    rcu_read_lock();
    monitor_printf(mon, "%24s %8s %18s %18s %18s\n",
                   "Block Name", "PSize", "Offset", "Used", "Total");
    RAMBLOCK_FOREACH(block) {
        psize = size_to_str(block->page_size);
        monitor_printf(mon, "%24s %8s 0x%016" PRIx64 " 0x%016" PRIx64
                       " 0x%016" PRIx64 "\n", block->idstr, psize,
                       (uint64_t)block->offset,
                       (uint64_t)block->used_length,
                       (uint64_t)block->max_length);
        g_free(psize);
    }
    rcu_read_unlock();
}

#ifdef __linux__
/*
 * FIXME TOCTTOU: this iterates over memory backends' mem-path, which
 * may or may not name the same files / on the same filesystem now as
 * when we actually open and map them.  Iterate over the file
 * descriptors instead, and use qemu_fd_getpagesize().
 */
static int find_max_supported_pagesize(Object *obj, void *opaque)
{
    char *mem_path;
    long *hpsize_min = opaque;

    if (object_dynamic_cast(obj, TYPE_MEMORY_BACKEND)) {
        mem_path = object_property_get_str(obj, "mem-path", NULL);
        if (mem_path) {
            long hpsize = qemu_mempath_getpagesize(mem_path);
            if (hpsize < *hpsize_min) {
                *hpsize_min = hpsize;
            }
        } else {
            *hpsize_min = getpagesize();
        }
    }

    return 0;
}

long qemu_getrampagesize(void)
{
    long hpsize = LONG_MAX;
    long mainrampagesize;
    Object *memdev_root;

    if (mem_path) {
        mainrampagesize = qemu_mempath_getpagesize(mem_path);
    } else {
        mainrampagesize = getpagesize();
    }

    /* it's possible we have memory-backend objects with
     * hugepage-backed RAM. these may get mapped into system
     * address space via -numa parameters or memory hotplug
     * hooks. we want to take these into account, but we
     * also want to make sure these supported hugepage
     * sizes are applicable across the entire range of memory
     * we may boot from, so we take the min across all
     * backends, and assume normal pages in cases where a
     * backend isn't backed by hugepages.
     */
    memdev_root = object_resolve_path("/objects", NULL);
    if (memdev_root) {
        object_child_foreach(memdev_root, find_max_supported_pagesize, &hpsize);
    }
    if (hpsize == LONG_MAX) {
        /* No additional memory regions found ==> Report main RAM page size */
        return mainrampagesize;
    }

    /* If NUMA is disabled or the NUMA nodes are not backed with a
     * memory-backend, then there is at least one node using "normal" RAM,
     * so if its page size is smaller we have got to report that size instead.
     */
    if (hpsize > mainrampagesize &&
        (nb_numa_nodes == 0 || numa_info[0].node_memdev == NULL)) {
        static bool warned;
        if (!warned) {
            error_report("Huge page support disabled (n/a for main memory).");
            warned = true;
        }
        return mainrampagesize;
    }

    return hpsize;
}
#else
long qemu_getrampagesize(void)
{
    return getpagesize();
}
#endif

#ifdef __linux__
static int64_t get_file_size(int fd)
{
    int64_t size = lseek(fd, 0, SEEK_END);
    if (size < 0) {
        return -errno;
    }
    return size;
}

static int file_ram_open(const char *path,
                         const char *region_name,
                         bool *created,
                         Error **errp)
{
    char *filename;
    char *sanitized_name;
    char *c;
    int fd = -1;

    *created = false;
    for (;;) {
        fd = open(path, O_RDWR);
        if (fd >= 0) {
            /* @path names an existing file, use it */
            break;
        }
        if (errno == ENOENT) {
            /* @path names a file that doesn't exist, create it */
            fd = open(path, O_RDWR | O_CREAT | O_EXCL, 0644);
            if (fd >= 0) {
                *created = true;
                break;
            }
        } else if (errno == EISDIR) {
            /* @path names a directory, create a file there */
            /* Make name safe to use with mkstemp by replacing '/' with '_'. */
            sanitized_name = g_strdup(region_name);
            for (c = sanitized_name; *c != '\0'; c++) {
                if (*c == '/') {
                    *c = '_';
                }
            }

            filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
                                       sanitized_name);
            g_free(sanitized_name);

            fd = mkstemp(filename);
            if (fd >= 0) {
                unlink(filename);
                g_free(filename);
                break;
            }
            g_free(filename);
        }
        if (errno != EEXIST && errno != EINTR) {
            error_setg_errno(errp, errno,
                             "can't open backing store %s for guest RAM",
                             path);
            return -1;
        }
        /*
         * Try again on EINTR and EEXIST.  The latter happens when
         * something else creates the file between our two open().
         */
    }

    return fd;
}

static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            int fd,
                            bool truncate,
                            Error **errp)
{
    void *area;

    block->page_size = qemu_fd_getpagesize(fd);
    block->mr->align = block->page_size;
#if defined(__s390x__)
    if (kvm_enabled()) {
        block->mr->align = MAX(block->mr->align, QEMU_VMALLOC_ALIGN);
    }
#endif

    if (memory < block->page_size) {
        error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
                   "or larger than page size 0x%zx",
                   memory, block->page_size);
        return NULL;
    }

    memory = ROUND_UP(memory, block->page_size);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     *
     * Do not truncate the non-empty backend file to avoid corrupting
     * the existing data in the file. Disabling shrinking is not
     * enough. For example, the current vNVDIMM implementation stores
     * the guest NVDIMM labels at the end of the backend file. If the
     * backend file is later extended, QEMU will not be able to find
     * those labels. Therefore, extending the non-empty backend file
     * is disabled as well.
     */
    if (truncate && ftruncate(fd, memory)) {
        perror("ftruncate");
    }

    area = qemu_ram_mmap(fd, memory, block->mr->align,
                         block->flags & RAM_SHARED);
    if (area == MAP_FAILED) {
        error_setg_errno(errp, errno,
                         "unable to map backing store for guest RAM");
        return NULL;
    }

    if (mem_prealloc) {
        os_mem_prealloc(fd, area, memory, smp_cpus, errp);
        if (errp && *errp) {
            qemu_ram_munmap(area, memory);
            return NULL;
        }
    }

    block->fd = fd;
    return area;
}
#endif

/* Called with the ramlist lock held.  */
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    assert(size != 0); /* it would hand out same offset multiple times */

    if (QLIST_EMPTY_RCU(&ram_list.blocks)) {
        return 0;
    }

    RAMBLOCK_FOREACH(block) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->max_length;

        RAMBLOCK_FOREACH(next_block) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}

unsigned long last_ram_page(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    rcu_read_lock();
    RAMBLOCK_FOREACH(block) {
        last = MAX(last, block->offset + block->max_length);
    }
    rcu_read_unlock();
    return last >> TARGET_PAGE_BITS;
}

static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
{
    int ret;

    /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
    if (!machine_dump_guest_core(current_machine)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                            "but dump_guest_core=off specified\n");
        }
    }
}

const char *qemu_ram_get_idstr(RAMBlock *rb)
{
    return rb->idstr;
}

bool qemu_ram_is_shared(RAMBlock *rb)
{
    return rb->flags & RAM_SHARED;
}

/* Called with iothread lock held.  */
void qemu_ram_set_idstr(RAMBlock *new_block, const char *name, DeviceState *dev)
{
    RAMBlock *block;

    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev) {
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    rcu_read_lock();
    RAMBLOCK_FOREACH(block) {
        if (block != new_block &&
            !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
    rcu_read_unlock();
}

/* Called with iothread lock held.  */
void qemu_ram_unset_idstr(RAMBlock *block)
{
    /* FIXME: arch_init.c assumes that this is not called throughout
     * migration.  Ignore the problem since hot-unplug during migration
     * does not work anyway.
     */
    if (block) {
        memset(block->idstr, 0, sizeof(block->idstr));
    }
}

size_t qemu_ram_pagesize(RAMBlock *rb)
{
    return rb->page_size;
}

/* Returns the largest size of page in use */
size_t qemu_ram_pagesize_largest(void)
{
    RAMBlock *block;
    size_t largest = 0;

    RAMBLOCK_FOREACH(block) {
        largest = MAX(largest, qemu_ram_pagesize(block));
    }

    return largest;
}

static int memory_try_enable_merging(void *addr, size_t len)
{
    if (!machine_mem_merge(current_machine)) {
        /* disabled by the user */
        return 0;
    }

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
}

/* Only legal before the guest might have detected the memory size: e.g. on
 * incoming migration, or right after reset.
 *
 * As the memory core doesn't know how the memory is accessed, it is up to
 * the resize callback to update device state and/or add assertions to detect
 * misuse, if necessary.
 */
int qemu_ram_resize(RAMBlock *block, ram_addr_t newsize, Error **errp)
{
    assert(block);

    newsize = HOST_PAGE_ALIGN(newsize);

    if (block->used_length == newsize) {
        return 0;
    }

    if (!(block->flags & RAM_RESIZEABLE)) {
        error_setg_errno(errp, EINVAL,
                         "Length mismatch: %s: 0x" RAM_ADDR_FMT
                         " in != 0x" RAM_ADDR_FMT, block->idstr,
                         newsize, block->used_length);
        return -EINVAL;
    }

    if (block->max_length < newsize) {
        error_setg_errno(errp, EINVAL,
                         "Length too large: %s: 0x" RAM_ADDR_FMT
                         " > 0x" RAM_ADDR_FMT, block->idstr,
                         newsize, block->max_length);
        return -EINVAL;
    }

    cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
    block->used_length = newsize;
    cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
                                        DIRTY_CLIENTS_ALL);
    memory_region_set_size(block->mr, newsize);
    if (block->resized) {
        block->resized(block->idstr, newsize, block->host);
    }
    return 0;
}

/* Called with ram_list.mutex held */
static void dirty_memory_extend(ram_addr_t old_ram_size,
                                ram_addr_t new_ram_size)
{
    ram_addr_t old_num_blocks = DIV_ROUND_UP(old_ram_size,
                                             DIRTY_MEMORY_BLOCK_SIZE);
    ram_addr_t new_num_blocks = DIV_ROUND_UP(new_ram_size,
                                             DIRTY_MEMORY_BLOCK_SIZE);
    int i;

    /* Only need to extend if block count increased */
    if (new_num_blocks <= old_num_blocks) {
        return;
    }

    for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
        DirtyMemoryBlocks *old_blocks;
        DirtyMemoryBlocks *new_blocks;
        int j;

        old_blocks = atomic_rcu_read(&ram_list.dirty_memory[i]);
        new_blocks = g_malloc(sizeof(*new_blocks) +
                              sizeof(new_blocks->blocks[0]) * new_num_blocks);

        if (old_num_blocks) {
            memcpy(new_blocks->blocks, old_blocks->blocks,
                   old_num_blocks * sizeof(old_blocks->blocks[0]));
        }

        for (j = old_num_blocks; j < new_num_blocks; j++) {
            new_blocks->blocks[j] = bitmap_new(DIRTY_MEMORY_BLOCK_SIZE);
        }

        atomic_rcu_set(&ram_list.dirty_memory[i], new_blocks);

        if (old_blocks) {
            g_free_rcu(old_blocks, rcu);
        }
    }
}

static void ram_block_add(RAMBlock *new_block, Error **errp)
{
    RAMBlock *block;
    RAMBlock *last_block = NULL;
    ram_addr_t old_ram_size, new_ram_size;
    Error *err = NULL;

    old_ram_size = last_ram_page();

    qemu_mutex_lock_ramlist();
    new_block->offset = find_ram_offset(new_block->max_length);

    if (!new_block->host) {
        if (xen_enabled()) {
            xen_ram_alloc(new_block->offset, new_block->max_length,
                          new_block->mr, &err);
            if (err) {
                error_propagate(errp, err);
                qemu_mutex_unlock_ramlist();
                return;
            }
        } else {
            new_block->host = phys_mem_alloc(new_block->max_length,
                                             &new_block->mr->align);
            if (!new_block->host) {
                error_setg_errno(errp, errno,
                                 "cannot set up guest memory '%s'",
                                 memory_region_name(new_block->mr));
                qemu_mutex_unlock_ramlist();
                return;
            }
            memory_try_enable_merging(new_block->host, new_block->max_length);
        }
    }

    new_ram_size = MAX(old_ram_size,
              (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS);
    if (new_ram_size > old_ram_size) {
        dirty_memory_extend(old_ram_size, new_ram_size);
    }
    /* Keep the list sorted from biggest to smallest block.  Unlike QTAILQ,
     * QLIST (which has an RCU-friendly variant) does not have insertion at
     * tail, so save the last element in last_block.
     */
    RAMBLOCK_FOREACH(block) {
        last_block = block;
        if (block->max_length < new_block->max_length) {
            break;
        }
    }
    if (block) {
        QLIST_INSERT_BEFORE_RCU(block, new_block, next);
    } else if (last_block) {
        QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
    } else { /* list is empty */
        QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
    }
    ram_list.mru_block = NULL;

    /* Write list before version */
    smp_wmb();
    ram_list.version++;
    qemu_mutex_unlock_ramlist();

    cpu_physical_memory_set_dirty_range(new_block->offset,
                                        new_block->used_length,
                                        DIRTY_CLIENTS_ALL);

    if (new_block->host) {
        qemu_ram_setup_dump(new_block->host, new_block->max_length);
        qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
        /* MADV_DONTFORK is also needed by KVM in absence of synchronous MMU */
        qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
        ram_block_notify_add(new_block->host, new_block->max_length);
    }
}

#ifdef __linux__
RAMBlock *qemu_ram_alloc_from_fd(ram_addr_t size, MemoryRegion *mr,
                                 bool share, int fd,
                                 Error **errp)
{
    RAMBlock *new_block;
    Error *local_err = NULL;
    int64_t file_size;

    if (xen_enabled()) {
        error_setg(errp, "-mem-path not supported with Xen");
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        error_setg(errp,
                   "host lacks kvm mmu notifiers, -mem-path unsupported");
        return NULL;
    }

    if (phys_mem_alloc != qemu_anon_ram_alloc) {
        /*
         * file_ram_alloc() needs to allocate just like
         * phys_mem_alloc, but we haven't bothered to provide
         * a hook there.
         */
        error_setg(errp,
                   "-mem-path not supported with this accelerator");
        return NULL;
    }

    size = HOST_PAGE_ALIGN(size);
    file_size = get_file_size(fd);
    if (file_size > 0 && file_size < size) {
        error_setg(errp, "backing store %s size 0x%" PRIx64
                   " does not match 'size' option 0x" RAM_ADDR_FMT,
                   mem_path, file_size, size);
        return NULL;
    }

    new_block = g_malloc0(sizeof(*new_block));
    new_block->mr = mr;
    new_block->used_length = size;
    new_block->max_length = size;
    new_block->flags = share ? RAM_SHARED : 0;
    new_block->host = file_ram_alloc(new_block, size, fd, !file_size, errp);
    if (!new_block->host) {
        g_free(new_block);
        return NULL;
    }

    ram_block_add(new_block, &local_err);
    if (local_err) {
        g_free(new_block);
        error_propagate(errp, local_err);
        return NULL;
    }
    return new_block;
}

RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
                                   bool share, const char *mem_path,
                                   Error **errp)
{
    int fd;
    bool created;
    RAMBlock *block;

    fd = file_ram_open(mem_path, memory_region_name(mr), &created, errp);
    if (fd < 0) {
        return NULL;
    }

    block = qemu_ram_alloc_from_fd(size, mr, share, fd, errp);
    if (!block) {
        if (created) {
            unlink(mem_path);
        }
        close(fd);
        return NULL;
    }

    return block;
}
#endif

static
RAMBlock *qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
                                  void (*resized)(const char*,
                                                  uint64_t length,
                                                  void *host),
                                  void *host, bool resizeable,
                                  MemoryRegion *mr, Error **errp)
{
    RAMBlock *new_block;
    Error *local_err = NULL;

    size = HOST_PAGE_ALIGN(size);
    max_size = HOST_PAGE_ALIGN(max_size);
    new_block = g_malloc0(sizeof(*new_block));
    new_block->mr = mr;
    new_block->resized = resized;
    new_block->used_length = size;
    new_block->max_length = max_size;
    assert(max_size >= size);
    new_block->fd = -1;
    new_block->page_size = getpagesize();
    new_block->host = host;
    if (host) {
        new_block->flags |= RAM_PREALLOC;
    }
    if (resizeable) {
        new_block->flags |= RAM_RESIZEABLE;
    }
    ram_block_add(new_block, &local_err);
    if (local_err) {
        g_free(new_block);
        error_propagate(errp, local_err);
        return NULL;
    }
    return new_block;
}

RAMBlock *qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                  MemoryRegion *mr, Error **errp)
{
    return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
}

RAMBlock *qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
{
    return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
}

RAMBlock *qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
                                    void (*resized)(const char*,
                                                    uint64_t length,
                                                    void *host),
                                    MemoryRegion *mr, Error **errp)
{
    return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
}

static void reclaim_ramblock(RAMBlock *block)
{
    if (block->flags & RAM_PREALLOC) {
        ;
    } else if (xen_enabled()) {
        xen_invalidate_map_cache_entry(block->host);
#ifndef _WIN32
    } else if (block->fd >= 0) {
        qemu_ram_munmap(block->host, block->max_length);
        close(block->fd);
#endif
    } else {
        qemu_anon_ram_free(block->host, block->max_length);
    }
    g_free(block);
}

void qemu_ram_free(RAMBlock *block)
{
    if (!block) {
        return;
    }

    if (block->host) {
        ram_block_notify_remove(block->host, block->max_length);
    }

    qemu_mutex_lock_ramlist();
    QLIST_REMOVE_RCU(block, next);
    ram_list.mru_block = NULL;
    /* Write list before version */
    smp_wmb();
    ram_list.version++;
    call_rcu(block, reclaim_ramblock, rcu);
    qemu_mutex_unlock_ramlist();
}

#ifndef _WIN32
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    RAMBLOCK_FOREACH(block) {
        offset = addr - block->offset;
        if (offset < block->max_length) {
            vaddr = ramblock_ptr(block, offset);
            if (block->flags & RAM_PREALLOC) {
                ;
            } else if (xen_enabled()) {
                abort();
            } else {
                flags = MAP_FIXED;
                if (block->fd >= 0) {
                    flags |= (block->flags & RAM_SHARED ?
                              MAP_SHARED : MAP_PRIVATE);
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, block->fd, offset);
                } else {
                    /*
                     * Remap needs to match alloc.  Accelerators that
                     * set phys_mem_alloc never remap.  If they did,
                     * we'd need a remap hook here.
                     */
                    assert(phys_mem_alloc == qemu_anon_ram_alloc);

                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                memory_try_enable_merging(vaddr, length);
                qemu_ram_setup_dump(vaddr, length);
            }
        }
    }
}
#endif /* !_WIN32 */
/* Return a host pointer to ram allocated with qemu_ram_alloc.
 * This should not be used for general purpose DMA.  Use address_space_map
 * or address_space_rw instead. For local memory (e.g. video ram) that the
 * device owns, use memory_region_get_ram_ptr.
 *
 * Called within RCU critical section.
 */
void *qemu_map_ram_ptr(RAMBlock *ram_block, ram_addr_t addr)
{
    RAMBlock *block = ram_block;

    if (block == NULL) {
        block = qemu_get_ram_block(addr);
        addr -= block->offset;
    }

    if (xen_enabled() && block->host == NULL) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map until the end of the page.
         */
        if (block->offset == 0) {
            return xen_map_cache(addr, 0, 0, false);
        }

        block->host = xen_map_cache(block->offset, block->max_length, 1, false);
    }
    return ramblock_ptr(block, addr);
}
/* Return a host pointer to guest's ram. Similar to qemu_map_ram_ptr
 * but takes a size argument.
 *
 * Called within RCU critical section.
 */
static void *qemu_ram_ptr_length(RAMBlock *ram_block, ram_addr_t addr,
                                 hwaddr *size, bool lock)
{
    RAMBlock *block = ram_block;
    if (*size == 0) {
        return NULL;
    }

    if (block == NULL) {
        block = qemu_get_ram_block(addr);
        addr -= block->offset;
    }
    *size = MIN(*size, block->max_length - addr);

    if (xen_enabled() && block->host == NULL) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map the requested area.
         */
        if (block->offset == 0) {
            return xen_map_cache(addr, *size, lock, lock);
        }

        block->host = xen_map_cache(block->offset, block->max_length, 1, lock);
    }

    return ramblock_ptr(block, addr);
}
/*
 * Translates a host ptr back to a RAMBlock, a ram_addr and an offset
 * in that RAMBlock.
 *
 * ptr: Host pointer to look up
 * round_offset: If true round the result offset down to a page boundary
 * *ram_addr: set to result ram_addr
 * *offset: set to result offset within the RAMBlock
 *
 * Returns: RAMBlock (or NULL if not found)
 *
 * By the time this function returns, the returned pointer is not protected
 * by RCU anymore.  If the caller is not within an RCU critical section and
 * does not hold the iothread lock, it must have other means of protecting the
 * pointer, such as a reference to the region that includes the incoming
 * ram_addr_t.
 */
RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
                                   ram_addr_t *offset)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_enabled()) {
        ram_addr_t ram_addr;
        rcu_read_lock();
        ram_addr = xen_ram_addr_from_mapcache(ptr);
        block = qemu_get_ram_block(ram_addr);
        if (block) {
            *offset = ram_addr - block->offset;
        }
        rcu_read_unlock();
        return block;
    }

    rcu_read_lock();
    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && block->host && host - block->host < block->max_length) {
        goto found;
    }

    RAMBLOCK_FOREACH(block) {
        /* This case can happen when the block is not mapped. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->max_length) {
            goto found;
        }
    }

    rcu_read_unlock();
    return NULL;

found:
    *offset = (host - block->host);
    if (round_offset) {
        *offset &= TARGET_PAGE_MASK;
    }
    rcu_read_unlock();
    return block;
}
/*
 * Finds the named RAMBlock
 *
 * name: The name of RAMBlock to find
 *
 * Returns: RAMBlock (or NULL if not found)
 */
RAMBlock *qemu_ram_block_by_name(const char *name)
{
    RAMBlock *block;

    RAMBLOCK_FOREACH(block) {
        if (!strcmp(name, block->idstr)) {
            return block;
        }
    }

    return NULL;
}
/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
ram_addr_t qemu_ram_addr_from_host(void *ptr)
{
    RAMBlock *block;
    ram_addr_t offset;

    block = qemu_ram_block_from_host(ptr, false, &offset);
    if (!block) {
        return RAM_ADDR_INVALID;
    }

    return block->offset + offset;
}
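
/* MMIO callbacks backing io_mem_notdirty.  Writes to pages that contain
 * translated code are routed here so the overlapping TBs can be
 * invalidated before the page is marked dirty again.
 */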
/* Called within RCU critical section. */
static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
                               uint64_t val, unsigned size)
{
    bool locked = false;

    assert(tcg_enabled());
    if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
        locked = true;
        tb_lock();
        tb_invalidate_phys_page_fast(ram_addr, size);
    }
    switch (size) {
    case 1:
        stb_p(qemu_map_ram_ptr(NULL, ram_addr), val);
        break;
    case 2:
        stw_p(qemu_map_ram_ptr(NULL, ram_addr), val);
        break;
    case 4:
        stl_p(qemu_map_ram_ptr(NULL, ram_addr), val);
        break;
    default:
        abort();
    }

    if (locked) {
        tb_unlock();
    }

    /* Set both VGA and migration bits for simplicity and to remove
     * the notdirty callback faster.
     */
    cpu_physical_memory_set_dirty_range(ram_addr, size,
                                        DIRTY_CLIENTS_NOCODE);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (!cpu_physical_memory_is_clean(ram_addr)) {
        tlb_set_dirty(current_cpu, current_cpu->mem_io_vaddr);
    }
}
static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
                                 unsigned size, bool is_write)
{
    return is_write;
}

static const MemoryRegionOps notdirty_mem_ops = {
    .write = notdirty_mem_write,
    .valid.accepts = notdirty_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
{
    CPUState *cpu = current_cpu;
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUArchState *env = cpu->env_ptr;
    target_ulong pc, cs_base;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    uint32_t cpu_flags;

    assert(tcg_enabled());
    if (cpu->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    vaddr = cc->adjust_watchpoint_address(cpu, vaddr, len);
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, len)
            && (wp->flags & flags)) {
            if (flags == BP_MEM_READ) {
                wp->flags |= BP_WATCHPOINT_HIT_READ;
            } else {
                wp->flags |= BP_WATCHPOINT_HIT_WRITE;
            }
            wp->hitaddr = vaddr;
            wp->hitattrs = attrs;
            if (!cpu->watchpoint_hit) {
                if (wp->flags & BP_CPU &&
                    !cc->debug_check_watchpoint(cpu, wp)) {
                    wp->flags &= ~BP_WATCHPOINT_HIT;
                    continue;
                }
                cpu->watchpoint_hit = wp;

                /* Both tb_lock and iothread_mutex will be reset when
                 * cpu_loop_exit or cpu_loop_exit_noexc longjmp
                 * back into the cpu_exec main loop.
                 */
                tb_lock();
                tb_check_watchpoint(cpu);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    cpu->exception_index = EXCP_DEBUG;
                    cpu_loop_exit(cpu);
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
                    cpu_loop_exit_noexc(cpu);
                }
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata,
                                  unsigned size, MemTxAttrs attrs)
{
    MemTxResult res;
    uint64_t data;
    int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
    AddressSpace *as = current_cpu->cpu_ases[asidx].as;

    check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ);
    switch (size) {
    case 1:
        data = address_space_ldub(as, addr, attrs, &res);
        break;
    case 2:
        data = address_space_lduw(as, addr, attrs, &res);
        break;
    case 4:
        data = address_space_ldl(as, addr, attrs, &res);
        break;
    default: abort();
    }
    *pdata = data;
    return res;
}

static MemTxResult watch_mem_write(void *opaque, hwaddr addr,
                                   uint64_t val, unsigned size,
                                   MemTxAttrs attrs)
{
    MemTxResult res;
    int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
    AddressSpace *as = current_cpu->cpu_ases[asidx].as;

    check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE);
    switch (size) {
    case 1:
        address_space_stb(as, addr, val, attrs, &res);
        break;
    case 2:
        address_space_stw(as, addr, val, attrs, &res);
        break;
    case 4:
        address_space_stl(as, addr, val, attrs, &res);
        break;
    default: abort();
    }
    return res;
}

static const MemoryRegionOps watch_mem_ops = {
    .read_with_attrs = watch_mem_read,
    .write_with_attrs = watch_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
                                unsigned len, MemTxAttrs attrs)
{
    subpage_t *subpage = opaque;
    uint8_t buf[8];
    MemTxResult res;

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
           subpage, len, addr);
#endif
    res = address_space_read(subpage->as, addr + subpage->base,
                             attrs, buf, len);
    if (res) {
        return res;
    }
    switch (len) {
    case 1:
        *data = ldub_p(buf);
        return MEMTX_OK;
    case 2:
        *data = lduw_p(buf);
        return MEMTX_OK;
    case 4:
        *data = ldl_p(buf);
        return MEMTX_OK;
    case 8:
        *data = ldq_p(buf);
        return MEMTX_OK;
    default:
        abort();
    }
}

static MemTxResult subpage_write(void *opaque, hwaddr addr,
                                 uint64_t value, unsigned len, MemTxAttrs attrs)
{
    subpage_t *subpage = opaque;
    uint8_t buf[8];

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx
           " value %"PRIx64"\n",
           __func__, subpage, len, addr, value);
#endif
    switch (len) {
    case 1:
        stb_p(buf, value);
        break;
    case 2:
        stw_p(buf, value);
        break;
    case 4:
        stl_p(buf, value);
        break;
    case 8:
        stq_p(buf, value);
        break;
    default:
        abort();
    }
    return address_space_write(subpage->as, addr + subpage->base,
                               attrs, buf, len);
}

static bool subpage_accepts(void *opaque, hwaddr addr,
                            unsigned len, bool is_write)
{
    subpage_t *subpage = opaque;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
           __func__, subpage, is_write ? 'w' : 'r', len, addr);
#endif

    return address_space_access_valid(subpage->as, addr + subpage->base,
                                      len, is_write);
}

static const MemoryRegionOps subpage_ops = {
    .read_with_attrs = subpage_read,
    .write_with_attrs = subpage_write,
    .impl.min_access_size = 1,
    .impl.max_access_size = 8,
    .valid.min_access_size = 1,
    .valid.max_access_size = 8,
    .valid.accepts = subpage_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
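
/* Route byte range [start, end] of a subpage to the given section index.
 * Both offsets are within one target page; returns -1 if they are not.
 */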
static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE) {
        return -1;
    }
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
           __func__, mmio, start, end, idx, eidx, section);
#endif
    for (; idx <= eidx; idx++) {
        mmio->sub_section[idx] = section;
    }

    return 0;
}
static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
{
    subpage_t *mmio;

    mmio = g_malloc0(sizeof(subpage_t) + TARGET_PAGE_SIZE * sizeof(uint16_t));
    mmio->as = as;
    mmio->base = base;
    memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
                          NULL, TARGET_PAGE_SIZE);
    mmio->iomem.subpage = true;
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
           mmio, base, TARGET_PAGE_SIZE);
#endif
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);

    return mmio;
}
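
/* Add a section covering the whole address space for one of the fixed
 * phys sections (unassigned, notdirty, rom, watch) and return its index.
 */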
static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
                              MemoryRegion *mr)
{
    assert(as);
    MemoryRegionSection section = {
        .address_space = as,
        .mr = mr,
        .offset_within_address_space = 0,
        .offset_within_region = 0,
        .size = int128_2_64(),
    };

    return phys_section_add(map, &section);
}
MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index, MemTxAttrs attrs)
{
    int asidx = cpu_asidx_from_attrs(cpu, attrs);
    CPUAddressSpace *cpuas = &cpu->cpu_ases[asidx];
    AddressSpaceDispatch *d = atomic_rcu_read(&cpuas->memory_dispatch);
    MemoryRegionSection *sections = d->map.sections;

    return sections[index & ~TARGET_PAGE_MASK].mr;
}
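
/* Create the internal MemoryRegions that back the fixed phys sections. */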
static void io_mem_init(void)
{
    memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
    memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
                          NULL, UINT64_MAX);

    /* io_mem_notdirty calls tb_invalidate_phys_page_fast,
     * which can be called without the iothread mutex.
     */
    memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
                          NULL, UINT64_MAX);
    memory_region_clear_global_locking(&io_mem_notdirty);

    memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
                          NULL, UINT64_MAX);
}
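
/* Start of a memory topology update: build an empty dispatch structure.
 * The dummy sections must be registered in this exact order so that their
 * indices match the PHYS_SECTION_* constants, as the asserts verify.
 */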
static void mem_begin(MemoryListener *listener)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
    uint16_t n;

    n = dummy_section(&d->map, as, &io_mem_unassigned);
    assert(n == PHYS_SECTION_UNASSIGNED);
    n = dummy_section(&d->map, as, &io_mem_notdirty);
    assert(n == PHYS_SECTION_NOTDIRTY);
    n = dummy_section(&d->map, as, &io_mem_rom);
    assert(n == PHYS_SECTION_ROM);
    n = dummy_section(&d->map, as, &io_mem_watch);
    assert(n == PHYS_SECTION_WATCH);

    d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
    d->as = as;
    as->next_dispatch = d;
}
static void address_space_dispatch_free(AddressSpaceDispatch *d)
{
    phys_sections_free(&d->map);
    g_free(d);
}
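
/* End of a memory topology update: publish the new dispatch structure
 * and reclaim the old one after an RCU grace period.
 */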
static void mem_commit(MemoryListener *listener)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *cur = as->dispatch;
    AddressSpaceDispatch *next = as->next_dispatch;

    phys_page_compact_all(next, next->map.nodes_nb);

    atomic_rcu_set(&as->dispatch, next);
    if (cur) {
        call_rcu(cur, address_space_dispatch_free, rcu);
    }
}
static void tcg_commit(MemoryListener *listener)
{
    CPUAddressSpace *cpuas;
    AddressSpaceDispatch *d;

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener);
    cpu_reloading_memory_map();
    /* The CPU and TLB are protected by the iothread lock.
     * We reload the dispatch pointer now because cpu_reloading_memory_map()
     * may have split the RCU critical section.
     */
    d = atomic_rcu_read(&cpuas->as->dispatch);
    atomic_rcu_set(&cpuas->memory_dispatch, d);
    tlb_flush(cpuas->cpu);
}
void address_space_init_dispatch(AddressSpace *as)
{
    as->dispatch = NULL;
    as->dispatch_listener = (MemoryListener) {
        .begin = mem_begin,
        .commit = mem_commit,
        .region_add = mem_add,
        .region_nop = mem_add,
        .priority = 0,
    };
    memory_listener_register(&as->dispatch_listener, as);
}

void address_space_unregister(AddressSpace *as)
{
    memory_listener_unregister(&as->dispatch_listener);
}

void address_space_destroy_dispatch(AddressSpace *as)
{
    AddressSpaceDispatch *d = as->dispatch;

    atomic_rcu_set(&as->dispatch, NULL);
    if (d) {
        call_rcu(d, address_space_dispatch_free, rcu);
    }
}
static void memory_map_init(void)
{
    system_memory = g_malloc(sizeof(*system_memory));

    memory_region_init(system_memory, NULL, "system", UINT64_MAX);
    address_space_init(&address_space_memory, system_memory, "memory");

    system_io = g_malloc(sizeof(*system_io));
    memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
                          65536);
    address_space_init(&address_space_io, system_io, "I/O");
}

MemoryRegion *get_system_memory(void)
{
    return system_memory;
}

MemoryRegion *get_system_io(void)
{
    return system_io;
}

#endif /* !defined(CONFIG_USER_ONLY) */
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len) {
            l = len;
        }
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID)) {
            return -1;
        }
        if (is_write) {
            if (!(flags & PAGE_WRITE)) {
                return -1;
            }
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0))) {
                return -1;
            }
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ)) {
                return -1;
            }
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1))) {
                return -1;
            }
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

#else
static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
                                     hwaddr length)
{
    uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
    addr += memory_region_get_ram_addr(mr);

    /* No early return if dirty_log_mask is or becomes 0, because
     * cpu_physical_memory_set_dirty_range will still call
     * xen_modified_memory.
     */
    if (dirty_log_mask) {
        dirty_log_mask =
            cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
    }
    if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
        assert(tcg_enabled());
        tb_lock();
        tb_invalidate_phys_range(addr, addr + length);
        tb_unlock();
        dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
    }
    cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
}
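
/* Clamp an access of l bytes at addr to what the region supports: its
 * declared maximum access size, the alignment of addr (unless the region
 * handles unaligned accesses), and finally a power of two.
 */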
static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
{
    unsigned access_size_max = mr->ops->valid.max_access_size;

    /* Regions are assumed to support 1-4 byte accesses unless
       otherwise specified.  */
    if (access_size_max == 0) {
        access_size_max = 4;
    }

    /* Bound the maximum access by the alignment of the address.  */
    if (!mr->ops->impl.unaligned) {
        unsigned align_size_max = addr & -addr;
        if (align_size_max != 0 && align_size_max < access_size_max) {
            access_size_max = align_size_max;
        }
    }

    /* Don't attempt accesses larger than the maximum.  */
    if (l > access_size_max) {
        l = access_size_max;
    }
    l = pow2floor(l);

    return l;
}
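
/* Take the iothread lock if the region requires it and we do not already
 * hold it; returns true iff the caller must release the lock afterwards.
 */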
static bool prepare_mmio_access(MemoryRegion *mr)
{
    bool unlocked = !qemu_mutex_iothread_locked();
    bool release_lock = false;

    if (unlocked && mr->global_locking) {
        qemu_mutex_lock_iothread();
        unlocked = false;
        release_lock = true;
    }
    if (mr->flush_coalesced_mmio) {
        if (unlocked) {
            qemu_mutex_lock_iothread();
        }
        qemu_flush_coalesced_mmio_buffer();
        if (unlocked) {
            qemu_mutex_unlock_iothread();
        }
    }

    return release_lock;
}
/* Called within RCU critical section.  */
static MemTxResult address_space_write_continue(AddressSpace *as, hwaddr addr,
                                                MemTxAttrs attrs,
                                                const uint8_t *buf,
                                                int len, hwaddr addr1,
                                                hwaddr l, MemoryRegion *mr)
{
    uint8_t *ptr;
    uint64_t val;
    MemTxResult result = MEMTX_OK;
    bool release_lock = false;

    for (;;) {
        if (!memory_access_is_direct(mr, true)) {
            release_lock |= prepare_mmio_access(mr);
            l = memory_access_size(mr, l, addr1);
            /* XXX: could force current_cpu to NULL to avoid
               potential bugs */
            switch (l) {
            case 8:
                /* 64 bit write access */
                val = ldq_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 8,
                                                       attrs);
                break;
            case 4:
                /* 32 bit write access */
                val = (uint32_t)ldl_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 4,
                                                       attrs);
                break;
            case 2:
                /* 16 bit write access */
                val = lduw_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 2,
                                                       attrs);
                break;
            case 1:
                /* 8 bit write access */
                val = ldub_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 1,
                                                       attrs);
                break;
            default:
                abort();
            }
        } else {
            /* RAM case */
            ptr = qemu_ram_ptr_length(mr->ram_block, addr1, &l, false);
            memcpy(ptr, buf, l);
            invalidate_and_set_dirty(mr, addr1, l);
        }

        if (release_lock) {
            qemu_mutex_unlock_iothread();
            release_lock = false;
        }

        len -= l;
        buf += l;
        addr += l;

        if (!len) {
            break;
        }

        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);
    }

    return result;
}
MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
                                const uint8_t *buf, int len)
{
    hwaddr l;
    hwaddr addr1;
    MemoryRegion *mr;
    MemTxResult result = MEMTX_OK;

    if (len > 0) {
        rcu_read_lock();
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);
        result = address_space_write_continue(as, addr, attrs, buf, len,
                                              addr1, l, mr);
        rcu_read_unlock();
    }

    return result;
}
/* Called within RCU critical section.  */
MemTxResult address_space_read_continue(AddressSpace *as, hwaddr addr,
                                        MemTxAttrs attrs, uint8_t *buf,
                                        int len, hwaddr addr1, hwaddr l,
                                        MemoryRegion *mr)
{
    uint8_t *ptr;
    uint64_t val;
    MemTxResult result = MEMTX_OK;
    bool release_lock = false;

    for (;;) {
        if (!memory_access_is_direct(mr, false)) {
            /* I/O case */
            release_lock |= prepare_mmio_access(mr);
            l = memory_access_size(mr, l, addr1);
            switch (l) {
            case 8:
                /* 64 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 8,
                                                      attrs);
                stq_p(buf, val);
                break;
            case 4:
                /* 32 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 4,
                                                      attrs);
                stl_p(buf, val);
                break;
            case 2:
                /* 16 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 2,
                                                      attrs);
                stw_p(buf, val);
                break;
            case 1:
                /* 8 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 1,
                                                      attrs);
                stb_p(buf, val);
                break;
            default:
                abort();
            }
        } else {
            /* RAM case */
            ptr = qemu_ram_ptr_length(mr->ram_block, addr1, &l, false);
            memcpy(buf, ptr, l);
        }

        if (release_lock) {
            qemu_mutex_unlock_iothread();
            release_lock = false;
        }

        len -= l;
        buf += l;
        addr += l;

        if (!len) {
            break;
        }

        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, false);
    }

    return result;
}
MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
                                    MemTxAttrs attrs, uint8_t *buf, int len)
{
    hwaddr l;
    hwaddr addr1;
    MemoryRegion *mr;
    MemTxResult result = MEMTX_OK;

    if (len > 0) {
        rcu_read_lock();
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, false);
        result = address_space_read_continue(as, addr, attrs, buf, len,
                                             addr1, l, mr);
        rcu_read_unlock();
    }

    return result;
}

MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
                             uint8_t *buf, int len, bool is_write)
{
    if (is_write) {
        return address_space_write(as, addr, attrs, (uint8_t *)buf, len);
    } else {
        return address_space_read(as, addr, attrs, (uint8_t *)buf, len);
    }
}

void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
                            int len, int is_write)
{
    address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
                     buf, len, is_write);
}
enum write_rom_type {
    WRITE_DATA,
    FLUSH_CACHE,
};

static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
    hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
{
    hwaddr l;
    uint8_t *ptr;
    hwaddr addr1;
    MemoryRegion *mr;

    rcu_read_lock();
    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);

        if (!(memory_region_is_ram(mr) ||
              memory_region_is_romd(mr))) {
            l = memory_access_size(mr, l, addr1);
        } else {
            /* ROM/RAM case */
            ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
            switch (type) {
            case WRITE_DATA:
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(mr, addr1, l);
                break;
            case FLUSH_CACHE:
                flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
                break;
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
    rcu_read_unlock();
}
/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
                                   const uint8_t *buf, int len)
{
    cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
}

void cpu_flush_icache_range(hwaddr start, int len)
{
    /*
     * This function should do the same thing as an icache flush that was
     * triggered from within the guest. For TCG we are always cache coherent,
     * so there is no need to flush anything. For KVM / Xen we need to flush
     * the host's instruction cache at least.
     */
    if (tcg_enabled()) {
        return;
    }

    cpu_physical_memory_write_rom_internal(&address_space_memory,
                                           start, NULL, len, FLUSH_CACHE);
}
typedef struct {
    MemoryRegion *mr;
    void *buffer;
    hwaddr addr;
    hwaddr len;
    bool in_use;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    QEMUBH *bh;
    QLIST_ENTRY(MapClient) link;
} MapClient;

QemuMutex map_client_list_lock;
static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);
static void cpu_unregister_map_client_do(MapClient *client)
{
    QLIST_REMOVE(client, link);
    g_free(client);
}

static void cpu_notify_map_clients_locked(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        qemu_bh_schedule(client->bh);
        cpu_unregister_map_client_do(client);
    }
}
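
/* Register a bottom half to be scheduled once the bounce buffer is free
 * again, i.e. when a failed address_space_map() is worth retrying.
 */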
void cpu_register_map_client(QEMUBH *bh)
{
    MapClient *client = g_malloc(sizeof(*client));

    qemu_mutex_lock(&map_client_list_lock);
    client->bh = bh;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    if (!atomic_read(&bounce.in_use)) {
        cpu_notify_map_clients_locked();
    }
    qemu_mutex_unlock(&map_client_list_lock);
}
void cpu_exec_init_all(void)
{
    qemu_mutex_init(&ram_list.mutex);
    /* The data structures we set up here depend on knowing the page size,
     * so no more changes can be made after this point.
     * In an ideal world, nothing we did before we had finished the
     * machine setup would care about the target page size, and we could
     * do this much later, rather than requiring board models to state
     * up front what their requirements are.
     */
    finalize_target_page_bits();
    io_mem_init();
    memory_map_init();
    qemu_mutex_init(&map_client_list_lock);
}
void cpu_unregister_map_client(QEMUBH *bh)
{
    MapClient *client;

    qemu_mutex_lock(&map_client_list_lock);
    QLIST_FOREACH(client, &map_client_list, link) {
        if (client->bh == bh) {
            cpu_unregister_map_client_do(client);
            break;
        }
    }
    qemu_mutex_unlock(&map_client_list_lock);
}

static void cpu_notify_map_clients(void)
{
    qemu_mutex_lock(&map_client_list_lock);
    cpu_notify_map_clients_locked();
    qemu_mutex_unlock(&map_client_list_lock);
}
bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
{
    MemoryRegion *mr;
    hwaddr l, xlat;

    rcu_read_lock();
    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (!memory_access_is_direct(mr, is_write)) {
            l = memory_access_size(mr, l, addr);
            if (!memory_region_access_valid(mr, xlat, l, is_write)) {
                rcu_read_unlock();
                return false;
            }
        }

        len -= l;
        addr += l;
    }
    rcu_read_unlock();
    return true;
}
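
/* Grow a translation to cover up to target_len bytes: keep translating
 * successive addresses for as long as they land in the same MemoryRegion
 * at contiguous offsets, and return the length actually covered.
 */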
static hwaddr
address_space_extend_translation(AddressSpace *as, hwaddr addr, hwaddr target_len,
                                 MemoryRegion *mr, hwaddr base, hwaddr len,
                                 bool is_write)
{
    hwaddr done = 0;
    hwaddr xlat;
    MemoryRegion *this_mr;

    for (;;) {
        target_len -= len;
        addr += len;
        done += len;
        if (target_len == 0) {
            return done;
        }

        len = target_len;
        this_mr = address_space_translate(as, addr, &xlat, &len, is_write);
        if (this_mr != mr || xlat != base + done) {
            return done;
        }
    }
}
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *address_space_map(AddressSpace *as,
                        hwaddr addr,
                        hwaddr *plen,
                        bool is_write)
{
    hwaddr len = *plen;
    hwaddr l, xlat;
    MemoryRegion *mr;
    void *ptr;

    if (len == 0) {
        return NULL;
    }

    l = len;
    rcu_read_lock();
    mr = address_space_translate(as, addr, &xlat, &l, is_write);

    if (!memory_access_is_direct(mr, is_write)) {
        if (atomic_xchg(&bounce.in_use, true)) {
            rcu_read_unlock();
            return NULL;
        }
        /* Avoid unbounded allocations */
        l = MIN(l, TARGET_PAGE_SIZE);
        bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
        bounce.addr = addr;
        bounce.len = l;

        memory_region_ref(mr);
        bounce.mr = mr;
        if (!is_write) {
            address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
                               bounce.buffer, l);
        }

        rcu_read_unlock();
        *plen = l;
        return bounce.buffer;
    }

    memory_region_ref(mr);
    *plen = address_space_extend_translation(as, addr, len, mr, xlat, l, is_write);
    ptr = qemu_ram_ptr_length(mr->ram_block, xlat, plen, true);
    rcu_read_unlock();

    return ptr;
}
/* Unmaps a memory region previously mapped by address_space_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         int is_write, hwaddr access_len)
{
    if (buffer != bounce.buffer) {
        MemoryRegion *mr;
        ram_addr_t addr1;

        mr = memory_region_from_host(buffer, &addr1);
        assert(mr != NULL);
        if (is_write) {
            invalidate_and_set_dirty(mr, addr1, access_len);
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        memory_region_unref(mr);
        return;
    }
    if (is_write) {
        address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
                            bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    memory_region_unref(bounce.mr);
    atomic_mb_set(&bounce.in_use, false);
    cpu_notify_map_clients();
}

void *cpu_physical_memory_map(hwaddr addr,
                              hwaddr *plen,
                              int is_write)
{
    return address_space_map(&address_space_memory, addr, plen, is_write);
}

void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                               int is_write, hwaddr access_len)
{
    return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
}
#define ARG1_DECL                AddressSpace *as
#define ARG1                     as
#define SUFFIX
#define TRANSLATE(...)           address_space_translate(as, __VA_ARGS__)
#define IS_DIRECT(mr, is_write)  memory_access_is_direct(mr, is_write)
#define MAP_RAM(mr, ofs)         qemu_map_ram_ptr((mr)->ram_block, ofs)
#define INVALIDATE(mr, ofs, len) invalidate_and_set_dirty(mr, ofs, len)
#define RCU_READ_LOCK(...)       rcu_read_lock()
#define RCU_READ_UNLOCK(...)     rcu_read_unlock()
#include "memory_ldst.inc.c"
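
/* The MemoryRegionCache functions below are the trivial implementation:
 * init just records the parameters and every _cached accessor
 * re-translates through the address space, so the cached variants behave
 * exactly like their uncached counterparts.
 */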
int64_t address_space_cache_init(MemoryRegionCache *cache,
                                 AddressSpace *as,
                                 hwaddr addr,
                                 hwaddr len,
                                 bool is_write)
{
    cache->len = len;
    cache->as = as;
    cache->xlat = addr;
    return len;
}

void address_space_cache_invalidate(MemoryRegionCache *cache,
                                    hwaddr addr,
                                    hwaddr access_len)
{
}

void address_space_cache_destroy(MemoryRegionCache *cache)
{
    cache->as = NULL;
}

#define ARG1_DECL                MemoryRegionCache *cache
#define ARG1                     cache
#define SUFFIX                   _cached
#define TRANSLATE(addr, ...)     \
    address_space_translate(cache->as, cache->xlat + (addr), __VA_ARGS__)
#define IS_DIRECT(mr, is_write)  true
#define MAP_RAM(mr, ofs)         qemu_map_ram_ptr((mr)->ram_block, ofs)
#define INVALIDATE(mr, ofs, len) invalidate_and_set_dirty(mr, ofs, len)
#define RCU_READ_LOCK()          rcu_read_lock()
#define RCU_READ_UNLOCK()        rcu_read_unlock()
#include "memory_ldst.inc.c"
/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

    cpu_synchronize_state(cpu);
    while (len > 0) {
        int asidx;
        MemTxAttrs attrs;

        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_attrs_debug(cpu, page, &attrs);
        asidx = cpu_asidx_from_attrs(cpu, attrs);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1) {
            return -1;
        }
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len) {
            l = len;
        }
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write) {
            cpu_physical_memory_write_rom(cpu->cpu_ases[asidx].as,
                                          phys_addr, buf, l);
        } else {
            address_space_rw(cpu->cpu_ases[asidx].as, phys_addr,
                             MEMTXATTRS_UNSPECIFIED,
                             buf, l, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
/*
 * Allows code that needs to deal with migration bitmaps etc to still be built
 * target independent.
 */
size_t qemu_target_page_size(void)
{
    return TARGET_PAGE_SIZE;
}

int qemu_target_page_bits(void)
{
    return TARGET_PAGE_BITS;
}

int qemu_target_page_bits_min(void)
{
    return TARGET_PAGE_BITS_MIN;
}
#endif
/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool target_words_bigendian(void);
bool target_words_bigendian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}
#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegion *mr;
    hwaddr l = 1;
    bool res;

    rcu_read_lock();
    mr = address_space_translate(&address_space_memory,
                                 phys_addr, &phys_addr, &l, false);

    res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
    rcu_read_unlock();
    return res;
}
int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
{
    RAMBlock *block;
    int ret = 0;

    rcu_read_lock();
    RAMBLOCK_FOREACH(block) {
        ret = func(block->idstr, block->host, block->offset,
                   block->used_length, opaque);
        if (ret) {
            break;
        }
    }
    rcu_read_unlock();
    return ret;
}
/*
 * Unmap pages of memory from start to start+length such that
 * they a) read as 0, b) trigger whatever fault mechanism
 * the OS provides for postcopy.
 * The pages must be unmapped by the end of the function.
 * Returns: 0 on success, non-0 on failure
 */
int ram_block_discard_range(RAMBlock *rb, uint64_t start, size_t length)
{
    int ret = -1;

    uint8_t *host_startaddr = rb->host + start;

    if ((uintptr_t)host_startaddr & (rb->page_size - 1)) {
        error_report("ram_block_discard_range: Unaligned start address: %p",
                     host_startaddr);
        goto err;
    }

    if ((start + length) <= rb->used_length) {
        uint8_t *host_endaddr = host_startaddr + length;
        if ((uintptr_t)host_endaddr & (rb->page_size - 1)) {
            error_report("ram_block_discard_range: Unaligned end address: %p",
                         host_endaddr);
            goto err;
        }

        errno = ENOTSUP; /* If we are missing MADVISE etc */

        if (rb->page_size == qemu_host_page_size) {
#if defined(CONFIG_MADVISE)
            /* Note: We need the madvise MADV_DONTNEED behaviour of definitely
             * freeing the page.
             */
            ret = madvise(host_startaddr, length, MADV_DONTNEED);
#endif
        } else {
            /* Huge page case - unfortunately it can't do DONTNEED, but
             * it can do the equivalent by FALLOC_FL_PUNCH_HOLE in the
             * huge page file.
             */
#ifdef CONFIG_FALLOCATE_PUNCH_HOLE
            ret = fallocate(rb->fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
                            start, length);
#endif
        }
        if (ret) {
            ret = -errno;
            error_report("ram_block_discard_range: Failed to discard range "
                         "%s:%" PRIx64 " +%zx (%d)",
                         rb->idstr, start, length, ret);
        }
    } else {
        error_report("ram_block_discard_range: Overrun block '%s' (%" PRIu64
                     "/%zx/" RAM_ADDR_FMT")",
                     rb->idstr, start, length, rb->used_length);
    }

err:
    return ret;
}

#endif
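
/* Initialise the host page size globals.  Must run before anything that
 * relies on qemu_host_page_size or the masks derived from it.
 */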
void page_size_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
    qemu_real_host_page_size = getpagesize();
    qemu_real_host_page_mask = -(intptr_t)qemu_real_host_page_size;
    if (qemu_host_page_size == 0) {
        qemu_host_page_size = qemu_real_host_page_size;
    }
    if (qemu_host_page_size < TARGET_PAGE_SIZE) {
        qemu_host_page_size = TARGET_PAGE_SIZE;
    }
    qemu_host_page_mask = -(intptr_t)qemu_host_page_size;
}