crypto: fix nettle config check for running pbkdf test
[qemu/kevin.git] / exec.c
blobf46e5968189387a8bcd35d0d2751149eaa1762e2
1 /*
2 * Virtual page mapping
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 #include "qemu/osdep.h"
20 #include "qapi/error.h"
21 #ifndef _WIN32
22 #include <sys/mman.h>
23 #endif
25 #include "qemu/cutils.h"
26 #include "cpu.h"
27 #include "tcg.h"
28 #include "hw/hw.h"
29 #if !defined(CONFIG_USER_ONLY)
30 #include "hw/boards.h"
31 #endif
32 #include "hw/qdev.h"
33 #include "sysemu/kvm.h"
34 #include "sysemu/sysemu.h"
35 #include "hw/xen/xen.h"
36 #include "qemu/timer.h"
37 #include "qemu/config-file.h"
38 #include "qemu/error-report.h"
39 #include "exec/memory.h"
40 #include "sysemu/dma.h"
41 #include "exec/address-spaces.h"
42 #if defined(CONFIG_USER_ONLY)
43 #include <qemu.h>
44 #else /* !CONFIG_USER_ONLY */
45 #include "sysemu/xen-mapcache.h"
46 #include "trace.h"
47 #endif
48 #include "exec/cpu-all.h"
49 #include "qemu/rcu_queue.h"
50 #include "qemu/main-loop.h"
51 #include "translate-all.h"
52 #include "sysemu/replay.h"
54 #include "exec/memory-internal.h"
55 #include "exec/ram_addr.h"
56 #include "exec/log.h"
58 #include "qemu/range.h"
59 #ifndef _WIN32
60 #include "qemu/mmap-alloc.h"
61 #endif
63 //#define DEBUG_SUBPAGE
65 #if !defined(CONFIG_USER_ONLY)
66 /* ram_list is read under rcu_read_lock()/rcu_read_unlock(). Writes
67 * are protected by the ramlist lock.
69 RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };
71 static MemoryRegion *system_memory;
72 static MemoryRegion *system_io;
74 AddressSpace address_space_io;
75 AddressSpace address_space_memory;
77 MemoryRegion io_mem_rom, io_mem_notdirty;
78 static MemoryRegion io_mem_unassigned;
80 /* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
81 #define RAM_PREALLOC (1 << 0)
83 /* RAM is mmap-ed with MAP_SHARED */
84 #define RAM_SHARED (1 << 1)
86 /* Only a portion of RAM (used_length) is actually used, and migrated.
87 * This used_length size can change across reboots.
89 #define RAM_RESIZEABLE (1 << 2)
91 #endif
93 struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
94 /* current CPU in the current thread. It is only valid inside
95 cpu_exec() */
96 __thread CPUState *current_cpu;
97 /* 0 = Do not count executed instructions.
98 1 = Precise instruction counting.
99 2 = Adaptive rate instruction counting. */
100 int use_icount;
102 #if !defined(CONFIG_USER_ONLY)
104 typedef struct PhysPageEntry PhysPageEntry;
106 struct PhysPageEntry {
107 /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */
108 uint32_t skip : 6;
109 /* index into phys_sections (!skip) or phys_map_nodes (skip) */
110 uint32_t ptr : 26;
113 #define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)
115 /* Size of the L2 (and L3, etc) page tables. */
116 #define ADDR_SPACE_BITS 64
118 #define P_L2_BITS 9
119 #define P_L2_SIZE (1 << P_L2_BITS)
121 #define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
123 typedef PhysPageEntry Node[P_L2_SIZE];
125 typedef struct PhysPageMap {
126 struct rcu_head rcu;
128 unsigned sections_nb;
129 unsigned sections_nb_alloc;
130 unsigned nodes_nb;
131 unsigned nodes_nb_alloc;
132 Node *nodes;
133 MemoryRegionSection *sections;
134 } PhysPageMap;
136 struct AddressSpaceDispatch {
137 struct rcu_head rcu;
139 MemoryRegionSection *mru_section;
140 /* This is a multi-level map on the physical address space.
141 * The bottom level has pointers to MemoryRegionSections.
143 PhysPageEntry phys_map;
144 PhysPageMap map;
145 AddressSpace *as;
148 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
149 typedef struct subpage_t {
150 MemoryRegion iomem;
151 AddressSpace *as;
152 hwaddr base;
153 uint16_t sub_section[TARGET_PAGE_SIZE];
154 } subpage_t;
156 #define PHYS_SECTION_UNASSIGNED 0
157 #define PHYS_SECTION_NOTDIRTY 1
158 #define PHYS_SECTION_ROM 2
159 #define PHYS_SECTION_WATCH 3
161 static void io_mem_init(void);
162 static void memory_map_init(void);
163 static void tcg_commit(MemoryListener *listener);
165 static MemoryRegion io_mem_watch;
168 * CPUAddressSpace: all the information a CPU needs about an AddressSpace
169 * @cpu: the CPU whose AddressSpace this is
170 * @as: the AddressSpace itself
171 * @memory_dispatch: its dispatch pointer (cached, RCU protected)
172 * @tcg_as_listener: listener for tracking changes to the AddressSpace
174 struct CPUAddressSpace {
175 CPUState *cpu;
176 AddressSpace *as;
177 struct AddressSpaceDispatch *memory_dispatch;
178 MemoryListener tcg_as_listener;
181 #endif
183 #if !defined(CONFIG_USER_ONLY)
185 static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
187 if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
188 map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
189 map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
190 map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
194 static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
196 unsigned i;
197 uint32_t ret;
198 PhysPageEntry e;
199 PhysPageEntry *p;
201 ret = map->nodes_nb++;
202 p = map->nodes[ret];
203 assert(ret != PHYS_MAP_NODE_NIL);
204 assert(ret != map->nodes_nb_alloc);
206 e.skip = leaf ? 0 : 1;
207 e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
208 for (i = 0; i < P_L2_SIZE; ++i) {
209 memcpy(&p[i], &e, sizeof(e));
211 return ret;
214 static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
215 hwaddr *index, hwaddr *nb, uint16_t leaf,
216 int level)
218 PhysPageEntry *p;
219 hwaddr step = (hwaddr)1 << (level * P_L2_BITS);
221 if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
222 lp->ptr = phys_map_node_alloc(map, level == 0);
224 p = map->nodes[lp->ptr];
225 lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];
227 while (*nb && lp < &p[P_L2_SIZE]) {
228 if ((*index & (step - 1)) == 0 && *nb >= step) {
229 lp->skip = 0;
230 lp->ptr = leaf;
231 *index += step;
232 *nb -= step;
233 } else {
234 phys_page_set_level(map, lp, index, nb, leaf, level - 1);
236 ++lp;
240 static void phys_page_set(AddressSpaceDispatch *d,
241 hwaddr index, hwaddr nb,
242 uint16_t leaf)
244 /* Wildly overreserve - it doesn't matter much. */
245 phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);
247 phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
250 /* Compact a non leaf page entry. Simply detect that the entry has a single child,
251 * and update our entry so we can skip it and go directly to the destination.
253 static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
255 unsigned valid_ptr = P_L2_SIZE;
256 int valid = 0;
257 PhysPageEntry *p;
258 int i;
260 if (lp->ptr == PHYS_MAP_NODE_NIL) {
261 return;
264 p = nodes[lp->ptr];
265 for (i = 0; i < P_L2_SIZE; i++) {
266 if (p[i].ptr == PHYS_MAP_NODE_NIL) {
267 continue;
270 valid_ptr = i;
271 valid++;
272 if (p[i].skip) {
273 phys_page_compact(&p[i], nodes, compacted);
277 /* We can only compress if there's only one child. */
278 if (valid != 1) {
279 return;
282 assert(valid_ptr < P_L2_SIZE);
284 /* Don't compress if it won't fit in the # of bits we have. */
285 if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
286 return;
289 lp->ptr = p[valid_ptr].ptr;
290 if (!p[valid_ptr].skip) {
291 /* If our only child is a leaf, make this a leaf. */
292 /* By design, we should have made this node a leaf to begin with so we
293 * should never reach here.
294 * But since it's so simple to handle this, let's do it just in case we
295 * change this rule.
297 lp->skip = 0;
298 } else {
299 lp->skip += p[valid_ptr].skip;
303 static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
305 DECLARE_BITMAP(compacted, nodes_nb);
307 if (d->phys_map.skip) {
308 phys_page_compact(&d->phys_map, d->map.nodes, compacted);
312 static inline bool section_covers_addr(const MemoryRegionSection *section,
313 hwaddr addr)
315 /* Memory topology clips a memory region to [0, 2^64); size.hi > 0 means
316 * the section must cover the entire address space.
318 return section->size.hi ||
319 range_covers_byte(section->offset_within_address_space,
320 section->size.lo, addr);
323 static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
324 Node *nodes, MemoryRegionSection *sections)
326 PhysPageEntry *p;
327 hwaddr index = addr >> TARGET_PAGE_BITS;
328 int i;
330 for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
331 if (lp.ptr == PHYS_MAP_NODE_NIL) {
332 return &sections[PHYS_SECTION_UNASSIGNED];
334 p = nodes[lp.ptr];
335 lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
338 if (section_covers_addr(&sections[lp.ptr], addr)) {
339 return &sections[lp.ptr];
340 } else {
341 return &sections[PHYS_SECTION_UNASSIGNED];
345 bool memory_region_is_unassigned(MemoryRegion *mr)
347 return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
348 && mr != &io_mem_watch;
351 /* Called from RCU critical section */
352 static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
353 hwaddr addr,
354 bool resolve_subpage)
356 MemoryRegionSection *section = atomic_read(&d->mru_section);
357 subpage_t *subpage;
358 bool update;
360 if (section && section != &d->map.sections[PHYS_SECTION_UNASSIGNED] &&
361 section_covers_addr(section, addr)) {
362 update = false;
363 } else {
364 section = phys_page_find(d->phys_map, addr, d->map.nodes,
365 d->map.sections);
366 update = true;
368 if (resolve_subpage && section->mr->subpage) {
369 subpage = container_of(section->mr, subpage_t, iomem);
370 section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
372 if (update) {
373 atomic_set(&d->mru_section, section);
375 return section;
378 /* Called from RCU critical section */
379 static MemoryRegionSection *
380 address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
381 hwaddr *plen, bool resolve_subpage)
383 MemoryRegionSection *section;
384 MemoryRegion *mr;
385 Int128 diff;
387 section = address_space_lookup_region(d, addr, resolve_subpage);
388 /* Compute offset within MemoryRegionSection */
389 addr -= section->offset_within_address_space;
391 /* Compute offset within MemoryRegion */
392 *xlat = addr + section->offset_within_region;
394 mr = section->mr;
396 /* MMIO registers can be expected to perform full-width accesses based only
397 * on their address, without considering adjacent registers that could
398 * decode to completely different MemoryRegions. When such registers
399 * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
400 * regions overlap wildly. For this reason we cannot clamp the accesses
401 * here.
403 * If the length is small (as is the case for address_space_ldl/stl),
404 * everything works fine. If the incoming length is large, however,
405 * the caller really has to do the clamping through memory_access_size.
407 if (memory_region_is_ram(mr)) {
408 diff = int128_sub(section->size, int128_make64(addr));
409 *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
411 return section;
414 /* Called from RCU critical section */
415 MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
416 hwaddr *xlat, hwaddr *plen,
417 bool is_write)
419 IOMMUTLBEntry iotlb;
420 MemoryRegionSection *section;
421 MemoryRegion *mr;
423 for (;;) {
424 AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
425 section = address_space_translate_internal(d, addr, &addr, plen, true);
426 mr = section->mr;
428 if (!mr->iommu_ops) {
429 break;
432 iotlb = mr->iommu_ops->translate(mr, addr, is_write);
433 addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
434 | (addr & iotlb.addr_mask));
435 *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
436 if (!(iotlb.perm & (1 << is_write))) {
437 mr = &io_mem_unassigned;
438 break;
441 as = iotlb.target_as;
444 if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
445 hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
446 *plen = MIN(page, *plen);
449 *xlat = addr;
450 return mr;
453 /* Called from RCU critical section */
454 MemoryRegionSection *
455 address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
456 hwaddr *xlat, hwaddr *plen)
458 MemoryRegionSection *section;
459 AddressSpaceDispatch *d = cpu->cpu_ases[asidx].memory_dispatch;
461 section = address_space_translate_internal(d, addr, xlat, plen, false);
463 assert(!section->mr->iommu_ops);
464 return section;
466 #endif
468 #if !defined(CONFIG_USER_ONLY)
470 static int cpu_common_post_load(void *opaque, int version_id)
472 CPUState *cpu = opaque;
474 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
475 version_id is increased. */
476 cpu->interrupt_request &= ~0x01;
477 tlb_flush(cpu, 1);
479 return 0;
482 static int cpu_common_pre_load(void *opaque)
484 CPUState *cpu = opaque;
486 cpu->exception_index = -1;
488 return 0;
491 static bool cpu_common_exception_index_needed(void *opaque)
493 CPUState *cpu = opaque;
495 return tcg_enabled() && cpu->exception_index != -1;
498 static const VMStateDescription vmstate_cpu_common_exception_index = {
499 .name = "cpu_common/exception_index",
500 .version_id = 1,
501 .minimum_version_id = 1,
502 .needed = cpu_common_exception_index_needed,
503 .fields = (VMStateField[]) {
504 VMSTATE_INT32(exception_index, CPUState),
505 VMSTATE_END_OF_LIST()
509 static bool cpu_common_crash_occurred_needed(void *opaque)
511 CPUState *cpu = opaque;
513 return cpu->crash_occurred;
516 static const VMStateDescription vmstate_cpu_common_crash_occurred = {
517 .name = "cpu_common/crash_occurred",
518 .version_id = 1,
519 .minimum_version_id = 1,
520 .needed = cpu_common_crash_occurred_needed,
521 .fields = (VMStateField[]) {
522 VMSTATE_BOOL(crash_occurred, CPUState),
523 VMSTATE_END_OF_LIST()
527 const VMStateDescription vmstate_cpu_common = {
528 .name = "cpu_common",
529 .version_id = 1,
530 .minimum_version_id = 1,
531 .pre_load = cpu_common_pre_load,
532 .post_load = cpu_common_post_load,
533 .fields = (VMStateField[]) {
534 VMSTATE_UINT32(halted, CPUState),
535 VMSTATE_UINT32(interrupt_request, CPUState),
536 VMSTATE_END_OF_LIST()
538 .subsections = (const VMStateDescription*[]) {
539 &vmstate_cpu_common_exception_index,
540 &vmstate_cpu_common_crash_occurred,
541 NULL
545 #endif
547 CPUState *qemu_get_cpu(int index)
549 CPUState *cpu;
551 CPU_FOREACH(cpu) {
552 if (cpu->cpu_index == index) {
553 return cpu;
557 return NULL;
560 #if !defined(CONFIG_USER_ONLY)
561 void cpu_address_space_init(CPUState *cpu, AddressSpace *as, int asidx)
563 CPUAddressSpace *newas;
565 /* Target code should have set num_ases before calling us */
566 assert(asidx < cpu->num_ases);
568 if (asidx == 0) {
569 /* address space 0 gets the convenience alias */
570 cpu->as = as;
573 /* KVM cannot currently support multiple address spaces. */
574 assert(asidx == 0 || !kvm_enabled());
576 if (!cpu->cpu_ases) {
577 cpu->cpu_ases = g_new0(CPUAddressSpace, cpu->num_ases);
580 newas = &cpu->cpu_ases[asidx];
581 newas->cpu = cpu;
582 newas->as = as;
583 if (tcg_enabled()) {
584 newas->tcg_as_listener.commit = tcg_commit;
585 memory_listener_register(&newas->tcg_as_listener, as);
589 AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx)
591 /* Return the AddressSpace corresponding to the specified index */
592 return cpu->cpu_ases[asidx].as;
594 #endif
596 #ifndef CONFIG_USER_ONLY
597 static DECLARE_BITMAP(cpu_index_map, MAX_CPUMASK_BITS);
599 static int cpu_get_free_index(Error **errp)
601 int cpu = find_first_zero_bit(cpu_index_map, MAX_CPUMASK_BITS);
603 if (cpu >= MAX_CPUMASK_BITS) {
604 error_setg(errp, "Trying to use more CPUs than max of %d",
605 MAX_CPUMASK_BITS);
606 return -1;
609 bitmap_set(cpu_index_map, cpu, 1);
610 return cpu;
613 void cpu_exec_exit(CPUState *cpu)
615 if (cpu->cpu_index == -1) {
616 /* cpu_index was never allocated by this @cpu or was already freed. */
617 return;
620 bitmap_clear(cpu_index_map, cpu->cpu_index, 1);
621 cpu->cpu_index = -1;
623 #else
625 static int cpu_get_free_index(Error **errp)
627 CPUState *some_cpu;
628 int cpu_index = 0;
630 CPU_FOREACH(some_cpu) {
631 cpu_index++;
633 return cpu_index;
636 void cpu_exec_exit(CPUState *cpu)
639 #endif
641 void cpu_exec_init(CPUState *cpu, Error **errp)
643 CPUClass *cc = CPU_GET_CLASS(cpu);
644 int cpu_index;
645 Error *local_err = NULL;
647 cpu->as = NULL;
648 cpu->num_ases = 0;
650 #ifndef CONFIG_USER_ONLY
651 cpu->thread_id = qemu_get_thread_id();
653 /* This is a softmmu CPU object, so create a property for it
654 * so users can wire up its memory. (This can't go in qom/cpu.c
655 * because that file is compiled only once for both user-mode
656 * and system builds.) The default if no link is set up is to use
657 * the system address space.
659 object_property_add_link(OBJECT(cpu), "memory", TYPE_MEMORY_REGION,
660 (Object **)&cpu->memory,
661 qdev_prop_allow_set_link_before_realize,
662 OBJ_PROP_LINK_UNREF_ON_RELEASE,
663 &error_abort);
664 cpu->memory = system_memory;
665 object_ref(OBJECT(cpu->memory));
666 #endif
668 #if defined(CONFIG_USER_ONLY)
669 cpu_list_lock();
670 #endif
671 cpu_index = cpu->cpu_index = cpu_get_free_index(&local_err);
672 if (local_err) {
673 error_propagate(errp, local_err);
674 #if defined(CONFIG_USER_ONLY)
675 cpu_list_unlock();
676 #endif
677 return;
679 QTAILQ_INSERT_TAIL(&cpus, cpu, node);
680 #if defined(CONFIG_USER_ONLY)
681 cpu_list_unlock();
682 #endif
683 if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
684 vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
686 if (cc->vmsd != NULL) {
687 vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
691 #if defined(CONFIG_USER_ONLY)
692 static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
694 tb_invalidate_phys_page_range(pc, pc + 1, 0);
696 #else
697 static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
699 MemTxAttrs attrs;
700 hwaddr phys = cpu_get_phys_page_attrs_debug(cpu, pc, &attrs);
701 int asidx = cpu_asidx_from_attrs(cpu, attrs);
702 if (phys != -1) {
703 tb_invalidate_phys_addr(cpu->cpu_ases[asidx].as,
704 phys | (pc & ~TARGET_PAGE_MASK));
707 #endif
709 #if defined(CONFIG_USER_ONLY)
710 void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
715 int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
716 int flags)
718 return -ENOSYS;
721 void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
725 int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
726 int flags, CPUWatchpoint **watchpoint)
728 return -ENOSYS;
730 #else
731 /* Add a watchpoint. */
732 int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
733 int flags, CPUWatchpoint **watchpoint)
735 CPUWatchpoint *wp;
737 /* forbid ranges which are empty or run off the end of the address space */
738 if (len == 0 || (addr + len - 1) < addr) {
739 error_report("tried to set invalid watchpoint at %"
740 VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
741 return -EINVAL;
743 wp = g_malloc(sizeof(*wp));
745 wp->vaddr = addr;
746 wp->len = len;
747 wp->flags = flags;
749 /* keep all GDB-injected watchpoints in front */
750 if (flags & BP_GDB) {
751 QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
752 } else {
753 QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
756 tlb_flush_page(cpu, addr);
758 if (watchpoint)
759 *watchpoint = wp;
760 return 0;
763 /* Remove a specific watchpoint. */
764 int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
765 int flags)
767 CPUWatchpoint *wp;
769 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
770 if (addr == wp->vaddr && len == wp->len
771 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
772 cpu_watchpoint_remove_by_ref(cpu, wp);
773 return 0;
776 return -ENOENT;
779 /* Remove a specific watchpoint by reference. */
780 void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
782 QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);
784 tlb_flush_page(cpu, watchpoint->vaddr);
786 g_free(watchpoint);
789 /* Remove all matching watchpoints. */
790 void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
792 CPUWatchpoint *wp, *next;
794 QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
795 if (wp->flags & mask) {
796 cpu_watchpoint_remove_by_ref(cpu, wp);
801 /* Return true if this watchpoint address matches the specified
802 * access (ie the address range covered by the watchpoint overlaps
803 * partially or completely with the address range covered by the
804 * access).
806 static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
807 vaddr addr,
808 vaddr len)
810 /* We know the lengths are non-zero, but a little caution is
811 * required to avoid errors in the case where the range ends
812 * exactly at the top of the address space and so addr + len
813 * wraps round to zero.
815 vaddr wpend = wp->vaddr + wp->len - 1;
816 vaddr addrend = addr + len - 1;
818 return !(addr > wpend || wp->vaddr > addrend);
821 #endif
823 /* Add a breakpoint. */
824 int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
825 CPUBreakpoint **breakpoint)
827 CPUBreakpoint *bp;
829 bp = g_malloc(sizeof(*bp));
831 bp->pc = pc;
832 bp->flags = flags;
834 /* keep all GDB-injected breakpoints in front */
835 if (flags & BP_GDB) {
836 QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
837 } else {
838 QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
841 breakpoint_invalidate(cpu, pc);
843 if (breakpoint) {
844 *breakpoint = bp;
846 return 0;
849 /* Remove a specific breakpoint. */
850 int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
852 CPUBreakpoint *bp;
854 QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
855 if (bp->pc == pc && bp->flags == flags) {
856 cpu_breakpoint_remove_by_ref(cpu, bp);
857 return 0;
860 return -ENOENT;
863 /* Remove a specific breakpoint by reference. */
864 void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
866 QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);
868 breakpoint_invalidate(cpu, breakpoint->pc);
870 g_free(breakpoint);
873 /* Remove all matching breakpoints. */
874 void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
876 CPUBreakpoint *bp, *next;
878 QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
879 if (bp->flags & mask) {
880 cpu_breakpoint_remove_by_ref(cpu, bp);
885 /* enable or disable single step mode. EXCP_DEBUG is returned by the
886 CPU loop after each instruction */
887 void cpu_single_step(CPUState *cpu, int enabled)
889 if (cpu->singlestep_enabled != enabled) {
890 cpu->singlestep_enabled = enabled;
891 if (kvm_enabled()) {
892 kvm_update_guest_debug(cpu, 0);
893 } else {
894 /* must flush all the translated code to avoid inconsistencies */
895 /* XXX: only flush what is necessary */
896 tb_flush(cpu);
901 void cpu_abort(CPUState *cpu, const char *fmt, ...)
903 va_list ap;
904 va_list ap2;
906 va_start(ap, fmt);
907 va_copy(ap2, ap);
908 fprintf(stderr, "qemu: fatal: ");
909 vfprintf(stderr, fmt, ap);
910 fprintf(stderr, "\n");
911 cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
912 if (qemu_log_separate()) {
913 qemu_log("qemu: fatal: ");
914 qemu_log_vprintf(fmt, ap2);
915 qemu_log("\n");
916 log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
917 qemu_log_flush();
918 qemu_log_close();
920 va_end(ap2);
921 va_end(ap);
922 replay_finish();
923 #if defined(CONFIG_USER_ONLY)
925 struct sigaction act;
926 sigfillset(&act.sa_mask);
927 act.sa_handler = SIG_DFL;
928 sigaction(SIGABRT, &act, NULL);
930 #endif
931 abort();
934 #if !defined(CONFIG_USER_ONLY)
935 /* Called from RCU critical section */
936 static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
938 RAMBlock *block;
940 block = atomic_rcu_read(&ram_list.mru_block);
941 if (block && addr - block->offset < block->max_length) {
942 return block;
944 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
945 if (addr - block->offset < block->max_length) {
946 goto found;
950 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
951 abort();
953 found:
954 /* It is safe to write mru_block outside the iothread lock. This
955 * is what happens:
957 * mru_block = xxx
958 * rcu_read_unlock()
959 * xxx removed from list
960 * rcu_read_lock()
961 * read mru_block
962 * mru_block = NULL;
963 * call_rcu(reclaim_ramblock, xxx);
964 * rcu_read_unlock()
966 * atomic_rcu_set is not needed here. The block was already published
967 * when it was placed into the list. Here we're just making an extra
968 * copy of the pointer.
970 ram_list.mru_block = block;
971 return block;
974 static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
976 CPUState *cpu;
977 ram_addr_t start1;
978 RAMBlock *block;
979 ram_addr_t end;
981 end = TARGET_PAGE_ALIGN(start + length);
982 start &= TARGET_PAGE_MASK;
984 rcu_read_lock();
985 block = qemu_get_ram_block(start);
986 assert(block == qemu_get_ram_block(end - 1));
987 start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
988 CPU_FOREACH(cpu) {
989 tlb_reset_dirty(cpu, start1, length);
991 rcu_read_unlock();
994 /* Note: start and end must be within the same ram block. */
995 bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
996 ram_addr_t length,
997 unsigned client)
999 DirtyMemoryBlocks *blocks;
1000 unsigned long end, page;
1001 bool dirty = false;
1003 if (length == 0) {
1004 return false;
1007 end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
1008 page = start >> TARGET_PAGE_BITS;
1010 rcu_read_lock();
1012 blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);
1014 while (page < end) {
1015 unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
1016 unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE;
1017 unsigned long num = MIN(end - page, DIRTY_MEMORY_BLOCK_SIZE - offset);
1019 dirty |= bitmap_test_and_clear_atomic(blocks->blocks[idx],
1020 offset, num);
1021 page += num;
1024 rcu_read_unlock();
1026 if (dirty && tcg_enabled()) {
1027 tlb_reset_dirty_range_all(start, length);
1030 return dirty;
1033 /* Called from RCU critical section */
1034 hwaddr memory_region_section_get_iotlb(CPUState *cpu,
1035 MemoryRegionSection *section,
1036 target_ulong vaddr,
1037 hwaddr paddr, hwaddr xlat,
1038 int prot,
1039 target_ulong *address)
1041 hwaddr iotlb;
1042 CPUWatchpoint *wp;
1044 if (memory_region_is_ram(section->mr)) {
1045 /* Normal RAM. */
1046 iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
1047 + xlat;
1048 if (!section->readonly) {
1049 iotlb |= PHYS_SECTION_NOTDIRTY;
1050 } else {
1051 iotlb |= PHYS_SECTION_ROM;
1053 } else {
1054 AddressSpaceDispatch *d;
1056 d = atomic_rcu_read(&section->address_space->dispatch);
1057 iotlb = section - d->map.sections;
1058 iotlb += xlat;
1061 /* Make accesses to pages with watchpoints go via the
1062 watchpoint trap routines. */
1063 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
1064 if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
1065 /* Avoid trapping reads of pages with a write breakpoint. */
1066 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
1067 iotlb = PHYS_SECTION_WATCH + paddr;
1068 *address |= TLB_MMIO;
1069 break;
1074 return iotlb;
1076 #endif /* defined(CONFIG_USER_ONLY) */
1078 #if !defined(CONFIG_USER_ONLY)
1080 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
1081 uint16_t section);
1082 static subpage_t *subpage_init(AddressSpace *as, hwaddr base);
1084 static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
1085 qemu_anon_ram_alloc;
1088 * Set a custom physical guest memory alloator.
1089 * Accelerators with unusual needs may need this. Hopefully, we can
1090 * get rid of it eventually.
1092 void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
1094 phys_mem_alloc = alloc;
1097 static uint16_t phys_section_add(PhysPageMap *map,
1098 MemoryRegionSection *section)
1100 /* The physical section number is ORed with a page-aligned
1101 * pointer to produce the iotlb entries. Thus it should
1102 * never overflow into the page-aligned value.
1104 assert(map->sections_nb < TARGET_PAGE_SIZE);
1106 if (map->sections_nb == map->sections_nb_alloc) {
1107 map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
1108 map->sections = g_renew(MemoryRegionSection, map->sections,
1109 map->sections_nb_alloc);
1111 map->sections[map->sections_nb] = *section;
1112 memory_region_ref(section->mr);
1113 return map->sections_nb++;
1116 static void phys_section_destroy(MemoryRegion *mr)
1118 bool have_sub_page = mr->subpage;
1120 memory_region_unref(mr);
1122 if (have_sub_page) {
1123 subpage_t *subpage = container_of(mr, subpage_t, iomem);
1124 object_unref(OBJECT(&subpage->iomem));
1125 g_free(subpage);
1129 static void phys_sections_free(PhysPageMap *map)
1131 while (map->sections_nb > 0) {
1132 MemoryRegionSection *section = &map->sections[--map->sections_nb];
1133 phys_section_destroy(section->mr);
1135 g_free(map->sections);
1136 g_free(map->nodes);
1139 static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
1141 subpage_t *subpage;
1142 hwaddr base = section->offset_within_address_space
1143 & TARGET_PAGE_MASK;
1144 MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
1145 d->map.nodes, d->map.sections);
1146 MemoryRegionSection subsection = {
1147 .offset_within_address_space = base,
1148 .size = int128_make64(TARGET_PAGE_SIZE),
1150 hwaddr start, end;
1152 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
1154 if (!(existing->mr->subpage)) {
1155 subpage = subpage_init(d->as, base);
1156 subsection.address_space = d->as;
1157 subsection.mr = &subpage->iomem;
1158 phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
1159 phys_section_add(&d->map, &subsection));
1160 } else {
1161 subpage = container_of(existing->mr, subpage_t, iomem);
1163 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
1164 end = start + int128_get64(section->size) - 1;
1165 subpage_register(subpage, start, end,
1166 phys_section_add(&d->map, section));
1170 static void register_multipage(AddressSpaceDispatch *d,
1171 MemoryRegionSection *section)
1173 hwaddr start_addr = section->offset_within_address_space;
1174 uint16_t section_index = phys_section_add(&d->map, section);
1175 uint64_t num_pages = int128_get64(int128_rshift(section->size,
1176 TARGET_PAGE_BITS));
1178 assert(num_pages);
1179 phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
/* MemoryListener region_add/region_nop hook: carve @section into at most
 * three parts — an unaligned head (subpage), a run of whole pages
 * (multipage), and an unaligned tail (subpage) — and register each into
 * the address space's next dispatch table.
 */
1182 static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
1184 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
1185 AddressSpaceDispatch *d = as->next_dispatch;
1186 MemoryRegionSection now = *section, remain = *section;
1187 Int128 page_size = int128_make64(TARGET_PAGE_SIZE);
/* Unaligned start: register only the fragment up to the next page
 * boundary as a subpage, then let the loop below consume the rest. */
1189 if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
1190 uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
1191 - now.offset_within_address_space;
1193 now.size = int128_min(int128_make64(left), now.size);
1194 register_subpage(d, &now);
1195 } else {
/* Aligned start: make the first loop iteration a no-op subtraction. */
1196 now.size = int128_zero();
/* Repeatedly peel off the piece registered last time and classify what
 * remains: a sub-page tail, an unaligned single page, or a maximal run
 * of whole pages. */
1198 while (int128_ne(remain.size, now.size)) {
1199 remain.size = int128_sub(remain.size, now.size);
1200 remain.offset_within_address_space += int128_get64(now.size);
1201 remain.offset_within_region += int128_get64(now.size);
1202 now = remain;
1203 if (int128_lt(remain.size, page_size)) {
1204 register_subpage(d, &now);
1205 } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
1206 now.size = page_size;
1207 register_subpage(d, &now);
1208 } else {
/* Round the size down to a whole number of pages. */
1209 now.size = int128_and(now.size, int128_neg(page_size));
1210 register_multipage(d, &now);
/* Drain KVM's coalesced-MMIO ring so pending guest writes reach the
 * device models; a no-op outside KVM.
 */
void qemu_flush_coalesced_mmio_buffer(void)
{
    if (!kvm_enabled()) {
        return;
    }
    kvm_flush_coalesced_mmio_buffer();
}
/* Acquire the global RAM-block list mutex (protects writers; readers
 * may instead use RCU). */
1221 void qemu_mutex_lock_ramlist(void)
1223 qemu_mutex_lock(&ram_list.mutex);
/* Release the global RAM-block list mutex. */
1226 void qemu_mutex_unlock_ramlist(void)
1228 qemu_mutex_unlock(&ram_list.mutex);
1231 #ifdef __linux__
/* Back @block with a file under @path (-mem-path / memory-backend-file).
 * @path may name an existing file, a file to create, or a directory in
 * which a temporary backing file is created.  Returns the mapped host
 * address, or NULL with @errp set on failure.  On success the fd is
 * stored in block->fd and stays open for the life of the block.
 */
1232 static void *file_ram_alloc(RAMBlock *block,
1233 ram_addr_t memory,
1234 const char *path,
1235 Error **errp)
1237 bool unlink_on_error = false;
1238 char *filename;
1239 char *sanitized_name;
1240 char *c;
1241 void *area;
1242 int fd = -1;
1243 int64_t page_size;
/* Without MMU notifiers KVM cannot track a movable file mapping. */
1245 if (kvm_enabled() && !kvm_has_sync_mmu()) {
1246 error_setg(errp,
1247 "host lacks kvm mmu notifiers, -mem-path unsupported");
1248 return NULL;
/* Open-or-create loop; retried on EINTR/EEXIST races (see below). */
1251 for (;;) {
1252 fd = open(path, O_RDWR);
1253 if (fd >= 0) {
1254 /* @path names an existing file, use it */
1255 break;
1257 if (errno == ENOENT) {
1258 /* @path names a file that doesn't exist, create it */
1259 fd = open(path, O_RDWR | O_CREAT | O_EXCL, 0644);
1260 if (fd >= 0) {
/* We created it, so remove it again on any later failure. */
1261 unlink_on_error = true;
1262 break;
1264 } else if (errno == EISDIR) {
1265 /* @path names a directory, create a file there */
1266 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
1267 sanitized_name = g_strdup(memory_region_name(block->mr));
1268 for (c = sanitized_name; *c != '\0'; c++) {
1269 if (*c == '/') {
1270 *c = '_';
1274 filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
1275 sanitized_name);
1276 g_free(sanitized_name);
1278 fd = mkstemp(filename);
1279 if (fd >= 0) {
/* Unlink immediately: the fd keeps the backing alive, and the
 * file vanishes automatically when QEMU exits. */
1280 unlink(filename);
1281 g_free(filename);
1282 break;
1284 g_free(filename);
1286 if (errno != EEXIST && errno != EINTR) {
1287 error_setg_errno(errp, errno,
1288 "can't open backing store %s for guest RAM",
1289 path);
1290 goto error;
1293 * Try again on EINTR and EEXIST. The latter happens when
1294 * something else creates the file between our two open().
/* Align the region to the backing file's (possibly huge) page size. */
1298 page_size = qemu_fd_getpagesize(fd);
1299 block->mr->align = page_size;
1301 if (memory < page_size) {
1302 error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
1303 "or larger than page size 0x%" PRIx64,
1304 memory, page_size);
1305 goto error;
1308 memory = ROUND_UP(memory, page_size);
1311 * ftruncate is not supported by hugetlbfs in older
1312 * hosts, so don't bother bailing out on errors.
1313 * If anything goes wrong with it under other filesystems,
1314 * mmap will fail.
1316 if (ftruncate(fd, memory)) {
1317 perror("ftruncate");
1320 area = qemu_ram_mmap(fd, memory, page_size, block->flags & RAM_SHARED);
1321 if (area == MAP_FAILED) {
1322 error_setg_errno(errp, errno,
1323 "unable to map backing store for guest RAM");
1324 goto error;
/* -mem-prealloc: touch every page now so faults don't happen later. */
1327 if (mem_prealloc) {
1328 os_mem_prealloc(fd, area, memory);
1331 block->fd = fd;
1332 return area;
1334 error:
1335 if (unlink_on_error) {
1336 unlink(path);
1338 if (fd != -1) {
1339 close(fd);
1341 return NULL;
1343 #endif
1345 /* Called with the ramlist lock held. */
/* Best-fit search for a free @size-byte gap in the ram_addr_t space:
 * for each block, find the closest following block and keep the
 * smallest gap that still fits.  O(n^2) over blocks, which is fine for
 * the small number of RAM blocks in practice.
 */
1346 static ram_addr_t find_ram_offset(ram_addr_t size)
1348 RAMBlock *block, *next_block;
1349 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
1351 assert(size != 0); /* it would hand out same offset multiple times */
1353 if (QLIST_EMPTY_RCU(&ram_list.blocks)) {
1354 return 0;
1357 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1358 ram_addr_t end, next = RAM_ADDR_MAX;
1360 end = block->offset + block->max_length;
/* Locate the lowest block offset at or beyond this block's end. */
1362 QLIST_FOREACH_RCU(next_block, &ram_list.blocks, next) {
1363 if (next_block->offset >= end) {
1364 next = MIN(next, next_block->offset);
/* Remember this gap if it fits and is tighter than the best so far. */
1367 if (next - end >= size && next - end < mingap) {
1368 offset = end;
1369 mingap = next - end;
1373 if (offset == RAM_ADDR_MAX) {
1374 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
1375 (uint64_t)size);
1376 abort();
1379 return offset;
1382 ram_addr_t last_ram_offset(void)
1384 RAMBlock *block;
1385 ram_addr_t last = 0;
1387 rcu_read_lock();
1388 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1389 last = MAX(last, block->offset + block->max_length);
1391 rcu_read_unlock();
1392 return last;
1395 static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1397 int ret;
1399 /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
1400 if (!machine_dump_guest_core(current_machine)) {
1401 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1402 if (ret) {
1403 perror("qemu_madvise");
1404 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1405 "but dump_guest_core=off specified\n");
1410 /* Called within an RCU critical section, or while the ramlist lock
1411 * is held.
1413 static RAMBlock *find_ram_block(ram_addr_t addr)
1415 RAMBlock *block;
1417 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1418 if (block->offset == addr) {
1419 return block;
1423 return NULL;
/* Return the block's migration identifier string (set by
 * qemu_ram_set_idstr; empty until then). */
1426 const char *qemu_ram_get_idstr(RAMBlock *rb)
1428 return rb->idstr;
1431 /* Called with iothread lock held. */
/* Assign the migration idstr of the block at @addr: an optional
 * "<device-path>/" prefix followed by @name.  Aborts if the resulting
 * idstr collides with an existing block, since migration keys RAM
 * blocks by idstr.
 */
1432 void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
1434 RAMBlock *new_block, *block;
1436 rcu_read_lock();
1437 new_block = find_ram_block(addr);
1438 assert(new_block);
/* Must not already be named. */
1439 assert(!new_block->idstr[0]);
1441 if (dev) {
1442 char *id = qdev_get_dev_path(dev);
1443 if (id) {
1444 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
1445 g_free(id);
1448 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
/* Enforce idstr uniqueness across all blocks. */
1450 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1451 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
1452 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1453 new_block->idstr);
1454 abort();
1457 rcu_read_unlock();
1460 /* Called with iothread lock held. */
1461 void qemu_ram_unset_idstr(ram_addr_t addr)
1463 RAMBlock *block;
1465 /* FIXME: arch_init.c assumes that this is not called throughout
1466 * migration. Ignore the problem since hot-unplug during migration
1467 * does not work anyway.
1470 rcu_read_lock();
1471 block = find_ram_block(addr);
1472 if (block) {
1473 memset(block->idstr, 0, sizeof(block->idstr));
1475 rcu_read_unlock();
1478 static int memory_try_enable_merging(void *addr, size_t len)
1480 if (!machine_mem_merge(current_machine)) {
1481 /* disabled by the user */
1482 return 0;
1485 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1488 /* Only legal before guest might have detected the memory size: e.g. on
1489 * incoming migration, or right after reset.
1491 * As memory core doesn't know how is memory accessed, it is up to
1492 * resize callback to update device state and/or add assertions to detect
1493 * misuse, if necessary.
/* Resize the used_length of the (RAM_RESIZEABLE) block at @base to
 * @newsize.  Returns 0 on success, -EINVAL (with @errp set) if the
 * block is fixed-size or @newsize exceeds its max_length.
 */
1495 int qemu_ram_resize(ram_addr_t base, ram_addr_t newsize, Error **errp)
1497 RAMBlock *block = find_ram_block(base);
1499 assert(block);
1501 newsize = HOST_PAGE_ALIGN(newsize);
/* Nothing to do if the aligned size is unchanged. */
1503 if (block->used_length == newsize) {
1504 return 0;
1507 if (!(block->flags & RAM_RESIZEABLE)) {
1508 error_setg_errno(errp, EINVAL,
1509 "Length mismatch: %s: 0x" RAM_ADDR_FMT
1510 " in != 0x" RAM_ADDR_FMT, block->idstr,
1511 newsize, block->used_length);
1512 return -EINVAL;
1515 if (block->max_length < newsize) {
1516 error_setg_errno(errp, EINVAL,
1517 "Length too large: %s: 0x" RAM_ADDR_FMT
1518 " > 0x" RAM_ADDR_FMT, block->idstr,
1519 newsize, block->max_length);
1520 return -EINVAL;
/* Reset dirty tracking for the old extent, then mark the whole new
 * extent dirty so migration/VGA resynchronize it. */
1523 cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
1524 block->used_length = newsize;
1525 cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
1526 DIRTY_CLIENTS_ALL);
1527 memory_region_set_size(block->mr, newsize);
/* Let the owning device react to the new size. */
1528 if (block->resized) {
1529 block->resized(block->idstr, newsize, block->host);
1531 return 0;
1534 /* Called with ram_list.mutex held */
/* Grow the per-client dirty-bitmap block arrays to cover RAM up to
 * @new_ram_size pages.  Readers access the arrays under RCU, so a new
 * array is built, published with atomic_rcu_set(), and the old one is
 * reclaimed after a grace period.
 */
1535 static void dirty_memory_extend(ram_addr_t old_ram_size,
1536 ram_addr_t new_ram_size)
1538 ram_addr_t old_num_blocks = DIV_ROUND_UP(old_ram_size,
1539 DIRTY_MEMORY_BLOCK_SIZE);
1540 ram_addr_t new_num_blocks = DIV_ROUND_UP(new_ram_size,
1541 DIRTY_MEMORY_BLOCK_SIZE);
1542 int i;
1544 /* Only need to extend if block count increased */
1545 if (new_num_blocks <= old_num_blocks) {
1546 return;
1549 for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
1550 DirtyMemoryBlocks *old_blocks;
1551 DirtyMemoryBlocks *new_blocks;
1552 int j;
1554 old_blocks = atomic_rcu_read(&ram_list.dirty_memory[i]);
1555 new_blocks = g_malloc(sizeof(*new_blocks) +
1556 sizeof(new_blocks->blocks[0]) * new_num_blocks);
/* Existing bitmap pointers are carried over; only the tail is new. */
1558 if (old_num_blocks) {
1559 memcpy(new_blocks->blocks, old_blocks->blocks,
1560 old_num_blocks * sizeof(old_blocks->blocks[0]));
1563 for (j = old_num_blocks; j < new_num_blocks; j++) {
1564 new_blocks->blocks[j] = bitmap_new(DIRTY_MEMORY_BLOCK_SIZE);
/* Publish the new array before freeing the old container (the
 * individual bitmaps are shared, so only the container is freed). */
1567 atomic_rcu_set(&ram_list.dirty_memory[i], new_blocks);
1569 if (old_blocks) {
1570 g_free_rcu(old_blocks, rcu);
/* Insert @new_block into the global RAM list: pick its ram_addr_t
 * offset, allocate host memory if none was supplied, extend the dirty
 * bitmaps, link it into the size-sorted RCU list, and apply madvise
 * policies.  On failure, sets @errp and leaves the block unlinked.
 */
1575 static void ram_block_add(RAMBlock *new_block, Error **errp)
1577 RAMBlock *block;
1578 RAMBlock *last_block = NULL;
1579 ram_addr_t old_ram_size, new_ram_size;
1580 Error *err = NULL;
1582 old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
1584 qemu_mutex_lock_ramlist();
1585 new_block->offset = find_ram_offset(new_block->max_length);
/* Only allocate if the caller didn't already provide host memory
 * (e.g. -mem-path or a user-supplied pointer). */
1587 if (!new_block->host) {
1588 if (xen_enabled()) {
1589 xen_ram_alloc(new_block->offset, new_block->max_length,
1590 new_block->mr, &err);
1591 if (err) {
1592 error_propagate(errp, err);
1593 qemu_mutex_unlock_ramlist();
1594 return;
1596 } else {
1597 new_block->host = phys_mem_alloc(new_block->max_length,
1598 &new_block->mr->align);
1599 if (!new_block->host) {
1600 error_setg_errno(errp, errno,
1601 "cannot set up guest memory '%s'",
1602 memory_region_name(new_block->mr));
1603 qemu_mutex_unlock_ramlist();
1604 return;
1606 memory_try_enable_merging(new_block->host, new_block->max_length);
/* Grow the migration and dirty bitmaps if total RAM span increased. */
1610 new_ram_size = MAX(old_ram_size,
1611 (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS);
1612 if (new_ram_size > old_ram_size) {
1613 migration_bitmap_extend(old_ram_size, new_ram_size);
1614 dirty_memory_extend(old_ram_size, new_ram_size);
1616 /* Keep the list sorted from biggest to smallest block. Unlike QTAILQ,
1617 * QLIST (which has an RCU-friendly variant) does not have insertion at
1618 * tail, so save the last element in last_block.
1620 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1621 last_block = block;
1622 if (block->max_length < new_block->max_length) {
1623 break;
1626 if (block) {
1627 QLIST_INSERT_BEFORE_RCU(block, new_block, next);
1628 } else if (last_block) {
1629 QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
1630 } else { /* list is empty */
1631 QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
/* Invalidate the MRU cache; lookups will repopulate it. */
1633 ram_list.mru_block = NULL;
1635 /* Write list before version */
1636 smp_wmb();
1637 ram_list.version++;
1638 qemu_mutex_unlock_ramlist();
/* Fresh RAM must be treated as dirty by all clients. */
1640 cpu_physical_memory_set_dirty_range(new_block->offset,
1641 new_block->used_length,
1642 DIRTY_CLIENTS_ALL);
1644 if (new_block->host) {
1645 qemu_ram_setup_dump(new_block->host, new_block->max_length);
1646 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
1647 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
1648 if (kvm_enabled()) {
1649 kvm_setup_guest_memory(new_block->host, new_block->max_length);
1654 #ifdef __linux__
/* Allocate a fixed-size RAM block backed by a file at @mem_path
 * (Linux only; not available under Xen or non-TCG/KVM accelerators
 * that override phys_mem_alloc).  Returns the new block or NULL with
 * @errp set.
 */
1655 RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
1656 bool share, const char *mem_path,
1657 Error **errp)
1659 RAMBlock *new_block;
1660 Error *local_err = NULL;
1662 if (xen_enabled()) {
1663 error_setg(errp, "-mem-path not supported with Xen");
1664 return NULL;
1667 if (phys_mem_alloc != qemu_anon_ram_alloc) {
1669 * file_ram_alloc() needs to allocate just like
1670 * phys_mem_alloc, but we haven't bothered to provide
1671 * a hook there.
1673 error_setg(errp,
1674 "-mem-path not supported with this accelerator");
1675 return NULL;
1678 size = HOST_PAGE_ALIGN(size);
1679 new_block = g_malloc0(sizeof(*new_block));
1680 new_block->mr = mr;
1681 new_block->used_length = size;
1682 new_block->max_length = size;
1683 new_block->flags = share ? RAM_SHARED : 0;
1684 new_block->host = file_ram_alloc(new_block, size,
1685 mem_path, errp);
1686 if (!new_block->host) {
1687 g_free(new_block);
1688 return NULL;
1691 ram_block_add(new_block, &local_err);
1692 if (local_err) {
1693 g_free(new_block);
1694 error_propagate(errp, local_err);
1695 return NULL;
1697 return new_block;
1699 #endif
1701 static
/* Common worker for the qemu_ram_alloc* family: build a RAMBlock of
 * @size (growable to @max_size when @resizeable), optionally backed by
 * caller-supplied @host memory, and register it via ram_block_add().
 * @resized is invoked on later qemu_ram_resize() calls.  Returns the
 * block or NULL with @errp set.
 */
1702 RAMBlock *qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
1703 void (*resized)(const char*,
1704 uint64_t length,
1705 void *host),
1706 void *host, bool resizeable,
1707 MemoryRegion *mr, Error **errp)
1709 RAMBlock *new_block;
1710 Error *local_err = NULL;
1712 size = HOST_PAGE_ALIGN(size);
1713 max_size = HOST_PAGE_ALIGN(max_size);
1714 new_block = g_malloc0(sizeof(*new_block));
1715 new_block->mr = mr;
1716 new_block->resized = resized;
1717 new_block->used_length = size;
1718 new_block->max_length = max_size;
1719 assert(max_size >= size);
1720 new_block->fd = -1;
1721 new_block->host = host;
/* Caller-provided memory must never be freed/remapped by us. */
1722 if (host) {
1723 new_block->flags |= RAM_PREALLOC;
1725 if (resizeable) {
1726 new_block->flags |= RAM_RESIZEABLE;
1728 ram_block_add(new_block, &local_err);
1729 if (local_err) {
1730 g_free(new_block);
1731 error_propagate(errp, local_err);
1732 return NULL;
1734 return new_block;
/* Fixed-size block wrapping caller-provided host memory. */
1737 RAMBlock *qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
1738 MemoryRegion *mr, Error **errp)
1740 return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
/* Fixed-size block with QEMU-allocated anonymous memory. */
1743 RAMBlock *qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
1745 return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
/* Resizeable block: starts at @size, may grow up to @maxsz; @resized
 * is called whenever qemu_ram_resize() changes the used length. */
1748 RAMBlock *qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
1749 void (*resized)(const char*,
1750 uint64_t length,
1751 void *host),
1752 MemoryRegion *mr, Error **errp)
1754 return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
/* RCU reclamation callback for qemu_ram_free(): release the block's
 * host memory by whichever mechanism allocated it, then the block
 * itself.  RAM_PREALLOC memory belongs to the caller and is left
 * untouched.
 */
1757 static void reclaim_ramblock(RAMBlock *block)
1759 if (block->flags & RAM_PREALLOC) {
1761 } else if (xen_enabled()) {
1762 xen_invalidate_map_cache_entry(block->host);
1763 #ifndef _WIN32
/* File-backed (see file_ram_alloc): unmap and drop the fd. */
1764 } else if (block->fd >= 0) {
1765 qemu_ram_munmap(block->host, block->max_length);
1766 close(block->fd);
1767 #endif
1768 } else {
1769 qemu_anon_ram_free(block->host, block->max_length);
1771 g_free(block);
/* Unlink @block from the RAM list and schedule its memory for release
 * after an RCU grace period, so concurrent RCU readers stay safe.
 */
1774 void qemu_ram_free(RAMBlock *block)
1776 qemu_mutex_lock_ramlist();
1777 QLIST_REMOVE_RCU(block, next);
1778 ram_list.mru_block = NULL;
1779 /* Write list before version */
1780 smp_wmb();
1781 ram_list.version++;
/* Actual teardown happens in reclaim_ramblock() after readers drain. */
1782 call_rcu(block, reclaim_ramblock, rcu);
1783 qemu_mutex_unlock_ramlist();
1786 #ifndef _WIN32
/* Re-establish the mapping for @length bytes of guest RAM at ram addr
 * @addr, e.g. to recover from an MCE-poisoned page.  The remap must use
 * the same backing (file or anonymous) as the original allocation;
 * exits on failure since the guest cannot continue without its RAM.
 */
1787 void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1789 RAMBlock *block;
1790 ram_addr_t offset;
1791 int flags;
1792 void *area, *vaddr;
1794 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1795 offset = addr - block->offset;
1796 if (offset < block->max_length) {
1797 vaddr = ramblock_ptr(block, offset);
/* Preallocated memory is owned by the caller; not ours to remap. */
1798 if (block->flags & RAM_PREALLOC) {
1800 } else if (xen_enabled()) {
1801 abort();
1802 } else {
1803 flags = MAP_FIXED;
1804 if (block->fd >= 0) {
1805 flags |= (block->flags & RAM_SHARED ?
1806 MAP_SHARED : MAP_PRIVATE);
1807 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1808 flags, block->fd, offset);
1809 } else {
1811 * Remap needs to match alloc. Accelerators that
1812 * set phys_mem_alloc never remap. If they did,
1813 * we'd need a remap hook here.
1815 assert(phys_mem_alloc == qemu_anon_ram_alloc);
1817 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1818 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1819 flags, -1, 0);
/* MAP_FIXED must land exactly at vaddr or the block is broken. */
1821 if (area != vaddr) {
1822 fprintf(stderr, "Could not remap addr: "
1823 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
1824 length, addr);
1825 exit(1);
/* Reapply the madvise policies lost by the fresh mapping. */
1827 memory_try_enable_merging(vaddr, length);
1828 qemu_ram_setup_dump(vaddr, length);
1833 #endif /* !_WIN32 */
1835 int qemu_get_ram_fd(ram_addr_t addr)
1837 RAMBlock *block;
1838 int fd;
1840 rcu_read_lock();
1841 block = qemu_get_ram_block(addr);
1842 fd = block->fd;
1843 rcu_read_unlock();
1844 return fd;
1847 void qemu_set_ram_fd(ram_addr_t addr, int fd)
1849 RAMBlock *block;
1851 rcu_read_lock();
1852 block = qemu_get_ram_block(addr);
1853 block->fd = fd;
1854 rcu_read_unlock();
1857 void *qemu_get_ram_block_host_ptr(ram_addr_t addr)
1859 RAMBlock *block;
1860 void *ptr;
1862 rcu_read_lock();
1863 block = qemu_get_ram_block(addr);
1864 ptr = ramblock_ptr(block, 0);
1865 rcu_read_unlock();
1866 return ptr;
1869 /* Return a host pointer to ram allocated with qemu_ram_alloc.
1870 * This should not be used for general purpose DMA. Use address_space_map
1871 * or address_space_rw instead. For local memory (e.g. video ram) that the
1872 * device owns, use memory_region_get_ram_ptr.
1874 * Called within RCU critical section.
/* @ram_block may be NULL, in which case the block is looked up from
 * @addr (an absolute ram_addr_t). */
1876 void *qemu_get_ram_ptr(RAMBlock *ram_block, ram_addr_t addr)
1878 RAMBlock *block = ram_block;
1880 if (block == NULL) {
1881 block = qemu_get_ram_block(addr);
/* Under Xen, RAM is not mapped into QEMU up front; map on demand. */
1884 if (xen_enabled() && block->host == NULL) {
1885 /* We need to check if the requested address is in the RAM
1886 * because we don't want to map the entire memory in QEMU.
1887 * In that case just map until the end of the page.
1889 if (block->offset == 0) {
1890 return xen_map_cache(addr, 0, 0);
/* Map the whole block persistently (lock=1) and cache the pointer. */
1893 block->host = xen_map_cache(block->offset, block->max_length, 1);
1895 return ramblock_ptr(block, addr - block->offset);
1898 /* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
1899 * but takes a size argument.
1901 * Called within RCU critical section.
/* On return, *size is clamped to the bytes available in the block from
 * @addr onward.  Returns NULL when *size is 0. */
1903 static void *qemu_ram_ptr_length(RAMBlock *ram_block, ram_addr_t addr,
1904 hwaddr *size)
1906 RAMBlock *block = ram_block;
1907 ram_addr_t offset_inside_block;
1908 if (*size == 0) {
1909 return NULL;
1912 if (block == NULL) {
1913 block = qemu_get_ram_block(addr);
1915 offset_inside_block = addr - block->offset;
/* Never hand out a pointer that runs past the end of the block. */
1916 *size = MIN(*size, block->max_length - offset_inside_block);
/* Under Xen, RAM is mapped on demand rather than up front. */
1918 if (xen_enabled() && block->host == NULL) {
1919 /* We need to check if the requested address is in the RAM
1920 * because we don't want to map the entire memory in QEMU.
1921 * In that case just map the requested area.
1923 if (block->offset == 0) {
1924 return xen_map_cache(addr, *size, 1);
1927 block->host = xen_map_cache(block->offset, block->max_length, 1);
1930 return ramblock_ptr(block, offset_inside_block);
1934 * Translates a host ptr back to a RAMBlock, a ram_addr and an offset
1935 * in that RAMBlock.
1937 * ptr: Host pointer to look up
1938 * round_offset: If true round the result offset down to a page boundary
1939 * *ram_addr: set to result ram_addr
1940 * *offset: set to result offset within the RAMBlock
1942 * Returns: RAMBlock (or NULL if not found)
1944 * By the time this function returns, the returned pointer is not protected
1945 * by RCU anymore. If the caller is not within an RCU critical section and
1946 * does not hold the iothread lock, it must have other means of protecting the
1947 * pointer, such as a reference to the region that includes the incoming
1948 * ram_addr_t.
1950 RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
1951 ram_addr_t *ram_addr,
1952 ram_addr_t *offset)
1954 RAMBlock *block;
1955 uint8_t *host = ptr;
/* Xen pointers come from the map cache, not from block->host. */
1957 if (xen_enabled()) {
1958 rcu_read_lock();
1959 *ram_addr = xen_ram_addr_from_mapcache(ptr);
1960 block = qemu_get_ram_block(*ram_addr);
1961 if (block) {
1962 *offset = (host - block->host);
1964 rcu_read_unlock();
1965 return block;
1968 rcu_read_lock();
/* Fast path: most lookups hit the most-recently-used block. */
1969 block = atomic_rcu_read(&ram_list.mru_block);
1970 if (block && block->host && host - block->host < block->max_length) {
1971 goto found;
1974 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1975 /* This case happens when the block is not mapped. */
1976 if (block->host == NULL) {
1977 continue;
1979 if (host - block->host < block->max_length) {
1980 goto found;
1984 rcu_read_unlock();
1985 return NULL;
1987 found:
1988 *offset = (host - block->host);
1989 if (round_offset) {
1990 *offset &= TARGET_PAGE_MASK;
1992 *ram_addr = block->offset + *offset;
1993 rcu_read_unlock();
1994 return block;
1998 * Finds the named RAMBlock
2000 * name: The name of RAMBlock to find
2002 * Returns: RAMBlock (or NULL if not found)
2004 RAMBlock *qemu_ram_block_by_name(const char *name)
2006 RAMBlock *block;
2008 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
2009 if (!strcmp(name, block->idstr)) {
2010 return block;
2014 return NULL;
2017 /* Some of the softmmu routines need to translate from a host pointer
2018 (typically a TLB entry) back to a ram offset. */
2019 MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
2021 RAMBlock *block;
2022 ram_addr_t offset; /* Not used */
2024 block = qemu_ram_block_from_host(ptr, false, ram_addr, &offset);
2026 if (!block) {
2027 return NULL;
2030 return block->mr;
2033 /* Called within RCU critical section. */
/* Write handler for pages trapped because they contain translated code:
 * invalidate affected TBs, perform the store, then mark the range dirty
 * so the trap can be removed once no code remains.
 */
2034 static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
2035 uint64_t val, unsigned size)
2037 if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
2038 tb_invalidate_phys_page_fast(ram_addr, size);
2040 switch (size) {
2041 case 1:
2042 stb_p(qemu_get_ram_ptr(NULL, ram_addr), val);
2043 break;
2044 case 2:
2045 stw_p(qemu_get_ram_ptr(NULL, ram_addr), val);
2046 break;
2047 case 4:
2048 stl_p(qemu_get_ram_ptr(NULL, ram_addr), val);
2049 break;
2050 default:
2051 abort();
2053 /* Set both VGA and migration bits for simplicity and to remove
2054 * the notdirty callback faster.
2056 cpu_physical_memory_set_dirty_range(ram_addr, size,
2057 DIRTY_CLIENTS_NOCODE);
2058 /* we remove the notdirty callback only if the code has been
2059 flushed */
2060 if (!cpu_physical_memory_is_clean(ram_addr)) {
2061 tlb_set_dirty(current_cpu, current_cpu->mem_io_vaddr);
/* Reads never go through the notdirty region — only writes are
 * trapped, so only writes are accepted. */
2065 static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
2066 unsigned size, bool is_write)
2068 return is_write;
/* Write-only MMIO ops backing io_mem_notdirty. */
2071 static const MemoryRegionOps notdirty_mem_ops = {
2072 .write = notdirty_mem_write,
2073 .valid.accepts = notdirty_mem_accepts,
2074 .endianness = DEVICE_NATIVE_ENDIAN,
2077 /* Generate a debug exception if a watchpoint has been hit. */
/* @offset is the in-page offset of the access, @len its size, @flags
 * the access type (BP_MEM_READ / BP_MEM_WRITE).  May longjmp out via
 * cpu_loop_exit()/cpu_resume_from_signal() and not return.
 */
2078 static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
2080 CPUState *cpu = current_cpu;
2081 CPUClass *cc = CPU_GET_CLASS(cpu);
2082 CPUArchState *env = cpu->env_ptr;
2083 target_ulong pc, cs_base;
2084 target_ulong vaddr;
2085 CPUWatchpoint *wp;
2086 int cpu_flags;
2088 if (cpu->watchpoint_hit) {
2089 /* We re-entered the check after replacing the TB. Now raise
2090 * the debug interrupt so that is will trigger after the
2091 * current instruction. */
2092 cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
2093 return;
/* Reconstruct the guest virtual address of the access. */
2095 vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
2096 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
2097 if (cpu_watchpoint_address_matches(wp, vaddr, len)
2098 && (wp->flags & flags)) {
2099 if (flags == BP_MEM_READ) {
2100 wp->flags |= BP_WATCHPOINT_HIT_READ;
2101 } else {
2102 wp->flags |= BP_WATCHPOINT_HIT_WRITE;
2104 wp->hitaddr = vaddr;
2105 wp->hitattrs = attrs;
2106 if (!cpu->watchpoint_hit) {
/* Give the target a veto (e.g. value-match watchpoints). */
2107 if (wp->flags & BP_CPU &&
2108 !cc->debug_check_watchpoint(cpu, wp)) {
2109 wp->flags &= ~BP_WATCHPOINT_HIT;
2110 continue;
2112 cpu->watchpoint_hit = wp;
2113 tb_check_watchpoint(cpu);
2114 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
2115 cpu->exception_index = EXCP_DEBUG;
2116 cpu_loop_exit(cpu);
2117 } else {
/* Retranslate a single-insn TB so the access completes, then
 * restart execution from it; does not return. */
2118 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
2119 tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
2120 cpu_resume_from_signal(cpu, NULL);
2123 } else {
2124 wp->flags &= ~BP_WATCHPOINT_HIT;
2129 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2130 so these check for a hit then pass through to the normal out-of-line
2131 phys routines. */
/* Read via the watchpoint region: trigger watchpoint processing, then
 * forward the load to the CPU's real address space. */
2132 static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata,
2133 unsigned size, MemTxAttrs attrs)
2135 MemTxResult res;
2136 uint64_t data;
2137 int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
2138 AddressSpace *as = current_cpu->cpu_ases[asidx].as;
2140 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ);
2141 switch (size) {
2142 case 1:
2143 data = address_space_ldub(as, addr, attrs, &res);
2144 break;
2145 case 2:
2146 data = address_space_lduw(as, addr, attrs, &res);
2147 break;
2148 case 4:
2149 data = address_space_ldl(as, addr, attrs, &res);
2150 break;
2151 default: abort();
2153 *pdata = data;
2154 return res;
/* Write via the watchpoint region: trigger watchpoint processing, then
 * forward the store to the CPU's real address space. */
2157 static MemTxResult watch_mem_write(void *opaque, hwaddr addr,
2158 uint64_t val, unsigned size,
2159 MemTxAttrs attrs)
2161 MemTxResult res;
2162 int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
2163 AddressSpace *as = current_cpu->cpu_ases[asidx].as;
2165 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE);
2166 switch (size) {
2167 case 1:
2168 address_space_stb(as, addr, val, attrs, &res);
2169 break;
2170 case 2:
2171 address_space_stw(as, addr, val, attrs, &res);
2172 break;
2173 case 4:
2174 address_space_stl(as, addr, val, attrs, &res);
2175 break;
2176 default: abort();
2178 return res;
/* MMIO ops backing io_mem_watch. */
2181 static const MemoryRegionOps watch_mem_ops = {
2182 .read_with_attrs = watch_mem_read,
2183 .write_with_attrs = watch_mem_write,
2184 .endianness = DEVICE_NATIVE_ENDIAN,
/* Read from a page containing multiple sections: translate the in-page
 * @addr to the full physical address and re-dispatch through
 * address_space_read, converting the byte buffer back to a value. */
2187 static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
2188 unsigned len, MemTxAttrs attrs)
2190 subpage_t *subpage = opaque;
2191 uint8_t buf[8];
2192 MemTxResult res;
2194 #if defined(DEBUG_SUBPAGE)
2195 printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
2196 subpage, len, addr);
2197 #endif
2198 res = address_space_read(subpage->as, addr + subpage->base,
2199 attrs, buf, len);
2200 if (res) {
2201 return res;
/* Reassemble the value with target endianness (ld*_p). */
2203 switch (len) {
2204 case 1:
2205 *data = ldub_p(buf);
2206 return MEMTX_OK;
2207 case 2:
2208 *data = lduw_p(buf);
2209 return MEMTX_OK;
2210 case 4:
2211 *data = ldl_p(buf);
2212 return MEMTX_OK;
2213 case 8:
2214 *data = ldq_p(buf);
2215 return MEMTX_OK;
2216 default:
2217 abort();
/* Write to a page containing multiple sections: serialize @value into
 * a byte buffer with target endianness and re-dispatch through
 * address_space_write at the full physical address. */
2221 static MemTxResult subpage_write(void *opaque, hwaddr addr,
2222 uint64_t value, unsigned len, MemTxAttrs attrs)
2224 subpage_t *subpage = opaque;
2225 uint8_t buf[8];
2227 #if defined(DEBUG_SUBPAGE)
2228 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
2229 " value %"PRIx64"\n",
2230 __func__, subpage, len, addr, value);
2231 #endif
2232 switch (len) {
2233 case 1:
2234 stb_p(buf, value);
2235 break;
2236 case 2:
2237 stw_p(buf, value);
2238 break;
2239 case 4:
2240 stl_p(buf, value);
2241 break;
2242 case 8:
2243 stq_p(buf, value);
2244 break;
2245 default:
2246 abort();
2248 return address_space_write(subpage->as, addr + subpage->base,
2249 attrs, buf, len);
2252 static bool subpage_accepts(void *opaque, hwaddr addr,
2253 unsigned len, bool is_write)
2255 subpage_t *subpage = opaque;
2256 #if defined(DEBUG_SUBPAGE)
2257 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
2258 __func__, subpage, is_write ? 'w' : 'r', len, addr);
2259 #endif
2261 return address_space_access_valid(subpage->as, addr + subpage->base,
2262 len, is_write);
/* MMIO ops for subpage regions: 1-8 byte accesses, forwarded through
 * subpage_read/subpage_write with attributes preserved. */
2265 static const MemoryRegionOps subpage_ops = {
2266 .read_with_attrs = subpage_read,
2267 .write_with_attrs = subpage_write,
2268 .impl.min_access_size = 1,
2269 .impl.max_access_size = 8,
2270 .valid.min_access_size = 1,
2271 .valid.max_access_size = 8,
2272 .valid.accepts = subpage_accepts,
2273 .endianness = DEVICE_NATIVE_ENDIAN,
/* Point the sub-section table entries covering [start, end] (in-page
 * byte offsets) at @section.  Returns 0, or -1 if the range exceeds a
 * page. */
2276 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2277 uint16_t section)
2279 int idx, eidx;
2281 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2282 return -1;
2283 idx = SUBPAGE_IDX(start);
2284 eidx = SUBPAGE_IDX(end);
2285 #if defined(DEBUG_SUBPAGE)
2286 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
2287 __func__, mmio, start, end, idx, eidx, section);
2288 #endif
2289 for (; idx <= eidx; idx++) {
2290 mmio->sub_section[idx] = section;
2293 return 0;
/* Allocate a subpage container for the page at @base in @as, with every
 * sub-section initially unassigned. */
2296 static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
2298 subpage_t *mmio;
2300 mmio = g_malloc0(sizeof(subpage_t));
2302 mmio->as = as;
2303 mmio->base = base;
2304 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
2305 NULL, TARGET_PAGE_SIZE);
/* Marks the region so register_subpage() can recognize it later. */
2306 mmio->iomem.subpage = true;
2307 #if defined(DEBUG_SUBPAGE)
2308 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
2309 mmio, base, TARGET_PAGE_SIZE);
2310 #endif
2311 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
2313 return mmio;
/* Add a whole-address-space section wrapping @mr to @map and return its
 * section index; used for the fixed io_mem_* regions in mem_begin(). */
2316 static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
2317 MemoryRegion *mr)
2319 assert(as);
2320 MemoryRegionSection section = {
2321 .address_space = as,
2322 .mr = mr,
2323 .offset_within_address_space = 0,
2324 .offset_within_region = 0,
/* Covers the full 2^64 address space. */
2325 .size = int128_2_64(),
2328 return phys_section_add(map, &section);
2331 MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index, MemTxAttrs attrs)
2333 int asidx = cpu_asidx_from_attrs(cpu, attrs);
2334 CPUAddressSpace *cpuas = &cpu->cpu_ases[asidx];
2335 AddressSpaceDispatch *d = atomic_rcu_read(&cpuas->memory_dispatch);
2336 MemoryRegionSection *sections = d->map.sections;
2338 return sections[index & ~TARGET_PAGE_MASK].mr;
/* One-time setup of the special IO regions used as dispatch targets:
 * ROM writes, unassigned accesses, not-dirty (code) pages, and
 * watchpoint-trapped pages. */
2341 static void io_mem_init(void)
2343 memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
2344 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
2345 NULL, UINT64_MAX);
2346 memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
2347 NULL, UINT64_MAX);
2348 memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
2349 NULL, UINT64_MAX);
/* MemoryListener begin hook: start building a fresh dispatch table for
 * the address space.  The first sections added must land at the fixed
 * PHYS_SECTION_* indices, which the asserts verify. */
2352 static void mem_begin(MemoryListener *listener)
2354 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
2355 AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
2356 uint16_t n;
2358 n = dummy_section(&d->map, as, &io_mem_unassigned);
2359 assert(n == PHYS_SECTION_UNASSIGNED);
2360 n = dummy_section(&d->map, as, &io_mem_notdirty);
2361 assert(n == PHYS_SECTION_NOTDIRTY);
2362 n = dummy_section(&d->map, as, &io_mem_rom);
2363 assert(n == PHYS_SECTION_ROM);
2364 n = dummy_section(&d->map, as, &io_mem_watch);
2365 assert(n == PHYS_SECTION_WATCH);
/* Empty page table; mem_add() will populate it, mem_commit() swaps
 * it in. */
2367 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
2368 d->as = as;
2369 as->next_dispatch = d;
/* RCU reclamation callback: free a retired dispatch table and its
 * section array. */
2372 static void address_space_dispatch_free(AddressSpaceDispatch *d)
2374 phys_sections_free(&d->map);
2375 g_free(d);
/* MemoryListener commit hook: atomically publish the dispatch table
 * built since mem_begin() and retire the previous one via RCU. */
2378 static void mem_commit(MemoryListener *listener)
2380 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
2381 AddressSpaceDispatch *cur = as->dispatch;
2382 AddressSpaceDispatch *next = as->next_dispatch;
/* Merge identical subtrees to shrink the radix tree before use. */
2384 phys_page_compact_all(next, next->map.nodes_nb);
2386 atomic_rcu_set(&as->dispatch, next);
2387 if (cur) {
2388 call_rcu(cur, address_space_dispatch_free, rcu);
/* Per-CPU listener commit hook for TCG: refresh the CPU's cached
 * dispatch pointer and flush its TLB after a memory-map change. */
2392 static void tcg_commit(MemoryListener *listener)
2394 CPUAddressSpace *cpuas;
2395 AddressSpaceDispatch *d;
2397 /* since each CPU stores ram addresses in its TLB cache, we must
2398 reset the modified entries */
2399 cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener);
2400 cpu_reloading_memory_map();
2401 /* The CPU and TLB are protected by the iothread lock.
2402 * We reload the dispatch pointer now because cpu_reloading_memory_map()
2403 * may have split the RCU critical section.
2405 d = atomic_rcu_read(&cpuas->as->dispatch);
2406 cpuas->memory_dispatch = d;
2407 tlb_flush(cpuas->cpu, 1);
/* Hook up the dispatch-building MemoryListener for @as; the listener's
 * begin/add/commit callbacks maintain as->dispatch from then on. */
2410 void address_space_init_dispatch(AddressSpace *as)
2412 as->dispatch = NULL;
2413 as->dispatch_listener = (MemoryListener) {
2414 .begin = mem_begin,
2415 .commit = mem_commit,
2416 .region_add = mem_add,
/* region_nop also rebuilds, since every begin starts from scratch. */
2417 .region_nop = mem_add,
2418 .priority = 0,
2420 memory_listener_register(&as->dispatch_listener, as);
/* Detach the dispatch listener; part of address space teardown. */
2423 void address_space_unregister(AddressSpace *as)
2425 memory_listener_unregister(&as->dispatch_listener);
/* Drop the address space's dispatch table, freeing it after an RCU
 * grace period so in-flight lookups complete safely. */
2428 void address_space_destroy_dispatch(AddressSpace *as)
2430 AddressSpaceDispatch *d = as->dispatch;
2432 atomic_rcu_set(&as->dispatch, NULL);
2433 if (d) {
2434 call_rcu(d, address_space_dispatch_free, rcu);
2438 static void memory_map_init(void)
2440 system_memory = g_malloc(sizeof(*system_memory));
2442 memory_region_init(system_memory, NULL, "system", UINT64_MAX);
2443 address_space_init(&address_space_memory, system_memory, "memory");
2445 system_io = g_malloc(sizeof(*system_io));
2446 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
2447 65536);
2448 address_space_init(&address_space_io, system_io, "I/O");
2451 MemoryRegion *get_system_memory(void)
2453 return system_memory;
2456 MemoryRegion *get_system_io(void)
2458 return system_io;
2461 #endif /* !defined(CONFIG_USER_ONLY) */
2463 /* physical memory access (slow version, mainly for debug) */
2464 #if defined(CONFIG_USER_ONLY)
2465 int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
2466 uint8_t *buf, int len, int is_write)
2468 int l, flags;
2469 target_ulong page;
2470 void * p;
2472 while (len > 0) {
2473 page = addr & TARGET_PAGE_MASK;
2474 l = (page + TARGET_PAGE_SIZE) - addr;
2475 if (l > len)
2476 l = len;
2477 flags = page_get_flags(page);
2478 if (!(flags & PAGE_VALID))
2479 return -1;
2480 if (is_write) {
2481 if (!(flags & PAGE_WRITE))
2482 return -1;
2483 /* XXX: this code should not depend on lock_user */
2484 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
2485 return -1;
2486 memcpy(p, buf, l);
2487 unlock_user(p, addr, l);
2488 } else {
2489 if (!(flags & PAGE_READ))
2490 return -1;
2491 /* XXX: this code should not depend on lock_user */
2492 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
2493 return -1;
2494 memcpy(buf, p, l);
2495 unlock_user(p, addr, 0);
2497 len -= l;
2498 buf += l;
2499 addr += l;
2501 return 0;
2504 #else
2506 static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
2507 hwaddr length)
2509 uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
2510 /* No early return if dirty_log_mask is or becomes 0, because
2511 * cpu_physical_memory_set_dirty_range will still call
2512 * xen_modified_memory.
2514 if (dirty_log_mask) {
2515 dirty_log_mask =
2516 cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
2518 if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
2519 tb_invalidate_phys_range(addr, addr + length);
2520 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
2522 cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
2525 static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
2527 unsigned access_size_max = mr->ops->valid.max_access_size;
2529 /* Regions are assumed to support 1-4 byte accesses unless
2530 otherwise specified. */
2531 if (access_size_max == 0) {
2532 access_size_max = 4;
2535 /* Bound the maximum access by the alignment of the address. */
2536 if (!mr->ops->impl.unaligned) {
2537 unsigned align_size_max = addr & -addr;
2538 if (align_size_max != 0 && align_size_max < access_size_max) {
2539 access_size_max = align_size_max;
2543 /* Don't attempt accesses larger than the maximum. */
2544 if (l > access_size_max) {
2545 l = access_size_max;
2547 l = pow2floor(l);
2549 return l;
2552 static bool prepare_mmio_access(MemoryRegion *mr)
2554 bool unlocked = !qemu_mutex_iothread_locked();
2555 bool release_lock = false;
2557 if (unlocked && mr->global_locking) {
2558 qemu_mutex_lock_iothread();
2559 unlocked = false;
2560 release_lock = true;
2562 if (mr->flush_coalesced_mmio) {
2563 if (unlocked) {
2564 qemu_mutex_lock_iothread();
2566 qemu_flush_coalesced_mmio_buffer();
2567 if (unlocked) {
2568 qemu_mutex_unlock_iothread();
2572 return release_lock;
2575 /* Called within RCU critical section. */
2576 static MemTxResult address_space_write_continue(AddressSpace *as, hwaddr addr,
2577 MemTxAttrs attrs,
2578 const uint8_t *buf,
2579 int len, hwaddr addr1,
2580 hwaddr l, MemoryRegion *mr)
2582 uint8_t *ptr;
2583 uint64_t val;
2584 MemTxResult result = MEMTX_OK;
2585 bool release_lock = false;
2587 for (;;) {
2588 if (!memory_access_is_direct(mr, true)) {
2589 release_lock |= prepare_mmio_access(mr);
2590 l = memory_access_size(mr, l, addr1);
2591 /* XXX: could force current_cpu to NULL to avoid
2592 potential bugs */
2593 switch (l) {
2594 case 8:
2595 /* 64 bit write access */
2596 val = ldq_p(buf);
2597 result |= memory_region_dispatch_write(mr, addr1, val, 8,
2598 attrs);
2599 break;
2600 case 4:
2601 /* 32 bit write access */
2602 val = ldl_p(buf);
2603 result |= memory_region_dispatch_write(mr, addr1, val, 4,
2604 attrs);
2605 break;
2606 case 2:
2607 /* 16 bit write access */
2608 val = lduw_p(buf);
2609 result |= memory_region_dispatch_write(mr, addr1, val, 2,
2610 attrs);
2611 break;
2612 case 1:
2613 /* 8 bit write access */
2614 val = ldub_p(buf);
2615 result |= memory_region_dispatch_write(mr, addr1, val, 1,
2616 attrs);
2617 break;
2618 default:
2619 abort();
2621 } else {
2622 addr1 += memory_region_get_ram_addr(mr);
2623 /* RAM case */
2624 ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
2625 memcpy(ptr, buf, l);
2626 invalidate_and_set_dirty(mr, addr1, l);
2629 if (release_lock) {
2630 qemu_mutex_unlock_iothread();
2631 release_lock = false;
2634 len -= l;
2635 buf += l;
2636 addr += l;
2638 if (!len) {
2639 break;
2642 l = len;
2643 mr = address_space_translate(as, addr, &addr1, &l, true);
2646 return result;
2649 MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2650 const uint8_t *buf, int len)
2652 hwaddr l;
2653 hwaddr addr1;
2654 MemoryRegion *mr;
2655 MemTxResult result = MEMTX_OK;
2657 if (len > 0) {
2658 rcu_read_lock();
2659 l = len;
2660 mr = address_space_translate(as, addr, &addr1, &l, true);
2661 result = address_space_write_continue(as, addr, attrs, buf, len,
2662 addr1, l, mr);
2663 rcu_read_unlock();
2666 return result;
2669 /* Called within RCU critical section. */
2670 MemTxResult address_space_read_continue(AddressSpace *as, hwaddr addr,
2671 MemTxAttrs attrs, uint8_t *buf,
2672 int len, hwaddr addr1, hwaddr l,
2673 MemoryRegion *mr)
2675 uint8_t *ptr;
2676 uint64_t val;
2677 MemTxResult result = MEMTX_OK;
2678 bool release_lock = false;
2680 for (;;) {
2681 if (!memory_access_is_direct(mr, false)) {
2682 /* I/O case */
2683 release_lock |= prepare_mmio_access(mr);
2684 l = memory_access_size(mr, l, addr1);
2685 switch (l) {
2686 case 8:
2687 /* 64 bit read access */
2688 result |= memory_region_dispatch_read(mr, addr1, &val, 8,
2689 attrs);
2690 stq_p(buf, val);
2691 break;
2692 case 4:
2693 /* 32 bit read access */
2694 result |= memory_region_dispatch_read(mr, addr1, &val, 4,
2695 attrs);
2696 stl_p(buf, val);
2697 break;
2698 case 2:
2699 /* 16 bit read access */
2700 result |= memory_region_dispatch_read(mr, addr1, &val, 2,
2701 attrs);
2702 stw_p(buf, val);
2703 break;
2704 case 1:
2705 /* 8 bit read access */
2706 result |= memory_region_dispatch_read(mr, addr1, &val, 1,
2707 attrs);
2708 stb_p(buf, val);
2709 break;
2710 default:
2711 abort();
2713 } else {
2714 /* RAM case */
2715 ptr = qemu_get_ram_ptr(mr->ram_block,
2716 memory_region_get_ram_addr(mr) + addr1);
2717 memcpy(buf, ptr, l);
2720 if (release_lock) {
2721 qemu_mutex_unlock_iothread();
2722 release_lock = false;
2725 len -= l;
2726 buf += l;
2727 addr += l;
2729 if (!len) {
2730 break;
2733 l = len;
2734 mr = address_space_translate(as, addr, &addr1, &l, false);
2737 return result;
2740 MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
2741 MemTxAttrs attrs, uint8_t *buf, int len)
2743 hwaddr l;
2744 hwaddr addr1;
2745 MemoryRegion *mr;
2746 MemTxResult result = MEMTX_OK;
2748 if (len > 0) {
2749 rcu_read_lock();
2750 l = len;
2751 mr = address_space_translate(as, addr, &addr1, &l, false);
2752 result = address_space_read_continue(as, addr, attrs, buf, len,
2753 addr1, l, mr);
2754 rcu_read_unlock();
2757 return result;
2760 MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2761 uint8_t *buf, int len, bool is_write)
2763 if (is_write) {
2764 return address_space_write(as, addr, attrs, (uint8_t *)buf, len);
2765 } else {
2766 return address_space_read(as, addr, attrs, (uint8_t *)buf, len);
2770 void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
2771 int len, int is_write)
2773 address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
2774 buf, len, is_write);
2777 enum write_rom_type {
2778 WRITE_DATA,
2779 FLUSH_CACHE,
2782 static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
2783 hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
2785 hwaddr l;
2786 uint8_t *ptr;
2787 hwaddr addr1;
2788 MemoryRegion *mr;
2790 rcu_read_lock();
2791 while (len > 0) {
2792 l = len;
2793 mr = address_space_translate(as, addr, &addr1, &l, true);
2795 if (!(memory_region_is_ram(mr) ||
2796 memory_region_is_romd(mr))) {
2797 l = memory_access_size(mr, l, addr1);
2798 } else {
2799 addr1 += memory_region_get_ram_addr(mr);
2800 /* ROM/RAM case */
2801 ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
2802 switch (type) {
2803 case WRITE_DATA:
2804 memcpy(ptr, buf, l);
2805 invalidate_and_set_dirty(mr, addr1, l);
2806 break;
2807 case FLUSH_CACHE:
2808 flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
2809 break;
2812 len -= l;
2813 buf += l;
2814 addr += l;
2816 rcu_read_unlock();
2819 /* used for ROM loading : can write in RAM and ROM */
2820 void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
2821 const uint8_t *buf, int len)
2823 cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
2826 void cpu_flush_icache_range(hwaddr start, int len)
2829 * This function should do the same thing as an icache flush that was
2830 * triggered from within the guest. For TCG we are always cache coherent,
2831 * so there is no need to flush anything. For KVM / Xen we need to flush
2832 * the host's instruction cache at least.
2834 if (tcg_enabled()) {
2835 return;
2838 cpu_physical_memory_write_rom_internal(&address_space_memory,
2839 start, NULL, len, FLUSH_CACHE);
2842 typedef struct {
2843 MemoryRegion *mr;
2844 void *buffer;
2845 hwaddr addr;
2846 hwaddr len;
2847 bool in_use;
2848 } BounceBuffer;
2850 static BounceBuffer bounce;
2852 typedef struct MapClient {
2853 QEMUBH *bh;
2854 QLIST_ENTRY(MapClient) link;
2855 } MapClient;
2857 QemuMutex map_client_list_lock;
2858 static QLIST_HEAD(map_client_list, MapClient) map_client_list
2859 = QLIST_HEAD_INITIALIZER(map_client_list);
2861 static void cpu_unregister_map_client_do(MapClient *client)
2863 QLIST_REMOVE(client, link);
2864 g_free(client);
2867 static void cpu_notify_map_clients_locked(void)
2869 MapClient *client;
2871 while (!QLIST_EMPTY(&map_client_list)) {
2872 client = QLIST_FIRST(&map_client_list);
2873 qemu_bh_schedule(client->bh);
2874 cpu_unregister_map_client_do(client);
2878 void cpu_register_map_client(QEMUBH *bh)
2880 MapClient *client = g_malloc(sizeof(*client));
2882 qemu_mutex_lock(&map_client_list_lock);
2883 client->bh = bh;
2884 QLIST_INSERT_HEAD(&map_client_list, client, link);
2885 if (!atomic_read(&bounce.in_use)) {
2886 cpu_notify_map_clients_locked();
2888 qemu_mutex_unlock(&map_client_list_lock);
2891 void cpu_exec_init_all(void)
2893 qemu_mutex_init(&ram_list.mutex);
2894 io_mem_init();
2895 memory_map_init();
2896 qemu_mutex_init(&map_client_list_lock);
2899 void cpu_unregister_map_client(QEMUBH *bh)
2901 MapClient *client;
2903 qemu_mutex_lock(&map_client_list_lock);
2904 QLIST_FOREACH(client, &map_client_list, link) {
2905 if (client->bh == bh) {
2906 cpu_unregister_map_client_do(client);
2907 break;
2910 qemu_mutex_unlock(&map_client_list_lock);
2913 static void cpu_notify_map_clients(void)
2915 qemu_mutex_lock(&map_client_list_lock);
2916 cpu_notify_map_clients_locked();
2917 qemu_mutex_unlock(&map_client_list_lock);
2920 bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2922 MemoryRegion *mr;
2923 hwaddr l, xlat;
2925 rcu_read_lock();
2926 while (len > 0) {
2927 l = len;
2928 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2929 if (!memory_access_is_direct(mr, is_write)) {
2930 l = memory_access_size(mr, l, addr);
2931 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
2932 return false;
2936 len -= l;
2937 addr += l;
2939 rcu_read_unlock();
2940 return true;
2943 /* Map a physical memory region into a host virtual address.
2944 * May map a subset of the requested range, given by and returned in *plen.
2945 * May return NULL if resources needed to perform the mapping are exhausted.
2946 * Use only for reads OR writes - not for read-modify-write operations.
2947 * Use cpu_register_map_client() to know when retrying the map operation is
2948 * likely to succeed.
2950 void *address_space_map(AddressSpace *as,
2951 hwaddr addr,
2952 hwaddr *plen,
2953 bool is_write)
2955 hwaddr len = *plen;
2956 hwaddr done = 0;
2957 hwaddr l, xlat, base;
2958 MemoryRegion *mr, *this_mr;
2959 ram_addr_t raddr;
2960 void *ptr;
2962 if (len == 0) {
2963 return NULL;
2966 l = len;
2967 rcu_read_lock();
2968 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2970 if (!memory_access_is_direct(mr, is_write)) {
2971 if (atomic_xchg(&bounce.in_use, true)) {
2972 rcu_read_unlock();
2973 return NULL;
2975 /* Avoid unbounded allocations */
2976 l = MIN(l, TARGET_PAGE_SIZE);
2977 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
2978 bounce.addr = addr;
2979 bounce.len = l;
2981 memory_region_ref(mr);
2982 bounce.mr = mr;
2983 if (!is_write) {
2984 address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
2985 bounce.buffer, l);
2988 rcu_read_unlock();
2989 *plen = l;
2990 return bounce.buffer;
2993 base = xlat;
2994 raddr = memory_region_get_ram_addr(mr);
2996 for (;;) {
2997 len -= l;
2998 addr += l;
2999 done += l;
3000 if (len == 0) {
3001 break;
3004 l = len;
3005 this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
3006 if (this_mr != mr || xlat != base + done) {
3007 break;
3011 memory_region_ref(mr);
3012 *plen = done;
3013 ptr = qemu_ram_ptr_length(mr->ram_block, raddr + base, plen);
3014 rcu_read_unlock();
3016 return ptr;
3019 /* Unmaps a memory region previously mapped by address_space_map().
3020 * Will also mark the memory as dirty if is_write == 1. access_len gives
3021 * the amount of memory that was actually read or written by the caller.
3023 void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
3024 int is_write, hwaddr access_len)
3026 if (buffer != bounce.buffer) {
3027 MemoryRegion *mr;
3028 ram_addr_t addr1;
3030 mr = qemu_ram_addr_from_host(buffer, &addr1);
3031 assert(mr != NULL);
3032 if (is_write) {
3033 invalidate_and_set_dirty(mr, addr1, access_len);
3035 if (xen_enabled()) {
3036 xen_invalidate_map_cache_entry(buffer);
3038 memory_region_unref(mr);
3039 return;
3041 if (is_write) {
3042 address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
3043 bounce.buffer, access_len);
3045 qemu_vfree(bounce.buffer);
3046 bounce.buffer = NULL;
3047 memory_region_unref(bounce.mr);
3048 atomic_mb_set(&bounce.in_use, false);
3049 cpu_notify_map_clients();
3052 void *cpu_physical_memory_map(hwaddr addr,
3053 hwaddr *plen,
3054 int is_write)
3056 return address_space_map(&address_space_memory, addr, plen, is_write);
3059 void cpu_physical_memory_unmap(void *buffer, hwaddr len,
3060 int is_write, hwaddr access_len)
3062 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
3065 /* warning: addr must be aligned */
3066 static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
3067 MemTxAttrs attrs,
3068 MemTxResult *result,
3069 enum device_endian endian)
3071 uint8_t *ptr;
3072 uint64_t val;
3073 MemoryRegion *mr;
3074 hwaddr l = 4;
3075 hwaddr addr1;
3076 MemTxResult r;
3077 bool release_lock = false;
3079 rcu_read_lock();
3080 mr = address_space_translate(as, addr, &addr1, &l, false);
3081 if (l < 4 || !memory_access_is_direct(mr, false)) {
3082 release_lock |= prepare_mmio_access(mr);
3084 /* I/O case */
3085 r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
3086 #if defined(TARGET_WORDS_BIGENDIAN)
3087 if (endian == DEVICE_LITTLE_ENDIAN) {
3088 val = bswap32(val);
3090 #else
3091 if (endian == DEVICE_BIG_ENDIAN) {
3092 val = bswap32(val);
3094 #endif
3095 } else {
3096 /* RAM case */
3097 ptr = qemu_get_ram_ptr(mr->ram_block,
3098 (memory_region_get_ram_addr(mr)
3099 & TARGET_PAGE_MASK)
3100 + addr1);
3101 switch (endian) {
3102 case DEVICE_LITTLE_ENDIAN:
3103 val = ldl_le_p(ptr);
3104 break;
3105 case DEVICE_BIG_ENDIAN:
3106 val = ldl_be_p(ptr);
3107 break;
3108 default:
3109 val = ldl_p(ptr);
3110 break;
3112 r = MEMTX_OK;
3114 if (result) {
3115 *result = r;
3117 if (release_lock) {
3118 qemu_mutex_unlock_iothread();
3120 rcu_read_unlock();
3121 return val;
3124 uint32_t address_space_ldl(AddressSpace *as, hwaddr addr,
3125 MemTxAttrs attrs, MemTxResult *result)
3127 return address_space_ldl_internal(as, addr, attrs, result,
3128 DEVICE_NATIVE_ENDIAN);
3131 uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
3132 MemTxAttrs attrs, MemTxResult *result)
3134 return address_space_ldl_internal(as, addr, attrs, result,
3135 DEVICE_LITTLE_ENDIAN);
3138 uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
3139 MemTxAttrs attrs, MemTxResult *result)
3141 return address_space_ldl_internal(as, addr, attrs, result,
3142 DEVICE_BIG_ENDIAN);
3145 uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
3147 return address_space_ldl(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
3150 uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
3152 return address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
3155 uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
3157 return address_space_ldl_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
3160 /* warning: addr must be aligned */
3161 static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
3162 MemTxAttrs attrs,
3163 MemTxResult *result,
3164 enum device_endian endian)
3166 uint8_t *ptr;
3167 uint64_t val;
3168 MemoryRegion *mr;
3169 hwaddr l = 8;
3170 hwaddr addr1;
3171 MemTxResult r;
3172 bool release_lock = false;
3174 rcu_read_lock();
3175 mr = address_space_translate(as, addr, &addr1, &l,
3176 false);
3177 if (l < 8 || !memory_access_is_direct(mr, false)) {
3178 release_lock |= prepare_mmio_access(mr);
3180 /* I/O case */
3181 r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
3182 #if defined(TARGET_WORDS_BIGENDIAN)
3183 if (endian == DEVICE_LITTLE_ENDIAN) {
3184 val = bswap64(val);
3186 #else
3187 if (endian == DEVICE_BIG_ENDIAN) {
3188 val = bswap64(val);
3190 #endif
3191 } else {
3192 /* RAM case */
3193 ptr = qemu_get_ram_ptr(mr->ram_block,
3194 (memory_region_get_ram_addr(mr)
3195 & TARGET_PAGE_MASK)
3196 + addr1);
3197 switch (endian) {
3198 case DEVICE_LITTLE_ENDIAN:
3199 val = ldq_le_p(ptr);
3200 break;
3201 case DEVICE_BIG_ENDIAN:
3202 val = ldq_be_p(ptr);
3203 break;
3204 default:
3205 val = ldq_p(ptr);
3206 break;
3208 r = MEMTX_OK;
3210 if (result) {
3211 *result = r;
3213 if (release_lock) {
3214 qemu_mutex_unlock_iothread();
3216 rcu_read_unlock();
3217 return val;
3220 uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
3221 MemTxAttrs attrs, MemTxResult *result)
3223 return address_space_ldq_internal(as, addr, attrs, result,
3224 DEVICE_NATIVE_ENDIAN);
3227 uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
3228 MemTxAttrs attrs, MemTxResult *result)
3230 return address_space_ldq_internal(as, addr, attrs, result,
3231 DEVICE_LITTLE_ENDIAN);
3234 uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
3235 MemTxAttrs attrs, MemTxResult *result)
3237 return address_space_ldq_internal(as, addr, attrs, result,
3238 DEVICE_BIG_ENDIAN);
3241 uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
3243 return address_space_ldq(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
3246 uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
3248 return address_space_ldq_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
3251 uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
3253 return address_space_ldq_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
3256 /* XXX: optimize */
3257 uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
3258 MemTxAttrs attrs, MemTxResult *result)
3260 uint8_t val;
3261 MemTxResult r;
3263 r = address_space_rw(as, addr, attrs, &val, 1, 0);
3264 if (result) {
3265 *result = r;
3267 return val;
3270 uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
3272 return address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
3275 /* warning: addr must be aligned */
3276 static inline uint32_t address_space_lduw_internal(AddressSpace *as,
3277 hwaddr addr,
3278 MemTxAttrs attrs,
3279 MemTxResult *result,
3280 enum device_endian endian)
3282 uint8_t *ptr;
3283 uint64_t val;
3284 MemoryRegion *mr;
3285 hwaddr l = 2;
3286 hwaddr addr1;
3287 MemTxResult r;
3288 bool release_lock = false;
3290 rcu_read_lock();
3291 mr = address_space_translate(as, addr, &addr1, &l,
3292 false);
3293 if (l < 2 || !memory_access_is_direct(mr, false)) {
3294 release_lock |= prepare_mmio_access(mr);
3296 /* I/O case */
3297 r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
3298 #if defined(TARGET_WORDS_BIGENDIAN)
3299 if (endian == DEVICE_LITTLE_ENDIAN) {
3300 val = bswap16(val);
3302 #else
3303 if (endian == DEVICE_BIG_ENDIAN) {
3304 val = bswap16(val);
3306 #endif
3307 } else {
3308 /* RAM case */
3309 ptr = qemu_get_ram_ptr(mr->ram_block,
3310 (memory_region_get_ram_addr(mr)
3311 & TARGET_PAGE_MASK)
3312 + addr1);
3313 switch (endian) {
3314 case DEVICE_LITTLE_ENDIAN:
3315 val = lduw_le_p(ptr);
3316 break;
3317 case DEVICE_BIG_ENDIAN:
3318 val = lduw_be_p(ptr);
3319 break;
3320 default:
3321 val = lduw_p(ptr);
3322 break;
3324 r = MEMTX_OK;
3326 if (result) {
3327 *result = r;
3329 if (release_lock) {
3330 qemu_mutex_unlock_iothread();
3332 rcu_read_unlock();
3333 return val;
3336 uint32_t address_space_lduw(AddressSpace *as, hwaddr addr,
3337 MemTxAttrs attrs, MemTxResult *result)
3339 return address_space_lduw_internal(as, addr, attrs, result,
3340 DEVICE_NATIVE_ENDIAN);
3343 uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
3344 MemTxAttrs attrs, MemTxResult *result)
3346 return address_space_lduw_internal(as, addr, attrs, result,
3347 DEVICE_LITTLE_ENDIAN);
3350 uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
3351 MemTxAttrs attrs, MemTxResult *result)
3353 return address_space_lduw_internal(as, addr, attrs, result,
3354 DEVICE_BIG_ENDIAN);
3357 uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
3359 return address_space_lduw(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
3362 uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
3364 return address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
3367 uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
3369 return address_space_lduw_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
3372 /* warning: addr must be aligned. The ram page is not masked as dirty
3373 and the code inside is not invalidated. It is useful if the dirty
3374 bits are used to track modified PTEs */
3375 void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
3376 MemTxAttrs attrs, MemTxResult *result)
3378 uint8_t *ptr;
3379 MemoryRegion *mr;
3380 hwaddr l = 4;
3381 hwaddr addr1;
3382 MemTxResult r;
3383 uint8_t dirty_log_mask;
3384 bool release_lock = false;
3386 rcu_read_lock();
3387 mr = address_space_translate(as, addr, &addr1, &l,
3388 true);
3389 if (l < 4 || !memory_access_is_direct(mr, true)) {
3390 release_lock |= prepare_mmio_access(mr);
3392 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
3393 } else {
3394 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
3395 ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
3396 stl_p(ptr, val);
3398 dirty_log_mask = memory_region_get_dirty_log_mask(mr);
3399 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
3400 cpu_physical_memory_set_dirty_range(addr1, 4, dirty_log_mask);
3401 r = MEMTX_OK;
3403 if (result) {
3404 *result = r;
3406 if (release_lock) {
3407 qemu_mutex_unlock_iothread();
3409 rcu_read_unlock();
3412 void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
3414 address_space_stl_notdirty(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3417 /* warning: addr must be aligned */
3418 static inline void address_space_stl_internal(AddressSpace *as,
3419 hwaddr addr, uint32_t val,
3420 MemTxAttrs attrs,
3421 MemTxResult *result,
3422 enum device_endian endian)
3424 uint8_t *ptr;
3425 MemoryRegion *mr;
3426 hwaddr l = 4;
3427 hwaddr addr1;
3428 MemTxResult r;
3429 bool release_lock = false;
3431 rcu_read_lock();
3432 mr = address_space_translate(as, addr, &addr1, &l,
3433 true);
3434 if (l < 4 || !memory_access_is_direct(mr, true)) {
3435 release_lock |= prepare_mmio_access(mr);
3437 #if defined(TARGET_WORDS_BIGENDIAN)
3438 if (endian == DEVICE_LITTLE_ENDIAN) {
3439 val = bswap32(val);
3441 #else
3442 if (endian == DEVICE_BIG_ENDIAN) {
3443 val = bswap32(val);
3445 #endif
3446 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
3447 } else {
3448 /* RAM case */
3449 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
3450 ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
3451 switch (endian) {
3452 case DEVICE_LITTLE_ENDIAN:
3453 stl_le_p(ptr, val);
3454 break;
3455 case DEVICE_BIG_ENDIAN:
3456 stl_be_p(ptr, val);
3457 break;
3458 default:
3459 stl_p(ptr, val);
3460 break;
3462 invalidate_and_set_dirty(mr, addr1, 4);
3463 r = MEMTX_OK;
3465 if (result) {
3466 *result = r;
3468 if (release_lock) {
3469 qemu_mutex_unlock_iothread();
3471 rcu_read_unlock();
3474 void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
3475 MemTxAttrs attrs, MemTxResult *result)
3477 address_space_stl_internal(as, addr, val, attrs, result,
3478 DEVICE_NATIVE_ENDIAN);
3481 void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
3482 MemTxAttrs attrs, MemTxResult *result)
3484 address_space_stl_internal(as, addr, val, attrs, result,
3485 DEVICE_LITTLE_ENDIAN);
3488 void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
3489 MemTxAttrs attrs, MemTxResult *result)
3491 address_space_stl_internal(as, addr, val, attrs, result,
3492 DEVICE_BIG_ENDIAN);
3495 void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
3497 address_space_stl(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3500 void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
3502 address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3505 void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
3507 address_space_stl_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3510 /* XXX: optimize */
3511 void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
3512 MemTxAttrs attrs, MemTxResult *result)
3514 uint8_t v = val;
3515 MemTxResult r;
3517 r = address_space_rw(as, addr, attrs, &v, 1, 1);
3518 if (result) {
3519 *result = r;
3523 void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
3525 address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3528 /* warning: addr must be aligned */
3529 static inline void address_space_stw_internal(AddressSpace *as,
3530 hwaddr addr, uint32_t val,
3531 MemTxAttrs attrs,
3532 MemTxResult *result,
3533 enum device_endian endian)
3535 uint8_t *ptr;
3536 MemoryRegion *mr;
3537 hwaddr l = 2;
3538 hwaddr addr1;
3539 MemTxResult r;
3540 bool release_lock = false;
3542 rcu_read_lock();
3543 mr = address_space_translate(as, addr, &addr1, &l, true);
3544 if (l < 2 || !memory_access_is_direct(mr, true)) {
3545 release_lock |= prepare_mmio_access(mr);
3547 #if defined(TARGET_WORDS_BIGENDIAN)
3548 if (endian == DEVICE_LITTLE_ENDIAN) {
3549 val = bswap16(val);
3551 #else
3552 if (endian == DEVICE_BIG_ENDIAN) {
3553 val = bswap16(val);
3555 #endif
3556 r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
3557 } else {
3558 /* RAM case */
3559 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
3560 ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
3561 switch (endian) {
3562 case DEVICE_LITTLE_ENDIAN:
3563 stw_le_p(ptr, val);
3564 break;
3565 case DEVICE_BIG_ENDIAN:
3566 stw_be_p(ptr, val);
3567 break;
3568 default:
3569 stw_p(ptr, val);
3570 break;
3572 invalidate_and_set_dirty(mr, addr1, 2);
3573 r = MEMTX_OK;
3575 if (result) {
3576 *result = r;
3578 if (release_lock) {
3579 qemu_mutex_unlock_iothread();
3581 rcu_read_unlock();
3584 void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
3585 MemTxAttrs attrs, MemTxResult *result)
3587 address_space_stw_internal(as, addr, val, attrs, result,
3588 DEVICE_NATIVE_ENDIAN);
3591 void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
3592 MemTxAttrs attrs, MemTxResult *result)
3594 address_space_stw_internal(as, addr, val, attrs, result,
3595 DEVICE_LITTLE_ENDIAN);
3598 void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
3599 MemTxAttrs attrs, MemTxResult *result)
3601 address_space_stw_internal(as, addr, val, attrs, result,
3602 DEVICE_BIG_ENDIAN);
3605 void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
3607 address_space_stw(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3610 void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
3612 address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3615 void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
3617 address_space_stw_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3620 /* XXX: optimize */
3621 void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
3622 MemTxAttrs attrs, MemTxResult *result)
3624 MemTxResult r;
3625 val = tswap64(val);
3626 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3627 if (result) {
3628 *result = r;
3632 void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
3633 MemTxAttrs attrs, MemTxResult *result)
3635 MemTxResult r;
3636 val = cpu_to_le64(val);
3637 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3638 if (result) {
3639 *result = r;
3642 void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
3643 MemTxAttrs attrs, MemTxResult *result)
3645 MemTxResult r;
3646 val = cpu_to_be64(val);
3647 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3648 if (result) {
3649 *result = r;
3653 void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
3655 address_space_stq(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3658 void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
3660 address_space_stq_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3663 void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
3665 address_space_stq_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3668 /* virtual memory access for debug (includes writing to ROM) */
3669 int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
3670 uint8_t *buf, int len, int is_write)
3672 int l;
3673 hwaddr phys_addr;
3674 target_ulong page;
3676 while (len > 0) {
3677 int asidx;
3678 MemTxAttrs attrs;
3680 page = addr & TARGET_PAGE_MASK;
3681 phys_addr = cpu_get_phys_page_attrs_debug(cpu, page, &attrs);
3682 asidx = cpu_asidx_from_attrs(cpu, attrs);
3683 /* if no physical page mapped, return an error */
3684 if (phys_addr == -1)
3685 return -1;
3686 l = (page + TARGET_PAGE_SIZE) - addr;
3687 if (l > len)
3688 l = len;
3689 phys_addr += (addr & ~TARGET_PAGE_MASK);
3690 if (is_write) {
3691 cpu_physical_memory_write_rom(cpu->cpu_ases[asidx].as,
3692 phys_addr, buf, l);
3693 } else {
3694 address_space_rw(cpu->cpu_ases[asidx].as, phys_addr,
3695 MEMTXATTRS_UNSPECIFIED,
3696 buf, l, 0);
3698 len -= l;
3699 buf += l;
3700 addr += l;
3702 return 0;
3706 * Allows code that needs to deal with migration bitmaps etc to still be built
3707 * target independent.
3709 size_t qemu_target_page_bits(void)
3711 return TARGET_PAGE_BITS;
3714 #endif
/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool target_words_bigendian(void);
bool target_words_bigendian(void)
{
    /* Compile-time property of the target, surfaced at runtime. */
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}
3730 #ifndef CONFIG_USER_ONLY
3731 bool cpu_physical_memory_is_io(hwaddr phys_addr)
3733 MemoryRegion*mr;
3734 hwaddr l = 1;
3735 bool res;
3737 rcu_read_lock();
3738 mr = address_space_translate(&address_space_memory,
3739 phys_addr, &phys_addr, &l, false);
3741 res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
3742 rcu_read_unlock();
3743 return res;
3746 int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
3748 RAMBlock *block;
3749 int ret = 0;
3751 rcu_read_lock();
3752 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
3753 ret = func(block->idstr, block->host, block->offset,
3754 block->used_length, opaque);
3755 if (ret) {
3756 break;
3759 rcu_read_unlock();
3760 return ret;
3762 #endif