1 /*
2 * Virtual page mapping
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 #include "qemu/osdep.h"
20 #include "qapi/error.h"
21 #ifndef _WIN32
22 #endif
24 #include "qemu/cutils.h"
25 #include "cpu.h"
26 #include "exec/exec-all.h"
27 #include "tcg.h"
28 #include "hw/qdev-core.h"
29 #if !defined(CONFIG_USER_ONLY)
30 #include "hw/boards.h"
31 #include "hw/xen/xen.h"
32 #endif
33 #include "sysemu/kvm.h"
34 #include "sysemu/sysemu.h"
35 #include "qemu/timer.h"
36 #include "qemu/config-file.h"
37 #include "qemu/error-report.h"
38 #if defined(CONFIG_USER_ONLY)
39 #include "qemu.h"
40 #else /* !CONFIG_USER_ONLY */
41 #include "hw/hw.h"
42 #include "exec/memory.h"
43 #include "exec/ioport.h"
44 #include "sysemu/dma.h"
45 #include "exec/address-spaces.h"
46 #include "sysemu/xen-mapcache.h"
47 #include "trace.h"
48 #endif
49 #include "exec/cpu-all.h"
50 #include "qemu/rcu_queue.h"
51 #include "qemu/main-loop.h"
52 #include "translate-all.h"
53 #include "sysemu/replay.h"
55 #include "exec/memory-internal.h"
56 #include "exec/ram_addr.h"
57 #include "exec/log.h"
59 #include "migration/vmstate.h"
61 #include "qemu/range.h"
62 #ifndef _WIN32
63 #include "qemu/mmap-alloc.h"
64 #endif
66 //#define DEBUG_SUBPAGE
68 #if !defined(CONFIG_USER_ONLY)
69 /* ram_list is read under rcu_read_lock()/rcu_read_unlock(). Writes
70 * are protected by the ramlist lock.
72 RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };
74 static MemoryRegion *system_memory;
75 static MemoryRegion *system_io;
77 AddressSpace address_space_io;
78 AddressSpace address_space_memory;
80 MemoryRegion io_mem_rom, io_mem_notdirty;
81 static MemoryRegion io_mem_unassigned;
83 /* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
84 #define RAM_PREALLOC (1 << 0)
86 /* RAM is mmap-ed with MAP_SHARED */
87 #define RAM_SHARED (1 << 1)
89 /* Only a portion of RAM (used_length) is actually used, and migrated.
90 * This used_length size can change across reboots.
92 #define RAM_RESIZEABLE (1 << 2)
94 #endif
96 #ifdef TARGET_PAGE_BITS_VARY
97 int target_page_bits;
98 bool target_page_bits_decided;
99 #endif
101 struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
102 /* current CPU in the current thread. It is only valid inside
103 cpu_exec() */
104 __thread CPUState *current_cpu;
105 /* 0 = Do not count executed instructions.
106 1 = Precise instruction counting.
107 2 = Adaptive rate instruction counting. */
108 int use_icount;
110 bool set_preferred_target_page_bits(int bits)
112 /* The target page size is the lowest common denominator for all
113 * the CPUs in the system, so we can only make it smaller, never
114 * larger. And we can't make it smaller once we've committed to
115 * a particular size.
117 #ifdef TARGET_PAGE_BITS_VARY
118 assert(bits >= TARGET_PAGE_BITS_MIN);
119 if (target_page_bits == 0 || target_page_bits > bits) {
120 if (target_page_bits_decided) {
121 return false;
123 target_page_bits = bits;
125 #endif
126 return true;
129 #if !defined(CONFIG_USER_ONLY)
131 static void finalize_target_page_bits(void)
133 #ifdef TARGET_PAGE_BITS_VARY
134 if (target_page_bits == 0) {
135 target_page_bits = TARGET_PAGE_BITS_MIN;
137 target_page_bits_decided = true;
138 #endif
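/*
 * Editor's note (illustrative, not part of the original file): with
 * TARGET_PAGE_BITS_VARY the final page size is the smallest one any CPU asks
 * for.  For example, if one CPU model calls set_preferred_target_page_bits(12)
 * and another later calls it with 10, target_page_bits ends up as 10; once
 * finalize_target_page_bits() has run, a further request to shrink it fails
 * (returns false) because target_page_bits_decided is already set.
 */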
141 typedef struct PhysPageEntry PhysPageEntry;
143 struct PhysPageEntry {
144     /* How many bits to skip to the next level (in units of L2_SIZE). 0 for a leaf. */
145 uint32_t skip : 6;
146 /* index into phys_sections (!skip) or phys_map_nodes (skip) */
147 uint32_t ptr : 26;
150 #define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)
152 /* Size of the L2 (and L3, etc) page tables. */
153 #define ADDR_SPACE_BITS 64
155 #define P_L2_BITS 9
156 #define P_L2_SIZE (1 << P_L2_BITS)
158 #define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
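/*
 * Editor's note (worked example, not part of the original file): with
 * ADDR_SPACE_BITS = 64, P_L2_BITS = 9 and a 4 KiB target page
 * (TARGET_PAGE_BITS = 12), P_L2_LEVELS = ((64 - 12 - 1) / 9) + 1 = 6, i.e. the
 * radix tree resolves the 52 page-index bits in six 9-bit steps of
 * P_L2_SIZE = 512 entries each.
 */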
160 typedef PhysPageEntry Node[P_L2_SIZE];
162 typedef struct PhysPageMap {
163 struct rcu_head rcu;
165 unsigned sections_nb;
166 unsigned sections_nb_alloc;
167 unsigned nodes_nb;
168 unsigned nodes_nb_alloc;
169 Node *nodes;
170 MemoryRegionSection *sections;
171 } PhysPageMap;
173 struct AddressSpaceDispatch {
174 struct rcu_head rcu;
176 MemoryRegionSection *mru_section;
177 /* This is a multi-level map on the physical address space.
178 * The bottom level has pointers to MemoryRegionSections.
180 PhysPageEntry phys_map;
181 PhysPageMap map;
182 AddressSpace *as;
185 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
186 typedef struct subpage_t {
187 MemoryRegion iomem;
188 AddressSpace *as;
189 hwaddr base;
190 uint16_t sub_section[];
191 } subpage_t;
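/*
 * Editor's note (not part of the original file): a subpage_t stands in for a
 * target page that is shared by more than one MemoryRegionSection (for
 * instance two small MMIO regions inside the same target page).  Its
 * sub_section[] array maps every byte offset within the page
 * (SUBPAGE_IDX(addr)) to the section that really owns it, and accesses are
 * forwarded through subpage_ops further below.
 */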
193 #define PHYS_SECTION_UNASSIGNED 0
194 #define PHYS_SECTION_NOTDIRTY 1
195 #define PHYS_SECTION_ROM 2
196 #define PHYS_SECTION_WATCH 3
198 static void io_mem_init(void);
199 static void memory_map_init(void);
200 static void tcg_commit(MemoryListener *listener);
202 static MemoryRegion io_mem_watch;
205 * CPUAddressSpace: all the information a CPU needs about an AddressSpace
206 * @cpu: the CPU whose AddressSpace this is
207 * @as: the AddressSpace itself
208 * @memory_dispatch: its dispatch pointer (cached, RCU protected)
209 * @tcg_as_listener: listener for tracking changes to the AddressSpace
211 struct CPUAddressSpace {
212 CPUState *cpu;
213 AddressSpace *as;
214 struct AddressSpaceDispatch *memory_dispatch;
215 MemoryListener tcg_as_listener;
218 #endif
220 #if !defined(CONFIG_USER_ONLY)
222 static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
224 static unsigned alloc_hint = 16;
225 if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
226 map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, alloc_hint);
227 map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
228 map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
229 alloc_hint = map->nodes_nb_alloc;
233 static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
235 unsigned i;
236 uint32_t ret;
237 PhysPageEntry e;
238 PhysPageEntry *p;
240 ret = map->nodes_nb++;
241 p = map->nodes[ret];
242 assert(ret != PHYS_MAP_NODE_NIL);
243 assert(ret != map->nodes_nb_alloc);
245 e.skip = leaf ? 0 : 1;
246 e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
247 for (i = 0; i < P_L2_SIZE; ++i) {
248 memcpy(&p[i], &e, sizeof(e));
250 return ret;
253 static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
254 hwaddr *index, hwaddr *nb, uint16_t leaf,
255 int level)
257 PhysPageEntry *p;
258 hwaddr step = (hwaddr)1 << (level * P_L2_BITS);
260 if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
261 lp->ptr = phys_map_node_alloc(map, level == 0);
263 p = map->nodes[lp->ptr];
264 lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];
266 while (*nb && lp < &p[P_L2_SIZE]) {
267 if ((*index & (step - 1)) == 0 && *nb >= step) {
268 lp->skip = 0;
269 lp->ptr = leaf;
270 *index += step;
271 *nb -= step;
272 } else {
273 phys_page_set_level(map, lp, index, nb, leaf, level - 1);
275 ++lp;
279 static void phys_page_set(AddressSpaceDispatch *d,
280 hwaddr index, hwaddr nb,
281 uint16_t leaf)
283 /* Wildly overreserve - it doesn't matter much. */
284 phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);
286 phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
289 /* Compact a non-leaf page entry. Simply detect that the entry has a single child,
290 * and update our entry so we can skip it and go directly to the destination.
292 static void phys_page_compact(PhysPageEntry *lp, Node *nodes)
294 unsigned valid_ptr = P_L2_SIZE;
295 int valid = 0;
296 PhysPageEntry *p;
297 int i;
299 if (lp->ptr == PHYS_MAP_NODE_NIL) {
300 return;
303 p = nodes[lp->ptr];
304 for (i = 0; i < P_L2_SIZE; i++) {
305 if (p[i].ptr == PHYS_MAP_NODE_NIL) {
306 continue;
309 valid_ptr = i;
310 valid++;
311 if (p[i].skip) {
312 phys_page_compact(&p[i], nodes);
316 /* We can only compress if there's only one child. */
317 if (valid != 1) {
318 return;
321 assert(valid_ptr < P_L2_SIZE);
323 /* Don't compress if it won't fit in the # of bits we have. */
324 if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
325 return;
328 lp->ptr = p[valid_ptr].ptr;
329 if (!p[valid_ptr].skip) {
330 /* If our only child is a leaf, make this a leaf. */
331 /* By design, we should have made this node a leaf to begin with so we
332 * should never reach here.
333 * But since it's so simple to handle this, let's do it just in case we
334 * change this rule.
336 lp->skip = 0;
337 } else {
338 lp->skip += p[valid_ptr].skip;
342 static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
344 if (d->phys_map.skip) {
345 phys_page_compact(&d->phys_map, d->map.nodes);
349 static inline bool section_covers_addr(const MemoryRegionSection *section,
350 hwaddr addr)
352 /* Memory topology clips a memory region to [0, 2^64); size.hi > 0 means
353 * the section must cover the entire address space.
355 return int128_gethi(section->size) ||
356 range_covers_byte(section->offset_within_address_space,
357 int128_getlo(section->size), addr);
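/*
 * Editor's note (not part of the original file): section->size is an Int128,
 * so a non-zero high 64 bits means the section is at least 2^64 bytes long
 * and therefore covers every possible address; only otherwise does the
 * byte-range check against offset_within_address_space matter.
 */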
360 static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
361 Node *nodes, MemoryRegionSection *sections)
363 PhysPageEntry *p;
364 hwaddr index = addr >> TARGET_PAGE_BITS;
365 int i;
367 for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
368 if (lp.ptr == PHYS_MAP_NODE_NIL) {
369 return &sections[PHYS_SECTION_UNASSIGNED];
371 p = nodes[lp.ptr];
372 lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
375 if (section_covers_addr(&sections[lp.ptr], addr)) {
376 return &sections[lp.ptr];
377 } else {
378 return &sections[PHYS_SECTION_UNASSIGNED];
382 bool memory_region_is_unassigned(MemoryRegion *mr)
384 return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
385 && mr != &io_mem_watch;
388 /* Called from RCU critical section */
389 static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
390 hwaddr addr,
391 bool resolve_subpage)
393 MemoryRegionSection *section = atomic_read(&d->mru_section);
394 subpage_t *subpage;
395 bool update;
397 if (section && section != &d->map.sections[PHYS_SECTION_UNASSIGNED] &&
398 section_covers_addr(section, addr)) {
399 update = false;
400 } else {
401 section = phys_page_find(d->phys_map, addr, d->map.nodes,
402 d->map.sections);
403 update = true;
405 if (resolve_subpage && section->mr->subpage) {
406 subpage = container_of(section->mr, subpage_t, iomem);
407 section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
409 if (update) {
410 atomic_set(&d->mru_section, section);
412 return section;
415 /* Called from RCU critical section */
416 static MemoryRegionSection *
417 address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
418 hwaddr *plen, bool resolve_subpage)
420 MemoryRegionSection *section;
421 MemoryRegion *mr;
422 Int128 diff;
424 section = address_space_lookup_region(d, addr, resolve_subpage);
425 /* Compute offset within MemoryRegionSection */
426 addr -= section->offset_within_address_space;
428 /* Compute offset within MemoryRegion */
429 *xlat = addr + section->offset_within_region;
431 mr = section->mr;
433 /* MMIO registers can be expected to perform full-width accesses based only
434 * on their address, without considering adjacent registers that could
435 * decode to completely different MemoryRegions. When such registers
436 * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
437 * regions overlap wildly. For this reason we cannot clamp the accesses
438 * here.
440 * If the length is small (as is the case for address_space_ldl/stl),
441 * everything works fine. If the incoming length is large, however,
442 * the caller really has to do the clamping through memory_access_size.
444 if (memory_region_is_ram(mr)) {
445 diff = int128_sub(section->size, int128_make64(addr));
446 *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
448 return section;
451 /* Called from RCU critical section */
452 MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
453 hwaddr *xlat, hwaddr *plen,
454 bool is_write)
456 IOMMUTLBEntry iotlb;
457 MemoryRegionSection *section;
458 MemoryRegion *mr;
460 for (;;) {
461 AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
462 section = address_space_translate_internal(d, addr, &addr, plen, true);
463 mr = section->mr;
465 if (!mr->iommu_ops) {
466 break;
469 iotlb = mr->iommu_ops->translate(mr, addr, is_write);
470 addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
471 | (addr & iotlb.addr_mask));
472 *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
473 if (!(iotlb.perm & (1 << is_write))) {
474 mr = &io_mem_unassigned;
475 break;
478 as = iotlb.target_as;
481 if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
482 hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
483 *plen = MIN(page, *plen);
486 *xlat = addr;
487 return mr;
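/*
 * Editor's note (illustrative sketch, not part of the original file): the
 * loop above peels off one IOMMU layer per iteration; each translate()
 * callback returns a translated address, a permission mask and the
 * AddressSpace to continue in, and *plen is clamped so the caller never
 * crosses an IOMMU mapping.  A typical caller looks roughly like:
 *
 *     rcu_read_lock();
 *     mr = address_space_translate(as, addr, &xlat, &len, is_write);
 *     // ... access mr at offset xlat for at most len bytes ...
 *     rcu_read_unlock();
 */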
490 /* Called from RCU critical section */
491 MemoryRegionSection *
492 address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
493 hwaddr *xlat, hwaddr *plen)
495 MemoryRegionSection *section;
496 AddressSpaceDispatch *d = cpu->cpu_ases[asidx].memory_dispatch;
498 section = address_space_translate_internal(d, addr, xlat, plen, false);
500 assert(!section->mr->iommu_ops);
501 return section;
503 #endif
505 #if !defined(CONFIG_USER_ONLY)
507 static int cpu_common_post_load(void *opaque, int version_id)
509 CPUState *cpu = opaque;
511 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
512 version_id is increased. */
513 cpu->interrupt_request &= ~0x01;
514 tlb_flush(cpu, 1);
516 return 0;
519 static int cpu_common_pre_load(void *opaque)
521 CPUState *cpu = opaque;
523 cpu->exception_index = -1;
525 return 0;
528 static bool cpu_common_exception_index_needed(void *opaque)
530 CPUState *cpu = opaque;
532 return tcg_enabled() && cpu->exception_index != -1;
535 static const VMStateDescription vmstate_cpu_common_exception_index = {
536 .name = "cpu_common/exception_index",
537 .version_id = 1,
538 .minimum_version_id = 1,
539 .needed = cpu_common_exception_index_needed,
540 .fields = (VMStateField[]) {
541 VMSTATE_INT32(exception_index, CPUState),
542 VMSTATE_END_OF_LIST()
546 static bool cpu_common_crash_occurred_needed(void *opaque)
548 CPUState *cpu = opaque;
550 return cpu->crash_occurred;
553 static const VMStateDescription vmstate_cpu_common_crash_occurred = {
554 .name = "cpu_common/crash_occurred",
555 .version_id = 1,
556 .minimum_version_id = 1,
557 .needed = cpu_common_crash_occurred_needed,
558 .fields = (VMStateField[]) {
559 VMSTATE_BOOL(crash_occurred, CPUState),
560 VMSTATE_END_OF_LIST()
564 const VMStateDescription vmstate_cpu_common = {
565 .name = "cpu_common",
566 .version_id = 1,
567 .minimum_version_id = 1,
568 .pre_load = cpu_common_pre_load,
569 .post_load = cpu_common_post_load,
570 .fields = (VMStateField[]) {
571 VMSTATE_UINT32(halted, CPUState),
572 VMSTATE_UINT32(interrupt_request, CPUState),
573 VMSTATE_END_OF_LIST()
575 .subsections = (const VMStateDescription*[]) {
576 &vmstate_cpu_common_exception_index,
577 &vmstate_cpu_common_crash_occurred,
578 NULL
582 #endif
584 CPUState *qemu_get_cpu(int index)
586 CPUState *cpu;
588 CPU_FOREACH(cpu) {
589 if (cpu->cpu_index == index) {
590 return cpu;
594 return NULL;
597 #if !defined(CONFIG_USER_ONLY)
598 void cpu_address_space_init(CPUState *cpu, AddressSpace *as, int asidx)
600 CPUAddressSpace *newas;
602 /* Target code should have set num_ases before calling us */
603 assert(asidx < cpu->num_ases);
605 if (asidx == 0) {
606 /* address space 0 gets the convenience alias */
607 cpu->as = as;
610 /* KVM cannot currently support multiple address spaces. */
611 assert(asidx == 0 || !kvm_enabled());
613 if (!cpu->cpu_ases) {
614 cpu->cpu_ases = g_new0(CPUAddressSpace, cpu->num_ases);
617 newas = &cpu->cpu_ases[asidx];
618 newas->cpu = cpu;
619 newas->as = as;
620 if (tcg_enabled()) {
621 newas->tcg_as_listener.commit = tcg_commit;
622 memory_listener_register(&newas->tcg_as_listener, as);
626 AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx)
628 /* Return the AddressSpace corresponding to the specified index */
629 return cpu->cpu_ases[asidx].as;
631 #endif
633 void cpu_exec_unrealizefn(CPUState *cpu)
635 CPUClass *cc = CPU_GET_CLASS(cpu);
637 cpu_list_remove(cpu);
639 if (cc->vmsd != NULL) {
640 vmstate_unregister(NULL, cc->vmsd, cpu);
642 if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
643 vmstate_unregister(NULL, &vmstate_cpu_common, cpu);
647 void cpu_exec_initfn(CPUState *cpu)
649 cpu->as = NULL;
650 cpu->num_ases = 0;
652 #ifndef CONFIG_USER_ONLY
653 cpu->thread_id = qemu_get_thread_id();
655 /* This is a softmmu CPU object, so create a property for it
656 * so users can wire up its memory. (This can't go in qom/cpu.c
657 * because that file is compiled only once for both user-mode
658 * and system builds.) The default if no link is set up is to use
659 * the system address space.
661 object_property_add_link(OBJECT(cpu), "memory", TYPE_MEMORY_REGION,
662 (Object **)&cpu->memory,
663 qdev_prop_allow_set_link_before_realize,
664 OBJ_PROP_LINK_UNREF_ON_RELEASE,
665 &error_abort);
666 cpu->memory = system_memory;
667 object_ref(OBJECT(cpu->memory));
668 #endif
671 void cpu_exec_realizefn(CPUState *cpu, Error **errp)
673 CPUClass *cc ATTRIBUTE_UNUSED = CPU_GET_CLASS(cpu);
675 cpu_list_add(cpu);
677 #ifndef CONFIG_USER_ONLY
678 if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
679 vmstate_register(NULL, cpu->cpu_index, &vmstate_cpu_common, cpu);
681 if (cc->vmsd != NULL) {
682 vmstate_register(NULL, cpu->cpu_index, cc->vmsd, cpu);
684 #endif
687 #if defined(CONFIG_USER_ONLY)
688 static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
690 tb_invalidate_phys_page_range(pc, pc + 1, 0);
692 #else
693 static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
695 MemTxAttrs attrs;
696 hwaddr phys = cpu_get_phys_page_attrs_debug(cpu, pc, &attrs);
697 int asidx = cpu_asidx_from_attrs(cpu, attrs);
698 if (phys != -1) {
699 tb_invalidate_phys_addr(cpu->cpu_ases[asidx].as,
700 phys | (pc & ~TARGET_PAGE_MASK));
703 #endif
705 #if defined(CONFIG_USER_ONLY)
706 void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
711 int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
712 int flags)
714 return -ENOSYS;
717 void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
721 int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
722 int flags, CPUWatchpoint **watchpoint)
724 return -ENOSYS;
726 #else
727 /* Add a watchpoint. */
728 int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
729 int flags, CPUWatchpoint **watchpoint)
731 CPUWatchpoint *wp;
733 /* forbid ranges which are empty or run off the end of the address space */
734 if (len == 0 || (addr + len - 1) < addr) {
735 error_report("tried to set invalid watchpoint at %"
736 VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
737 return -EINVAL;
739 wp = g_malloc(sizeof(*wp));
741 wp->vaddr = addr;
742 wp->len = len;
743 wp->flags = flags;
745 /* keep all GDB-injected watchpoints in front */
746 if (flags & BP_GDB) {
747 QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
748 } else {
749 QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
752 tlb_flush_page(cpu, addr);
754 if (watchpoint)
755 *watchpoint = wp;
756 return 0;
759 /* Remove a specific watchpoint. */
760 int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
761 int flags)
763 CPUWatchpoint *wp;
765 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
766 if (addr == wp->vaddr && len == wp->len
767 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
768 cpu_watchpoint_remove_by_ref(cpu, wp);
769 return 0;
772 return -ENOENT;
775 /* Remove a specific watchpoint by reference. */
776 void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
778 QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);
780 tlb_flush_page(cpu, watchpoint->vaddr);
782 g_free(watchpoint);
785 /* Remove all matching watchpoints. */
786 void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
788 CPUWatchpoint *wp, *next;
790 QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
791 if (wp->flags & mask) {
792 cpu_watchpoint_remove_by_ref(cpu, wp);
797 /* Return true if this watchpoint address matches the specified
798 * access (ie the address range covered by the watchpoint overlaps
799 * partially or completely with the address range covered by the
800 * access).
802 static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
803 vaddr addr,
804 vaddr len)
806 /* We know the lengths are non-zero, but a little caution is
807 * required to avoid errors in the case where the range ends
808 * exactly at the top of the address space and so addr + len
809 * wraps round to zero.
811 vaddr wpend = wp->vaddr + wp->len - 1;
812 vaddr addrend = addr + len - 1;
814 return !(addr > wpend || wp->vaddr > addrend);
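/*
 * Editor's note (worked example, not part of the original file): a watchpoint
 * at vaddr 0x1000 with len 4 covers [0x1000, 0x1003]; an access at addr
 * 0x1002 with len 2 covers [0x1002, 0x1003].  Neither range starts after the
 * other one ends, so the function reports a match.
 */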
817 #endif
819 /* Add a breakpoint. */
820 int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
821 CPUBreakpoint **breakpoint)
823 CPUBreakpoint *bp;
825 bp = g_malloc(sizeof(*bp));
827 bp->pc = pc;
828 bp->flags = flags;
830 /* keep all GDB-injected breakpoints in front */
831 if (flags & BP_GDB) {
832 QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
833 } else {
834 QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
837 breakpoint_invalidate(cpu, pc);
839 if (breakpoint) {
840 *breakpoint = bp;
842 return 0;
845 /* Remove a specific breakpoint. */
846 int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
848 CPUBreakpoint *bp;
850 QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
851 if (bp->pc == pc && bp->flags == flags) {
852 cpu_breakpoint_remove_by_ref(cpu, bp);
853 return 0;
856 return -ENOENT;
859 /* Remove a specific breakpoint by reference. */
860 void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
862 QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);
864 breakpoint_invalidate(cpu, breakpoint->pc);
866 g_free(breakpoint);
869 /* Remove all matching breakpoints. */
870 void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
872 CPUBreakpoint *bp, *next;
874 QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
875 if (bp->flags & mask) {
876 cpu_breakpoint_remove_by_ref(cpu, bp);
881 /* enable or disable single step mode. EXCP_DEBUG is returned by the
882 CPU loop after each instruction */
883 void cpu_single_step(CPUState *cpu, int enabled)
885 if (cpu->singlestep_enabled != enabled) {
886 cpu->singlestep_enabled = enabled;
887 if (kvm_enabled()) {
888 kvm_update_guest_debug(cpu, 0);
889 } else {
890 /* must flush all the translated code to avoid inconsistencies */
891 /* XXX: only flush what is necessary */
892 tb_flush(cpu);
897 void cpu_abort(CPUState *cpu, const char *fmt, ...)
899 va_list ap;
900 va_list ap2;
902 va_start(ap, fmt);
903 va_copy(ap2, ap);
904 fprintf(stderr, "qemu: fatal: ");
905 vfprintf(stderr, fmt, ap);
906 fprintf(stderr, "\n");
907 cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
908 if (qemu_log_separate()) {
909 qemu_log("qemu: fatal: ");
910 qemu_log_vprintf(fmt, ap2);
911 qemu_log("\n");
912 log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
913 qemu_log_flush();
914 qemu_log_close();
916 va_end(ap2);
917 va_end(ap);
918 replay_finish();
919 #if defined(CONFIG_USER_ONLY)
921 struct sigaction act;
922 sigfillset(&act.sa_mask);
923 act.sa_handler = SIG_DFL;
924 sigaction(SIGABRT, &act, NULL);
926 #endif
927 abort();
930 #if !defined(CONFIG_USER_ONLY)
931 /* Called from RCU critical section */
932 static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
934 RAMBlock *block;
936 block = atomic_rcu_read(&ram_list.mru_block);
937 if (block && addr - block->offset < block->max_length) {
938 return block;
940 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
941 if (addr - block->offset < block->max_length) {
942 goto found;
946 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
947 abort();
949 found:
950 /* It is safe to write mru_block outside the iothread lock. This
951 * is what happens:
953 * mru_block = xxx
954 * rcu_read_unlock()
955 * xxx removed from list
956 * rcu_read_lock()
957 * read mru_block
958 * mru_block = NULL;
959 * call_rcu(reclaim_ramblock, xxx);
960 * rcu_read_unlock()
962 * atomic_rcu_set is not needed here. The block was already published
963 * when it was placed into the list. Here we're just making an extra
964 * copy of the pointer.
966 ram_list.mru_block = block;
967 return block;
970 static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
972 CPUState *cpu;
973 ram_addr_t start1;
974 RAMBlock *block;
975 ram_addr_t end;
977 end = TARGET_PAGE_ALIGN(start + length);
978 start &= TARGET_PAGE_MASK;
980 rcu_read_lock();
981 block = qemu_get_ram_block(start);
982 assert(block == qemu_get_ram_block(end - 1));
983 start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
984 CPU_FOREACH(cpu) {
985 tlb_reset_dirty(cpu, start1, length);
987 rcu_read_unlock();
990 /* Note: start and end must be within the same ram block. */
991 bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
992 ram_addr_t length,
993 unsigned client)
995 DirtyMemoryBlocks *blocks;
996 unsigned long end, page;
997 bool dirty = false;
999 if (length == 0) {
1000 return false;
1003 end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
1004 page = start >> TARGET_PAGE_BITS;
1006 rcu_read_lock();
1008 blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);
1010 while (page < end) {
1011 unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
1012 unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE;
1013 unsigned long num = MIN(end - page, DIRTY_MEMORY_BLOCK_SIZE - offset);
1015 dirty |= bitmap_test_and_clear_atomic(blocks->blocks[idx],
1016 offset, num);
1017 page += num;
1020 rcu_read_unlock();
1022 if (dirty && tcg_enabled()) {
1023 tlb_reset_dirty_range_all(start, length);
1026 return dirty;
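/*
 * Editor's note (not part of the original file): the dirty bitmap is split
 * into DIRTY_MEMORY_BLOCK_SIZE-page chunks so it can be grown under RCU (see
 * dirty_memory_extend() below).  The loop above therefore clears at most one
 * chunk per iteration: num is first limited to the remainder of the current
 * chunk (DIRTY_MEMORY_BLOCK_SIZE - offset) and then to the pages actually
 * left in the range (end - page).
 */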
1029 /* Called from RCU critical section */
1030 hwaddr memory_region_section_get_iotlb(CPUState *cpu,
1031 MemoryRegionSection *section,
1032 target_ulong vaddr,
1033 hwaddr paddr, hwaddr xlat,
1034 int prot,
1035 target_ulong *address)
1037 hwaddr iotlb;
1038 CPUWatchpoint *wp;
1040 if (memory_region_is_ram(section->mr)) {
1041 /* Normal RAM. */
1042 iotlb = memory_region_get_ram_addr(section->mr) + xlat;
1043 if (!section->readonly) {
1044 iotlb |= PHYS_SECTION_NOTDIRTY;
1045 } else {
1046 iotlb |= PHYS_SECTION_ROM;
1048 } else {
1049 AddressSpaceDispatch *d;
1051 d = atomic_rcu_read(&section->address_space->dispatch);
1052 iotlb = section - d->map.sections;
1053 iotlb += xlat;
1056 /* Make accesses to pages with watchpoints go via the
1057 watchpoint trap routines. */
1058 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
1059 if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
1060 /* Avoid trapping reads of pages with a write breakpoint. */
1061 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
1062 iotlb = PHYS_SECTION_WATCH + paddr;
1063 *address |= TLB_MMIO;
1064 break;
1069 return iotlb;
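/*
 * Editor's note (not part of the original file): for RAM-backed sections the
 * returned iotlb is the ram_addr of the page with PHYS_SECTION_NOTDIRTY or
 * PHYS_SECTION_ROM ORed into the low bits; for MMIO it is the index of the
 * section in the dispatch map plus the offset within it.  iotlb_to_region()
 * further down performs the reverse lookup.
 */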
1071 #endif /* defined(CONFIG_USER_ONLY) */
1073 #if !defined(CONFIG_USER_ONLY)
1075 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
1076 uint16_t section);
1077 static subpage_t *subpage_init(AddressSpace *as, hwaddr base);
1079 static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
1080 qemu_anon_ram_alloc;
1083  * Set a custom physical guest memory allocator.
1084 * Accelerators with unusual needs may need this. Hopefully, we can
1085 * get rid of it eventually.
1087 void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
1089 phys_mem_alloc = alloc;
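/*
 * Editor's note (illustrative sketch, not part of the original file): an
 * accelerator would typically install its replacement early, before any RAM
 * block is created, roughly like:
 *
 *     static void *my_ram_alloc(size_t size, uint64_t *align) { ... }
 *     ...
 *     phys_mem_set_alloc(my_ram_alloc);
 *
 * (my_ram_alloc is a hypothetical name.)  Note that
 * qemu_ram_alloc_from_file() below refuses to run once a custom allocator is
 * installed.
 */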
1092 static uint16_t phys_section_add(PhysPageMap *map,
1093 MemoryRegionSection *section)
1095 /* The physical section number is ORed with a page-aligned
1096 * pointer to produce the iotlb entries. Thus it should
1097 * never overflow into the page-aligned value.
1099 assert(map->sections_nb < TARGET_PAGE_SIZE);
1101 if (map->sections_nb == map->sections_nb_alloc) {
1102 map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
1103 map->sections = g_renew(MemoryRegionSection, map->sections,
1104 map->sections_nb_alloc);
1106 map->sections[map->sections_nb] = *section;
1107 memory_region_ref(section->mr);
1108 return map->sections_nb++;
1111 static void phys_section_destroy(MemoryRegion *mr)
1113 bool have_sub_page = mr->subpage;
1115 memory_region_unref(mr);
1117 if (have_sub_page) {
1118 subpage_t *subpage = container_of(mr, subpage_t, iomem);
1119 object_unref(OBJECT(&subpage->iomem));
1120 g_free(subpage);
1124 static void phys_sections_free(PhysPageMap *map)
1126 while (map->sections_nb > 0) {
1127 MemoryRegionSection *section = &map->sections[--map->sections_nb];
1128 phys_section_destroy(section->mr);
1130 g_free(map->sections);
1131 g_free(map->nodes);
1134 static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
1136 subpage_t *subpage;
1137 hwaddr base = section->offset_within_address_space
1138 & TARGET_PAGE_MASK;
1139 MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
1140 d->map.nodes, d->map.sections);
1141 MemoryRegionSection subsection = {
1142 .offset_within_address_space = base,
1143 .size = int128_make64(TARGET_PAGE_SIZE),
1145 hwaddr start, end;
1147 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
1149 if (!(existing->mr->subpage)) {
1150 subpage = subpage_init(d->as, base);
1151 subsection.address_space = d->as;
1152 subsection.mr = &subpage->iomem;
1153 phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
1154 phys_section_add(&d->map, &subsection));
1155 } else {
1156 subpage = container_of(existing->mr, subpage_t, iomem);
1158 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
1159 end = start + int128_get64(section->size) - 1;
1160 subpage_register(subpage, start, end,
1161 phys_section_add(&d->map, section));
1165 static void register_multipage(AddressSpaceDispatch *d,
1166 MemoryRegionSection *section)
1168 hwaddr start_addr = section->offset_within_address_space;
1169 uint16_t section_index = phys_section_add(&d->map, section);
1170 uint64_t num_pages = int128_get64(int128_rshift(section->size,
1171 TARGET_PAGE_BITS));
1173 assert(num_pages);
1174 phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
1177 static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
1179 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
1180 AddressSpaceDispatch *d = as->next_dispatch;
1181 MemoryRegionSection now = *section, remain = *section;
1182 Int128 page_size = int128_make64(TARGET_PAGE_SIZE);
1184 if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
1185 uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
1186 - now.offset_within_address_space;
1188 now.size = int128_min(int128_make64(left), now.size);
1189 register_subpage(d, &now);
1190 } else {
1191 now.size = int128_zero();
1193 while (int128_ne(remain.size, now.size)) {
1194 remain.size = int128_sub(remain.size, now.size);
1195 remain.offset_within_address_space += int128_get64(now.size);
1196 remain.offset_within_region += int128_get64(now.size);
1197 now = remain;
1198 if (int128_lt(remain.size, page_size)) {
1199 register_subpage(d, &now);
1200 } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
1201 now.size = page_size;
1202 register_subpage(d, &now);
1203 } else {
1204 now.size = int128_and(now.size, int128_neg(page_size));
1205 register_multipage(d, &now);
1210 void qemu_flush_coalesced_mmio_buffer(void)
1212 if (kvm_enabled())
1213 kvm_flush_coalesced_mmio_buffer();
1216 void qemu_mutex_lock_ramlist(void)
1218 qemu_mutex_lock(&ram_list.mutex);
1221 void qemu_mutex_unlock_ramlist(void)
1223 qemu_mutex_unlock(&ram_list.mutex);
1226 #ifdef __linux__
1227 static void *file_ram_alloc(RAMBlock *block,
1228 ram_addr_t memory,
1229 const char *path,
1230 Error **errp)
1232 bool unlink_on_error = false;
1233 char *filename;
1234 char *sanitized_name;
1235 char *c;
1236 void *area = MAP_FAILED;
1237 int fd = -1;
1239 if (kvm_enabled() && !kvm_has_sync_mmu()) {
1240 error_setg(errp,
1241 "host lacks kvm mmu notifiers, -mem-path unsupported");
1242 return NULL;
1245 for (;;) {
1246 fd = open(path, O_RDWR);
1247 if (fd >= 0) {
1248 /* @path names an existing file, use it */
1249 break;
1251 if (errno == ENOENT) {
1252 /* @path names a file that doesn't exist, create it */
1253 fd = open(path, O_RDWR | O_CREAT | O_EXCL, 0644);
1254 if (fd >= 0) {
1255 unlink_on_error = true;
1256 break;
1258 } else if (errno == EISDIR) {
1259 /* @path names a directory, create a file there */
1260 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
1261 sanitized_name = g_strdup(memory_region_name(block->mr));
1262 for (c = sanitized_name; *c != '\0'; c++) {
1263 if (*c == '/') {
1264 *c = '_';
1268 filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
1269 sanitized_name);
1270 g_free(sanitized_name);
1272 fd = mkstemp(filename);
1273 if (fd >= 0) {
1274 unlink(filename);
1275 g_free(filename);
1276 break;
1278 g_free(filename);
1280 if (errno != EEXIST && errno != EINTR) {
1281 error_setg_errno(errp, errno,
1282 "can't open backing store %s for guest RAM",
1283 path);
1284 goto error;
1287 * Try again on EINTR and EEXIST. The latter happens when
1288 * something else creates the file between our two open().
1292 block->page_size = qemu_fd_getpagesize(fd);
1293 block->mr->align = block->page_size;
1294 #if defined(__s390x__)
1295 if (kvm_enabled()) {
1296 block->mr->align = MAX(block->mr->align, QEMU_VMALLOC_ALIGN);
1298 #endif
1300 if (memory < block->page_size) {
1301 error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
1302 "or larger than page size 0x%zx",
1303 memory, block->page_size);
1304 goto error;
1307 memory = ROUND_UP(memory, block->page_size);
1310 * ftruncate is not supported by hugetlbfs in older
1311 * hosts, so don't bother bailing out on errors.
1312 * If anything goes wrong with it under other filesystems,
1313 * mmap will fail.
1315 if (ftruncate(fd, memory)) {
1316 perror("ftruncate");
1319 area = qemu_ram_mmap(fd, memory, block->mr->align,
1320 block->flags & RAM_SHARED);
1321 if (area == MAP_FAILED) {
1322 error_setg_errno(errp, errno,
1323 "unable to map backing store for guest RAM");
1324 goto error;
1327 if (mem_prealloc) {
1328 os_mem_prealloc(fd, area, memory, errp);
1329 if (errp && *errp) {
1330 goto error;
1334 block->fd = fd;
1335 return area;
1337 error:
1338 if (area != MAP_FAILED) {
1339 qemu_ram_munmap(area, memory);
1341 if (unlink_on_error) {
1342 unlink(path);
1344 if (fd != -1) {
1345 close(fd);
1347 return NULL;
1349 #endif
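/*
 * Editor's note (not part of the original file): file_ram_alloc() is what
 * backs the -mem-path/-mem-prealloc command line options, e.g. running QEMU
 * with "-mem-path /dev/hugepages" turns guest RAM into a file mapping on
 * hugetlbfs.  The three open() cases above cover an existing file, a path to
 * be created, and a directory in which a temporary backing file is made.
 */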
1351 /* Called with the ramlist lock held. */
1352 static ram_addr_t find_ram_offset(ram_addr_t size)
1354 RAMBlock *block, *next_block;
1355 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
1357 assert(size != 0); /* it would hand out same offset multiple times */
1359 if (QLIST_EMPTY_RCU(&ram_list.blocks)) {
1360 return 0;
1363 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1364 ram_addr_t end, next = RAM_ADDR_MAX;
1366 end = block->offset + block->max_length;
1368 QLIST_FOREACH_RCU(next_block, &ram_list.blocks, next) {
1369 if (next_block->offset >= end) {
1370 next = MIN(next, next_block->offset);
1373 if (next - end >= size && next - end < mingap) {
1374 offset = end;
1375 mingap = next - end;
1379 if (offset == RAM_ADDR_MAX) {
1380 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
1381 (uint64_t)size);
1382 abort();
1385 return offset;
1388 ram_addr_t last_ram_offset(void)
1390 RAMBlock *block;
1391 ram_addr_t last = 0;
1393 rcu_read_lock();
1394 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1395 last = MAX(last, block->offset + block->max_length);
1397 rcu_read_unlock();
1398 return last;
1401 static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1403 int ret;
1405 /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
1406 if (!machine_dump_guest_core(current_machine)) {
1407 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1408 if (ret) {
1409 perror("qemu_madvise");
1410 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1411 "but dump_guest_core=off specified\n");
1416 const char *qemu_ram_get_idstr(RAMBlock *rb)
1418 return rb->idstr;
1421 /* Called with iothread lock held. */
1422 void qemu_ram_set_idstr(RAMBlock *new_block, const char *name, DeviceState *dev)
1424 RAMBlock *block;
1426 assert(new_block);
1427 assert(!new_block->idstr[0]);
1429 if (dev) {
1430 char *id = qdev_get_dev_path(dev);
1431 if (id) {
1432 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
1433 g_free(id);
1436 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1438 rcu_read_lock();
1439 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1440 if (block != new_block &&
1441 !strcmp(block->idstr, new_block->idstr)) {
1442 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1443 new_block->idstr);
1444 abort();
1447 rcu_read_unlock();
1450 /* Called with iothread lock held. */
1451 void qemu_ram_unset_idstr(RAMBlock *block)
1453 /* FIXME: arch_init.c assumes that this is not called throughout
1454 * migration. Ignore the problem since hot-unplug during migration
1455 * does not work anyway.
1457 if (block) {
1458 memset(block->idstr, 0, sizeof(block->idstr));
1462 size_t qemu_ram_pagesize(RAMBlock *rb)
1464 return rb->page_size;
1467 static int memory_try_enable_merging(void *addr, size_t len)
1469 if (!machine_mem_merge(current_machine)) {
1470 /* disabled by the user */
1471 return 0;
1474 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1477 /* Only legal before guest might have detected the memory size: e.g. on
1478 * incoming migration, or right after reset.
1480  * As the memory core doesn't know how the memory is accessed, it is up to
1481 * resize callback to update device state and/or add assertions to detect
1482 * misuse, if necessary.
1484 int qemu_ram_resize(RAMBlock *block, ram_addr_t newsize, Error **errp)
1486 assert(block);
1488 newsize = HOST_PAGE_ALIGN(newsize);
1490 if (block->used_length == newsize) {
1491 return 0;
1494 if (!(block->flags & RAM_RESIZEABLE)) {
1495 error_setg_errno(errp, EINVAL,
1496 "Length mismatch: %s: 0x" RAM_ADDR_FMT
1497 " in != 0x" RAM_ADDR_FMT, block->idstr,
1498 newsize, block->used_length);
1499 return -EINVAL;
1502 if (block->max_length < newsize) {
1503 error_setg_errno(errp, EINVAL,
1504 "Length too large: %s: 0x" RAM_ADDR_FMT
1505 " > 0x" RAM_ADDR_FMT, block->idstr,
1506 newsize, block->max_length);
1507 return -EINVAL;
1510 cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
1511 block->used_length = newsize;
1512 cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
1513 DIRTY_CLIENTS_ALL);
1514 memory_region_set_size(block->mr, newsize);
1515 if (block->resized) {
1516 block->resized(block->idstr, newsize, block->host);
1518 return 0;
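/*
 * Editor's note (not part of the original file): resizeable blocks are used
 * for blobs whose size may differ between the source and destination of a
 * migration (e.g. firmware/ACPI table ROM blobs).  used_length may change
 * within max_length, and the optional resized() callback lets the owning
 * device react, under the constraints spelled out in the comment above.
 */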
1521 /* Called with ram_list.mutex held */
1522 static void dirty_memory_extend(ram_addr_t old_ram_size,
1523 ram_addr_t new_ram_size)
1525 ram_addr_t old_num_blocks = DIV_ROUND_UP(old_ram_size,
1526 DIRTY_MEMORY_BLOCK_SIZE);
1527 ram_addr_t new_num_blocks = DIV_ROUND_UP(new_ram_size,
1528 DIRTY_MEMORY_BLOCK_SIZE);
1529 int i;
1531 /* Only need to extend if block count increased */
1532 if (new_num_blocks <= old_num_blocks) {
1533 return;
1536 for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
1537 DirtyMemoryBlocks *old_blocks;
1538 DirtyMemoryBlocks *new_blocks;
1539 int j;
1541 old_blocks = atomic_rcu_read(&ram_list.dirty_memory[i]);
1542 new_blocks = g_malloc(sizeof(*new_blocks) +
1543 sizeof(new_blocks->blocks[0]) * new_num_blocks);
1545 if (old_num_blocks) {
1546 memcpy(new_blocks->blocks, old_blocks->blocks,
1547 old_num_blocks * sizeof(old_blocks->blocks[0]));
1550 for (j = old_num_blocks; j < new_num_blocks; j++) {
1551 new_blocks->blocks[j] = bitmap_new(DIRTY_MEMORY_BLOCK_SIZE);
1554 atomic_rcu_set(&ram_list.dirty_memory[i], new_blocks);
1556 if (old_blocks) {
1557 g_free_rcu(old_blocks, rcu);
1562 static void ram_block_add(RAMBlock *new_block, Error **errp)
1564 RAMBlock *block;
1565 RAMBlock *last_block = NULL;
1566 ram_addr_t old_ram_size, new_ram_size;
1567 Error *err = NULL;
1569 old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
1571 qemu_mutex_lock_ramlist();
1572 new_block->offset = find_ram_offset(new_block->max_length);
1574 if (!new_block->host) {
1575 if (xen_enabled()) {
1576 xen_ram_alloc(new_block->offset, new_block->max_length,
1577 new_block->mr, &err);
1578 if (err) {
1579 error_propagate(errp, err);
1580 qemu_mutex_unlock_ramlist();
1581 return;
1583 } else {
1584 new_block->host = phys_mem_alloc(new_block->max_length,
1585 &new_block->mr->align);
1586 if (!new_block->host) {
1587 error_setg_errno(errp, errno,
1588 "cannot set up guest memory '%s'",
1589 memory_region_name(new_block->mr));
1590 qemu_mutex_unlock_ramlist();
1591 return;
1593 memory_try_enable_merging(new_block->host, new_block->max_length);
1597 new_ram_size = MAX(old_ram_size,
1598 (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS);
1599 if (new_ram_size > old_ram_size) {
1600 migration_bitmap_extend(old_ram_size, new_ram_size);
1601 dirty_memory_extend(old_ram_size, new_ram_size);
1603 /* Keep the list sorted from biggest to smallest block. Unlike QTAILQ,
1604 * QLIST (which has an RCU-friendly variant) does not have insertion at
1605 * tail, so save the last element in last_block.
1607 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1608 last_block = block;
1609 if (block->max_length < new_block->max_length) {
1610 break;
1613 if (block) {
1614 QLIST_INSERT_BEFORE_RCU(block, new_block, next);
1615 } else if (last_block) {
1616 QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
1617 } else { /* list is empty */
1618 QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
1620 ram_list.mru_block = NULL;
1622 /* Write list before version */
1623 smp_wmb();
1624 ram_list.version++;
1625 qemu_mutex_unlock_ramlist();
1627 cpu_physical_memory_set_dirty_range(new_block->offset,
1628 new_block->used_length,
1629 DIRTY_CLIENTS_ALL);
1631 if (new_block->host) {
1632 qemu_ram_setup_dump(new_block->host, new_block->max_length);
1633 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
1634 /* MADV_DONTFORK is also needed by KVM in absence of synchronous MMU */
1635 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
1639 #ifdef __linux__
1640 RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
1641 bool share, const char *mem_path,
1642 Error **errp)
1644 RAMBlock *new_block;
1645 Error *local_err = NULL;
1647 if (xen_enabled()) {
1648 error_setg(errp, "-mem-path not supported with Xen");
1649 return NULL;
1652 if (phys_mem_alloc != qemu_anon_ram_alloc) {
1654 * file_ram_alloc() needs to allocate just like
1655 * phys_mem_alloc, but we haven't bothered to provide
1656 * a hook there.
1658 error_setg(errp,
1659 "-mem-path not supported with this accelerator");
1660 return NULL;
1663 size = HOST_PAGE_ALIGN(size);
1664 new_block = g_malloc0(sizeof(*new_block));
1665 new_block->mr = mr;
1666 new_block->used_length = size;
1667 new_block->max_length = size;
1668 new_block->flags = share ? RAM_SHARED : 0;
1669 new_block->host = file_ram_alloc(new_block, size,
1670 mem_path, errp);
1671 if (!new_block->host) {
1672 g_free(new_block);
1673 return NULL;
1676 ram_block_add(new_block, &local_err);
1677 if (local_err) {
1678 g_free(new_block);
1679 error_propagate(errp, local_err);
1680 return NULL;
1682 return new_block;
1684 #endif
1686 static
1687 RAMBlock *qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
1688 void (*resized)(const char*,
1689 uint64_t length,
1690 void *host),
1691 void *host, bool resizeable,
1692 MemoryRegion *mr, Error **errp)
1694 RAMBlock *new_block;
1695 Error *local_err = NULL;
1697 size = HOST_PAGE_ALIGN(size);
1698 max_size = HOST_PAGE_ALIGN(max_size);
1699 new_block = g_malloc0(sizeof(*new_block));
1700 new_block->mr = mr;
1701 new_block->resized = resized;
1702 new_block->used_length = size;
1703 new_block->max_length = max_size;
1704 assert(max_size >= size);
1705 new_block->fd = -1;
1706 new_block->page_size = getpagesize();
1707 new_block->host = host;
1708 if (host) {
1709 new_block->flags |= RAM_PREALLOC;
1711 if (resizeable) {
1712 new_block->flags |= RAM_RESIZEABLE;
1714 ram_block_add(new_block, &local_err);
1715 if (local_err) {
1716 g_free(new_block);
1717 error_propagate(errp, local_err);
1718 return NULL;
1720 return new_block;
1723 RAMBlock *qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
1724 MemoryRegion *mr, Error **errp)
1726 return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
1729 RAMBlock *qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
1731 return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
1734 RAMBlock *qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
1735 void (*resized)(const char*,
1736 uint64_t length,
1737 void *host),
1738 MemoryRegion *mr, Error **errp)
1740 return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
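/*
 * Editor's note (illustrative sketch, not part of the original file): device
 * and board code normally reaches these helpers through the memory API
 * rather than calling them directly, e.g.
 *
 *     memory_region_init_ram(mr, owner, "board.ram", ram_size, &error_fatal);
 *
 * ("board.ram" is just an example name) which allocates host memory via
 * qemu_ram_alloc() and creates the RAMBlock managed by the functions above;
 * the block's idstr is filled in later through qemu_ram_set_idstr().
 */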
1743 static void reclaim_ramblock(RAMBlock *block)
1745 if (block->flags & RAM_PREALLOC) {
1747 } else if (xen_enabled()) {
1748 xen_invalidate_map_cache_entry(block->host);
1749 #ifndef _WIN32
1750 } else if (block->fd >= 0) {
1751 qemu_ram_munmap(block->host, block->max_length);
1752 close(block->fd);
1753 #endif
1754 } else {
1755 qemu_anon_ram_free(block->host, block->max_length);
1757 g_free(block);
1760 void qemu_ram_free(RAMBlock *block)
1762 if (!block) {
1763 return;
1766 qemu_mutex_lock_ramlist();
1767 QLIST_REMOVE_RCU(block, next);
1768 ram_list.mru_block = NULL;
1769 /* Write list before version */
1770 smp_wmb();
1771 ram_list.version++;
1772 call_rcu(block, reclaim_ramblock, rcu);
1773 qemu_mutex_unlock_ramlist();
1776 #ifndef _WIN32
1777 void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1779 RAMBlock *block;
1780 ram_addr_t offset;
1781 int flags;
1782 void *area, *vaddr;
1784 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1785 offset = addr - block->offset;
1786 if (offset < block->max_length) {
1787 vaddr = ramblock_ptr(block, offset);
1788 if (block->flags & RAM_PREALLOC) {
1790 } else if (xen_enabled()) {
1791 abort();
1792 } else {
1793 flags = MAP_FIXED;
1794 if (block->fd >= 0) {
1795 flags |= (block->flags & RAM_SHARED ?
1796 MAP_SHARED : MAP_PRIVATE);
1797 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1798 flags, block->fd, offset);
1799 } else {
1801 * Remap needs to match alloc. Accelerators that
1802 * set phys_mem_alloc never remap. If they did,
1803 * we'd need a remap hook here.
1805 assert(phys_mem_alloc == qemu_anon_ram_alloc);
1807 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1808 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1809 flags, -1, 0);
1811 if (area != vaddr) {
1812 fprintf(stderr, "Could not remap addr: "
1813 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
1814 length, addr);
1815 exit(1);
1817 memory_try_enable_merging(vaddr, length);
1818 qemu_ram_setup_dump(vaddr, length);
1823 #endif /* !_WIN32 */
1825 /* Return a host pointer to ram allocated with qemu_ram_alloc.
1826 * This should not be used for general purpose DMA. Use address_space_map
1827 * or address_space_rw instead. For local memory (e.g. video ram) that the
1828 * device owns, use memory_region_get_ram_ptr.
1830 * Called within RCU critical section.
1832 void *qemu_map_ram_ptr(RAMBlock *ram_block, ram_addr_t addr)
1834 RAMBlock *block = ram_block;
1836 if (block == NULL) {
1837 block = qemu_get_ram_block(addr);
1838 addr -= block->offset;
1841 if (xen_enabled() && block->host == NULL) {
1842 /* We need to check if the requested address is in the RAM
1843 * because we don't want to map the entire memory in QEMU.
1844 * In that case just map until the end of the page.
1846 if (block->offset == 0) {
1847 return xen_map_cache(addr, 0, 0);
1850 block->host = xen_map_cache(block->offset, block->max_length, 1);
1852 return ramblock_ptr(block, addr);
1855 /* Return a host pointer to guest's ram. Similar to qemu_map_ram_ptr
1856 * but takes a size argument.
1858 * Called within RCU critical section.
1860 static void *qemu_ram_ptr_length(RAMBlock *ram_block, ram_addr_t addr,
1861 hwaddr *size)
1863 RAMBlock *block = ram_block;
1864 if (*size == 0) {
1865 return NULL;
1868 if (block == NULL) {
1869 block = qemu_get_ram_block(addr);
1870 addr -= block->offset;
1872 *size = MIN(*size, block->max_length - addr);
1874 if (xen_enabled() && block->host == NULL) {
1875 /* We need to check if the requested address is in the RAM
1876 * because we don't want to map the entire memory in QEMU.
1877 * In that case just map the requested area.
1879 if (block->offset == 0) {
1880 return xen_map_cache(addr, *size, 1);
1883 block->host = xen_map_cache(block->offset, block->max_length, 1);
1886 return ramblock_ptr(block, addr);
1890 * Translates a host ptr back to a RAMBlock, a ram_addr and an offset
1891 * in that RAMBlock.
1893 * ptr: Host pointer to look up
1894 * round_offset: If true round the result offset down to a page boundary
1895 * *ram_addr: set to result ram_addr
1896 * *offset: set to result offset within the RAMBlock
1898 * Returns: RAMBlock (or NULL if not found)
1900 * By the time this function returns, the returned pointer is not protected
1901 * by RCU anymore. If the caller is not within an RCU critical section and
1902 * does not hold the iothread lock, it must have other means of protecting the
1903 * pointer, such as a reference to the region that includes the incoming
1904 * ram_addr_t.
1906 RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
1907 ram_addr_t *offset)
1909 RAMBlock *block;
1910 uint8_t *host = ptr;
1912 if (xen_enabled()) {
1913 ram_addr_t ram_addr;
1914 rcu_read_lock();
1915 ram_addr = xen_ram_addr_from_mapcache(ptr);
1916 block = qemu_get_ram_block(ram_addr);
1917 if (block) {
1918 *offset = ram_addr - block->offset;
1920 rcu_read_unlock();
1921 return block;
1924 rcu_read_lock();
1925 block = atomic_rcu_read(&ram_list.mru_block);
1926 if (block && block->host && host - block->host < block->max_length) {
1927 goto found;
1930 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1931         /* This case happens when the block is not mapped. */
1932 if (block->host == NULL) {
1933 continue;
1935 if (host - block->host < block->max_length) {
1936 goto found;
1940 rcu_read_unlock();
1941 return NULL;
1943 found:
1944 *offset = (host - block->host);
1945 if (round_offset) {
1946 *offset &= TARGET_PAGE_MASK;
1948 rcu_read_unlock();
1949 return block;
1953 * Finds the named RAMBlock
1955 * name: The name of RAMBlock to find
1957 * Returns: RAMBlock (or NULL if not found)
1959 RAMBlock *qemu_ram_block_by_name(const char *name)
1961 RAMBlock *block;
1963 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1964 if (!strcmp(name, block->idstr)) {
1965 return block;
1969 return NULL;
1972 /* Some of the softmmu routines need to translate from a host pointer
1973 (typically a TLB entry) back to a ram offset. */
1974 ram_addr_t qemu_ram_addr_from_host(void *ptr)
1976 RAMBlock *block;
1977 ram_addr_t offset;
1979 block = qemu_ram_block_from_host(ptr, false, &offset);
1980 if (!block) {
1981 return RAM_ADDR_INVALID;
1984 return block->offset + offset;
1987 /* Called within RCU critical section. */
1988 static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
1989 uint64_t val, unsigned size)
1991 if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
1992 tb_invalidate_phys_page_fast(ram_addr, size);
1994 switch (size) {
1995 case 1:
1996 stb_p(qemu_map_ram_ptr(NULL, ram_addr), val);
1997 break;
1998 case 2:
1999 stw_p(qemu_map_ram_ptr(NULL, ram_addr), val);
2000 break;
2001 case 4:
2002 stl_p(qemu_map_ram_ptr(NULL, ram_addr), val);
2003 break;
2004 default:
2005 abort();
2007 /* Set both VGA and migration bits for simplicity and to remove
2008 * the notdirty callback faster.
2010 cpu_physical_memory_set_dirty_range(ram_addr, size,
2011 DIRTY_CLIENTS_NOCODE);
2012 /* we remove the notdirty callback only if the code has been
2013 flushed */
2014 if (!cpu_physical_memory_is_clean(ram_addr)) {
2015 tlb_set_dirty(current_cpu, current_cpu->mem_io_vaddr);
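/*
 * Editor's note (not part of the original file): this is the write side of
 * self-modifying-code detection.  Pages containing translated code are mapped
 * through io_mem_notdirty, so a write lands here, invalidates the affected
 * TBs, marks the page dirty again and, once the translated code has been
 * flushed, lets the TLB map the page as ordinary RAM so later writes take
 * the fast path.
 */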
2019 static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
2020 unsigned size, bool is_write)
2022 return is_write;
2025 static const MemoryRegionOps notdirty_mem_ops = {
2026 .write = notdirty_mem_write,
2027 .valid.accepts = notdirty_mem_accepts,
2028 .endianness = DEVICE_NATIVE_ENDIAN,
2031 /* Generate a debug exception if a watchpoint has been hit. */
2032 static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
2034 CPUState *cpu = current_cpu;
2035 CPUClass *cc = CPU_GET_CLASS(cpu);
2036 CPUArchState *env = cpu->env_ptr;
2037 target_ulong pc, cs_base;
2038 target_ulong vaddr;
2039 CPUWatchpoint *wp;
2040 uint32_t cpu_flags;
2042 if (cpu->watchpoint_hit) {
2043 /* We re-entered the check after replacing the TB. Now raise
2044          * the debug interrupt so that it will trigger after the
2045 * current instruction. */
2046 cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
2047 return;
2049 vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
2050 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
2051 if (cpu_watchpoint_address_matches(wp, vaddr, len)
2052 && (wp->flags & flags)) {
2053 if (flags == BP_MEM_READ) {
2054 wp->flags |= BP_WATCHPOINT_HIT_READ;
2055 } else {
2056 wp->flags |= BP_WATCHPOINT_HIT_WRITE;
2058 wp->hitaddr = vaddr;
2059 wp->hitattrs = attrs;
2060 if (!cpu->watchpoint_hit) {
2061 if (wp->flags & BP_CPU &&
2062 !cc->debug_check_watchpoint(cpu, wp)) {
2063 wp->flags &= ~BP_WATCHPOINT_HIT;
2064 continue;
2066 cpu->watchpoint_hit = wp;
2067 tb_check_watchpoint(cpu);
2068 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
2069 cpu->exception_index = EXCP_DEBUG;
2070 cpu_loop_exit(cpu);
2071 } else {
2072 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
2073 tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
2074 cpu_loop_exit_noexc(cpu);
2077 } else {
2078 wp->flags &= ~BP_WATCHPOINT_HIT;
2083 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2084 so these check for a hit then pass through to the normal out-of-line
2085 phys routines. */
2086 static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata,
2087 unsigned size, MemTxAttrs attrs)
2089 MemTxResult res;
2090 uint64_t data;
2091 int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
2092 AddressSpace *as = current_cpu->cpu_ases[asidx].as;
2094 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ);
2095 switch (size) {
2096 case 1:
2097 data = address_space_ldub(as, addr, attrs, &res);
2098 break;
2099 case 2:
2100 data = address_space_lduw(as, addr, attrs, &res);
2101 break;
2102 case 4:
2103 data = address_space_ldl(as, addr, attrs, &res);
2104 break;
2105 default: abort();
2107 *pdata = data;
2108 return res;
2111 static MemTxResult watch_mem_write(void *opaque, hwaddr addr,
2112 uint64_t val, unsigned size,
2113 MemTxAttrs attrs)
2115 MemTxResult res;
2116 int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
2117 AddressSpace *as = current_cpu->cpu_ases[asidx].as;
2119 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE);
2120 switch (size) {
2121 case 1:
2122 address_space_stb(as, addr, val, attrs, &res);
2123 break;
2124 case 2:
2125 address_space_stw(as, addr, val, attrs, &res);
2126 break;
2127 case 4:
2128 address_space_stl(as, addr, val, attrs, &res);
2129 break;
2130 default: abort();
2132 return res;
2135 static const MemoryRegionOps watch_mem_ops = {
2136 .read_with_attrs = watch_mem_read,
2137 .write_with_attrs = watch_mem_write,
2138 .endianness = DEVICE_NATIVE_ENDIAN,
2141 static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
2142 unsigned len, MemTxAttrs attrs)
2144 subpage_t *subpage = opaque;
2145 uint8_t buf[8];
2146 MemTxResult res;
2148 #if defined(DEBUG_SUBPAGE)
2149 printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
2150 subpage, len, addr);
2151 #endif
2152 res = address_space_read(subpage->as, addr + subpage->base,
2153 attrs, buf, len);
2154 if (res) {
2155 return res;
2157 switch (len) {
2158 case 1:
2159 *data = ldub_p(buf);
2160 return MEMTX_OK;
2161 case 2:
2162 *data = lduw_p(buf);
2163 return MEMTX_OK;
2164 case 4:
2165 *data = ldl_p(buf);
2166 return MEMTX_OK;
2167 case 8:
2168 *data = ldq_p(buf);
2169 return MEMTX_OK;
2170 default:
2171 abort();
2175 static MemTxResult subpage_write(void *opaque, hwaddr addr,
2176 uint64_t value, unsigned len, MemTxAttrs attrs)
2178 subpage_t *subpage = opaque;
2179 uint8_t buf[8];
2181 #if defined(DEBUG_SUBPAGE)
2182 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
2183 " value %"PRIx64"\n",
2184 __func__, subpage, len, addr, value);
2185 #endif
2186 switch (len) {
2187 case 1:
2188 stb_p(buf, value);
2189 break;
2190 case 2:
2191 stw_p(buf, value);
2192 break;
2193 case 4:
2194 stl_p(buf, value);
2195 break;
2196 case 8:
2197 stq_p(buf, value);
2198 break;
2199 default:
2200 abort();
2202 return address_space_write(subpage->as, addr + subpage->base,
2203 attrs, buf, len);
2206 static bool subpage_accepts(void *opaque, hwaddr addr,
2207 unsigned len, bool is_write)
2209 subpage_t *subpage = opaque;
2210 #if defined(DEBUG_SUBPAGE)
2211 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
2212 __func__, subpage, is_write ? 'w' : 'r', len, addr);
2213 #endif
2215 return address_space_access_valid(subpage->as, addr + subpage->base,
2216 len, is_write);
2219 static const MemoryRegionOps subpage_ops = {
2220 .read_with_attrs = subpage_read,
2221 .write_with_attrs = subpage_write,
2222 .impl.min_access_size = 1,
2223 .impl.max_access_size = 8,
2224 .valid.min_access_size = 1,
2225 .valid.max_access_size = 8,
2226 .valid.accepts = subpage_accepts,
2227 .endianness = DEVICE_NATIVE_ENDIAN,
2230 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2231 uint16_t section)
2233 int idx, eidx;
2235 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2236 return -1;
2237 idx = SUBPAGE_IDX(start);
2238 eidx = SUBPAGE_IDX(end);
2239 #if defined(DEBUG_SUBPAGE)
2240 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
2241 __func__, mmio, start, end, idx, eidx, section);
2242 #endif
2243 for (; idx <= eidx; idx++) {
2244 mmio->sub_section[idx] = section;
2247 return 0;
2250 static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
2252 subpage_t *mmio;
2254 mmio = g_malloc0(sizeof(subpage_t) + TARGET_PAGE_SIZE * sizeof(uint16_t));
2255 mmio->as = as;
2256 mmio->base = base;
2257 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
2258 NULL, TARGET_PAGE_SIZE);
2259 mmio->iomem.subpage = true;
2260 #if defined(DEBUG_SUBPAGE)
2261 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
2262 mmio, base, TARGET_PAGE_SIZE);
2263 #endif
2264 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
2266 return mmio;
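/* Illustrative sketch, not part of the original file (the section number and
 * offsets are hypothetical): once subpage_init() has filled a page with
 * PHYS_SECTION_UNASSIGNED, mapping a small MMIO region over part of the page
 * is just a matter of overwriting the per-offset section indices; accesses to
 * the rest of the page keep dispatching to the unassigned section. */
static void example_map_small_mmio(subpage_t *mmio, uint16_t device_section)
{
    /* Bytes 0x000..0x0ff of this guest page now dispatch to device_section
     * via subpage_read()/subpage_write() above. */
    subpage_register(mmio, 0x000, 0x0ff, device_section);
}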
2269 static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
2270 MemoryRegion *mr)
2272 assert(as);
2273 MemoryRegionSection section = {
2274 .address_space = as,
2275 .mr = mr,
2276 .offset_within_address_space = 0,
2277 .offset_within_region = 0,
2278 .size = int128_2_64(),
2281 return phys_section_add(map, &section);
2284 MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index, MemTxAttrs attrs)
2286 int asidx = cpu_asidx_from_attrs(cpu, attrs);
2287 CPUAddressSpace *cpuas = &cpu->cpu_ases[asidx];
2288 AddressSpaceDispatch *d = atomic_rcu_read(&cpuas->memory_dispatch);
2289 MemoryRegionSection *sections = d->map.sections;
2291 return sections[index & ~TARGET_PAGE_MASK].mr;
2294 static void io_mem_init(void)
2296 memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
2297 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
2298 NULL, UINT64_MAX);
2299 memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
2300 NULL, UINT64_MAX);
2301 memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
2302 NULL, UINT64_MAX);
2305 static void mem_begin(MemoryListener *listener)
2307 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
2308 AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
2309 uint16_t n;
2311 n = dummy_section(&d->map, as, &io_mem_unassigned);
2312 assert(n == PHYS_SECTION_UNASSIGNED);
2313 n = dummy_section(&d->map, as, &io_mem_notdirty);
2314 assert(n == PHYS_SECTION_NOTDIRTY);
2315 n = dummy_section(&d->map, as, &io_mem_rom);
2316 assert(n == PHYS_SECTION_ROM);
2317 n = dummy_section(&d->map, as, &io_mem_watch);
2318 assert(n == PHYS_SECTION_WATCH);
2320 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
2321 d->as = as;
2322 as->next_dispatch = d;
2325 static void address_space_dispatch_free(AddressSpaceDispatch *d)
2327 phys_sections_free(&d->map);
2328 g_free(d);
2331 static void mem_commit(MemoryListener *listener)
2333 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
2334 AddressSpaceDispatch *cur = as->dispatch;
2335 AddressSpaceDispatch *next = as->next_dispatch;
2337 phys_page_compact_all(next, next->map.nodes_nb);
2339 atomic_rcu_set(&as->dispatch, next);
2340 if (cur) {
2341 call_rcu(cur, address_space_dispatch_free, rcu);
2345 static void tcg_commit(MemoryListener *listener)
2347 CPUAddressSpace *cpuas;
2348 AddressSpaceDispatch *d;
2350 /* since each CPU stores ram addresses in its TLB cache, we must
2351 reset the modified entries */
2352 cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener);
2353 cpu_reloading_memory_map();
2354 /* The CPU and TLB are protected by the iothread lock.
2355 * We reload the dispatch pointer now because cpu_reloading_memory_map()
2356 * may have split the RCU critical section.
2358 d = atomic_rcu_read(&cpuas->as->dispatch);
2359 cpuas->memory_dispatch = d;
2360 tlb_flush(cpuas->cpu, 1);
2363 void address_space_init_dispatch(AddressSpace *as)
2365 as->dispatch = NULL;
2366 as->dispatch_listener = (MemoryListener) {
2367 .begin = mem_begin,
2368 .commit = mem_commit,
2369 .region_add = mem_add,
2370 .region_nop = mem_add,
2371 .priority = 0,
2373 memory_listener_register(&as->dispatch_listener, as);
2376 void address_space_unregister(AddressSpace *as)
2378 memory_listener_unregister(&as->dispatch_listener);
2381 void address_space_destroy_dispatch(AddressSpace *as)
2383 AddressSpaceDispatch *d = as->dispatch;
2385 atomic_rcu_set(&as->dispatch, NULL);
2386 if (d) {
2387 call_rcu(d, address_space_dispatch_free, rcu);
2391 static void memory_map_init(void)
2393 system_memory = g_malloc(sizeof(*system_memory));
2395 memory_region_init(system_memory, NULL, "system", UINT64_MAX);
2396 address_space_init(&address_space_memory, system_memory, "memory");
2398 system_io = g_malloc(sizeof(*system_io));
2399 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
2400 65536);
2401 address_space_init(&address_space_io, system_io, "I/O");
2404 MemoryRegion *get_system_memory(void)
2406 return system_memory;
2409 MemoryRegion *get_system_io(void)
2411 return system_io;
2414 #endif /* !defined(CONFIG_USER_ONLY) */
2416 /* physical memory access (slow version, mainly for debug) */
2417 #if defined(CONFIG_USER_ONLY)
2418 int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
2419 uint8_t *buf, int len, int is_write)
2421 int l, flags;
2422 target_ulong page;
2423 void * p;
2425 while (len > 0) {
2426 page = addr & TARGET_PAGE_MASK;
2427 l = (page + TARGET_PAGE_SIZE) - addr;
2428 if (l > len)
2429 l = len;
2430 flags = page_get_flags(page);
2431 if (!(flags & PAGE_VALID))
2432 return -1;
2433 if (is_write) {
2434 if (!(flags & PAGE_WRITE))
2435 return -1;
2436 /* XXX: this code should not depend on lock_user */
2437 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
2438 return -1;
2439 memcpy(p, buf, l);
2440 unlock_user(p, addr, l);
2441 } else {
2442 if (!(flags & PAGE_READ))
2443 return -1;
2444 /* XXX: this code should not depend on lock_user */
2445 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
2446 return -1;
2447 memcpy(buf, p, l);
2448 unlock_user(p, addr, 0);
2450 len -= l;
2451 buf += l;
2452 addr += l;
2454 return 0;
2457 #else
2459 static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
2460 hwaddr length)
2462 uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
2463 addr += memory_region_get_ram_addr(mr);
2465 /* No early return if dirty_log_mask is or becomes 0, because
2466 * cpu_physical_memory_set_dirty_range will still call
2467 * xen_modified_memory.
2469 if (dirty_log_mask) {
2470 dirty_log_mask =
2471 cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
2473 if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
2474 tb_invalidate_phys_range(addr, addr + length);
2475 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
2477 cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
2480 static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
2482 unsigned access_size_max = mr->ops->valid.max_access_size;
2484 /* Regions are assumed to support 1-4 byte accesses unless
2485 otherwise specified. */
2486 if (access_size_max == 0) {
2487 access_size_max = 4;
2490 /* Bound the maximum access by the alignment of the address. */
2491 if (!mr->ops->impl.unaligned) {
2492 unsigned align_size_max = addr & -addr;
2493 if (align_size_max != 0 && align_size_max < access_size_max) {
2494 access_size_max = align_size_max;
2498 /* Don't attempt accesses larger than the maximum. */
2499 if (l > access_size_max) {
2500 l = access_size_max;
2502 l = pow2floor(l);
2504 return l;
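/* Worked example, not part of the original file (values are hypothetical):
 * for an 8-byte request at an address ending in 0x6, against a region that
 * declares no max_access_size and no unaligned support, the default bound is
 * 4, the alignment bound (addr & -addr) lowers it to 2, and pow2floor() keeps
 * the result a power of two; only 2 of the 8 bytes are accessed in this
 * iteration, and the caller's loop issues the rest as further accesses. */
static int example_access_size(MemoryRegion *mr)
{
    return memory_access_size(mr, 8, 0x10006);  /* 2 for such a region */
}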
2507 static bool prepare_mmio_access(MemoryRegion *mr)
2509 bool unlocked = !qemu_mutex_iothread_locked();
2510 bool release_lock = false;
2512 if (unlocked && mr->global_locking) {
2513 qemu_mutex_lock_iothread();
2514 unlocked = false;
2515 release_lock = true;
2517 if (mr->flush_coalesced_mmio) {
2518 if (unlocked) {
2519 qemu_mutex_lock_iothread();
2521 qemu_flush_coalesced_mmio_buffer();
2522 if (unlocked) {
2523 qemu_mutex_unlock_iothread();
2527 return release_lock;
2530 /* Called within RCU critical section. */
2531 static MemTxResult address_space_write_continue(AddressSpace *as, hwaddr addr,
2532 MemTxAttrs attrs,
2533 const uint8_t *buf,
2534 int len, hwaddr addr1,
2535 hwaddr l, MemoryRegion *mr)
2537 uint8_t *ptr;
2538 uint64_t val;
2539 MemTxResult result = MEMTX_OK;
2540 bool release_lock = false;
2542 for (;;) {
2543 if (!memory_access_is_direct(mr, true)) {
2544 release_lock |= prepare_mmio_access(mr);
2545 l = memory_access_size(mr, l, addr1);
2546 /* XXX: could force current_cpu to NULL to avoid
2547 potential bugs */
2548 switch (l) {
2549 case 8:
2550 /* 64 bit write access */
2551 val = ldq_p(buf);
2552 result |= memory_region_dispatch_write(mr, addr1, val, 8,
2553 attrs);
2554 break;
2555 case 4:
2556 /* 32 bit write access */
2557 val = ldl_p(buf);
2558 result |= memory_region_dispatch_write(mr, addr1, val, 4,
2559 attrs);
2560 break;
2561 case 2:
2562 /* 16 bit write access */
2563 val = lduw_p(buf);
2564 result |= memory_region_dispatch_write(mr, addr1, val, 2,
2565 attrs);
2566 break;
2567 case 1:
2568 /* 8 bit write access */
2569 val = ldub_p(buf);
2570 result |= memory_region_dispatch_write(mr, addr1, val, 1,
2571 attrs);
2572 break;
2573 default:
2574 abort();
2576 } else {
2577 /* RAM case */
2578 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
2579 memcpy(ptr, buf, l);
2580 invalidate_and_set_dirty(mr, addr1, l);
2583 if (release_lock) {
2584 qemu_mutex_unlock_iothread();
2585 release_lock = false;
2588 len -= l;
2589 buf += l;
2590 addr += l;
2592 if (!len) {
2593 break;
2596 l = len;
2597 mr = address_space_translate(as, addr, &addr1, &l, true);
2600 return result;
2603 MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2604 const uint8_t *buf, int len)
2606 hwaddr l;
2607 hwaddr addr1;
2608 MemoryRegion *mr;
2609 MemTxResult result = MEMTX_OK;
2611 if (len > 0) {
2612 rcu_read_lock();
2613 l = len;
2614 mr = address_space_translate(as, addr, &addr1, &l, true);
2615 result = address_space_write_continue(as, addr, attrs, buf, len,
2616 addr1, l, mr);
2617 rcu_read_unlock();
2620 return result;
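/* Illustrative sketch, not part of the original file (the guest-physical
 * address is hypothetical): a typical caller writes through the system memory
 * address space and checks the accumulated MemTxResult instead of assuming
 * the access succeeded. */
static void example_write_guest_buffer(const uint8_t *data, int size)
{
    MemTxResult res = address_space_write(&address_space_memory, 0x40000000,
                                          MEMTXATTRS_UNSPECIFIED, data, size);

    if (res != MEMTX_OK) {
        /* Part of the range was unassigned or a device reported an error. */
    }
}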
2623 /* Called within RCU critical section. */
2624 MemTxResult address_space_read_continue(AddressSpace *as, hwaddr addr,
2625 MemTxAttrs attrs, uint8_t *buf,
2626 int len, hwaddr addr1, hwaddr l,
2627 MemoryRegion *mr)
2629 uint8_t *ptr;
2630 uint64_t val;
2631 MemTxResult result = MEMTX_OK;
2632 bool release_lock = false;
2634 for (;;) {
2635 if (!memory_access_is_direct(mr, false)) {
2636 /* I/O case */
2637 release_lock |= prepare_mmio_access(mr);
2638 l = memory_access_size(mr, l, addr1);
2639 switch (l) {
2640 case 8:
2641 /* 64 bit read access */
2642 result |= memory_region_dispatch_read(mr, addr1, &val, 8,
2643 attrs);
2644 stq_p(buf, val);
2645 break;
2646 case 4:
2647 /* 32 bit read access */
2648 result |= memory_region_dispatch_read(mr, addr1, &val, 4,
2649 attrs);
2650 stl_p(buf, val);
2651 break;
2652 case 2:
2653 /* 16 bit read access */
2654 result |= memory_region_dispatch_read(mr, addr1, &val, 2,
2655 attrs);
2656 stw_p(buf, val);
2657 break;
2658 case 1:
2659 /* 8 bit read access */
2660 result |= memory_region_dispatch_read(mr, addr1, &val, 1,
2661 attrs);
2662 stb_p(buf, val);
2663 break;
2664 default:
2665 abort();
2667 } else {
2668 /* RAM case */
2669 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
2670 memcpy(buf, ptr, l);
2673 if (release_lock) {
2674 qemu_mutex_unlock_iothread();
2675 release_lock = false;
2678 len -= l;
2679 buf += l;
2680 addr += l;
2682 if (!len) {
2683 break;
2686 l = len;
2687 mr = address_space_translate(as, addr, &addr1, &l, false);
2690 return result;
2693 MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
2694 MemTxAttrs attrs, uint8_t *buf, int len)
2696 hwaddr l;
2697 hwaddr addr1;
2698 MemoryRegion *mr;
2699 MemTxResult result = MEMTX_OK;
2701 if (len > 0) {
2702 rcu_read_lock();
2703 l = len;
2704 mr = address_space_translate(as, addr, &addr1, &l, false);
2705 result = address_space_read_continue(as, addr, attrs, buf, len,
2706 addr1, l, mr);
2707 rcu_read_unlock();
2710 return result;
2713 MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2714 uint8_t *buf, int len, bool is_write)
2716 if (is_write) {
2717 return address_space_write(as, addr, attrs, (uint8_t *)buf, len);
2718 } else {
2719 return address_space_read(as, addr, attrs, (uint8_t *)buf, len);
2723 void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
2724 int len, int is_write)
2726 address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
2727 buf, len, is_write);
2730 enum write_rom_type {
2731 WRITE_DATA,
2732 FLUSH_CACHE,
2735 static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
2736 hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
2738 hwaddr l;
2739 uint8_t *ptr;
2740 hwaddr addr1;
2741 MemoryRegion *mr;
2743 rcu_read_lock();
2744 while (len > 0) {
2745 l = len;
2746 mr = address_space_translate(as, addr, &addr1, &l, true);
2748 if (!(memory_region_is_ram(mr) ||
2749 memory_region_is_romd(mr))) {
2750 l = memory_access_size(mr, l, addr1);
2751 } else {
2752 /* ROM/RAM case */
2753 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
2754 switch (type) {
2755 case WRITE_DATA:
2756 memcpy(ptr, buf, l);
2757 invalidate_and_set_dirty(mr, addr1, l);
2758 break;
2759 case FLUSH_CACHE:
2760 flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
2761 break;
2764 len -= l;
2765 buf += l;
2766 addr += l;
2768 rcu_read_unlock();
2771 /* used for ROM loading: can write in RAM and ROM */
2772 void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
2773 const uint8_t *buf, int len)
2775 cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
2778 void cpu_flush_icache_range(hwaddr start, int len)
2781 * This function should do the same thing as an icache flush that was
2782 * triggered from within the guest. For TCG we are always cache coherent,
2783 * so there is no need to flush anything. For KVM / Xen we need to flush
2784 * the host's instruction cache at least.
2786 if (tcg_enabled()) {
2787 return;
2790 cpu_physical_memory_write_rom_internal(&address_space_memory,
2791 start, NULL, len, FLUSH_CACHE);
2794 typedef struct {
2795 MemoryRegion *mr;
2796 void *buffer;
2797 hwaddr addr;
2798 hwaddr len;
2799 bool in_use;
2800 } BounceBuffer;
2802 static BounceBuffer bounce;
2804 typedef struct MapClient {
2805 QEMUBH *bh;
2806 QLIST_ENTRY(MapClient) link;
2807 } MapClient;
2809 QemuMutex map_client_list_lock;
2810 static QLIST_HEAD(map_client_list, MapClient) map_client_list
2811 = QLIST_HEAD_INITIALIZER(map_client_list);
2813 static void cpu_unregister_map_client_do(MapClient *client)
2815 QLIST_REMOVE(client, link);
2816 g_free(client);
2819 static void cpu_notify_map_clients_locked(void)
2821 MapClient *client;
2823 while (!QLIST_EMPTY(&map_client_list)) {
2824 client = QLIST_FIRST(&map_client_list);
2825 qemu_bh_schedule(client->bh);
2826 cpu_unregister_map_client_do(client);
2830 void cpu_register_map_client(QEMUBH *bh)
2832 MapClient *client = g_malloc(sizeof(*client));
2834 qemu_mutex_lock(&map_client_list_lock);
2835 client->bh = bh;
2836 QLIST_INSERT_HEAD(&map_client_list, client, link);
2837 if (!atomic_read(&bounce.in_use)) {
2838 cpu_notify_map_clients_locked();
2840 qemu_mutex_unlock(&map_client_list_lock);
2843 void cpu_exec_init_all(void)
2845 qemu_mutex_init(&ram_list.mutex);
2846 /* The data structures we set up here depend on knowing the page size,
2847 * so no more changes can be made after this point.
2848 * In an ideal world, nothing we did before we had finished the
2849 * machine setup would care about the target page size, and we could
2850 * do this much later, rather than requiring board models to state
2851 * up front what their requirements are.
2853 finalize_target_page_bits();
2854 io_mem_init();
2855 memory_map_init();
2856 qemu_mutex_init(&map_client_list_lock);
2859 void cpu_unregister_map_client(QEMUBH *bh)
2861 MapClient *client;
2863 qemu_mutex_lock(&map_client_list_lock);
2864 QLIST_FOREACH(client, &map_client_list, link) {
2865 if (client->bh == bh) {
2866 cpu_unregister_map_client_do(client);
2867 break;
2870 qemu_mutex_unlock(&map_client_list_lock);
2873 static void cpu_notify_map_clients(void)
2875 qemu_mutex_lock(&map_client_list_lock);
2876 cpu_notify_map_clients_locked();
2877 qemu_mutex_unlock(&map_client_list_lock);
2880 bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2882 MemoryRegion *mr;
2883 hwaddr l, xlat;
2885 rcu_read_lock();
2886 while (len > 0) {
2887 l = len;
2888 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2889 if (!memory_access_is_direct(mr, is_write)) {
2890 l = memory_access_size(mr, l, addr);
2891 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
2892 rcu_read_unlock();
2892 return false;
2896 len -= l;
2897 addr += l;
2899 rcu_read_unlock();
2900 return true;
2903 /* Map a physical memory region into a host virtual address.
2904 * May map a subset of the requested range, given by and returned in *plen.
2905 * May return NULL if resources needed to perform the mapping are exhausted.
2906 * Use only for reads OR writes - not for read-modify-write operations.
2907 * Use cpu_register_map_client() to know when retrying the map operation is
2908 * likely to succeed.
2910 void *address_space_map(AddressSpace *as,
2911 hwaddr addr,
2912 hwaddr *plen,
2913 bool is_write)
2915 hwaddr len = *plen;
2916 hwaddr done = 0;
2917 hwaddr l, xlat, base;
2918 MemoryRegion *mr, *this_mr;
2919 void *ptr;
2921 if (len == 0) {
2922 return NULL;
2925 l = len;
2926 rcu_read_lock();
2927 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2929 if (!memory_access_is_direct(mr, is_write)) {
2930 if (atomic_xchg(&bounce.in_use, true)) {
2931 rcu_read_unlock();
2932 return NULL;
2934 /* Avoid unbounded allocations */
2935 l = MIN(l, TARGET_PAGE_SIZE);
2936 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
2937 bounce.addr = addr;
2938 bounce.len = l;
2940 memory_region_ref(mr);
2941 bounce.mr = mr;
2942 if (!is_write) {
2943 address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
2944 bounce.buffer, l);
2947 rcu_read_unlock();
2948 *plen = l;
2949 return bounce.buffer;
2952 base = xlat;
2954 for (;;) {
2955 len -= l;
2956 addr += l;
2957 done += l;
2958 if (len == 0) {
2959 break;
2962 l = len;
2963 this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
2964 if (this_mr != mr || xlat != base + done) {
2965 break;
2969 memory_region_ref(mr);
2970 *plen = done;
2971 ptr = qemu_ram_ptr_length(mr->ram_block, base, plen);
2972 rcu_read_unlock();
2974 return ptr;
2977 /* Unmaps a memory region previously mapped by address_space_map().
2978 * Will also mark the memory as dirty if is_write == 1. access_len gives
2979 * the amount of memory that was actually read or written by the caller.
2981 void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2982 int is_write, hwaddr access_len)
2984 if (buffer != bounce.buffer) {
2985 MemoryRegion *mr;
2986 ram_addr_t addr1;
2988 mr = memory_region_from_host(buffer, &addr1);
2989 assert(mr != NULL);
2990 if (is_write) {
2991 invalidate_and_set_dirty(mr, addr1, access_len);
2993 if (xen_enabled()) {
2994 xen_invalidate_map_cache_entry(buffer);
2996 memory_region_unref(mr);
2997 return;
2999 if (is_write) {
3000 address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
3001 bounce.buffer, access_len);
3003 qemu_vfree(bounce.buffer);
3004 bounce.buffer = NULL;
3005 memory_region_unref(bounce.mr);
3006 atomic_mb_set(&bounce.in_use, false);
3007 cpu_notify_map_clients();
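/* Illustrative sketch, not part of the original file (addresses and the retry
 * policy are hypothetical): the usual pattern around address_space_map() and
 * address_space_unmap() for DMA-style access.  A real device model would
 * register a map client with cpu_register_map_client() and retry when NULL is
 * returned because the single bounce buffer is in use. */
static void example_dma_read(AddressSpace *as, hwaddr addr, hwaddr size)
{
    hwaddr plen = size;
    void *host = address_space_map(as, addr, &plen, false);

    if (!host) {
        return;                     /* resources exhausted; retry later */
    }
    /* ... consume up to plen bytes at 'host' ... */
    address_space_unmap(as, host, plen, false, plen);
}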
3010 void *cpu_physical_memory_map(hwaddr addr,
3011 hwaddr *plen,
3012 int is_write)
3014 return address_space_map(&address_space_memory, addr, plen, is_write);
3017 void cpu_physical_memory_unmap(void *buffer, hwaddr len,
3018 int is_write, hwaddr access_len)
3020 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
3023 /* warning: addr must be aligned */
3024 static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
3025 MemTxAttrs attrs,
3026 MemTxResult *result,
3027 enum device_endian endian)
3029 uint8_t *ptr;
3030 uint64_t val;
3031 MemoryRegion *mr;
3032 hwaddr l = 4;
3033 hwaddr addr1;
3034 MemTxResult r;
3035 bool release_lock = false;
3037 rcu_read_lock();
3038 mr = address_space_translate(as, addr, &addr1, &l, false);
3039 if (l < 4 || !memory_access_is_direct(mr, false)) {
3040 release_lock |= prepare_mmio_access(mr);
3042 /* I/O case */
3043 r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
3044 #if defined(TARGET_WORDS_BIGENDIAN)
3045 if (endian == DEVICE_LITTLE_ENDIAN) {
3046 val = bswap32(val);
3048 #else
3049 if (endian == DEVICE_BIG_ENDIAN) {
3050 val = bswap32(val);
3052 #endif
3053 } else {
3054 /* RAM case */
3055 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
3056 switch (endian) {
3057 case DEVICE_LITTLE_ENDIAN:
3058 val = ldl_le_p(ptr);
3059 break;
3060 case DEVICE_BIG_ENDIAN:
3061 val = ldl_be_p(ptr);
3062 break;
3063 default:
3064 val = ldl_p(ptr);
3065 break;
3067 r = MEMTX_OK;
3069 if (result) {
3070 *result = r;
3072 if (release_lock) {
3073 qemu_mutex_unlock_iothread();
3075 rcu_read_unlock();
3076 return val;
3079 uint32_t address_space_ldl(AddressSpace *as, hwaddr addr,
3080 MemTxAttrs attrs, MemTxResult *result)
3082 return address_space_ldl_internal(as, addr, attrs, result,
3083 DEVICE_NATIVE_ENDIAN);
3086 uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
3087 MemTxAttrs attrs, MemTxResult *result)
3089 return address_space_ldl_internal(as, addr, attrs, result,
3090 DEVICE_LITTLE_ENDIAN);
3093 uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
3094 MemTxAttrs attrs, MemTxResult *result)
3096 return address_space_ldl_internal(as, addr, attrs, result,
3097 DEVICE_BIG_ENDIAN);
3100 uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
3102 return address_space_ldl(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
3105 uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
3107 return address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
3110 uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
3112 return address_space_ldl_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
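/* Illustrative sketch, not part of the original file: ldl_phys() and friends
 * are shorthands that discard the MemTxResult; a caller that needs to know
 * whether the access faulted uses the address_space_ldl*() form directly. */
static uint32_t example_checked_ldl(AddressSpace *as, hwaddr addr, bool *ok)
{
    MemTxResult res;
    uint32_t val = address_space_ldl(as, addr, MEMTXATTRS_UNSPECIFIED, &res);

    *ok = (res == MEMTX_OK);
    return val;
}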
3115 /* warning: addr must be aligned */
3116 static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
3117 MemTxAttrs attrs,
3118 MemTxResult *result,
3119 enum device_endian endian)
3121 uint8_t *ptr;
3122 uint64_t val;
3123 MemoryRegion *mr;
3124 hwaddr l = 8;
3125 hwaddr addr1;
3126 MemTxResult r;
3127 bool release_lock = false;
3129 rcu_read_lock();
3130 mr = address_space_translate(as, addr, &addr1, &l,
3131 false);
3132 if (l < 8 || !memory_access_is_direct(mr, false)) {
3133 release_lock |= prepare_mmio_access(mr);
3135 /* I/O case */
3136 r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
3137 #if defined(TARGET_WORDS_BIGENDIAN)
3138 if (endian == DEVICE_LITTLE_ENDIAN) {
3139 val = bswap64(val);
3141 #else
3142 if (endian == DEVICE_BIG_ENDIAN) {
3143 val = bswap64(val);
3145 #endif
3146 } else {
3147 /* RAM case */
3148 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
3149 switch (endian) {
3150 case DEVICE_LITTLE_ENDIAN:
3151 val = ldq_le_p(ptr);
3152 break;
3153 case DEVICE_BIG_ENDIAN:
3154 val = ldq_be_p(ptr);
3155 break;
3156 default:
3157 val = ldq_p(ptr);
3158 break;
3160 r = MEMTX_OK;
3162 if (result) {
3163 *result = r;
3165 if (release_lock) {
3166 qemu_mutex_unlock_iothread();
3168 rcu_read_unlock();
3169 return val;
3172 uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
3173 MemTxAttrs attrs, MemTxResult *result)
3175 return address_space_ldq_internal(as, addr, attrs, result,
3176 DEVICE_NATIVE_ENDIAN);
3179 uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
3180 MemTxAttrs attrs, MemTxResult *result)
3182 return address_space_ldq_internal(as, addr, attrs, result,
3183 DEVICE_LITTLE_ENDIAN);
3186 uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
3187 MemTxAttrs attrs, MemTxResult *result)
3189 return address_space_ldq_internal(as, addr, attrs, result,
3190 DEVICE_BIG_ENDIAN);
3193 uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
3195 return address_space_ldq(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
3198 uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
3200 return address_space_ldq_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
3203 uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
3205 return address_space_ldq_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
3208 /* XXX: optimize */
3209 uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
3210 MemTxAttrs attrs, MemTxResult *result)
3212 uint8_t val;
3213 MemTxResult r;
3215 r = address_space_rw(as, addr, attrs, &val, 1, 0);
3216 if (result) {
3217 *result = r;
3219 return val;
3222 uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
3224 return address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
3227 /* warning: addr must be aligned */
3228 static inline uint32_t address_space_lduw_internal(AddressSpace *as,
3229 hwaddr addr,
3230 MemTxAttrs attrs,
3231 MemTxResult *result,
3232 enum device_endian endian)
3234 uint8_t *ptr;
3235 uint64_t val;
3236 MemoryRegion *mr;
3237 hwaddr l = 2;
3238 hwaddr addr1;
3239 MemTxResult r;
3240 bool release_lock = false;
3242 rcu_read_lock();
3243 mr = address_space_translate(as, addr, &addr1, &l,
3244 false);
3245 if (l < 2 || !memory_access_is_direct(mr, false)) {
3246 release_lock |= prepare_mmio_access(mr);
3248 /* I/O case */
3249 r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
3250 #if defined(TARGET_WORDS_BIGENDIAN)
3251 if (endian == DEVICE_LITTLE_ENDIAN) {
3252 val = bswap16(val);
3254 #else
3255 if (endian == DEVICE_BIG_ENDIAN) {
3256 val = bswap16(val);
3258 #endif
3259 } else {
3260 /* RAM case */
3261 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
3262 switch (endian) {
3263 case DEVICE_LITTLE_ENDIAN:
3264 val = lduw_le_p(ptr);
3265 break;
3266 case DEVICE_BIG_ENDIAN:
3267 val = lduw_be_p(ptr);
3268 break;
3269 default:
3270 val = lduw_p(ptr);
3271 break;
3273 r = MEMTX_OK;
3275 if (result) {
3276 *result = r;
3278 if (release_lock) {
3279 qemu_mutex_unlock_iothread();
3281 rcu_read_unlock();
3282 return val;
3285 uint32_t address_space_lduw(AddressSpace *as, hwaddr addr,
3286 MemTxAttrs attrs, MemTxResult *result)
3288 return address_space_lduw_internal(as, addr, attrs, result,
3289 DEVICE_NATIVE_ENDIAN);
3292 uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
3293 MemTxAttrs attrs, MemTxResult *result)
3295 return address_space_lduw_internal(as, addr, attrs, result,
3296 DEVICE_LITTLE_ENDIAN);
3299 uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
3300 MemTxAttrs attrs, MemTxResult *result)
3302 return address_space_lduw_internal(as, addr, attrs, result,
3303 DEVICE_BIG_ENDIAN);
3306 uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
3308 return address_space_lduw(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
3311 uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
3313 return address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
3316 uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
3318 return address_space_lduw_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
3321 /* warning: addr must be aligned. The ram page is not marked as dirty
3322 and the code inside is not invalidated. It is useful if the dirty
3323 bits are used to track modified PTEs */
3324 void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
3325 MemTxAttrs attrs, MemTxResult *result)
3327 uint8_t *ptr;
3328 MemoryRegion *mr;
3329 hwaddr l = 4;
3330 hwaddr addr1;
3331 MemTxResult r;
3332 uint8_t dirty_log_mask;
3333 bool release_lock = false;
3335 rcu_read_lock();
3336 mr = address_space_translate(as, addr, &addr1, &l,
3337 true);
3338 if (l < 4 || !memory_access_is_direct(mr, true)) {
3339 release_lock |= prepare_mmio_access(mr);
3341 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
3342 } else {
3343 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
3344 stl_p(ptr, val);
3346 dirty_log_mask = memory_region_get_dirty_log_mask(mr);
3347 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
3348 cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
3349 4, dirty_log_mask);
3350 r = MEMTX_OK;
3352 if (result) {
3353 *result = r;
3355 if (release_lock) {
3356 qemu_mutex_unlock_iothread();
3358 rcu_read_unlock();
3361 void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
3363 address_space_stl_notdirty(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
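/* Illustrative sketch, not part of the original file (the PTE address and bit
 * layout are hypothetical): target MMU emulation uses the _notdirty variant
 * when it rewrites a page-table entry to set accessed/dirty bits, so that the
 * guest page holding the PTE is not flagged as dirtied by QEMU's own
 * bookkeeping. */
static void example_set_pte_accessed(AddressSpace *as, hwaddr pte_addr,
                                     uint32_t pte)
{
    stl_phys_notdirty(as, pte_addr, pte | 0x20 /* hypothetical ACCESSED bit */);
}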
3366 /* warning: addr must be aligned */
3367 static inline void address_space_stl_internal(AddressSpace *as,
3368 hwaddr addr, uint32_t val,
3369 MemTxAttrs attrs,
3370 MemTxResult *result,
3371 enum device_endian endian)
3373 uint8_t *ptr;
3374 MemoryRegion *mr;
3375 hwaddr l = 4;
3376 hwaddr addr1;
3377 MemTxResult r;
3378 bool release_lock = false;
3380 rcu_read_lock();
3381 mr = address_space_translate(as, addr, &addr1, &l,
3382 true);
3383 if (l < 4 || !memory_access_is_direct(mr, true)) {
3384 release_lock |= prepare_mmio_access(mr);
3386 #if defined(TARGET_WORDS_BIGENDIAN)
3387 if (endian == DEVICE_LITTLE_ENDIAN) {
3388 val = bswap32(val);
3390 #else
3391 if (endian == DEVICE_BIG_ENDIAN) {
3392 val = bswap32(val);
3394 #endif
3395 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
3396 } else {
3397 /* RAM case */
3398 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
3399 switch (endian) {
3400 case DEVICE_LITTLE_ENDIAN:
3401 stl_le_p(ptr, val);
3402 break;
3403 case DEVICE_BIG_ENDIAN:
3404 stl_be_p(ptr, val);
3405 break;
3406 default:
3407 stl_p(ptr, val);
3408 break;
3410 invalidate_and_set_dirty(mr, addr1, 4);
3411 r = MEMTX_OK;
3413 if (result) {
3414 *result = r;
3416 if (release_lock) {
3417 qemu_mutex_unlock_iothread();
3419 rcu_read_unlock();
3422 void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
3423 MemTxAttrs attrs, MemTxResult *result)
3425 address_space_stl_internal(as, addr, val, attrs, result,
3426 DEVICE_NATIVE_ENDIAN);
3429 void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
3430 MemTxAttrs attrs, MemTxResult *result)
3432 address_space_stl_internal(as, addr, val, attrs, result,
3433 DEVICE_LITTLE_ENDIAN);
3436 void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
3437 MemTxAttrs attrs, MemTxResult *result)
3439 address_space_stl_internal(as, addr, val, attrs, result,
3440 DEVICE_BIG_ENDIAN);
3443 void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
3445 address_space_stl(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3448 void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
3450 address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3453 void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
3455 address_space_stl_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3458 /* XXX: optimize */
3459 void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
3460 MemTxAttrs attrs, MemTxResult *result)
3462 uint8_t v = val;
3463 MemTxResult r;
3465 r = address_space_rw(as, addr, attrs, &v, 1, 1);
3466 if (result) {
3467 *result = r;
3471 void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
3473 address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3476 /* warning: addr must be aligned */
3477 static inline void address_space_stw_internal(AddressSpace *as,
3478 hwaddr addr, uint32_t val,
3479 MemTxAttrs attrs,
3480 MemTxResult *result,
3481 enum device_endian endian)
3483 uint8_t *ptr;
3484 MemoryRegion *mr;
3485 hwaddr l = 2;
3486 hwaddr addr1;
3487 MemTxResult r;
3488 bool release_lock = false;
3490 rcu_read_lock();
3491 mr = address_space_translate(as, addr, &addr1, &l, true);
3492 if (l < 2 || !memory_access_is_direct(mr, true)) {
3493 release_lock |= prepare_mmio_access(mr);
3495 #if defined(TARGET_WORDS_BIGENDIAN)
3496 if (endian == DEVICE_LITTLE_ENDIAN) {
3497 val = bswap16(val);
3499 #else
3500 if (endian == DEVICE_BIG_ENDIAN) {
3501 val = bswap16(val);
3503 #endif
3504 r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
3505 } else {
3506 /* RAM case */
3507 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
3508 switch (endian) {
3509 case DEVICE_LITTLE_ENDIAN:
3510 stw_le_p(ptr, val);
3511 break;
3512 case DEVICE_BIG_ENDIAN:
3513 stw_be_p(ptr, val);
3514 break;
3515 default:
3516 stw_p(ptr, val);
3517 break;
3519 invalidate_and_set_dirty(mr, addr1, 2);
3520 r = MEMTX_OK;
3522 if (result) {
3523 *result = r;
3525 if (release_lock) {
3526 qemu_mutex_unlock_iothread();
3528 rcu_read_unlock();
3531 void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
3532 MemTxAttrs attrs, MemTxResult *result)
3534 address_space_stw_internal(as, addr, val, attrs, result,
3535 DEVICE_NATIVE_ENDIAN);
3538 void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
3539 MemTxAttrs attrs, MemTxResult *result)
3541 address_space_stw_internal(as, addr, val, attrs, result,
3542 DEVICE_LITTLE_ENDIAN);
3545 void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
3546 MemTxAttrs attrs, MemTxResult *result)
3548 address_space_stw_internal(as, addr, val, attrs, result,
3549 DEVICE_BIG_ENDIAN);
3552 void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
3554 address_space_stw(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3557 void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
3559 address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3562 void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
3564 address_space_stw_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3567 /* XXX: optimize */
3568 void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
3569 MemTxAttrs attrs, MemTxResult *result)
3571 MemTxResult r;
3572 val = tswap64(val);
3573 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3574 if (result) {
3575 *result = r;
3579 void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
3580 MemTxAttrs attrs, MemTxResult *result)
3582 MemTxResult r;
3583 val = cpu_to_le64(val);
3584 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3585 if (result) {
3586 *result = r;
3589 void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
3590 MemTxAttrs attrs, MemTxResult *result)
3592 MemTxResult r;
3593 val = cpu_to_be64(val);
3594 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3595 if (result) {
3596 *result = r;
3600 void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
3602 address_space_stq(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3605 void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
3607 address_space_stq_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3610 void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
3612 address_space_stq_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3615 /* virtual memory access for debug (includes writing to ROM) */
3616 int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
3617 uint8_t *buf, int len, int is_write)
3619 int l;
3620 hwaddr phys_addr;
3621 target_ulong page;
3623 while (len > 0) {
3624 int asidx;
3625 MemTxAttrs attrs;
3627 page = addr & TARGET_PAGE_MASK;
3628 phys_addr = cpu_get_phys_page_attrs_debug(cpu, page, &attrs);
3629 asidx = cpu_asidx_from_attrs(cpu, attrs);
3630 /* if no physical page mapped, return an error */
3631 if (phys_addr == -1)
3632 return -1;
3633 l = (page + TARGET_PAGE_SIZE) - addr;
3634 if (l > len)
3635 l = len;
3636 phys_addr += (addr & ~TARGET_PAGE_MASK);
3637 if (is_write) {
3638 cpu_physical_memory_write_rom(cpu->cpu_ases[asidx].as,
3639 phys_addr, buf, l);
3640 } else {
3641 address_space_rw(cpu->cpu_ases[asidx].as, phys_addr,
3642 MEMTXATTRS_UNSPECIFIED,
3643 buf, l, 0);
3645 len -= l;
3646 buf += l;
3647 addr += l;
3649 return 0;
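/* Illustrative sketch, not part of the original file: a debugger-style caller
 * (gdbstub, monitor) reading four bytes of guest virtual memory through the
 * slow debug path above. */
static uint32_t example_debug_read_u32(CPUState *cpu, target_ulong vaddr)
{
    uint8_t buf[4] = { 0 };

    if (cpu_memory_rw_debug(cpu, vaddr, buf, sizeof(buf), 0) < 0) {
        /* No physical page mapped at vaddr; buf stays zeroed. */
    }
    return ldl_p(buf);
}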
3653 * Allows code that needs to deal with migration bitmaps etc to still be built
3654 * target independent.
3656 size_t qemu_target_page_bits(void)
3658 return TARGET_PAGE_BITS;
3661 #endif
3664 * A helper function for the _utterly broken_ virtio device model to find out if
3665 * it's running on a big endian machine. Don't do this at home kids!
3667 bool target_words_bigendian(void);
3668 bool target_words_bigendian(void)
3670 #if defined(TARGET_WORDS_BIGENDIAN)
3671 return true;
3672 #else
3673 return false;
3674 #endif
3677 #ifndef CONFIG_USER_ONLY
3678 bool cpu_physical_memory_is_io(hwaddr phys_addr)
3680 MemoryRegion *mr;
3681 hwaddr l = 1;
3682 bool res;
3684 rcu_read_lock();
3685 mr = address_space_translate(&address_space_memory,
3686 phys_addr, &phys_addr, &l, false);
3688 res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
3689 rcu_read_unlock();
3690 return res;
3693 int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
3695 RAMBlock *block;
3696 int ret = 0;
3698 rcu_read_lock();
3699 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
3700 ret = func(block->idstr, block->host, block->offset,
3701 block->used_length, opaque);
3702 if (ret) {
3703 break;
3706 rcu_read_unlock();
3707 return ret;
3709 #endif
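/* Illustrative sketch, not part of the original file (the callback name is
 * hypothetical, and it belongs with the !CONFIG_USER_ONLY helpers above):
 * a RAMBlockIterFunc callback as consumed by qemu_ram_foreach_block(),
 * here simply summing the used length of every RAM block. */
static int example_count_ram_bytes(const char *block_name, void *host_addr,
                                   ram_addr_t offset, ram_addr_t length,
                                   void *opaque)
{
    uint64_t *total = opaque;

    *total += length;
    return 0;               /* returning non-zero stops the iteration early */
}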