qemu/ar7.git / exec.c
1 /*
2 * Virtual page mapping
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #include "qemu/osdep.h"
20 #include "qapi/error.h"
21 #ifndef _WIN32
22 #endif
24 #include "qemu/cutils.h"
25 #include "cpu.h"
26 #include "exec/exec-all.h"
27 #include "tcg.h"
28 #include "hw/qdev-core.h"
29 #if !defined(CONFIG_USER_ONLY)
30 #include "hw/boards.h"
31 #include "hw/xen/xen.h"
32 #endif
33 #include "sysemu/kvm.h"
34 #include "sysemu/sysemu.h"
35 #include "qemu/timer.h"
36 #include "qemu/config-file.h"
37 #include "qemu/error-report.h"
38 #if defined(CONFIG_USER_ONLY)
39 #include <qemu.h>
40 #else /* !CONFIG_USER_ONLY */
41 #include "hw/hw.h"
42 #include "exec/memory.h"
43 #include "exec/ioport.h"
44 #include "sysemu/dma.h"
45 #include "exec/address-spaces.h"
46 #include "sysemu/xen-mapcache.h"
47 #include "trace.h"
48 #endif
49 #include "exec/cpu-all.h"
50 #include "qemu/rcu_queue.h"
51 #include "qemu/main-loop.h"
52 #include "translate-all.h"
53 #include "sysemu/replay.h"
55 #include "exec/memory-internal.h"
56 #include "exec/ram_addr.h"
57 #include "exec/log.h"
59 #include "migration/vmstate.h"
61 #include "qemu/range.h"
62 #ifndef _WIN32
63 #include "qemu/mmap-alloc.h"
64 #endif
66 //#define DEBUG_SUBPAGE
68 #if !defined(CONFIG_USER_ONLY)
69 /* ram_list is read under rcu_read_lock()/rcu_read_unlock(). Writes
70 * are protected by the ramlist lock.
72 RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };
74 static MemoryRegion *system_memory;
75 static MemoryRegion *system_io;
77 AddressSpace address_space_io;
78 AddressSpace address_space_memory;
80 MemoryRegion io_mem_rom, io_mem_notdirty;
81 static MemoryRegion io_mem_unassigned;
83 /* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
84 #define RAM_PREALLOC (1 << 0)
86 /* RAM is mmap-ed with MAP_SHARED */
87 #define RAM_SHARED (1 << 1)
89 /* Only a portion of RAM (used_length) is actually used, and migrated.
90 * This used_length size can change across reboots.
92 #define RAM_RESIZEABLE (1 << 2)
94 #endif
96 struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
97 /* current CPU in the current thread. It is only valid inside
98 cpu_exec() */
99 __thread CPUState *current_cpu;
100 /* 0 = Do not count executed instructions.
101 1 = Precise instruction counting.
102 2 = Adaptive rate instruction counting. */
103 int use_icount;
105 #if !defined(CONFIG_USER_ONLY)
107 typedef struct PhysPageEntry PhysPageEntry;
109 struct PhysPageEntry {
110 /* How many bits to skip to the next level (in units of P_L2_SIZE). 0 for a leaf. */
111 uint32_t skip : 6;
112 /* index into phys_sections (!skip) or phys_map_nodes (skip) */
113 uint32_t ptr : 26;
116 #define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)
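/* A PhysPageEntry packs into 32 bits: 6 bits of 'skip' and 26 bits of 'ptr'.
 * PHYS_MAP_NODE_NIL is therefore the largest value the 26-bit 'ptr' field can
 * hold (0x3ffffff) and marks a child that has not been allocated yet.
 */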
118 /* Size of the L2 (and L3, etc) page tables. */
119 #define ADDR_SPACE_BITS 64
121 #define P_L2_BITS 9
122 #define P_L2_SIZE (1 << P_L2_BITS)
124 #define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
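/* Worked example: with 4 KiB target pages (TARGET_PAGE_BITS == 12),
 * P_L2_LEVELS = ((64 - 12 - 1) / 9) + 1 = 5 + 1 = 6, i.e. six radix-tree
 * levels of P_L2_SIZE (512) entries each are enough to cover the whole
 * 64-bit physical address space.
 */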
126 typedef PhysPageEntry Node[P_L2_SIZE];
128 typedef struct PhysPageMap {
129 struct rcu_head rcu;
131 unsigned sections_nb;
132 unsigned sections_nb_alloc;
133 unsigned nodes_nb;
134 unsigned nodes_nb_alloc;
135 Node *nodes;
136 MemoryRegionSection *sections;
137 } PhysPageMap;
139 struct AddressSpaceDispatch {
140 struct rcu_head rcu;
142 MemoryRegionSection *mru_section;
143 /* This is a multi-level map on the physical address space.
144 * The bottom level has pointers to MemoryRegionSections.
146 PhysPageEntry phys_map;
147 PhysPageMap map;
148 AddressSpace *as;
151 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
152 typedef struct subpage_t {
153 MemoryRegion iomem;
154 AddressSpace *as;
155 hwaddr base;
156 uint16_t sub_section[TARGET_PAGE_SIZE];
157 } subpage_t;
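/* A subpage_t backs a single target page whose contents are split across
 * several MemoryRegionSections: sub_section[] is indexed by the byte offset
 * within the page (SUBPAGE_IDX) and holds the section number to dispatch to
 * for that offset.
 */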
159 #define PHYS_SECTION_UNASSIGNED 0
160 #define PHYS_SECTION_NOTDIRTY 1
161 #define PHYS_SECTION_ROM 2
162 #define PHYS_SECTION_WATCH 3
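/* These four section numbers are fixed: the corresponding dummy sections are
 * registered in this order whenever a dispatch map is rebuilt, so the indices
 * can be ORed directly into iotlb values (see memory_region_section_get_iotlb).
 */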
164 static void io_mem_init(void);
165 static void memory_map_init(void);
166 static void tcg_commit(MemoryListener *listener);
168 static MemoryRegion io_mem_watch;
171 * CPUAddressSpace: all the information a CPU needs about an AddressSpace
172 * @cpu: the CPU whose AddressSpace this is
173 * @as: the AddressSpace itself
174 * @memory_dispatch: its dispatch pointer (cached, RCU protected)
175 * @tcg_as_listener: listener for tracking changes to the AddressSpace
177 struct CPUAddressSpace {
178 CPUState *cpu;
179 AddressSpace *as;
180 struct AddressSpaceDispatch *memory_dispatch;
181 MemoryListener tcg_as_listener;
184 #endif
186 #if !defined(CONFIG_USER_ONLY)
188 static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
190 if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
191 map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
192 map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
193 map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
197 static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
199 unsigned i;
200 uint32_t ret;
201 PhysPageEntry e;
202 PhysPageEntry *p;
204 ret = map->nodes_nb++;
205 p = map->nodes[ret];
206 assert(ret != PHYS_MAP_NODE_NIL);
207 assert(ret != map->nodes_nb_alloc);
209 e.skip = leaf ? 0 : 1;
210 e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
211 for (i = 0; i < P_L2_SIZE; ++i) {
212 memcpy(&p[i], &e, sizeof(e));
214 return ret;
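/* Populate the page range [*index, *index + *nb) with 'leaf', descending one
 * level per recursion and allocating intermediate nodes on demand.  Whenever a
 * naturally aligned chunk of 'step' pages is fully covered, it is recorded at
 * the current level instead of recursing further down.
 */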
217 static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
218 hwaddr *index, hwaddr *nb, uint16_t leaf,
219 int level)
221 PhysPageEntry *p;
222 hwaddr step = (hwaddr)1 << (level * P_L2_BITS);
224 if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
225 lp->ptr = phys_map_node_alloc(map, level == 0);
227 p = map->nodes[lp->ptr];
228 lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];
230 while (*nb && lp < &p[P_L2_SIZE]) {
231 if ((*index & (step - 1)) == 0 && *nb >= step) {
232 lp->skip = 0;
233 lp->ptr = leaf;
234 *index += step;
235 *nb -= step;
236 } else {
237 phys_page_set_level(map, lp, index, nb, leaf, level - 1);
239 ++lp;
243 static void phys_page_set(AddressSpaceDispatch *d,
244 hwaddr index, hwaddr nb,
245 uint16_t leaf)
247 /* Wildly overreserve - it doesn't matter much. */
248 phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);
250 phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
253 /* Compact a non-leaf page entry. Simply detect that the entry has a single child,
254 * and update our entry so we can skip it and go directly to the destination.
256 static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
258 unsigned valid_ptr = P_L2_SIZE;
259 int valid = 0;
260 PhysPageEntry *p;
261 int i;
263 if (lp->ptr == PHYS_MAP_NODE_NIL) {
264 return;
267 p = nodes[lp->ptr];
268 for (i = 0; i < P_L2_SIZE; i++) {
269 if (p[i].ptr == PHYS_MAP_NODE_NIL) {
270 continue;
273 valid_ptr = i;
274 valid++;
275 if (p[i].skip) {
276 phys_page_compact(&p[i], nodes, compacted);
280 /* We can only compress if there's only one child. */
281 if (valid != 1) {
282 return;
285 assert(valid_ptr < P_L2_SIZE);
287 /* Don't compress if it won't fit in the # of bits we have. */
288 if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
289 return;
292 lp->ptr = p[valid_ptr].ptr;
293 if (!p[valid_ptr].skip) {
294 /* If our only child is a leaf, make this a leaf. */
295 /* By design, we should have made this node a leaf to begin with so we
296 * should never reach here.
297 * But since it's so simple to handle this, let's do it just in case we
298 * change this rule.
300 lp->skip = 0;
301 } else {
302 lp->skip += p[valid_ptr].skip;
306 static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
308 DECLARE_BITMAP(compacted, nodes_nb);
310 if (d->phys_map.skip) {
311 phys_page_compact(&d->phys_map, d->map.nodes, compacted);
315 static inline bool section_covers_addr(const MemoryRegionSection *section,
316 hwaddr addr)
318 /* Memory topology clips a memory region to [0, 2^64); size.hi > 0 means
319 * the section must cover the entire address space.
321 return section->size.hi ||
322 range_covers_byte(section->offset_within_address_space,
323 section->size.lo, addr);
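/* Walk the radix tree for 'addr': each node's 'skip' value jumps over
 * compacted levels, and the relevant P_L2_BITS of the page index select the
 * child at every step.  Hitting a NIL pointer on the way down resolves to the
 * unassigned section.
 */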
326 static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
327 Node *nodes, MemoryRegionSection *sections)
329 PhysPageEntry *p;
330 hwaddr index = addr >> TARGET_PAGE_BITS;
331 int i;
333 for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
334 if (lp.ptr == PHYS_MAP_NODE_NIL) {
335 return &sections[PHYS_SECTION_UNASSIGNED];
337 p = nodes[lp.ptr];
338 lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
341 if (section_covers_addr(&sections[lp.ptr], addr)) {
342 return &sections[lp.ptr];
343 } else {
344 return &sections[PHYS_SECTION_UNASSIGNED];
348 bool memory_region_is_unassigned(MemoryRegion *mr)
350 return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
351 && mr != &io_mem_watch;
354 /* Called from RCU critical section */
355 static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
356 hwaddr addr,
357 bool resolve_subpage)
359 MemoryRegionSection *section = atomic_read(&d->mru_section);
360 subpage_t *subpage;
361 bool update;
363 if (section && section != &d->map.sections[PHYS_SECTION_UNASSIGNED] &&
364 section_covers_addr(section, addr)) {
365 update = false;
366 } else {
367 section = phys_page_find(d->phys_map, addr, d->map.nodes,
368 d->map.sections);
369 update = true;
371 if (resolve_subpage && section->mr->subpage) {
372 subpage = container_of(section->mr, subpage_t, iomem);
373 section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
375 if (update) {
376 atomic_set(&d->mru_section, section);
378 return section;
381 /* Called from RCU critical section */
382 static MemoryRegionSection *
383 address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
384 hwaddr *plen, bool resolve_subpage)
386 MemoryRegionSection *section;
387 MemoryRegion *mr;
388 Int128 diff;
390 section = address_space_lookup_region(d, addr, resolve_subpage);
391 /* Compute offset within MemoryRegionSection */
392 addr -= section->offset_within_address_space;
394 /* Compute offset within MemoryRegion */
395 *xlat = addr + section->offset_within_region;
397 mr = section->mr;
399 /* MMIO registers can be expected to perform full-width accesses based only
400 * on their address, without considering adjacent registers that could
401 * decode to completely different MemoryRegions. When such registers
402 * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
403 * regions overlap wildly. For this reason we cannot clamp the accesses
404 * here.
406 * If the length is small (as is the case for address_space_ldl/stl),
407 * everything works fine. If the incoming length is large, however,
408 * the caller really has to do the clamping through memory_access_size.
410 if (memory_region_is_ram(mr)) {
411 diff = int128_sub(section->size, int128_make64(addr));
412 *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
414 return section;
417 /* Called from RCU critical section */
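/* The translation may cross IOMMUs: each iteration looks up the section for
 * the current address and, if the region is an IOMMU, the returned
 * IOMMUTLBEntry redirects the lookup into the IOMMU's target address space
 * while clamping *plen to the translated mapping.  The loop stops at the
 * first non-IOMMU region, or at io_mem_unassigned on a permission failure.
 */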
418 MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
419 hwaddr *xlat, hwaddr *plen,
420 bool is_write)
422 IOMMUTLBEntry iotlb;
423 MemoryRegionSection *section;
424 MemoryRegion *mr;
426 for (;;) {
427 AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
428 section = address_space_translate_internal(d, addr, &addr, plen, true);
429 mr = section->mr;
431 if (!mr->iommu_ops) {
432 break;
435 iotlb = mr->iommu_ops->translate(mr, addr, is_write);
436 addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
437 | (addr & iotlb.addr_mask));
438 *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
439 if (!(iotlb.perm & (1 << is_write))) {
440 mr = &io_mem_unassigned;
441 break;
444 as = iotlb.target_as;
447 if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
448 hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
449 *plen = MIN(page, *plen);
452 *xlat = addr;
453 return mr;
456 /* Called from RCU critical section */
457 MemoryRegionSection *
458 address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
459 hwaddr *xlat, hwaddr *plen)
461 MemoryRegionSection *section;
462 AddressSpaceDispatch *d = cpu->cpu_ases[asidx].memory_dispatch;
464 section = address_space_translate_internal(d, addr, xlat, plen, false);
466 assert(!section->mr->iommu_ops);
467 return section;
469 #endif
471 #if !defined(CONFIG_USER_ONLY)
473 static int cpu_common_post_load(void *opaque, int version_id)
475 CPUState *cpu = opaque;
477 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
478 version_id is increased. */
479 cpu->interrupt_request &= ~0x01;
480 tlb_flush(cpu, 1);
482 return 0;
485 static int cpu_common_pre_load(void *opaque)
487 CPUState *cpu = opaque;
489 cpu->exception_index = -1;
491 return 0;
494 static bool cpu_common_exception_index_needed(void *opaque)
496 CPUState *cpu = opaque;
498 return tcg_enabled() && cpu->exception_index != -1;
501 static const VMStateDescription vmstate_cpu_common_exception_index = {
502 .name = "cpu_common/exception_index",
503 .version_id = 1,
504 .minimum_version_id = 1,
505 .needed = cpu_common_exception_index_needed,
506 .fields = (VMStateField[]) {
507 VMSTATE_INT32(exception_index, CPUState),
508 VMSTATE_END_OF_LIST()
512 static bool cpu_common_crash_occurred_needed(void *opaque)
514 CPUState *cpu = opaque;
516 return cpu->crash_occurred;
519 static const VMStateDescription vmstate_cpu_common_crash_occurred = {
520 .name = "cpu_common/crash_occurred",
521 .version_id = 1,
522 .minimum_version_id = 1,
523 .needed = cpu_common_crash_occurred_needed,
524 .fields = (VMStateField[]) {
525 VMSTATE_BOOL(crash_occurred, CPUState),
526 VMSTATE_END_OF_LIST()
530 const VMStateDescription vmstate_cpu_common = {
531 .name = "cpu_common",
532 .version_id = 1,
533 .minimum_version_id = 1,
534 .pre_load = cpu_common_pre_load,
535 .post_load = cpu_common_post_load,
536 .fields = (VMStateField[]) {
537 VMSTATE_UINT32(halted, CPUState),
538 VMSTATE_UINT32(interrupt_request, CPUState),
539 VMSTATE_END_OF_LIST()
541 .subsections = (const VMStateDescription*[]) {
542 &vmstate_cpu_common_exception_index,
543 &vmstate_cpu_common_crash_occurred,
544 NULL
548 #endif
550 CPUState *qemu_get_cpu(int index)
552 CPUState *cpu;
554 CPU_FOREACH(cpu) {
555 if (cpu->cpu_index == index) {
556 return cpu;
560 return NULL;
563 #if !defined(CONFIG_USER_ONLY)
564 void cpu_address_space_init(CPUState *cpu, AddressSpace *as, int asidx)
566 CPUAddressSpace *newas;
568 /* Target code should have set num_ases before calling us */
569 assert(asidx < cpu->num_ases);
571 if (asidx == 0) {
572 /* address space 0 gets the convenience alias */
573 cpu->as = as;
576 /* KVM cannot currently support multiple address spaces. */
577 assert(asidx == 0 || !kvm_enabled());
579 if (!cpu->cpu_ases) {
580 cpu->cpu_ases = g_new0(CPUAddressSpace, cpu->num_ases);
583 newas = &cpu->cpu_ases[asidx];
584 newas->cpu = cpu;
585 newas->as = as;
586 if (tcg_enabled()) {
587 newas->tcg_as_listener.commit = tcg_commit;
588 memory_listener_register(&newas->tcg_as_listener, as);
592 AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx)
594 /* Return the AddressSpace corresponding to the specified index */
595 return cpu->cpu_ases[asidx].as;
597 #endif
599 #ifndef CONFIG_USER_ONLY
600 static DECLARE_BITMAP(cpu_index_map, MAX_CPUMASK_BITS);
602 static int cpu_get_free_index(Error **errp)
604 int cpu = find_first_zero_bit(cpu_index_map, MAX_CPUMASK_BITS);
606 if (cpu >= MAX_CPUMASK_BITS) {
607 error_setg(errp, "Trying to use more CPUs than max of %d",
608 MAX_CPUMASK_BITS);
609 return -1;
612 bitmap_set(cpu_index_map, cpu, 1);
613 return cpu;
616 static void cpu_release_index(CPUState *cpu)
618 bitmap_clear(cpu_index_map, cpu->cpu_index, 1);
620 #else
622 static int cpu_get_free_index(Error **errp)
624 CPUState *some_cpu;
625 int cpu_index = 0;
627 CPU_FOREACH(some_cpu) {
628 cpu_index++;
630 return cpu_index;
633 static void cpu_release_index(CPUState *cpu)
635 return;
637 #endif
639 void cpu_exec_exit(CPUState *cpu)
641 CPUClass *cc = CPU_GET_CLASS(cpu);
643 #if defined(CONFIG_USER_ONLY)
644 cpu_list_lock();
645 #endif
646 if (cpu->cpu_index == -1) {
647 /* cpu_index was never allocated by this @cpu or was already freed. */
648 #if defined(CONFIG_USER_ONLY)
649 cpu_list_unlock();
650 #endif
651 return;
654 QTAILQ_REMOVE(&cpus, cpu, node);
655 cpu_release_index(cpu);
656 cpu->cpu_index = -1;
657 #if defined(CONFIG_USER_ONLY)
658 cpu_list_unlock();
659 #endif
661 if (cc->vmsd != NULL) {
662 vmstate_unregister(NULL, cc->vmsd, cpu);
664 if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
665 vmstate_unregister(NULL, &vmstate_cpu_common, cpu);
669 void cpu_exec_init(CPUState *cpu, Error **errp)
671 CPUClass *cc = CPU_GET_CLASS(cpu);
672 Error *local_err = NULL;
674 #ifdef TARGET_WORDS_BIGENDIAN
675 cpu->bigendian = true;
676 #else
677 cpu->bigendian = false;
678 #endif
679 cpu->as = NULL;
680 cpu->num_ases = 0;
682 #ifndef CONFIG_USER_ONLY
683 cpu->thread_id = qemu_get_thread_id();
685 /* This is a softmmu CPU object, so create a property for it
686 * so users can wire up its memory. (This can't go in qom/cpu.c
687 * because that file is compiled only once for both user-mode
688 * and system builds.) The default if no link is set up is to use
689 * the system address space.
691 object_property_add_link(OBJECT(cpu), "memory", TYPE_MEMORY_REGION,
692 (Object **)&cpu->memory,
693 qdev_prop_allow_set_link_before_realize,
694 OBJ_PROP_LINK_UNREF_ON_RELEASE,
695 &error_abort);
696 cpu->memory = system_memory;
697 object_ref(OBJECT(cpu->memory));
698 #endif
700 #if defined(CONFIG_USER_ONLY)
701 cpu_list_lock();
702 #endif
703 cpu->cpu_index = cpu_get_free_index(&local_err);
704 if (local_err) {
705 error_propagate(errp, local_err);
706 #if defined(CONFIG_USER_ONLY)
707 cpu_list_unlock();
708 #endif
709 return;
711 QTAILQ_INSERT_TAIL(&cpus, cpu, node);
712 #if defined(CONFIG_USER_ONLY)
713 (void) cc;
714 cpu_list_unlock();
715 #else
716 if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
717 vmstate_register(NULL, cpu->cpu_index, &vmstate_cpu_common, cpu);
719 if (cc->vmsd != NULL) {
720 vmstate_register(NULL, cpu->cpu_index, cc->vmsd, cpu);
722 #endif
725 #if defined(CONFIG_USER_ONLY)
726 static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
728 tb_invalidate_phys_page_range(pc, pc + 1, 0);
730 #else
731 static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
733 MemTxAttrs attrs;
734 hwaddr phys = cpu_get_phys_page_attrs_debug(cpu, pc, &attrs);
735 int asidx = cpu_asidx_from_attrs(cpu, attrs);
736 if (phys != -1) {
737 tb_invalidate_phys_addr(cpu->cpu_ases[asidx].as,
738 phys | (pc & ~TARGET_PAGE_MASK));
741 #endif
743 #if defined(CONFIG_USER_ONLY)
744 void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
749 int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
750 int flags)
752 return -ENOSYS;
755 void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
759 int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
760 int flags, CPUWatchpoint **watchpoint)
762 return -ENOSYS;
764 #else
765 /* Add a watchpoint. */
766 int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
767 int flags, CPUWatchpoint **watchpoint)
769 CPUWatchpoint *wp;
771 /* forbid ranges which are empty or run off the end of the address space */
772 if (len == 0 || (addr + len - 1) < addr) {
773 error_report("tried to set invalid watchpoint at %"
774 VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
775 return -EINVAL;
777 wp = g_malloc(sizeof(*wp));
779 wp->vaddr = addr;
780 wp->len = len;
781 wp->flags = flags;
783 /* keep all GDB-injected watchpoints in front */
784 if (flags & BP_GDB) {
785 QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
786 } else {
787 QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
790 tlb_flush_page(cpu, addr);
792 if (watchpoint)
793 *watchpoint = wp;
794 return 0;
797 /* Remove a specific watchpoint. */
798 int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
799 int flags)
801 CPUWatchpoint *wp;
803 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
804 if (addr == wp->vaddr && len == wp->len
805 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
806 cpu_watchpoint_remove_by_ref(cpu, wp);
807 return 0;
810 return -ENOENT;
813 /* Remove a specific watchpoint by reference. */
814 void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
816 QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);
818 tlb_flush_page(cpu, watchpoint->vaddr);
820 g_free(watchpoint);
823 /* Remove all matching watchpoints. */
824 void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
826 CPUWatchpoint *wp, *next;
828 QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
829 if (wp->flags & mask) {
830 cpu_watchpoint_remove_by_ref(cpu, wp);
835 /* Return true if this watchpoint address matches the specified
836 * access (ie the address range covered by the watchpoint overlaps
837 * partially or completely with the address range covered by the
838 * access).
840 static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
841 vaddr addr,
842 vaddr len)
844 /* We know the lengths are non-zero, but a little caution is
845 * required to avoid errors in the case where the range ends
846 * exactly at the top of the address space and so addr + len
847 * wraps round to zero.
849 vaddr wpend = wp->vaddr + wp->len - 1;
850 vaddr addrend = addr + len - 1;
852 return !(addr > wpend || wp->vaddr > addrend);
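/* Example (assuming a 64-bit vaddr): an access of len 4 at
 * 0xfffffffffffffffc has addr + len == 0 after wrapping, but
 * addrend == 0xffffffffffffffff, so the inclusive-end comparison above still
 * behaves correctly at the very top of the address space.
 */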
855 #endif
857 /* Add a breakpoint. */
858 int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
859 CPUBreakpoint **breakpoint)
861 CPUBreakpoint *bp;
863 bp = g_malloc(sizeof(*bp));
865 bp->pc = pc;
866 bp->flags = flags;
868 /* keep all GDB-injected breakpoints in front */
869 if (flags & BP_GDB) {
870 QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
871 } else {
872 QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
875 breakpoint_invalidate(cpu, pc);
877 if (breakpoint) {
878 *breakpoint = bp;
880 return 0;
883 /* Remove a specific breakpoint. */
884 int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
886 CPUBreakpoint *bp;
888 QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
889 if (bp->pc == pc && bp->flags == flags) {
890 cpu_breakpoint_remove_by_ref(cpu, bp);
891 return 0;
894 return -ENOENT;
897 /* Remove a specific breakpoint by reference. */
898 void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
900 QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);
902 breakpoint_invalidate(cpu, breakpoint->pc);
904 g_free(breakpoint);
907 /* Remove all matching breakpoints. */
908 void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
910 CPUBreakpoint *bp, *next;
912 QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
913 if (bp->flags & mask) {
914 cpu_breakpoint_remove_by_ref(cpu, bp);
919 /* enable or disable single step mode. EXCP_DEBUG is returned by the
920 CPU loop after each instruction */
921 void cpu_single_step(CPUState *cpu, int enabled)
923 if (cpu->singlestep_enabled != enabled) {
924 cpu->singlestep_enabled = enabled;
925 if (kvm_enabled()) {
926 kvm_update_guest_debug(cpu, 0);
927 } else {
928 /* must flush all the translated code to avoid inconsistencies */
929 /* XXX: only flush what is necessary */
930 tb_flush(cpu);
935 void QEMU_NORETURN cpu_abort(CPUState *cpu, const char *fmt, ...)
937 va_list ap;
938 va_list ap2;
940 va_start(ap, fmt);
941 va_copy(ap2, ap);
942 fprintf(stderr, "qemu: fatal: ");
943 vfprintf(stderr, fmt, ap);
944 fprintf(stderr, "\n");
945 cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
946 if (qemu_log_separate()) {
947 qemu_log("qemu: fatal: ");
948 qemu_log_vprintf(fmt, ap2);
949 qemu_log("\n");
950 log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
951 qemu_log_flush();
952 qemu_log_close();
954 va_end(ap2);
955 va_end(ap);
956 replay_finish();
957 #if defined(CONFIG_USER_ONLY)
959 struct sigaction act;
960 sigfillset(&act.sa_mask);
961 act.sa_handler = SIG_DFL;
962 sigaction(SIGABRT, &act, NULL);
964 #endif
965 abort();
968 #if !defined(CONFIG_USER_ONLY)
969 /* Called from RCU critical section */
970 static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
972 RAMBlock *block;
974 block = atomic_rcu_read(&ram_list.mru_block);
975 if (block && addr - block->offset < block->max_length) {
976 return block;
978 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
979 if (addr - block->offset < block->max_length) {
980 goto found;
984 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
985 abort();
987 found:
988 /* It is safe to write mru_block outside the iothread lock. This
989 * is what happens:
991 * mru_block = xxx
992 * rcu_read_unlock()
993 * xxx removed from list
994 * rcu_read_lock()
995 * read mru_block
996 * mru_block = NULL;
997 * call_rcu(reclaim_ramblock, xxx);
998 * rcu_read_unlock()
1000 * atomic_rcu_set is not needed here. The block was already published
1001 * when it was placed into the list. Here we're just making an extra
1002 * copy of the pointer.
1004 ram_list.mru_block = block;
1005 return block;
1008 static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
1010 CPUState *cpu;
1011 ram_addr_t start1;
1012 RAMBlock *block;
1013 ram_addr_t end;
1015 end = TARGET_PAGE_ALIGN(start + length);
1016 start &= TARGET_PAGE_MASK;
1018 rcu_read_lock();
1019 block = qemu_get_ram_block(start);
1020 assert(block == qemu_get_ram_block(end - 1));
1021 start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
1022 CPU_FOREACH(cpu) {
1023 tlb_reset_dirty(cpu, start1, length);
1025 rcu_read_unlock();
1028 /* Note: start and end must be within the same ram block. */
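/* The dirty bitmap is sharded into DIRTY_MEMORY_BLOCK_SIZE-page chunks whose
 * pointer array is published via RCU; it is read once below, so the loop keeps
 * working even if dirty_memory_extend() publishes a larger copy concurrently.
 */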
1029 bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
1030 ram_addr_t length,
1031 unsigned client)
1033 DirtyMemoryBlocks *blocks;
1034 unsigned long end, page;
1035 bool dirty = false;
1037 if (length == 0) {
1038 return false;
1041 end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
1042 page = start >> TARGET_PAGE_BITS;
1044 rcu_read_lock();
1046 blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);
1048 while (page < end) {
1049 unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
1050 unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE;
1051 unsigned long num = MIN(end - page, DIRTY_MEMORY_BLOCK_SIZE - offset);
1053 dirty |= bitmap_test_and_clear_atomic(blocks->blocks[idx],
1054 offset, num);
1055 page += num;
1058 rcu_read_unlock();
1060 if (dirty && tcg_enabled()) {
1061 tlb_reset_dirty_range_all(start, length);
1064 return dirty;
1067 /* Called from RCU critical section */
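/* The returned iotlb value has two encodings: for RAM it is the ram_addr of
 * the page ORed with PHYS_SECTION_NOTDIRTY or PHYS_SECTION_ROM (the low bits
 * are free because sections_nb stays below TARGET_PAGE_SIZE); for MMIO it is
 * the section index within the dispatch map plus the offset 'xlat'.
 */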
1068 hwaddr memory_region_section_get_iotlb(CPUState *cpu,
1069 MemoryRegionSection *section,
1070 target_ulong vaddr,
1071 hwaddr paddr, hwaddr xlat,
1072 int prot,
1073 target_ulong *address)
1075 hwaddr iotlb;
1076 CPUWatchpoint *wp;
1078 if (memory_region_is_ram(section->mr)) {
1079 /* Normal RAM. */
1080 iotlb = memory_region_get_ram_addr(section->mr) + xlat;
1081 if (!section->readonly) {
1082 iotlb |= PHYS_SECTION_NOTDIRTY;
1083 } else {
1084 iotlb |= PHYS_SECTION_ROM;
1086 } else {
1087 AddressSpaceDispatch *d;
1089 d = atomic_rcu_read(&section->address_space->dispatch);
1090 iotlb = section - d->map.sections;
1091 iotlb += xlat;
1094 /* Make accesses to pages with watchpoints go via the
1095 watchpoint trap routines. */
1096 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
1097 if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
1098 /* Avoid trapping reads of pages with a write breakpoint. */
1099 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
1100 iotlb = PHYS_SECTION_WATCH + paddr;
1101 *address |= TLB_MMIO;
1102 break;
1107 return iotlb;
1109 #endif /* defined(CONFIG_USER_ONLY) */
1111 #if !defined(CONFIG_USER_ONLY)
1113 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
1114 uint16_t section);
1115 static subpage_t *subpage_init(AddressSpace *as, hwaddr base);
1117 static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
1118 qemu_anon_ram_alloc;
1121 * Set a custom physical guest memory allocator.
1122 * Accelerators with unusual needs may need this. Hopefully, we can
1123 * get rid of it eventually.
1125 void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
1127 phys_mem_alloc = alloc;
1130 static uint16_t phys_section_add(PhysPageMap *map,
1131 MemoryRegionSection *section)
1133 /* The physical section number is ORed with a page-aligned
1134 * pointer to produce the iotlb entries. Thus it should
1135 * never overflow into the page-aligned value.
1137 assert(map->sections_nb < TARGET_PAGE_SIZE);
1139 if (map->sections_nb == map->sections_nb_alloc) {
1140 map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
1141 map->sections = g_renew(MemoryRegionSection, map->sections,
1142 map->sections_nb_alloc);
1144 map->sections[map->sections_nb] = *section;
1145 memory_region_ref(section->mr);
1146 return map->sections_nb++;
1149 static void phys_section_destroy(MemoryRegion *mr)
1151 bool have_sub_page = mr->subpage;
1153 memory_region_unref(mr);
1155 if (have_sub_page) {
1156 subpage_t *subpage = container_of(mr, subpage_t, iomem);
1157 object_unref(OBJECT(&subpage->iomem));
1158 g_free(subpage);
1162 static void phys_sections_free(PhysPageMap *map)
1164 while (map->sections_nb > 0) {
1165 MemoryRegionSection *section = &map->sections[--map->sections_nb];
1166 phys_section_destroy(section->mr);
1168 g_free(map->sections);
1169 g_free(map->nodes);
1172 static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
1174 subpage_t *subpage;
1175 hwaddr base = section->offset_within_address_space
1176 & TARGET_PAGE_MASK;
1177 MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
1178 d->map.nodes, d->map.sections);
1179 MemoryRegionSection subsection = {
1180 .offset_within_address_space = base,
1181 .size = int128_make64(TARGET_PAGE_SIZE),
1183 hwaddr start, end;
1185 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
1187 if (!(existing->mr->subpage)) {
1188 subpage = subpage_init(d->as, base);
1189 subsection.address_space = d->as;
1190 subsection.mr = &subpage->iomem;
1191 phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
1192 phys_section_add(&d->map, &subsection));
1193 } else {
1194 subpage = container_of(existing->mr, subpage_t, iomem);
1196 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
1197 end = start + int128_get64(section->size) - 1;
1198 subpage_register(subpage, start, end,
1199 phys_section_add(&d->map, section));
1203 static void register_multipage(AddressSpaceDispatch *d,
1204 MemoryRegionSection *section)
1206 hwaddr start_addr = section->offset_within_address_space;
1207 uint16_t section_index = phys_section_add(&d->map, section);
1208 uint64_t num_pages = int128_get64(int128_rshift(section->size,
1209 TARGET_PAGE_BITS));
1211 assert(num_pages);
1212 phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
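/* mem_add() splits an incoming section into up to three pieces: an unaligned
 * head sharing its page with other sections (registered as a subpage), a run
 * of whole pages registered via register_multipage(), and an unaligned tail
 * that again becomes a subpage.
 */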
1215 static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
1217 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
1218 AddressSpaceDispatch *d = as->next_dispatch;
1219 MemoryRegionSection now = *section, remain = *section;
1220 Int128 page_size = int128_make64(TARGET_PAGE_SIZE);
1222 if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
1223 uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
1224 - now.offset_within_address_space;
1226 now.size = int128_min(int128_make64(left), now.size);
1227 register_subpage(d, &now);
1228 } else {
1229 now.size = int128_zero();
1231 while (int128_ne(remain.size, now.size)) {
1232 remain.size = int128_sub(remain.size, now.size);
1233 remain.offset_within_address_space += int128_get64(now.size);
1234 remain.offset_within_region += int128_get64(now.size);
1235 now = remain;
1236 if (int128_lt(remain.size, page_size)) {
1237 register_subpage(d, &now);
1238 } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
1239 now.size = page_size;
1240 register_subpage(d, &now);
1241 } else {
1242 now.size = int128_and(now.size, int128_neg(page_size));
1243 register_multipage(d, &now);
1248 void qemu_flush_coalesced_mmio_buffer(void)
1250 if (kvm_enabled())
1251 kvm_flush_coalesced_mmio_buffer();
1254 void qemu_mutex_lock_ramlist(void)
1256 qemu_mutex_lock(&ram_list.mutex);
1259 void qemu_mutex_unlock_ramlist(void)
1261 qemu_mutex_unlock(&ram_list.mutex);
1264 #ifdef __linux__
1265 static void *file_ram_alloc(RAMBlock *block,
1266 ram_addr_t memory,
1267 const char *path,
1268 Error **errp)
1270 bool unlink_on_error = false;
1271 char *filename;
1272 char *sanitized_name;
1273 char *c;
1274 void * volatile area = NULL;
1275 int fd = -1;
1276 int64_t page_size;
1278 if (kvm_enabled() && !kvm_has_sync_mmu()) {
1279 error_setg(errp,
1280 "host lacks kvm mmu notifiers, -mem-path unsupported");
1281 return NULL;
1284 for (;;) {
1285 fd = open(path, O_RDWR);
1286 if (fd >= 0) {
1287 /* @path names an existing file, use it */
1288 break;
1290 if (errno == ENOENT) {
1291 /* @path names a file that doesn't exist, create it */
1292 fd = open(path, O_RDWR | O_CREAT | O_EXCL, 0644);
1293 if (fd >= 0) {
1294 unlink_on_error = true;
1295 break;
1297 } else if (errno == EISDIR) {
1298 /* @path names a directory, create a file there */
1299 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
1300 sanitized_name = g_strdup(memory_region_name(block->mr));
1301 for (c = sanitized_name; *c != '\0'; c++) {
1302 if (*c == '/') {
1303 *c = '_';
1307 filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
1308 sanitized_name);
1309 g_free(sanitized_name);
1311 fd = mkstemp(filename);
1312 if (fd >= 0) {
1313 unlink(filename);
1314 g_free(filename);
1315 break;
1317 g_free(filename);
1319 if (errno != EEXIST && errno != EINTR) {
1320 error_setg_errno(errp, errno,
1321 "can't open backing store %s for guest RAM",
1322 path);
1323 goto error;
1326 * Try again on EINTR and EEXIST. The latter happens when
1327 * something else creates the file between our two open().
1331 page_size = qemu_fd_getpagesize(fd);
1332 block->mr->align = MAX(page_size, QEMU_VMALLOC_ALIGN);
1334 if (memory < page_size) {
1335 error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
1336 "or larger than page size 0x%" PRIx64,
1337 memory, page_size);
1338 goto error;
1341 memory = ROUND_UP(memory, page_size);
1344 * ftruncate is not supported by hugetlbfs in older
1345 * hosts, so don't bother bailing out on errors.
1346 * If anything goes wrong with it under other filesystems,
1347 * mmap will fail.
1349 if (ftruncate(fd, memory)) {
1350 perror("ftruncate");
1353 area = qemu_ram_mmap(fd, memory, block->mr->align,
1354 block->flags & RAM_SHARED);
1355 if (area == MAP_FAILED) {
1356 error_setg_errno(errp, errno,
1357 "unable to map backing store for guest RAM");
1358 goto error;
1361 if (mem_prealloc) {
1362 os_mem_prealloc(fd, area, memory);
1365 block->fd = fd;
1366 return area;
1368 error:
1369 if (unlink_on_error) {
1370 unlink(path);
1372 if (fd != -1) {
1373 close(fd);
1375 return NULL;
1377 #endif
1379 /* Called with the ramlist lock held. */
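/* Best-fit search over the RAM block list: for every existing block, compute
 * the gap between its end and the closest following block, and remember the
 * smallest gap that still fits 'size'.  New RAM therefore lands in the
 * tightest hole rather than simply after the last block.
 */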
1380 static ram_addr_t find_ram_offset(ram_addr_t size)
1382 RAMBlock *block, *next_block;
1383 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
1385 assert(size != 0); /* it would hand out the same offset multiple times */
1387 if (QLIST_EMPTY_RCU(&ram_list.blocks)) {
1388 return 0;
1391 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1392 ram_addr_t end, next = RAM_ADDR_MAX;
1394 end = block->offset + block->max_length;
1396 QLIST_FOREACH_RCU(next_block, &ram_list.blocks, next) {
1397 if (next_block->offset >= end) {
1398 next = MIN(next, next_block->offset);
1401 if (next - end >= size && next - end < mingap) {
1402 offset = end;
1403 mingap = next - end;
1407 if (offset == RAM_ADDR_MAX) {
1408 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
1409 (uint64_t)size);
1410 abort();
1413 return offset;
1416 ram_addr_t last_ram_offset(void)
1418 RAMBlock *block;
1419 ram_addr_t last = 0;
1421 rcu_read_lock();
1422 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1423 last = MAX(last, block->offset + block->max_length);
1425 rcu_read_unlock();
1426 return last;
1429 static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1431 int ret;
1433 /* Use MADV_DONTDUMP if the user doesn't want the guest memory in the core dump */
1434 if (!machine_dump_guest_core(current_machine)) {
1435 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1436 if (ret) {
1437 perror("qemu_madvise");
1438 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1439 "but dump_guest_core=off specified\n");
1444 const char *qemu_ram_get_idstr(RAMBlock *rb)
1446 return rb->idstr;
1449 /* Called with iothread lock held. */
1450 void qemu_ram_set_idstr(RAMBlock *new_block, const char *name, DeviceState *dev)
1452 RAMBlock *block;
1454 assert(new_block);
1455 assert(!new_block->idstr[0]);
1457 if (dev) {
1458 char *id = qdev_get_dev_path(dev);
1459 if (id) {
1460 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
1461 g_free(id);
1464 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1466 rcu_read_lock();
1467 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1468 if (block != new_block &&
1469 !strcmp(block->idstr, new_block->idstr)) {
1470 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1471 new_block->idstr);
1472 abort();
1475 rcu_read_unlock();
1478 /* Called with iothread lock held. */
1479 void qemu_ram_unset_idstr(RAMBlock *block)
1481 /* FIXME: arch_init.c assumes that this is not called throughout
1482 * migration. Ignore the problem since hot-unplug during migration
1483 * does not work anyway.
1485 if (block) {
1486 memset(block->idstr, 0, sizeof(block->idstr));
1490 static int memory_try_enable_merging(void *addr, size_t len)
1492 if (!machine_mem_merge(current_machine)) {
1493 /* disabled by the user */
1494 return 0;
1497 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1500 /* Only legal before the guest might have detected the memory size: e.g. on
1501 * incoming migration, or right after reset.
1503 * As the memory core doesn't know how the memory is accessed, it is up to the
1504 * resize callback to update device state and/or add assertions to detect
1505 * misuse, if necessary.
1507 int qemu_ram_resize(RAMBlock *block, ram_addr_t newsize, Error **errp)
1509 assert(block);
1511 newsize = HOST_PAGE_ALIGN(newsize);
1513 if (block->used_length == newsize) {
1514 return 0;
1517 if (!(block->flags & RAM_RESIZEABLE)) {
1518 error_setg_errno(errp, EINVAL,
1519 "Length mismatch: %s: 0x" RAM_ADDR_FMT
1520 " in != 0x" RAM_ADDR_FMT, block->idstr,
1521 newsize, block->used_length);
1522 return -EINVAL;
1525 if (block->max_length < newsize) {
1526 error_setg_errno(errp, EINVAL,
1527 "Length too large: %s: 0x" RAM_ADDR_FMT
1528 " > 0x" RAM_ADDR_FMT, block->idstr,
1529 newsize, block->max_length);
1530 return -EINVAL;
1533 cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
1534 block->used_length = newsize;
1535 cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
1536 DIRTY_CLIENTS_ALL);
1537 memory_region_set_size(block->mr, newsize);
1538 if (block->resized) {
1539 block->resized(block->idstr, newsize, block->host);
1541 return 0;
1544 /* Called with ram_list.mutex held */
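/* Growing the dirty bitmaps is done RCU-style: allocate a larger array of
 * bitmap pointers, copy the existing pointers across, allocate fresh bitmaps
 * only for the newly covered blocks, publish the new array with
 * atomic_rcu_set() and reclaim the old one after a grace period.
 */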
1545 static void dirty_memory_extend(ram_addr_t old_ram_size,
1546 ram_addr_t new_ram_size)
1548 ram_addr_t old_num_blocks = DIV_ROUND_UP(old_ram_size,
1549 DIRTY_MEMORY_BLOCK_SIZE);
1550 ram_addr_t new_num_blocks = DIV_ROUND_UP(new_ram_size,
1551 DIRTY_MEMORY_BLOCK_SIZE);
1552 int i;
1554 /* Only need to extend if block count increased */
1555 if (new_num_blocks <= old_num_blocks) {
1556 return;
1559 for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
1560 DirtyMemoryBlocks *old_blocks;
1561 DirtyMemoryBlocks *new_blocks;
1562 int j;
1564 old_blocks = atomic_rcu_read(&ram_list.dirty_memory[i]);
1565 new_blocks = g_malloc(sizeof(*new_blocks) +
1566 sizeof(new_blocks->blocks[0]) * new_num_blocks);
1568 if (old_num_blocks) {
1569 memcpy(new_blocks->blocks, old_blocks->blocks,
1570 old_num_blocks * sizeof(old_blocks->blocks[0]));
1573 for (j = old_num_blocks; j < new_num_blocks; j++) {
1574 new_blocks->blocks[j] = bitmap_new(DIRTY_MEMORY_BLOCK_SIZE);
1577 atomic_rcu_set(&ram_list.dirty_memory[i], new_blocks);
1579 if (old_blocks) {
1580 g_free_rcu(old_blocks, rcu);
1585 static void ram_block_add(RAMBlock *new_block, Error **errp)
1587 RAMBlock *block;
1588 RAMBlock *last_block = NULL;
1589 ram_addr_t old_ram_size, new_ram_size;
1590 Error *err = NULL;
1592 old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
1594 qemu_mutex_lock_ramlist();
1595 new_block->offset = find_ram_offset(new_block->max_length);
1597 if (!new_block->host) {
1598 if (xen_enabled()) {
1599 xen_ram_alloc(new_block->offset, new_block->max_length,
1600 new_block->mr, &err);
1601 if (err) {
1602 error_propagate(errp, err);
1603 qemu_mutex_unlock_ramlist();
1604 return;
1606 } else {
1607 new_block->host = phys_mem_alloc(new_block->max_length,
1608 &new_block->mr->align);
1609 if (!new_block->host) {
1610 error_setg_errno(errp, errno,
1611 "cannot set up guest memory '%s'",
1612 memory_region_name(new_block->mr));
1613 qemu_mutex_unlock_ramlist();
1614 return;
1616 memory_try_enable_merging(new_block->host, new_block->max_length);
1620 new_ram_size = MAX(old_ram_size,
1621 (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS);
1622 if (new_ram_size > old_ram_size) {
1623 migration_bitmap_extend(old_ram_size, new_ram_size);
1624 dirty_memory_extend(old_ram_size, new_ram_size);
1626 /* Keep the list sorted from biggest to smallest block. Unlike QTAILQ,
1627 * QLIST (which has an RCU-friendly variant) does not have insertion at
1628 * tail, so save the last element in last_block.
1630 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1631 last_block = block;
1632 if (block->max_length < new_block->max_length) {
1633 break;
1636 if (block) {
1637 QLIST_INSERT_BEFORE_RCU(block, new_block, next);
1638 } else if (last_block) {
1639 QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
1640 } else { /* list is empty */
1641 QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
1643 ram_list.mru_block = NULL;
1645 /* Write list before version */
1646 smp_wmb();
1647 ram_list.version++;
1648 qemu_mutex_unlock_ramlist();
1650 cpu_physical_memory_set_dirty_range(new_block->offset,
1651 new_block->used_length,
1652 DIRTY_CLIENTS_ALL);
1654 if (new_block->host) {
1655 qemu_ram_setup_dump(new_block->host, new_block->max_length);
1656 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
1657 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
1658 if (kvm_enabled()) {
1659 kvm_setup_guest_memory(new_block->host, new_block->max_length);
1664 #ifdef __linux__
1665 RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
1666 bool share, const char *mem_path,
1667 Error **errp)
1669 RAMBlock *new_block;
1670 Error *local_err = NULL;
1672 if (xen_enabled()) {
1673 error_setg(errp, "-mem-path not supported with Xen");
1674 return NULL;
1677 if (phys_mem_alloc != qemu_anon_ram_alloc) {
1679 * file_ram_alloc() needs to allocate just like
1680 * phys_mem_alloc, but we haven't bothered to provide
1681 * a hook there.
1683 error_setg(errp,
1684 "-mem-path not supported with this accelerator");
1685 return NULL;
1688 size = HOST_PAGE_ALIGN(size);
1689 new_block = g_malloc0(sizeof(*new_block));
1690 new_block->mr = mr;
1691 new_block->used_length = size;
1692 new_block->max_length = size;
1693 new_block->flags = share ? RAM_SHARED : 0;
1694 new_block->host = file_ram_alloc(new_block, size,
1695 mem_path, errp);
1696 if (!new_block->host) {
1697 g_free(new_block);
1698 return NULL;
1701 ram_block_add(new_block, &local_err);
1702 if (local_err) {
1703 g_free(new_block);
1704 error_propagate(errp, local_err);
1705 return NULL;
1707 return new_block;
1709 #endif
1711 static
1712 RAMBlock *qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
1713 void (*resized)(const char*,
1714 uint64_t length,
1715 void *host),
1716 void *host, bool resizeable,
1717 MemoryRegion *mr, Error **errp)
1719 RAMBlock *new_block;
1720 Error *local_err = NULL;
1722 size = HOST_PAGE_ALIGN(size);
1723 max_size = HOST_PAGE_ALIGN(max_size);
1724 new_block = g_malloc0(sizeof(*new_block));
1725 new_block->mr = mr;
1726 new_block->resized = resized;
1727 new_block->used_length = size;
1728 new_block->max_length = max_size;
1729 assert(max_size >= size);
1730 new_block->fd = -1;
1731 new_block->host = host;
1732 if (host) {
1733 new_block->flags |= RAM_PREALLOC;
1735 if (resizeable) {
1736 new_block->flags |= RAM_RESIZEABLE;
1738 ram_block_add(new_block, &local_err);
1739 if (local_err) {
1740 g_free(new_block);
1741 error_propagate(errp, local_err);
1742 return NULL;
1744 return new_block;
1747 RAMBlock *qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
1748 MemoryRegion *mr, Error **errp)
1750 return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
1753 RAMBlock *qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
1755 return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
1758 RAMBlock *qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
1759 void (*resized)(const char*,
1760 uint64_t length,
1761 void *host),
1762 MemoryRegion *mr, Error **errp)
1764 return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
1767 static void reclaim_ramblock(RAMBlock *block)
1769 if (block->flags & RAM_PREALLOC) {
1771 } else if (xen_enabled()) {
1772 xen_invalidate_map_cache_entry(block->host);
1773 #ifndef _WIN32
1774 } else if (block->fd >= 0) {
1775 qemu_ram_munmap(block->host, block->max_length);
1776 close(block->fd);
1777 #endif
1778 } else {
1779 qemu_anon_ram_free(block->host, block->max_length);
1781 g_free(block);
1784 void qemu_ram_free(RAMBlock *block)
1786 if (!block) {
1787 return;
1790 qemu_mutex_lock_ramlist();
1791 QLIST_REMOVE_RCU(block, next);
1792 ram_list.mru_block = NULL;
1793 /* Write list before version */
1794 smp_wmb();
1795 ram_list.version++;
1796 call_rcu(block, reclaim_ramblock, rcu);
1797 qemu_mutex_unlock_ramlist();
1800 #ifndef _WIN32
1801 void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1803 RAMBlock *block;
1804 ram_addr_t offset;
1805 int flags;
1806 void *area, *vaddr;
1808 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1809 offset = addr - block->offset;
1810 if (offset < block->max_length) {
1811 vaddr = ramblock_ptr(block, offset);
1812 if (block->flags & RAM_PREALLOC) {
1814 } else if (xen_enabled()) {
1815 abort();
1816 } else {
1817 flags = MAP_FIXED;
1818 if (block->fd >= 0) {
1819 flags |= (block->flags & RAM_SHARED ?
1820 MAP_SHARED : MAP_PRIVATE);
1821 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1822 flags, block->fd, offset);
1823 } else {
1825 * Remap needs to match alloc. Accelerators that
1826 * set phys_mem_alloc never remap. If they did,
1827 * we'd need a remap hook here.
1829 assert(phys_mem_alloc == qemu_anon_ram_alloc);
1831 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1832 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1833 flags, -1, 0);
1835 if (area != vaddr) {
1836 fprintf(stderr, "Could not remap addr: "
1837 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
1838 length, addr);
1839 exit(1);
1841 memory_try_enable_merging(vaddr, length);
1842 qemu_ram_setup_dump(vaddr, length);
1847 #endif /* !_WIN32 */
1849 /* Return a host pointer to ram allocated with qemu_ram_alloc.
1850 * This should not be used for general purpose DMA. Use address_space_map
1851 * or address_space_rw instead. For local memory (e.g. video ram) that the
1852 * device owns, use memory_region_get_ram_ptr.
1854 * Called within RCU critical section.
1856 void *qemu_map_ram_ptr(RAMBlock *ram_block, ram_addr_t addr)
1858 RAMBlock *block = ram_block;
1860 if (block == NULL) {
1861 block = qemu_get_ram_block(addr);
1862 addr -= block->offset;
1865 if (xen_enabled() && block->host == NULL) {
1866 /* We need to check if the requested address is in the RAM
1867 * because we don't want to map the entire memory in QEMU.
1868 * In that case just map until the end of the page.
1870 if (block->offset == 0) {
1871 return xen_map_cache(addr, 0, 0);
1874 block->host = xen_map_cache(block->offset, block->max_length, 1);
1876 return ramblock_ptr(block, addr);
1879 /* Return a host pointer to guest's ram. Similar to qemu_map_ram_ptr
1880 * but takes a size argument.
1882 * Called within RCU critical section.
1884 static void *qemu_ram_ptr_length(RAMBlock *ram_block, ram_addr_t addr,
1885 hwaddr *size)
1887 RAMBlock *block = ram_block;
1888 if (*size == 0) {
1889 return NULL;
1892 if (block == NULL) {
1893 block = qemu_get_ram_block(addr);
1894 addr -= block->offset;
1896 *size = MIN(*size, block->max_length - addr);
1898 if (xen_enabled() && block->host == NULL) {
1899 /* We need to check if the requested address is in the RAM
1900 * because we don't want to map the entire memory in QEMU.
1901 * In that case just map the requested area.
1903 if (block->offset == 0) {
1904 return xen_map_cache(addr, *size, 1);
1907 block->host = xen_map_cache(block->offset, block->max_length, 1);
1910 return ramblock_ptr(block, addr);
1914 * Translates a host ptr back to a RAMBlock, a ram_addr and an offset
1915 * in that RAMBlock.
1917 * ptr: Host pointer to look up
1918 * round_offset: If true round the result offset down to a page boundary
1919 * *ram_addr: set to result ram_addr
1920 * *offset: set to result offset within the RAMBlock
1922 * Returns: RAMBlock (or NULL if not found)
1924 * By the time this function returns, the returned pointer is not protected
1925 * by RCU anymore. If the caller is not within an RCU critical section and
1926 * does not hold the iothread lock, it must have other means of protecting the
1927 * pointer, such as a reference to the region that includes the incoming
1928 * ram_addr_t.
1930 RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
1931 ram_addr_t *offset)
1933 RAMBlock *block;
1934 uint8_t *host = ptr;
1936 if (xen_enabled()) {
1937 ram_addr_t ram_addr;
1938 rcu_read_lock();
1939 ram_addr = xen_ram_addr_from_mapcache(ptr);
1940 block = qemu_get_ram_block(ram_addr);
1941 if (block) {
1942 *offset = ram_addr - block->offset;
1944 rcu_read_unlock();
1945 return block;
1948 rcu_read_lock();
1949 block = atomic_rcu_read(&ram_list.mru_block);
1950 if (block && block->host && host - block->host < block->max_length) {
1951 goto found;
1954 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1955 /* This case happens when the block is not mapped. */
1956 if (block->host == NULL) {
1957 continue;
1959 if (host - block->host < block->max_length) {
1960 goto found;
1964 rcu_read_unlock();
1965 return NULL;
1967 found:
1968 *offset = (host - block->host);
1969 if (round_offset) {
1970 *offset &= TARGET_PAGE_MASK;
1972 rcu_read_unlock();
1973 return block;
1977 * Finds the named RAMBlock
1979 * name: The name of RAMBlock to find
1981 * Returns: RAMBlock (or NULL if not found)
1983 RAMBlock *qemu_ram_block_by_name(const char *name)
1985 RAMBlock *block;
1987 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1988 if (!strcmp(name, block->idstr)) {
1989 return block;
1993 return NULL;
1996 /* Some of the softmmu routines need to translate from a host pointer
1997 (typically a TLB entry) back to a ram offset. */
1998 ram_addr_t qemu_ram_addr_from_host(void *ptr)
2000 RAMBlock *block;
2001 ram_addr_t offset;
2003 block = qemu_ram_block_from_host(ptr, false, &offset);
2004 if (!block) {
2005 return RAM_ADDR_INVALID;
2008 return block->offset + offset;
2011 /* Called within RCU critical section. */
2012 static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
2013 uint64_t val, unsigned size)
2015 if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
2016 tb_invalidate_phys_page_fast(ram_addr, size);
2018 switch (size) {
2019 case 1:
2020 stb_p(qemu_map_ram_ptr(NULL, ram_addr), val);
2021 break;
2022 case 2:
2023 stw_p(qemu_map_ram_ptr(NULL, ram_addr), val);
2024 break;
2025 case 4:
2026 stl_p(qemu_map_ram_ptr(NULL, ram_addr), val);
2027 break;
2028 default:
2029 abort();
2031 /* Set both VGA and migration bits for simplicity and to remove
2032 * the notdirty callback faster.
2034 cpu_physical_memory_set_dirty_range(ram_addr, size,
2035 DIRTY_CLIENTS_NOCODE);
2036 /* we remove the notdirty callback only if the code has been
2037 flushed */
2038 if (!cpu_physical_memory_is_clean(ram_addr)) {
2039 tlb_set_dirty(current_cpu, current_cpu->mem_io_vaddr);
2043 static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
2044 unsigned size, bool is_write)
2046 return is_write;
2049 static const MemoryRegionOps notdirty_mem_ops = {
2050 .write = notdirty_mem_write,
2051 .valid.accepts = notdirty_mem_accepts,
2052 .endianness = DEVICE_NATIVE_ENDIAN,
2055 /* Generate a debug exception if a watchpoint has been hit. */
2056 static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
2058 CPUState *cpu = current_cpu;
2059 CPUClass *cc = CPU_GET_CLASS(cpu);
2060 CPUArchState *env = cpu->env_ptr;
2061 target_ulong pc, cs_base;
2062 target_ulong vaddr;
2063 CPUWatchpoint *wp;
2064 uint32_t cpu_flags;
2066 if (cpu->watchpoint_hit) {
2067 /* We re-entered the check after replacing the TB. Now raise
2068 * the debug interrupt so that it will trigger after the
2069 * current instruction. */
2070 cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
2071 return;
2073 vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
2074 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
2075 if (cpu_watchpoint_address_matches(wp, vaddr, len)
2076 && (wp->flags & flags)) {
2077 if (flags == BP_MEM_READ) {
2078 wp->flags |= BP_WATCHPOINT_HIT_READ;
2079 } else {
2080 wp->flags |= BP_WATCHPOINT_HIT_WRITE;
2082 wp->hitaddr = vaddr;
2083 wp->hitattrs = attrs;
2084 if (!cpu->watchpoint_hit) {
2085 if (wp->flags & BP_CPU &&
2086 !cc->debug_check_watchpoint(cpu, wp)) {
2087 wp->flags &= ~BP_WATCHPOINT_HIT;
2088 continue;
2090 cpu->watchpoint_hit = wp;
2091 tb_check_watchpoint(cpu);
2092 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
2093 cpu->exception_index = EXCP_DEBUG;
2094 cpu_loop_exit(cpu);
2095 } else {
2096 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
2097 tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
2098 cpu_loop_exit_noexc(cpu);
2101 } else {
2102 wp->flags &= ~BP_WATCHPOINT_HIT;
2107 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2108 so these check for a hit then pass through to the normal out-of-line
2109 phys routines. */
2110 static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata,
2111 unsigned size, MemTxAttrs attrs)
2113 MemTxResult res;
2114 uint64_t data;
2115 int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
2116 AddressSpace *as = current_cpu->cpu_ases[asidx].as;
2118 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ);
2119 switch (size) {
2120 case 1:
2121 data = address_space_ldub(as, addr, attrs, &res);
2122 break;
2123 case 2:
2124 data = address_space_lduw(as, addr, attrs, &res);
2125 break;
2126 case 4:
2127 data = address_space_ldl(as, addr, attrs, &res);
2128 break;
2129 default: abort();
2131 *pdata = data;
2132 return res;
2135 static MemTxResult watch_mem_write(void *opaque, hwaddr addr,
2136 uint64_t val, unsigned size,
2137 MemTxAttrs attrs)
2139 MemTxResult res;
2140 int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
2141 AddressSpace *as = current_cpu->cpu_ases[asidx].as;
2143 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE);
2144 switch (size) {
2145 case 1:
2146 address_space_stb(as, addr, val, attrs, &res);
2147 break;
2148 case 2:
2149 address_space_stw(as, addr, val, attrs, &res);
2150 break;
2151 case 4:
2152 address_space_stl(as, addr, val, attrs, &res);
2153 break;
2154 default: abort();
2156 return res;
2159 static const MemoryRegionOps watch_mem_ops = {
2160 .read_with_attrs = watch_mem_read,
2161 .write_with_attrs = watch_mem_write,
2162 .endianness = DEVICE_NATIVE_ENDIAN,
2165 static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
2166 unsigned len, MemTxAttrs attrs)
2168 subpage_t *subpage = opaque;
2169 uint8_t buf[8];
2170 MemTxResult res;
2172 #if defined(DEBUG_SUBPAGE)
2173 printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
2174 subpage, len, addr);
2175 #endif
2176 res = address_space_read(subpage->as, addr + subpage->base,
2177 attrs, buf, len);
2178 if (res) {
2179 return res;
2181 switch (len) {
2182 case 1:
2183 *data = ldub_p(buf);
2184 return MEMTX_OK;
2185 case 2:
2186 *data = lduw_p(buf);
2187 return MEMTX_OK;
2188 case 4:
2189 *data = ldl_p(buf);
2190 return MEMTX_OK;
2191 case 8:
2192 *data = ldq_p(buf);
2193 return MEMTX_OK;
2194 default:
2195 abort();
2199 static MemTxResult subpage_write(void *opaque, hwaddr addr,
2200 uint64_t value, unsigned len, MemTxAttrs attrs)
2202 subpage_t *subpage = opaque;
2203 uint8_t buf[8];
2205 #if defined(DEBUG_SUBPAGE)
2206 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
2207 " value %"PRIx64"\n",
2208 __func__, subpage, len, addr, value);
2209 #endif
2210 switch (len) {
2211 case 1:
2212 stb_p(buf, value);
2213 break;
2214 case 2:
2215 stw_p(buf, value);
2216 break;
2217 case 4:
2218 stl_p(buf, value);
2219 break;
2220 case 8:
2221 stq_p(buf, value);
2222 break;
2223 default:
2224 abort();
2226 return address_space_write(subpage->as, addr + subpage->base,
2227 attrs, buf, len);
2230 static bool subpage_accepts(void *opaque, hwaddr addr,
2231 unsigned len, bool is_write)
2233 subpage_t *subpage = opaque;
2234 #if defined(DEBUG_SUBPAGE)
2235 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
2236 __func__, subpage, is_write ? 'w' : 'r', len, addr);
2237 #endif
2239 return address_space_access_valid(subpage->as, addr + subpage->base,
2240 len, is_write);
2243 static const MemoryRegionOps subpage_ops = {
2244 .read_with_attrs = subpage_read,
2245 .write_with_attrs = subpage_write,
2246 .impl.min_access_size = 1,
2247 .impl.max_access_size = 8,
2248 .valid.min_access_size = 1,
2249 .valid.max_access_size = 8,
2250 .valid.accepts = subpage_accepts,
2251 .endianness = DEVICE_NATIVE_ENDIAN,
2254 static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
2255 uint16_t section)
2257 int idx, eidx;
2259 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2260 return -1;
2261 idx = SUBPAGE_IDX(start);
2262 eidx = SUBPAGE_IDX(end);
2263 #if defined(DEBUG_SUBPAGE)
2264 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
2265 __func__, mmio, start, end, idx, eidx, section);
2266 #endif
2267 for (; idx <= eidx; idx++) {
2268 mmio->sub_section[idx] = section;
2271 return 0;
2274 static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
2276 subpage_t *mmio;
2278 mmio = g_malloc0(sizeof(subpage_t));
2280 mmio->as = as;
2281 mmio->base = base;
2282 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
2283 NULL, TARGET_PAGE_SIZE);
2284 mmio->iomem.subpage = true;
2285 #if defined(DEBUG_SUBPAGE)
2286 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
2287 mmio, base, TARGET_PAGE_SIZE);
2288 #endif
2289 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
2291 return mmio;
2294 static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
2295 MemoryRegion *mr)
2297 assert(as);
2298 MemoryRegionSection section = {
2299 .address_space = as,
2300 .mr = mr,
2301 .offset_within_address_space = 0,
2302 .offset_within_region = 0,
2303 .size = int128_2_64(),
2306 return phys_section_add(map, &section);
2309 MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index, MemTxAttrs attrs)
2311 int asidx = cpu_asidx_from_attrs(cpu, attrs);
2312 CPUAddressSpace *cpuas = &cpu->cpu_ases[asidx];
2313 AddressSpaceDispatch *d = atomic_rcu_read(&cpuas->memory_dispatch);
2314 MemoryRegionSection *sections = d->map.sections;
2316 return sections[index & ~TARGET_PAGE_MASK].mr;
2319 static void io_mem_init(void)
2321 memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
2322 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
2323 NULL, UINT64_MAX);
2324 memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
2325 NULL, UINT64_MAX);
2326 memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
2327 NULL, UINT64_MAX);
2330 static void mem_begin(MemoryListener *listener)
2332 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
2333 AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
2334 uint16_t n;
2336 n = dummy_section(&d->map, as, &io_mem_unassigned);
2337 assert(n == PHYS_SECTION_UNASSIGNED);
2338 n = dummy_section(&d->map, as, &io_mem_notdirty);
2339 assert(n == PHYS_SECTION_NOTDIRTY);
2340 n = dummy_section(&d->map, as, &io_mem_rom);
2341 assert(n == PHYS_SECTION_ROM);
2342 n = dummy_section(&d->map, as, &io_mem_watch);
2343 assert(n == PHYS_SECTION_WATCH);
2345 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
2346 d->as = as;
2347 as->next_dispatch = d;
2350 static void address_space_dispatch_free(AddressSpaceDispatch *d)
2352 phys_sections_free(&d->map);
2353 g_free(d);
2356 static void mem_commit(MemoryListener *listener)
2358 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
2359 AddressSpaceDispatch *cur = as->dispatch;
2360 AddressSpaceDispatch *next = as->next_dispatch;
2362 phys_page_compact_all(next, next->map.nodes_nb);
2364 atomic_rcu_set(&as->dispatch, next);
2365 if (cur) {
2366 call_rcu(cur, address_space_dispatch_free, rcu);
2370 static void tcg_commit(MemoryListener *listener)
2372 CPUAddressSpace *cpuas;
2373 AddressSpaceDispatch *d;
2375 /* Since each CPU stores RAM addresses in its TLB cache, we must
2376 reset the modified entries. */
2377 cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener);
2378 cpu_reloading_memory_map();
2379 /* The CPU and TLB are protected by the iothread lock.
2380 * We reload the dispatch pointer now because cpu_reloading_memory_map()
2381 * may have split the RCU critical section.
2382 */
2383 d = atomic_rcu_read(&cpuas->as->dispatch);
2384 cpuas->memory_dispatch = d;
2385 tlb_flush(cpuas->cpu, 1);
2388 void address_space_init_dispatch(AddressSpace *as)
2390 as->dispatch = NULL;
2391 as->dispatch_listener = (MemoryListener) {
2392 .begin = mem_begin,
2393 .commit = mem_commit,
2394 .region_add = mem_add,
2395 .region_nop = mem_add,
2396 .priority = 0,
2398 memory_listener_register(&as->dispatch_listener, as);
2401 void address_space_unregister(AddressSpace *as)
2403 memory_listener_unregister(&as->dispatch_listener);
2406 void address_space_destroy_dispatch(AddressSpace *as)
2408 AddressSpaceDispatch *d = as->dispatch;
2410 atomic_rcu_set(&as->dispatch, NULL);
2411 if (d) {
2412 call_rcu(d, address_space_dispatch_free, rcu);
2416 static void memory_map_init(void)
2418 system_memory = g_malloc(sizeof(*system_memory));
2420 memory_region_init(system_memory, NULL, "system", UINT64_MAX);
2421 address_space_init(&address_space_memory, system_memory, "memory");
2423 system_io = g_malloc(sizeof(*system_io));
2424 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
2425 65536);
2426 address_space_init(&address_space_io, system_io, "I/O");
2429 MemoryRegion *get_system_memory(void)
2431 return system_memory;
2434 MemoryRegion *get_system_io(void)
2436 return system_io;
2439 #endif /* !defined(CONFIG_USER_ONLY) */
2441 /* physical memory access (slow version, mainly for debug) */
2442 #if defined(CONFIG_USER_ONLY)
2443 int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
2444 uint8_t *buf, int len, int is_write)
2446 int l, flags;
2447 target_ulong page;
2448 void * p;
2450 while (len > 0) {
2451 page = addr & TARGET_PAGE_MASK;
2452 l = (page + TARGET_PAGE_SIZE) - addr;
2453 if (l > len)
2454 l = len;
2455 flags = page_get_flags(page);
2456 if (!(flags & PAGE_VALID))
2457 return -1;
2458 if (is_write) {
2459 if (!(flags & PAGE_WRITE))
2460 return -1;
2461 /* XXX: this code should not depend on lock_user */
2462 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
2463 return -1;
2464 memcpy(p, buf, l);
2465 unlock_user(p, addr, l);
2466 } else {
2467 if (!(flags & PAGE_READ))
2468 return -1;
2469 /* XXX: this code should not depend on lock_user */
2470 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
2471 return -1;
2472 memcpy(buf, p, l);
2473 unlock_user(p, addr, 0);
2475 len -= l;
2476 buf += l;
2477 addr += l;
2479 return 0;
2482 #else
2484 static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
2485 hwaddr length)
2487 uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
2488 addr += memory_region_get_ram_addr(mr);
2490 /* No early return if dirty_log_mask is or becomes 0, because
2491 * cpu_physical_memory_set_dirty_range will still call
2492 * xen_modified_memory.
2493 */
2494 if (dirty_log_mask) {
2495 dirty_log_mask =
2496 cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
2498 if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
2499 tb_invalidate_phys_range(addr, addr + length);
2500 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
2502 cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
2505 static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
2507 unsigned access_size_max = mr->ops->valid.max_access_size;
2509 /* Regions are assumed to support 1-4 byte accesses unless
2510 otherwise specified. */
2511 if (access_size_max == 0) {
2512 access_size_max = 4;
2515 /* Bound the maximum access by the alignment of the address. */
2516 if (!mr->ops->impl.unaligned) {
2517 unsigned align_size_max = addr & -addr;
2518 if (align_size_max != 0 && align_size_max < access_size_max) {
2519 access_size_max = align_size_max;
2523 /* Don't attempt accesses larger than the maximum. */
2524 if (l > access_size_max) {
2525 l = access_size_max;
2527 l = pow2floor(l);
2529 return l;
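/*
 * Editor's note (illustrative, not in the original file): with
 * valid.max_access_size == 4, a request of l == 8 at addr == 0x1006 is
 * first clamped to 4, then to the alignment bound addr & -addr == 2, and
 * pow2floor(2) == 2 bytes are finally issued; the caller's loop picks up
 * the remaining bytes with further translations.
 */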
2532 static bool prepare_mmio_access(MemoryRegion *mr)
2534 bool unlocked = !qemu_mutex_iothread_locked();
2535 bool release_lock = false;
2537 if (unlocked && mr->global_locking) {
2538 qemu_mutex_lock_iothread();
2539 unlocked = false;
2540 release_lock = true;
2542 if (mr->flush_coalesced_mmio) {
2543 if (unlocked) {
2544 qemu_mutex_lock_iothread();
2546 qemu_flush_coalesced_mmio_buffer();
2547 if (unlocked) {
2548 qemu_mutex_unlock_iothread();
2552 return release_lock;
2555 /* Called within RCU critical section. */
2556 static MemTxResult address_space_write_continue(AddressSpace *as, hwaddr addr,
2557 MemTxAttrs attrs,
2558 const uint8_t *buf,
2559 int len, hwaddr addr1,
2560 hwaddr l, MemoryRegion *mr)
2562 uint8_t *ptr;
2563 uint64_t val;
2564 MemTxResult result = MEMTX_OK;
2565 bool release_lock = false;
2567 for (;;) {
2568 if (!memory_access_is_direct(mr, true)) {
2569 release_lock |= prepare_mmio_access(mr);
2570 l = memory_access_size(mr, l, addr1);
2571 /* XXX: could force current_cpu to NULL to avoid
2572 potential bugs */
2573 switch (l) {
2574 case 8:
2575 /* 64 bit write access */
2576 val = ldq_p(buf);
2577 result |= memory_region_dispatch_write(mr, addr1, val, 8,
2578 attrs);
2579 break;
2580 case 4:
2581 /* 32 bit write access */
2582 val = ldl_p(buf);
2583 result |= memory_region_dispatch_write(mr, addr1, val, 4,
2584 attrs);
2585 break;
2586 case 2:
2587 /* 16 bit write access */
2588 val = lduw_p(buf);
2589 result |= memory_region_dispatch_write(mr, addr1, val, 2,
2590 attrs);
2591 break;
2592 case 1:
2593 /* 8 bit write access */
2594 val = ldub_p(buf);
2595 result |= memory_region_dispatch_write(mr, addr1, val, 1,
2596 attrs);
2597 break;
2598 default:
2599 abort();
2601 } else {
2602 /* RAM case */
2603 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
2604 memcpy(ptr, buf, l);
2605 invalidate_and_set_dirty(mr, addr1, l);
2608 if (release_lock) {
2609 qemu_mutex_unlock_iothread();
2610 release_lock = false;
2613 len -= l;
2614 buf += l;
2615 addr += l;
2617 if (!len) {
2618 break;
2621 l = len;
2622 mr = address_space_translate(as, addr, &addr1, &l, true);
2625 return result;
2628 MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2629 const uint8_t *buf, int len)
2631 hwaddr l;
2632 hwaddr addr1;
2633 MemoryRegion *mr;
2634 MemTxResult result = MEMTX_OK;
2636 if (len > 0) {
2637 rcu_read_lock();
2638 l = len;
2639 mr = address_space_translate(as, addr, &addr1, &l, true);
2640 result = address_space_write_continue(as, addr, attrs, buf, len,
2641 addr1, l, mr);
2642 rcu_read_unlock();
2645 return result;
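/*
 * Editor's note: illustrative usage sketch, not part of the original file
 * and kept under "#if 0".  The guest-physical address 0x40000000 and the
 * helper name example_write_guest are arbitrary.
 */
#if 0
static void example_write_guest(AddressSpace *as)
{
    uint8_t buf[4] = { 0xde, 0xad, 0xbe, 0xef };
    MemTxResult res;

    res = address_space_write(as, 0x40000000, MEMTXATTRS_UNSPECIFIED,
                              buf, sizeof(buf));
    if (res != MEMTX_OK) {
        /* at least part of the write hit an unassigned or erroring region */
    }
}
#endif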
2648 /* Called within RCU critical section. */
2649 MemTxResult address_space_read_continue(AddressSpace *as, hwaddr addr,
2650 MemTxAttrs attrs, uint8_t *buf,
2651 int len, hwaddr addr1, hwaddr l,
2652 MemoryRegion *mr)
2654 uint8_t *ptr;
2655 uint64_t val;
2656 MemTxResult result = MEMTX_OK;
2657 bool release_lock = false;
2659 for (;;) {
2660 if (!memory_access_is_direct(mr, false)) {
2661 /* I/O case */
2662 release_lock |= prepare_mmio_access(mr);
2663 l = memory_access_size(mr, l, addr1);
2664 switch (l) {
2665 case 8:
2666 /* 64 bit read access */
2667 result |= memory_region_dispatch_read(mr, addr1, &val, 8,
2668 attrs);
2669 stq_p(buf, val);
2670 break;
2671 case 4:
2672 /* 32 bit read access */
2673 result |= memory_region_dispatch_read(mr, addr1, &val, 4,
2674 attrs);
2675 stl_p(buf, val);
2676 break;
2677 case 2:
2678 /* 16 bit read access */
2679 result |= memory_region_dispatch_read(mr, addr1, &val, 2,
2680 attrs);
2681 stw_p(buf, val);
2682 break;
2683 case 1:
2684 /* 8 bit read access */
2685 result |= memory_region_dispatch_read(mr, addr1, &val, 1,
2686 attrs);
2687 stb_p(buf, val);
2688 break;
2689 default:
2690 abort();
2692 } else {
2693 /* RAM case */
2694 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
2695 memcpy(buf, ptr, l);
2698 if (release_lock) {
2699 qemu_mutex_unlock_iothread();
2700 release_lock = false;
2703 len -= l;
2704 buf += l;
2705 addr += l;
2707 if (!len) {
2708 break;
2711 l = len;
2712 mr = address_space_translate(as, addr, &addr1, &l, false);
2715 return result;
2718 MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
2719 MemTxAttrs attrs, uint8_t *buf, int len)
2721 hwaddr l;
2722 hwaddr addr1;
2723 MemoryRegion *mr;
2724 MemTxResult result = MEMTX_OK;
2726 if (len > 0) {
2727 rcu_read_lock();
2728 l = len;
2729 mr = address_space_translate(as, addr, &addr1, &l, false);
2730 result = address_space_read_continue(as, addr, attrs, buf, len,
2731 addr1, l, mr);
2732 rcu_read_unlock();
2735 return result;
2738 MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2739 uint8_t *buf, int len, bool is_write)
2741 if (is_write) {
2742 return address_space_write(as, addr, attrs, (uint8_t *)buf, len);
2743 } else {
2744 return address_space_read(as, addr, attrs, (uint8_t *)buf, len);
2748 void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
2749 int len, int is_write)
2751 address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
2752 buf, len, is_write);
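/*
 * Editor's note: illustrative sketch (not in the original file, compiled
 * out with "#if 0").  The legacy helper above always targets
 * address_space_memory with unspecified attributes; example_read_phys is
 * a hypothetical name.
 */
#if 0
static void example_read_phys(hwaddr gpa, uint8_t *buf, int len)
{
    cpu_physical_memory_rw(gpa, buf, len, 0 /* is_write == false */);
}
#endif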
2755 enum write_rom_type {
2756 WRITE_DATA,
2757 FLUSH_CACHE,
2760 static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
2761 hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
2763 hwaddr l;
2764 uint8_t *ptr;
2765 hwaddr addr1;
2766 MemoryRegion *mr;
2768 rcu_read_lock();
2769 while (len > 0) {
2770 l = len;
2771 mr = address_space_translate(as, addr, &addr1, &l, true);
2773 if (!(memory_region_is_ram(mr) ||
2774 memory_region_is_romd(mr))) {
2775 l = memory_access_size(mr, l, addr1);
2776 } else {
2777 /* ROM/RAM case */
2778 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
2779 switch (type) {
2780 case WRITE_DATA:
2781 memcpy(ptr, buf, l);
2782 invalidate_and_set_dirty(mr, addr1, l);
2783 break;
2784 case FLUSH_CACHE:
2785 flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
2786 break;
2789 len -= l;
2790 buf += l;
2791 addr += l;
2793 rcu_read_unlock();
2796 /* Used for ROM loading: can write to both RAM and ROM. */
2797 void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
2798 const uint8_t *buf, int len)
2800 cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
2803 void cpu_flush_icache_range(hwaddr start, int len)
2805 /*
2806 * This function should do the same thing as an icache flush that was
2807 * triggered from within the guest. For TCG we are always cache coherent,
2808 * so there is no need to flush anything. For KVM / Xen we need to flush
2809 * the host's instruction cache at least.
2810 */
2811 if (tcg_enabled()) {
2812 return;
2815 cpu_physical_memory_write_rom_internal(&address_space_memory,
2816 start, NULL, len, FLUSH_CACHE);
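/*
 * Editor's note: illustrative sketch, not part of the original file
 * (under "#if 0").  It pairs the two helpers above the way a loader
 * might: copy a code blob into guest memory, then make the host
 * instruction cache coherent for the KVM/Xen case.  example_load_code
 * is a hypothetical name.
 */
#if 0
static void example_load_code(AddressSpace *as, hwaddr dst,
                              const uint8_t *code, int size)
{
    cpu_physical_memory_write_rom(as, dst, code, size);
    cpu_flush_icache_range(dst, size);
}
#endif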
2819 typedef struct {
2820 MemoryRegion *mr;
2821 void *buffer;
2822 hwaddr addr;
2823 hwaddr len;
2824 bool in_use;
2825 } BounceBuffer;
2827 static BounceBuffer bounce;
2829 typedef struct MapClient {
2830 QEMUBH *bh;
2831 QLIST_ENTRY(MapClient) link;
2832 } MapClient;
2834 QemuMutex map_client_list_lock;
2835 static QLIST_HEAD(map_client_list, MapClient) map_client_list
2836 = QLIST_HEAD_INITIALIZER(map_client_list);
2838 static void cpu_unregister_map_client_do(MapClient *client)
2840 QLIST_REMOVE(client, link);
2841 g_free(client);
2844 static void cpu_notify_map_clients_locked(void)
2846 MapClient *client;
2848 while (!QLIST_EMPTY(&map_client_list)) {
2849 client = QLIST_FIRST(&map_client_list);
2850 qemu_bh_schedule(client->bh);
2851 cpu_unregister_map_client_do(client);
2855 void cpu_register_map_client(QEMUBH *bh)
2857 MapClient *client = g_malloc(sizeof(*client));
2859 qemu_mutex_lock(&map_client_list_lock);
2860 client->bh = bh;
2861 QLIST_INSERT_HEAD(&map_client_list, client, link);
2862 if (!atomic_read(&bounce.in_use)) {
2863 cpu_notify_map_clients_locked();
2865 qemu_mutex_unlock(&map_client_list_lock);
2868 void cpu_exec_init_all(void)
2870 qemu_mutex_init(&ram_list.mutex);
2871 io_mem_init();
2872 memory_map_init();
2873 qemu_mutex_init(&map_client_list_lock);
2876 void cpu_unregister_map_client(QEMUBH *bh)
2878 MapClient *client;
2880 qemu_mutex_lock(&map_client_list_lock);
2881 QLIST_FOREACH(client, &map_client_list, link) {
2882 if (client->bh == bh) {
2883 cpu_unregister_map_client_do(client);
2884 break;
2887 qemu_mutex_unlock(&map_client_list_lock);
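/*
 * Editor's note: illustrative sketch, not in the original file and kept
 * under "#if 0".  It shows the retry pattern the map-client list exists
 * for, assuming qemu_bh_new() from the main-loop API; retry_dma and
 * example_wait_for_map are hypothetical names.
 */
#if 0
static void retry_dma(void *opaque)
{
    /* re-issue the address_space_map() that previously returned NULL */
}

static void example_wait_for_map(void)
{
    QEMUBH *bh = qemu_bh_new(retry_dma, NULL);

    /* The bottom half is scheduled once the bounce buffer is free again. */
    cpu_register_map_client(bh);
}
#endif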
2890 static void cpu_notify_map_clients(void)
2892 qemu_mutex_lock(&map_client_list_lock);
2893 cpu_notify_map_clients_locked();
2894 qemu_mutex_unlock(&map_client_list_lock);
2897 bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2899 MemoryRegion *mr;
2900 hwaddr l, xlat;
2902 rcu_read_lock();
2903 while (len > 0) {
2904 l = len;
2905 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2906 if (!memory_access_is_direct(mr, is_write)) {
2907 l = memory_access_size(mr, l, addr);
2908 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
2909 return false;
2913 len -= l;
2914 addr += l;
2916 rcu_read_unlock();
2917 return true;
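/*
 * Editor's note: illustrative sketch, not in the original file ("#if 0").
 * It combines the validity probe above with address_space_write();
 * example_checked_write is a hypothetical name.
 */
#if 0
static void example_checked_write(AddressSpace *as, hwaddr addr,
                                  const uint8_t *buf, int len)
{
    if (!address_space_access_valid(as, addr, len, true /* is_write */)) {
        return; /* part of the range cannot be written */
    }
    address_space_write(as, addr, MEMTXATTRS_UNSPECIFIED, buf, len);
}
#endif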
2920 /* Map a physical memory region into a host virtual address.
2921 * May map a subset of the requested range, given by and returned in *plen.
2922 * May return NULL if resources needed to perform the mapping are exhausted.
2923 * Use only for reads OR writes - not for read-modify-write operations.
2924 * Use cpu_register_map_client() to know when retrying the map operation is
2925 * likely to succeed.
2926 */
2927 void *address_space_map(AddressSpace *as,
2928 hwaddr addr,
2929 hwaddr *plen,
2930 bool is_write)
2932 hwaddr len = *plen;
2933 hwaddr done = 0;
2934 hwaddr l, xlat, base;
2935 MemoryRegion *mr, *this_mr;
2936 void *ptr;
2938 if (len == 0) {
2939 return NULL;
2942 l = len;
2943 rcu_read_lock();
2944 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2946 if (!memory_access_is_direct(mr, is_write)) {
2947 if (atomic_xchg(&bounce.in_use, true)) {
2948 rcu_read_unlock();
2949 return NULL;
2951 /* Avoid unbounded allocations */
2952 l = MIN(l, TARGET_PAGE_SIZE);
2953 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
2954 bounce.addr = addr;
2955 bounce.len = l;
2957 memory_region_ref(mr);
2958 bounce.mr = mr;
2959 if (!is_write) {
2960 address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
2961 bounce.buffer, l);
2964 rcu_read_unlock();
2965 *plen = l;
2966 return bounce.buffer;
2969 base = xlat;
2971 for (;;) {
2972 len -= l;
2973 addr += l;
2974 done += l;
2975 if (len == 0) {
2976 break;
2979 l = len;
2980 this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
2981 if (this_mr != mr || xlat != base + done) {
2982 break;
2986 memory_region_ref(mr);
2987 *plen = done;
2988 ptr = qemu_ram_ptr_length(mr->ram_block, base, plen);
2989 rcu_read_unlock();
2991 return ptr;
2994 /* Unmaps a memory region previously mapped by address_space_map().
2995 * Will also mark the memory as dirty if is_write == 1. access_len gives
2996 * the amount of memory that was actually read or written by the caller.
2997 */
2998 void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2999 int is_write, hwaddr access_len)
3001 if (buffer != bounce.buffer) {
3002 MemoryRegion *mr;
3003 ram_addr_t addr1;
3005 mr = memory_region_from_host(buffer, &addr1);
3006 assert(mr != NULL);
3007 if (is_write) {
3008 invalidate_and_set_dirty(mr, addr1, access_len);
3010 if (xen_enabled()) {
3011 xen_invalidate_map_cache_entry(buffer);
3013 memory_region_unref(mr);
3014 return;
3016 if (is_write) {
3017 address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
3018 bounce.buffer, access_len);
3020 qemu_vfree(bounce.buffer);
3021 bounce.buffer = NULL;
3022 memory_region_unref(bounce.mr);
3023 atomic_mb_set(&bounce.in_use, false);
3024 cpu_notify_map_clients();
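/*
 * Editor's note: illustrative sketch, not part of the original file and
 * kept under "#if 0".  It follows the contract documented above: map for
 * writing only, cope with a shortened or failed mapping, and report the
 * length actually touched to address_space_unmap(); example_fill_guest
 * is a hypothetical name.
 */
#if 0
static void example_fill_guest(AddressSpace *as, hwaddr addr, hwaddr size)
{
    hwaddr plen = size;
    void *host = address_space_map(as, addr, &plen, true /* is_write */);

    if (!host) {
        /* resources exhausted: register a map client and retry later */
        return;
    }
    memset(host, 0, plen);   /* plen may have been reduced below size */
    address_space_unmap(as, host, plen, true /* is_write */, plen);
}
#endif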
3027 void *cpu_physical_memory_map(hwaddr addr,
3028 hwaddr *plen,
3029 int is_write)
3031 return address_space_map(&address_space_memory, addr, plen, is_write);
3034 void cpu_physical_memory_unmap(void *buffer, hwaddr len,
3035 int is_write, hwaddr access_len)
3037 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
3040 /* warning: addr must be aligned */
3041 static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
3042 MemTxAttrs attrs,
3043 MemTxResult *result,
3044 enum device_endian endian)
3046 uint8_t *ptr;
3047 uint64_t val;
3048 MemoryRegion *mr;
3049 hwaddr l = 4;
3050 hwaddr addr1;
3051 MemTxResult r;
3052 bool release_lock = false;
3054 rcu_read_lock();
3055 mr = address_space_translate(as, addr, &addr1, &l, false);
3056 if (l < 4 || !memory_access_is_direct(mr, false)) {
3057 release_lock |= prepare_mmio_access(mr);
3059 /* I/O case */
3060 r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
3061 #if defined(TARGET_WORDS_BIGENDIAN)
3062 if (endian == DEVICE_LITTLE_ENDIAN) {
3063 val = bswap32(val);
3065 #else
3066 if (endian == DEVICE_BIG_ENDIAN) {
3067 val = bswap32(val);
3069 #endif
3070 } else {
3071 /* RAM case */
3072 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
3073 switch (endian) {
3074 case DEVICE_LITTLE_ENDIAN:
3075 val = ldl_le_p(ptr);
3076 break;
3077 case DEVICE_BIG_ENDIAN:
3078 val = ldl_be_p(ptr);
3079 break;
3080 default:
3081 val = ldl_p(ptr);
3082 break;
3084 r = MEMTX_OK;
3086 if (result) {
3087 *result = r;
3089 if (release_lock) {
3090 qemu_mutex_unlock_iothread();
3092 rcu_read_unlock();
3093 return val;
3096 uint32_t address_space_ldl(AddressSpace *as, hwaddr addr,
3097 MemTxAttrs attrs, MemTxResult *result)
3099 return address_space_ldl_internal(as, addr, attrs, result,
3100 DEVICE_NATIVE_ENDIAN);
3103 uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
3104 MemTxAttrs attrs, MemTxResult *result)
3106 return address_space_ldl_internal(as, addr, attrs, result,
3107 DEVICE_LITTLE_ENDIAN);
3110 uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
3111 MemTxAttrs attrs, MemTxResult *result)
3113 return address_space_ldl_internal(as, addr, attrs, result,
3114 DEVICE_BIG_ENDIAN);
3117 uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
3119 return address_space_ldl(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
3122 uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
3124 return address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
3127 uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
3129 return address_space_ldl_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
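/*
 * Editor's note: illustrative sketch, not in the original file ("#if 0").
 * It reads an explicitly little-endian 32-bit value and checks the
 * transaction result instead of using the NULL-result wrappers above;
 * example_read_reg is a hypothetical name.
 */
#if 0
static uint32_t example_read_reg(AddressSpace *as, hwaddr addr)
{
    MemTxResult res;
    uint32_t val = address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED,
                                        &res);

    return res == MEMTX_OK ? val : 0;
}
#endif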
3132 /* warning: addr must be aligned */
3133 static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
3134 MemTxAttrs attrs,
3135 MemTxResult *result,
3136 enum device_endian endian)
3138 uint8_t *ptr;
3139 uint64_t val;
3140 MemoryRegion *mr;
3141 hwaddr l = 8;
3142 hwaddr addr1;
3143 MemTxResult r;
3144 bool release_lock = false;
3146 rcu_read_lock();
3147 mr = address_space_translate(as, addr, &addr1, &l,
3148 false);
3149 if (l < 8 || !memory_access_is_direct(mr, false)) {
3150 release_lock |= prepare_mmio_access(mr);
3152 /* I/O case */
3153 r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
3154 #if defined(TARGET_WORDS_BIGENDIAN)
3155 if (endian == DEVICE_LITTLE_ENDIAN) {
3156 val = bswap64(val);
3158 #else
3159 if (endian == DEVICE_BIG_ENDIAN) {
3160 val = bswap64(val);
3162 #endif
3163 } else {
3164 /* RAM case */
3165 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
3166 switch (endian) {
3167 case DEVICE_LITTLE_ENDIAN:
3168 val = ldq_le_p(ptr);
3169 break;
3170 case DEVICE_BIG_ENDIAN:
3171 val = ldq_be_p(ptr);
3172 break;
3173 default:
3174 val = ldq_p(ptr);
3175 break;
3177 r = MEMTX_OK;
3179 if (result) {
3180 *result = r;
3182 if (release_lock) {
3183 qemu_mutex_unlock_iothread();
3185 rcu_read_unlock();
3186 return val;
3189 uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
3190 MemTxAttrs attrs, MemTxResult *result)
3192 return address_space_ldq_internal(as, addr, attrs, result,
3193 DEVICE_NATIVE_ENDIAN);
3196 uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
3197 MemTxAttrs attrs, MemTxResult *result)
3199 return address_space_ldq_internal(as, addr, attrs, result,
3200 DEVICE_LITTLE_ENDIAN);
3203 uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
3204 MemTxAttrs attrs, MemTxResult *result)
3206 return address_space_ldq_internal(as, addr, attrs, result,
3207 DEVICE_BIG_ENDIAN);
3210 uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
3212 return address_space_ldq(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
3215 uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
3217 return address_space_ldq_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
3220 uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
3222 return address_space_ldq_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
3225 /* XXX: optimize */
3226 uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
3227 MemTxAttrs attrs, MemTxResult *result)
3229 uint8_t val;
3230 MemTxResult r;
3232 r = address_space_rw(as, addr, attrs, &val, 1, 0);
3233 if (result) {
3234 *result = r;
3236 return val;
3239 uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
3241 return address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
3244 /* warning: addr must be aligned */
3245 static inline uint32_t address_space_lduw_internal(AddressSpace *as,
3246 hwaddr addr,
3247 MemTxAttrs attrs,
3248 MemTxResult *result,
3249 enum device_endian endian)
3251 uint8_t *ptr;
3252 uint64_t val;
3253 MemoryRegion *mr;
3254 hwaddr l = 2;
3255 hwaddr addr1;
3256 MemTxResult r;
3257 bool release_lock = false;
3259 rcu_read_lock();
3260 mr = address_space_translate(as, addr, &addr1, &l,
3261 false);
3262 if (l < 2 || !memory_access_is_direct(mr, false)) {
3263 release_lock |= prepare_mmio_access(mr);
3265 /* I/O case */
3266 r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
3267 #if defined(TARGET_WORDS_BIGENDIAN)
3268 if (endian == DEVICE_LITTLE_ENDIAN) {
3269 val = bswap16(val);
3271 #else
3272 if (endian == DEVICE_BIG_ENDIAN) {
3273 val = bswap16(val);
3275 #endif
3276 } else {
3277 /* RAM case */
3278 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
3279 switch (endian) {
3280 case DEVICE_LITTLE_ENDIAN:
3281 val = lduw_le_p(ptr);
3282 break;
3283 case DEVICE_BIG_ENDIAN:
3284 val = lduw_be_p(ptr);
3285 break;
3286 default:
3287 val = lduw_p(ptr);
3288 break;
3290 r = MEMTX_OK;
3292 if (result) {
3293 *result = r;
3295 if (release_lock) {
3296 qemu_mutex_unlock_iothread();
3298 rcu_read_unlock();
3299 return val;
3302 uint32_t address_space_lduw(AddressSpace *as, hwaddr addr,
3303 MemTxAttrs attrs, MemTxResult *result)
3305 return address_space_lduw_internal(as, addr, attrs, result,
3306 DEVICE_NATIVE_ENDIAN);
3309 uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
3310 MemTxAttrs attrs, MemTxResult *result)
3312 return address_space_lduw_internal(as, addr, attrs, result,
3313 DEVICE_LITTLE_ENDIAN);
3316 uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
3317 MemTxAttrs attrs, MemTxResult *result)
3319 return address_space_lduw_internal(as, addr, attrs, result,
3320 DEVICE_BIG_ENDIAN);
3323 uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
3325 return address_space_lduw(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
3328 uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
3330 return address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
3333 uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
3335 return address_space_lduw_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
3338 /* warning: addr must be aligned. The RAM page is not marked as dirty
3339 and the code inside is not invalidated. This is useful when the dirty
3340 bits are used to track modified PTEs. */
3341 void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
3342 MemTxAttrs attrs, MemTxResult *result)
3344 uint8_t *ptr;
3345 MemoryRegion *mr;
3346 hwaddr l = 4;
3347 hwaddr addr1;
3348 MemTxResult r;
3349 uint8_t dirty_log_mask;
3350 bool release_lock = false;
3352 rcu_read_lock();
3353 mr = address_space_translate(as, addr, &addr1, &l,
3354 true);
3355 if (l < 4 || !memory_access_is_direct(mr, true)) {
3356 release_lock |= prepare_mmio_access(mr);
3358 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
3359 } else {
3360 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
3361 stl_p(ptr, val);
3363 dirty_log_mask = memory_region_get_dirty_log_mask(mr);
3364 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
3365 cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
3366 4, dirty_log_mask);
3367 r = MEMTX_OK;
3369 if (result) {
3370 *result = r;
3372 if (release_lock) {
3373 qemu_mutex_unlock_iothread();
3375 rcu_read_unlock();
3378 void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
3380 address_space_stl_notdirty(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
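/*
 * Editor's note: illustrative sketch, not in the original file ("#if 0").
 * It shows the intended use named in the comment above: updating a guest
 * PTE's bookkeeping bit during a software page-table walk without marking
 * the page dirty or invalidating translated code.  EXAMPLE_PTE_ACCESSED
 * and example_set_accessed are hypothetical names.
 */
#if 0
#define EXAMPLE_PTE_ACCESSED 0x20

static void example_set_accessed(AddressSpace *as, hwaddr pte_addr,
                                 uint32_t pte)
{
    stl_phys_notdirty(as, pte_addr, pte | EXAMPLE_PTE_ACCESSED);
}
#endif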
3383 /* warning: addr must be aligned */
3384 static inline void address_space_stl_internal(AddressSpace *as,
3385 hwaddr addr, uint32_t val,
3386 MemTxAttrs attrs,
3387 MemTxResult *result,
3388 enum device_endian endian)
3390 uint8_t *ptr;
3391 MemoryRegion *mr;
3392 hwaddr l = 4;
3393 hwaddr addr1;
3394 MemTxResult r;
3395 bool release_lock = false;
3397 rcu_read_lock();
3398 mr = address_space_translate(as, addr, &addr1, &l,
3399 true);
3400 if (l < 4 || !memory_access_is_direct(mr, true)) {
3401 release_lock |= prepare_mmio_access(mr);
3403 #if defined(TARGET_WORDS_BIGENDIAN)
3404 if (endian == DEVICE_LITTLE_ENDIAN) {
3405 val = bswap32(val);
3407 #else
3408 if (endian == DEVICE_BIG_ENDIAN) {
3409 val = bswap32(val);
3411 #endif
3412 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
3413 } else {
3414 /* RAM case */
3415 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
3416 switch (endian) {
3417 case DEVICE_LITTLE_ENDIAN:
3418 stl_le_p(ptr, val);
3419 break;
3420 case DEVICE_BIG_ENDIAN:
3421 stl_be_p(ptr, val);
3422 break;
3423 default:
3424 stl_p(ptr, val);
3425 break;
3427 invalidate_and_set_dirty(mr, addr1, 4);
3428 r = MEMTX_OK;
3430 if (result) {
3431 *result = r;
3433 if (release_lock) {
3434 qemu_mutex_unlock_iothread();
3436 rcu_read_unlock();
3439 void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
3440 MemTxAttrs attrs, MemTxResult *result)
3442 address_space_stl_internal(as, addr, val, attrs, result,
3443 DEVICE_NATIVE_ENDIAN);
3446 void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
3447 MemTxAttrs attrs, MemTxResult *result)
3449 address_space_stl_internal(as, addr, val, attrs, result,
3450 DEVICE_LITTLE_ENDIAN);
3453 void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
3454 MemTxAttrs attrs, MemTxResult *result)
3456 address_space_stl_internal(as, addr, val, attrs, result,
3457 DEVICE_BIG_ENDIAN);
3460 void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
3462 address_space_stl(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3465 void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
3467 address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3470 void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
3472 address_space_stl_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3475 /* XXX: optimize */
3476 void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
3477 MemTxAttrs attrs, MemTxResult *result)
3479 uint8_t v = val;
3480 MemTxResult r;
3482 r = address_space_rw(as, addr, attrs, &v, 1, 1);
3483 if (result) {
3484 *result = r;
3488 void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
3490 address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3493 /* warning: addr must be aligned */
3494 static inline void address_space_stw_internal(AddressSpace *as,
3495 hwaddr addr, uint32_t val,
3496 MemTxAttrs attrs,
3497 MemTxResult *result,
3498 enum device_endian endian)
3500 uint8_t *ptr;
3501 MemoryRegion *mr;
3502 hwaddr l = 2;
3503 hwaddr addr1;
3504 MemTxResult r;
3505 bool release_lock = false;
3507 rcu_read_lock();
3508 mr = address_space_translate(as, addr, &addr1, &l, true);
3509 if (l < 2 || !memory_access_is_direct(mr, true)) {
3510 release_lock |= prepare_mmio_access(mr);
3512 #if defined(TARGET_WORDS_BIGENDIAN)
3513 if (endian == DEVICE_LITTLE_ENDIAN) {
3514 val = bswap16(val);
3516 #else
3517 if (endian == DEVICE_BIG_ENDIAN) {
3518 val = bswap16(val);
3520 #endif
3521 r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
3522 } else {
3523 /* RAM case */
3524 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
3525 switch (endian) {
3526 case DEVICE_LITTLE_ENDIAN:
3527 stw_le_p(ptr, val);
3528 break;
3529 case DEVICE_BIG_ENDIAN:
3530 stw_be_p(ptr, val);
3531 break;
3532 default:
3533 stw_p(ptr, val);
3534 break;
3536 invalidate_and_set_dirty(mr, addr1, 2);
3537 r = MEMTX_OK;
3539 if (result) {
3540 *result = r;
3542 if (release_lock) {
3543 qemu_mutex_unlock_iothread();
3545 rcu_read_unlock();
3548 void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
3549 MemTxAttrs attrs, MemTxResult *result)
3551 address_space_stw_internal(as, addr, val, attrs, result,
3552 DEVICE_NATIVE_ENDIAN);
3555 void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
3556 MemTxAttrs attrs, MemTxResult *result)
3558 address_space_stw_internal(as, addr, val, attrs, result,
3559 DEVICE_LITTLE_ENDIAN);
3562 void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
3563 MemTxAttrs attrs, MemTxResult *result)
3565 address_space_stw_internal(as, addr, val, attrs, result,
3566 DEVICE_BIG_ENDIAN);
3569 void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
3571 address_space_stw(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3574 void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
3576 address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3579 void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
3581 address_space_stw_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3584 /* XXX: optimize */
3585 void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
3586 MemTxAttrs attrs, MemTxResult *result)
3588 MemTxResult r;
3589 val = tswap64(val);
3590 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3591 if (result) {
3592 *result = r;
3596 void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
3597 MemTxAttrs attrs, MemTxResult *result)
3599 MemTxResult r;
3600 val = cpu_to_le64(val);
3601 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3602 if (result) {
3603 *result = r;
3606 void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
3607 MemTxAttrs attrs, MemTxResult *result)
3609 MemTxResult r;
3610 val = cpu_to_be64(val);
3611 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3612 if (result) {
3613 *result = r;
3617 void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
3619 address_space_stq(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3622 void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
3624 address_space_stq_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3627 void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
3629 address_space_stq_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
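/*
 * Editor's note: illustrative sketch, not in the original file ("#if 0").
 * It stores a 64-bit little-endian value and propagates the transaction
 * result rather than discarding it; example_store_desc is a hypothetical
 * name.
 */
#if 0
static MemTxResult example_store_desc(AddressSpace *as, hwaddr addr,
                                      uint64_t desc)
{
    MemTxResult res;

    address_space_stq_le(as, addr, desc, MEMTXATTRS_UNSPECIFIED, &res);
    return res;
}
#endif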
3632 /* virtual memory access for debug (includes writing to ROM) */
3633 int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
3634 uint8_t *buf, int len, int is_write)
3636 int l;
3637 hwaddr phys_addr;
3638 target_ulong page;
3640 while (len > 0) {
3641 int asidx;
3642 MemTxAttrs attrs;
3644 page = addr & TARGET_PAGE_MASK;
3645 phys_addr = cpu_get_phys_page_attrs_debug(cpu, page, &attrs);
3646 asidx = cpu_asidx_from_attrs(cpu, attrs);
3647 /* if no physical page mapped, return an error */
3648 if (phys_addr == -1)
3649 return -1;
3650 l = (page + TARGET_PAGE_SIZE) - addr;
3651 if (l > len)
3652 l = len;
3653 phys_addr += (addr & ~TARGET_PAGE_MASK);
3654 if (is_write) {
3655 cpu_physical_memory_write_rom(cpu->cpu_ases[asidx].as,
3656 phys_addr, buf, l);
3657 } else {
3658 address_space_rw(cpu->cpu_ases[asidx].as, phys_addr,
3659 MEMTXATTRS_UNSPECIFIED,
3660 buf, l, 0);
3662 len -= l;
3663 buf += l;
3664 addr += l;
3666 return 0;
3669 /*
3670 * Allows code that needs to deal with migration bitmaps etc. to still be built
3671 * target-independent.
3672 */
3673 size_t qemu_target_page_bits(void)
3675 return TARGET_PAGE_BITS;
3678 #endif
3680 /*
3681 * A helper function for the _utterly broken_ virtio device model to find out if
3682 * it's running on a big endian machine. Don't do this at home kids!
3683 */
3684 bool target_words_bigendian(void);
3685 bool target_words_bigendian(void)
3687 #if defined(TARGET_WORDS_BIGENDIAN)
3688 return true;
3689 #else
3690 return false;
3691 #endif
3694 #ifndef CONFIG_USER_ONLY
3695 bool cpu_physical_memory_is_io(hwaddr phys_addr)
3697 MemoryRegion *mr;
3698 hwaddr l = 1;
3699 bool res;
3701 rcu_read_lock();
3702 mr = address_space_translate(&address_space_memory,
3703 phys_addr, &phys_addr, &l, false);
3705 res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
3706 rcu_read_unlock();
3707 return res;
3710 int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
3712 RAMBlock *block;
3713 int ret = 0;
3715 rcu_read_lock();
3716 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
3717 ret = func(block->idstr, block->host, block->offset,
3718 block->used_length, opaque);
3719 if (ret) {
3720 break;
3723 rcu_read_unlock();
3724 return ret;
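/*
 * Editor's note: illustrative sketch, not in the original file ("#if 0").
 * The callback's parameter list mirrors the call made inside
 * qemu_ram_foreach_block() above; example_dump_block and
 * example_dump_all_blocks are hypothetical names.
 */
#if 0
static int example_dump_block(const char *idstr, void *host_addr,
                              ram_addr_t offset, ram_addr_t length,
                              void *opaque)
{
    fprintf(stderr, "block %s: host %p offset 0x%llx length 0x%llx\n",
            idstr, host_addr,
            (unsigned long long)offset, (unsigned long long)length);
    return 0; /* returning non-zero stops the iteration early */
}

static void example_dump_all_blocks(void)
{
    qemu_ram_foreach_block(example_dump_block, NULL);
}
#endif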
3726 #endif