[qemu/ar7.git] / exec.c
blob 680268a1631fc19d67c1441c3060fa39f8a2b845
1 /*
2 * Virtual page mapping
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 #include "config.h"
20 #ifndef _WIN32
21 #include <sys/types.h>
22 #include <sys/mman.h>
23 #endif
25 #include "qemu-common.h"
26 #include "cpu.h"
27 #include "tcg.h"
28 #include "hw/hw.h"
29 #include "hw/qdev.h"
30 #include "qemu/osdep.h"
31 #include "sysemu/kvm.h"
32 #include "sysemu/sysemu.h"
33 #include "hw/xen/xen.h"
34 #include "qemu/timer.h"
35 #include "qemu/config-file.h"
36 #include "exec/memory.h"
37 #include "sysemu/dma.h"
38 #include "exec/address-spaces.h"
39 #if defined(CONFIG_USER_ONLY)
40 #include <qemu.h>
41 #else /* !CONFIG_USER_ONLY */
42 #include "sysemu/xen-mapcache.h"
43 #include "trace.h"
44 #endif
45 #include "exec/cpu-all.h"
47 #include "exec/cputlb.h"
48 #include "translate-all.h"
50 #include "exec/memory-internal.h"
51 #include "exec/ram_addr.h"
52 #include "qemu/cache-utils.h"
54 #include "qemu/range.h"
56 //#define DEBUG_SUBPAGE
58 #if !defined(CONFIG_USER_ONLY)
59 static bool in_migration;
61 RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };
63 static MemoryRegion *system_memory;
64 static MemoryRegion *system_io;
66 AddressSpace address_space_io;
67 AddressSpace address_space_memory;
69 MemoryRegion io_mem_rom, io_mem_notdirty;
70 static MemoryRegion io_mem_unassigned;
72 #endif
74 struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
75 /* current CPU in the current thread. It is only valid inside
76 cpu_exec() */
77 DEFINE_TLS(CPUState *, current_cpu);
78 /* 0 = Do not count executed instructions.
79 1 = Precise instruction counting.
80 2 = Adaptive rate instruction counting. */
81 int use_icount;
83 #if !defined(CONFIG_USER_ONLY)
85 typedef struct PhysPageEntry PhysPageEntry;
87 struct PhysPageEntry {
88     /* How many bits to skip to the next level (in units of L2_SIZE). 0 for a leaf. */
89 uint32_t skip : 6;
90 /* index into phys_sections (!skip) or phys_map_nodes (skip) */
91 uint32_t ptr : 26;
94 #define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)
96 /* Size of the L2 (and L3, etc) page tables. */
97 #define ADDR_SPACE_BITS 64
99 #define P_L2_BITS 9
100 #define P_L2_SIZE (1 << P_L2_BITS)
102 #define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
104 typedef PhysPageEntry Node[P_L2_SIZE];
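/*
 * Editor's note: a minimal illustrative sketch, not part of exec.c.  It shows
 * how a guest physical address is split into P_L2_BITS-wide indexes, one per
 * radix-tree level, once the in-page offset has been discarded.  The helper
 * name is hypothetical.
 */
static inline unsigned example_phys_map_index(hwaddr addr, int level)
{
    hwaddr page_index = addr >> TARGET_PAGE_BITS;

    return (page_index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1);
}
/* e.g. with 4 KiB pages, example_phys_map_index(0x12345000, 0) == 0x145 */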
106 typedef struct PhysPageMap {
107 unsigned sections_nb;
108 unsigned sections_nb_alloc;
109 unsigned nodes_nb;
110 unsigned nodes_nb_alloc;
111 Node *nodes;
112 MemoryRegionSection *sections;
113 } PhysPageMap;
115 struct AddressSpaceDispatch {
116 /* This is a multi-level map on the physical address space.
117 * The bottom level has pointers to MemoryRegionSections.
119 PhysPageEntry phys_map;
120 PhysPageMap map;
121 AddressSpace *as;
124 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
125 typedef struct subpage_t {
126 MemoryRegion iomem;
127 AddressSpace *as;
128 hwaddr base;
129 uint16_t sub_section[TARGET_PAGE_SIZE];
130 } subpage_t;
132 #define PHYS_SECTION_UNASSIGNED 0
133 #define PHYS_SECTION_NOTDIRTY 1
134 #define PHYS_SECTION_ROM 2
135 #define PHYS_SECTION_WATCH 3
137 static void io_mem_init(void);
138 static void memory_map_init(void);
139 static void tcg_commit(MemoryListener *listener);
141 static MemoryRegion io_mem_watch;
142 #endif
144 #if !defined(CONFIG_USER_ONLY)
146 static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
148 if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
149 map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
150 map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
151 map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
155 static uint32_t phys_map_node_alloc(PhysPageMap *map)
157 unsigned i;
158 uint32_t ret;
160 ret = map->nodes_nb++;
161 assert(ret != PHYS_MAP_NODE_NIL);
162 assert(ret != map->nodes_nb_alloc);
163 for (i = 0; i < P_L2_SIZE; ++i) {
164 map->nodes[ret][i].skip = 1;
165 map->nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
167 return ret;
170 static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
171 hwaddr *index, hwaddr *nb, uint16_t leaf,
172 int level)
174 PhysPageEntry *p;
175 int i;
176 hwaddr step = (hwaddr)1 << (level * P_L2_BITS);
178 if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
179 lp->ptr = phys_map_node_alloc(map);
180 p = map->nodes[lp->ptr];
181 if (level == 0) {
182 for (i = 0; i < P_L2_SIZE; i++) {
183 p[i].skip = 0;
184 p[i].ptr = PHYS_SECTION_UNASSIGNED;
187 } else {
188 p = map->nodes[lp->ptr];
190 lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];
192 while (*nb && lp < &p[P_L2_SIZE]) {
193 if ((*index & (step - 1)) == 0 && *nb >= step) {
194 lp->skip = 0;
195 lp->ptr = leaf;
196 *index += step;
197 *nb -= step;
198 } else {
199 phys_page_set_level(map, lp, index, nb, leaf, level - 1);
201 ++lp;
205 static void phys_page_set(AddressSpaceDispatch *d,
206 hwaddr index, hwaddr nb,
207 uint16_t leaf)
209 /* Wildly overreserve - it doesn't matter much. */
210 phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);
212 phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
215 /* Compact a non-leaf page entry. Simply detect that the entry has a single child,
216 * and update our entry so we can skip it and go directly to the destination.
218 static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
220 unsigned valid_ptr = P_L2_SIZE;
221 int valid = 0;
222 PhysPageEntry *p;
223 int i;
225 if (lp->ptr == PHYS_MAP_NODE_NIL) {
226 return;
229 p = nodes[lp->ptr];
230 for (i = 0; i < P_L2_SIZE; i++) {
231 if (p[i].ptr == PHYS_MAP_NODE_NIL) {
232 continue;
235 valid_ptr = i;
236 valid++;
237 if (p[i].skip) {
238 phys_page_compact(&p[i], nodes, compacted);
242 /* We can only compress if there's only one child. */
243 if (valid != 1) {
244 return;
247 assert(valid_ptr < P_L2_SIZE);
249 /* Don't compress if it won't fit in the # of bits we have. */
250 if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
251 return;
254 lp->ptr = p[valid_ptr].ptr;
255 if (!p[valid_ptr].skip) {
256 /* If our only child is a leaf, make this a leaf. */
257 /* By design, we should have made this node a leaf to begin with so we
258 * should never reach here.
259 * But since it's so simple to handle this, let's do it just in case we
260 * change this rule.
262 lp->skip = 0;
263 } else {
264 lp->skip += p[valid_ptr].skip;
268 static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
270 DECLARE_BITMAP(compacted, nodes_nb);
272 if (d->phys_map.skip) {
273 phys_page_compact(&d->phys_map, d->map.nodes, compacted);
277 static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
278 Node *nodes, MemoryRegionSection *sections)
280 PhysPageEntry *p;
281 hwaddr index = addr >> TARGET_PAGE_BITS;
282 int i;
284 for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
285 if (lp.ptr == PHYS_MAP_NODE_NIL) {
286 return &sections[PHYS_SECTION_UNASSIGNED];
288 p = nodes[lp.ptr];
289 lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
292 if (sections[lp.ptr].size.hi ||
293 range_covers_byte(sections[lp.ptr].offset_within_address_space,
294 sections[lp.ptr].size.lo, addr)) {
295 return &sections[lp.ptr];
296 } else {
297 return &sections[PHYS_SECTION_UNASSIGNED];
301 bool memory_region_is_unassigned(MemoryRegion *mr)
303 return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
304 && mr != &io_mem_watch;
307 static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
308 hwaddr addr,
309 bool resolve_subpage)
311 MemoryRegionSection *section;
312 subpage_t *subpage;
314 section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
315 if (resolve_subpage && section->mr->subpage) {
316 subpage = container_of(section->mr, subpage_t, iomem);
317 section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
319 return section;
322 static MemoryRegionSection *
323 address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
324 hwaddr *plen, bool resolve_subpage)
326 MemoryRegionSection *section;
327 Int128 diff;
329 section = address_space_lookup_region(d, addr, resolve_subpage);
330 /* Compute offset within MemoryRegionSection */
331 addr -= section->offset_within_address_space;
333 /* Compute offset within MemoryRegion */
334 *xlat = addr + section->offset_within_region;
336 diff = int128_sub(section->mr->size, int128_make64(addr));
337 *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
338 return section;
341 static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
343 if (memory_region_is_ram(mr)) {
344 return !(is_write && mr->readonly);
346 if (memory_region_is_romd(mr)) {
347 return !is_write;
350 return false;
353 MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
354 hwaddr *xlat, hwaddr *plen,
355 bool is_write)
357 IOMMUTLBEntry iotlb;
358 MemoryRegionSection *section;
359 MemoryRegion *mr;
360 hwaddr len = *plen;
362 for (;;) {
363 section = address_space_translate_internal(as->dispatch, addr, &addr, plen, true);
364 mr = section->mr;
366 if (!mr->iommu_ops) {
367 break;
370 iotlb = mr->iommu_ops->translate(mr, addr);
371 addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
372 | (addr & iotlb.addr_mask));
373 len = MIN(len, (addr | iotlb.addr_mask) - addr + 1);
374 if (!(iotlb.perm & (1 << is_write))) {
375 mr = &io_mem_unassigned;
376 break;
379 as = iotlb.target_as;
382 if (memory_access_is_direct(mr, is_write)) {
383 hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
384 len = MIN(page, len);
387 *plen = len;
388 *xlat = addr;
389 return mr;
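/*
 * Editor's note: hypothetical usage sketch, not part of exec.c.  A caller
 * passes in the length it wants, gets back the MemoryRegion plus an offset
 * into it, and can then choose between a direct host-memory access and the
 * MMIO slow path.  The helper name and the 4-byte length are assumptions.
 */
static void example_translate_and_peek(AddressSpace *as, hwaddr addr,
                                       uint8_t *buf)
{
    hwaddr xlat, len = 4;
    MemoryRegion *mr = address_space_translate(as, addr, &xlat, &len, false);

    if (len == 4 && memory_access_is_direct(mr, false)) {
        /* RAM or ROMD: read straight from the host mapping */
        memcpy(buf, qemu_get_ram_ptr(memory_region_get_ram_addr(mr) + xlat), 4);
    } else {
        /* MMIO, or the mapping was shortened: fall back to address_space_rw() */
    }
}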
392 MemoryRegionSection *
393 address_space_translate_for_iotlb(AddressSpace *as, hwaddr addr, hwaddr *xlat,
394 hwaddr *plen)
396 MemoryRegionSection *section;
397 section = address_space_translate_internal(as->dispatch, addr, xlat, plen, false);
399 assert(!section->mr->iommu_ops);
400 return section;
402 #endif
404 void cpu_exec_init_all(void)
406 #if !defined(CONFIG_USER_ONLY)
407 qemu_mutex_init(&ram_list.mutex);
408 memory_map_init();
409 io_mem_init();
410 #endif
413 #if !defined(CONFIG_USER_ONLY)
415 static int cpu_common_post_load(void *opaque, int version_id)
417 CPUState *cpu = opaque;
419 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
420 version_id is increased. */
421 cpu->interrupt_request &= ~0x01;
422 tlb_flush(cpu->env_ptr, 1);
424 return 0;
427 const VMStateDescription vmstate_cpu_common = {
428 .name = "cpu_common",
429 .version_id = 1,
430 .minimum_version_id = 1,
431 .minimum_version_id_old = 1,
432 .post_load = cpu_common_post_load,
433 .fields = (VMStateField []) {
434 VMSTATE_UINT32(halted, CPUState),
435 VMSTATE_UINT32(interrupt_request, CPUState),
436 VMSTATE_END_OF_LIST()
440 #endif
442 CPUState *qemu_get_cpu(int index)
444 CPUState *cpu;
446 CPU_FOREACH(cpu) {
447 if (cpu->cpu_index == index) {
448 return cpu;
452 return NULL;
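/*
 * Editor's note: trivial usage sketch, not part of exec.c: look up the boot
 * CPU by index.  The helper name is hypothetical.
 */
static bool example_have_cpu0(void)
{
    CPUState *cpu = qemu_get_cpu(0);

    return cpu != NULL && cpu->cpu_index == 0;
}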
455 #if !defined(CONFIG_USER_ONLY)
456 void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as)
458 /* We only support one address space per cpu at the moment. */
459 assert(cpu->as == as);
461 if (cpu->tcg_as_listener) {
462 memory_listener_unregister(cpu->tcg_as_listener);
463 } else {
464 cpu->tcg_as_listener = g_new0(MemoryListener, 1);
466 cpu->tcg_as_listener->commit = tcg_commit;
467 memory_listener_register(cpu->tcg_as_listener, as);
469 #endif
471 void cpu_exec_init(CPUArchState *env)
473 CPUState *cpu = ENV_GET_CPU(env);
474 CPUClass *cc = CPU_GET_CLASS(cpu);
475 CPUState *some_cpu;
476 int cpu_index;
478 #if defined(CONFIG_USER_ONLY)
479 cpu_list_lock();
480 #endif
481 cpu_index = 0;
482 CPU_FOREACH(some_cpu) {
483 cpu_index++;
485 cpu->cpu_index = cpu_index;
486 cpu->numa_node = 0;
487 QTAILQ_INIT(&env->breakpoints);
488 QTAILQ_INIT(&env->watchpoints);
489 #ifndef CONFIG_USER_ONLY
490 cpu->as = &address_space_memory;
491 cpu->thread_id = qemu_get_thread_id();
492 #endif
493 QTAILQ_INSERT_TAIL(&cpus, cpu, node);
494 #if defined(CONFIG_USER_ONLY)
495 cpu_list_unlock();
496 #endif
497 if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
498 vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
500 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
501 register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
502 cpu_save, cpu_load, env);
503 assert(cc->vmsd == NULL);
504 assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
505 #endif
506 if (cc->vmsd != NULL) {
507 vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
511 #if defined(TARGET_HAS_ICE)
512 #if defined(CONFIG_USER_ONLY)
513 static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
515 tb_invalidate_phys_page_range(pc, pc + 1, 0);
517 #else
518 static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
520 hwaddr phys = cpu_get_phys_page_debug(cpu, pc);
521 if (phys != -1) {
522 tb_invalidate_phys_addr(cpu->as,
523 phys | (pc & ~TARGET_PAGE_MASK));
526 #endif
527 #endif /* TARGET_HAS_ICE */
529 #if defined(CONFIG_USER_ONLY)
530 void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
535 int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
536 int flags, CPUWatchpoint **watchpoint)
538 return -ENOSYS;
540 #else
541 /* Add a watchpoint. */
542 int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
543 int flags, CPUWatchpoint **watchpoint)
545 target_ulong len_mask = ~(len - 1);
546 CPUWatchpoint *wp;
548 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
549 if ((len & (len - 1)) || (addr & ~len_mask) ||
550 len == 0 || len > TARGET_PAGE_SIZE) {
551 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
552 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
553 return -EINVAL;
555 wp = g_malloc(sizeof(*wp));
557 wp->vaddr = addr;
558 wp->len_mask = len_mask;
559 wp->flags = flags;
561 /* keep all GDB-injected watchpoints in front */
562 if (flags & BP_GDB)
563 QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
564 else
565 QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
567 tlb_flush_page(env, addr);
569 if (watchpoint)
570 *watchpoint = wp;
571 return 0;
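/*
 * Editor's note: hypothetical usage sketch, not part of exec.c.  Watchpoint
 * lengths must be powers of two no larger than a page, and the address must
 * be aligned to the length, as the checks above enforce.
 */
static int example_watch_word(CPUArchState *env, target_ulong addr)
{
    CPUWatchpoint *wp;

    /* trap writes to a naturally aligned 4-byte word */
    return cpu_watchpoint_insert(env, addr & ~(target_ulong)3, 4,
                                 BP_MEM_WRITE | BP_GDB, &wp);
}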
574 /* Remove a specific watchpoint. */
575 int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
576 int flags)
578 target_ulong len_mask = ~(len - 1);
579 CPUWatchpoint *wp;
581 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
582 if (addr == wp->vaddr && len_mask == wp->len_mask
583 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
584 cpu_watchpoint_remove_by_ref(env, wp);
585 return 0;
588 return -ENOENT;
591 /* Remove a specific watchpoint by reference. */
592 void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
594 QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
596 tlb_flush_page(env, watchpoint->vaddr);
598 g_free(watchpoint);
601 /* Remove all matching watchpoints. */
602 void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
604 CPUWatchpoint *wp, *next;
606 QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
607 if (wp->flags & mask)
608 cpu_watchpoint_remove_by_ref(env, wp);
611 #endif
613 /* Add a breakpoint. */
614 int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
615 CPUBreakpoint **breakpoint)
617 #if defined(TARGET_HAS_ICE)
618 CPUBreakpoint *bp;
620 bp = g_malloc(sizeof(*bp));
622 bp->pc = pc;
623 bp->flags = flags;
625 /* keep all GDB-injected breakpoints in front */
626 if (flags & BP_GDB) {
627 QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
628 } else {
629 QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
632 breakpoint_invalidate(ENV_GET_CPU(env), pc);
634 if (breakpoint) {
635 *breakpoint = bp;
637 return 0;
638 #else
639 return -ENOSYS;
640 #endif
643 /* Remove a specific breakpoint. */
644 int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
646 #if defined(TARGET_HAS_ICE)
647 CPUBreakpoint *bp;
649 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
650 if (bp->pc == pc && bp->flags == flags) {
651 cpu_breakpoint_remove_by_ref(env, bp);
652 return 0;
655 return -ENOENT;
656 #else
657 return -ENOSYS;
658 #endif
661 /* Remove a specific breakpoint by reference. */
662 void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
664 #if defined(TARGET_HAS_ICE)
665 QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
667 breakpoint_invalidate(ENV_GET_CPU(env), breakpoint->pc);
669 g_free(breakpoint);
670 #endif
673 /* Remove all matching breakpoints. */
674 void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
676 #if defined(TARGET_HAS_ICE)
677 CPUBreakpoint *bp, *next;
679 QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
680 if (bp->flags & mask)
681 cpu_breakpoint_remove_by_ref(env, bp);
683 #endif
686 /* enable or disable single step mode. EXCP_DEBUG is returned by the
687 CPU loop after each instruction */
688 void cpu_single_step(CPUState *cpu, int enabled)
690 #if defined(TARGET_HAS_ICE)
691 if (cpu->singlestep_enabled != enabled) {
692 cpu->singlestep_enabled = enabled;
693 if (kvm_enabled()) {
694 kvm_update_guest_debug(cpu, 0);
695 } else {
696 /* must flush all the translated code to avoid inconsistencies */
697 /* XXX: only flush what is necessary */
698 CPUArchState *env = cpu->env_ptr;
699 tb_flush(env);
702 #endif
705 void cpu_abort(CPUArchState *env, const char *fmt, ...)
707 CPUState *cpu = ENV_GET_CPU(env);
708 va_list ap;
709 va_list ap2;
711 va_start(ap, fmt);
712 va_copy(ap2, ap);
713 fprintf(stderr, "qemu: fatal: ");
714 vfprintf(stderr, fmt, ap);
715 fprintf(stderr, "\n");
716 cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
717 if (qemu_log_enabled()) {
718 qemu_log("qemu: fatal: ");
719 qemu_log_vprintf(fmt, ap2);
720 qemu_log("\n");
721 log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
722 qemu_log_flush();
723 qemu_log_close();
725 va_end(ap2);
726 va_end(ap);
727 #if defined(CONFIG_USER_ONLY)
729 struct sigaction act;
730 sigfillset(&act.sa_mask);
731 act.sa_handler = SIG_DFL;
732 sigaction(SIGABRT, &act, NULL);
734 #endif
735 abort();
738 #if !defined(CONFIG_USER_ONLY)
739 static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
741 RAMBlock *block;
743 /* The list is protected by the iothread lock here. */
744 block = ram_list.mru_block;
745 if (block && addr - block->offset < block->length) {
746 goto found;
748 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
749 if (addr - block->offset < block->length) {
750 goto found;
754 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
755 abort();
757 found:
758 ram_list.mru_block = block;
759 return block;
762 static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
764 ram_addr_t start1;
765 RAMBlock *block;
766 ram_addr_t end;
768 end = TARGET_PAGE_ALIGN(start + length);
769 start &= TARGET_PAGE_MASK;
771 block = qemu_get_ram_block(start);
772 assert(block == qemu_get_ram_block(end - 1));
773 start1 = (uintptr_t)block->host + (start - block->offset);
774 cpu_tlb_reset_dirty_all(start1, length);
777 /* Note: start and end must be within the same ram block. */
778 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t length,
779 unsigned client)
781 if (length == 0)
782 return;
783 cpu_physical_memory_clear_dirty_range(start, length, client);
785 if (tcg_enabled()) {
786 tlb_reset_dirty_range_all(start, length);
790 static void cpu_physical_memory_set_dirty_tracking(bool enable)
792 in_migration = enable;
795 hwaddr memory_region_section_get_iotlb(CPUArchState *env,
796 MemoryRegionSection *section,
797 target_ulong vaddr,
798 hwaddr paddr, hwaddr xlat,
799 int prot,
800 target_ulong *address)
802 hwaddr iotlb;
803 CPUWatchpoint *wp;
805 if (memory_region_is_ram(section->mr)) {
806 /* Normal RAM. */
807 iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
808 + xlat;
809 if (!section->readonly) {
810 iotlb |= PHYS_SECTION_NOTDIRTY;
811 } else {
812 iotlb |= PHYS_SECTION_ROM;
814 } else {
815 iotlb = section - section->address_space->dispatch->map.sections;
816 iotlb += xlat;
819 /* Make accesses to pages with watchpoints go via the
820 watchpoint trap routines. */
821 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
822 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
823 /* Avoid trapping reads of pages with a write breakpoint. */
824 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
825 iotlb = PHYS_SECTION_WATCH + paddr;
826 *address |= TLB_MMIO;
827 break;
832 return iotlb;
834 #endif /* defined(CONFIG_USER_ONLY) */
836 #if !defined(CONFIG_USER_ONLY)
838 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
839 uint16_t section);
840 static subpage_t *subpage_init(AddressSpace *as, hwaddr base);
842 static void *(*phys_mem_alloc)(size_t size) = qemu_anon_ram_alloc;
845  * Set a custom physical guest memory allocator.
846 * Accelerators with unusual needs may need this. Hopefully, we can
847 * get rid of it eventually.
849 void phys_mem_set_alloc(void *(*alloc)(size_t))
851 phys_mem_alloc = alloc;
854 static uint16_t phys_section_add(PhysPageMap *map,
855 MemoryRegionSection *section)
857 /* The physical section number is ORed with a page-aligned
858 * pointer to produce the iotlb entries. Thus it should
859 * never overflow into the page-aligned value.
861 assert(map->sections_nb < TARGET_PAGE_SIZE);
863 if (map->sections_nb == map->sections_nb_alloc) {
864 map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
865 map->sections = g_renew(MemoryRegionSection, map->sections,
866 map->sections_nb_alloc);
868 map->sections[map->sections_nb] = *section;
869 memory_region_ref(section->mr);
870 return map->sections_nb++;
873 static void phys_section_destroy(MemoryRegion *mr)
875 memory_region_unref(mr);
877 if (mr->subpage) {
878 subpage_t *subpage = container_of(mr, subpage_t, iomem);
879 memory_region_destroy(&subpage->iomem);
880 g_free(subpage);
884 static void phys_sections_free(PhysPageMap *map)
886 while (map->sections_nb > 0) {
887 MemoryRegionSection *section = &map->sections[--map->sections_nb];
888 phys_section_destroy(section->mr);
890 g_free(map->sections);
891 g_free(map->nodes);
894 static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
896 subpage_t *subpage;
897 hwaddr base = section->offset_within_address_space
898 & TARGET_PAGE_MASK;
899 MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
900 d->map.nodes, d->map.sections);
901 MemoryRegionSection subsection = {
902 .offset_within_address_space = base,
903 .size = int128_make64(TARGET_PAGE_SIZE),
905 hwaddr start, end;
907 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
909 if (!(existing->mr->subpage)) {
910 subpage = subpage_init(d->as, base);
911 subsection.address_space = d->as;
912 subsection.mr = &subpage->iomem;
913 phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
914 phys_section_add(&d->map, &subsection));
915 } else {
916 subpage = container_of(existing->mr, subpage_t, iomem);
918 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
919 end = start + int128_get64(section->size) - 1;
920 subpage_register(subpage, start, end,
921 phys_section_add(&d->map, section));
925 static void register_multipage(AddressSpaceDispatch *d,
926 MemoryRegionSection *section)
928 hwaddr start_addr = section->offset_within_address_space;
929 uint16_t section_index = phys_section_add(&d->map, section);
930 uint64_t num_pages = int128_get64(int128_rshift(section->size,
931 TARGET_PAGE_BITS));
933 assert(num_pages);
934 phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
937 static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
939 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
940 AddressSpaceDispatch *d = as->next_dispatch;
941 MemoryRegionSection now = *section, remain = *section;
942 Int128 page_size = int128_make64(TARGET_PAGE_SIZE);
944 if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
945 uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
946 - now.offset_within_address_space;
948 now.size = int128_min(int128_make64(left), now.size);
949 register_subpage(d, &now);
950 } else {
951 now.size = int128_zero();
953 while (int128_ne(remain.size, now.size)) {
954 remain.size = int128_sub(remain.size, now.size);
955 remain.offset_within_address_space += int128_get64(now.size);
956 remain.offset_within_region += int128_get64(now.size);
957 now = remain;
958 if (int128_lt(remain.size, page_size)) {
959 register_subpage(d, &now);
960 } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
961 now.size = page_size;
962 register_subpage(d, &now);
963 } else {
964 now.size = int128_and(now.size, int128_neg(page_size));
965 register_multipage(d, &now);
970 void qemu_flush_coalesced_mmio_buffer(void)
972 if (kvm_enabled())
973 kvm_flush_coalesced_mmio_buffer();
976 void qemu_mutex_lock_ramlist(void)
978 qemu_mutex_lock(&ram_list.mutex);
981 void qemu_mutex_unlock_ramlist(void)
983 qemu_mutex_unlock(&ram_list.mutex);
986 #ifdef __linux__
988 #include <sys/vfs.h>
990 #define HUGETLBFS_MAGIC 0x958458f6
992 static long gethugepagesize(const char *path)
994 struct statfs fs;
995 int ret;
997 do {
998 ret = statfs(path, &fs);
999 } while (ret != 0 && errno == EINTR);
1001 if (ret != 0) {
1002 perror(path);
1003 return 0;
1006 if (fs.f_type != HUGETLBFS_MAGIC)
1007 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
1009 return fs.f_bsize;
1012 static sigjmp_buf sigjump;
1014 static void sigbus_handler(int signal)
1016 siglongjmp(sigjump, 1);
1019 static void *file_ram_alloc(RAMBlock *block,
1020 ram_addr_t memory,
1021 const char *path)
1023 char *filename;
1024 char *sanitized_name;
1025 char *c;
1026 void *area;
1027 int fd;
1028 unsigned long hpagesize;
1030 hpagesize = gethugepagesize(path);
1031 if (!hpagesize) {
1032 return NULL;
1035 if (memory < hpagesize) {
1036 return NULL;
1039 if (kvm_enabled() && !kvm_has_sync_mmu()) {
1040 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
1041 return NULL;
1044 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
1045 sanitized_name = g_strdup(block->mr->name);
1046 for (c = sanitized_name; *c != '\0'; c++) {
1047 if (*c == '/')
1048 *c = '_';
1051 filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
1052 sanitized_name);
1053 g_free(sanitized_name);
1055 fd = mkstemp(filename);
1056 if (fd < 0) {
1057 perror("unable to create backing store for hugepages");
1058 g_free(filename);
1059 return NULL;
1061 unlink(filename);
1062 g_free(filename);
1064 memory = (memory+hpagesize-1) & ~(hpagesize-1);
1067 * ftruncate is not supported by hugetlbfs in older
1068 * hosts, so don't bother bailing out on errors.
1069 * If anything goes wrong with it under other filesystems,
1070 * mmap will fail.
1072 if (ftruncate(fd, memory))
1073 perror("ftruncate");
1075 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
1076 if (area == MAP_FAILED) {
1077 perror("file_ram_alloc: can't mmap RAM pages");
1078 close(fd);
1079 return (NULL);
1082 if (mem_prealloc) {
1083 int ret, i;
1084 struct sigaction act, oldact;
1085 sigset_t set, oldset;
1087 memset(&act, 0, sizeof(act));
1088 act.sa_handler = &sigbus_handler;
1089 act.sa_flags = 0;
1091 ret = sigaction(SIGBUS, &act, &oldact);
1092 if (ret) {
1093 perror("file_ram_alloc: failed to install signal handler");
1094 exit(1);
1097 /* unblock SIGBUS */
1098 sigemptyset(&set);
1099 sigaddset(&set, SIGBUS);
1100 pthread_sigmask(SIG_UNBLOCK, &set, &oldset);
1102 if (sigsetjmp(sigjump, 1)) {
1103 fprintf(stderr, "file_ram_alloc: failed to preallocate pages\n");
1104 exit(1);
1107 /* MAP_POPULATE silently ignores failures */
1108 for (i = 0; i < (memory/hpagesize); i++) {
1109 memset(area + (hpagesize*i), 0, 1);
1112 ret = sigaction(SIGBUS, &oldact, NULL);
1113 if (ret) {
1114 perror("file_ram_alloc: failed to reinstall signal handler");
1115 exit(1);
1118 pthread_sigmask(SIG_SETMASK, &oldset, NULL);
1121 block->fd = fd;
1122 return area;
1124 #else
1125 static void *file_ram_alloc(RAMBlock *block,
1126 ram_addr_t memory,
1127 const char *path)
1129 fprintf(stderr, "-mem-path not supported on this host\n");
1130 exit(1);
1132 #endif
1134 static ram_addr_t find_ram_offset(ram_addr_t size)
1136 RAMBlock *block, *next_block;
1137 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
1139     assert(size != 0); /* it would hand out the same offset multiple times */
1141 if (QTAILQ_EMPTY(&ram_list.blocks))
1142 return 0;
1144 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1145 ram_addr_t end, next = RAM_ADDR_MAX;
1147 end = block->offset + block->length;
1149 QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
1150 if (next_block->offset >= end) {
1151 next = MIN(next, next_block->offset);
1154 if (next - end >= size && next - end < mingap) {
1155 offset = end;
1156 mingap = next - end;
1160 if (offset == RAM_ADDR_MAX) {
1161 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
1162 (uint64_t)size);
1163 abort();
1166 return offset;
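/*
 * Editor's note: stand-alone illustration, not part of exec.c, of the
 * "smallest gap that fits" policy used by find_ram_offset() above, with two
 * hard-coded example blocks instead of the real ram_list.
 */
static ram_addr_t example_find_gap(ram_addr_t size)
{
    static const ram_addr_t starts[] = { 0x0000, 0x3000 };   /* block bases */
    static const ram_addr_t lens[]   = { 0x1000, 0x1000 };   /* block sizes */
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
    int i, j;

    for (i = 0; i < 2; i++) {
        ram_addr_t end = starts[i] + lens[i], next = RAM_ADDR_MAX;

        for (j = 0; j < 2; j++) {
            if (starts[j] >= end) {
                next = MIN(next, starts[j]);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }
    return offset;   /* example_find_gap(0x1000) picks the gap at 0x1000 */
}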
1169 ram_addr_t last_ram_offset(void)
1171 RAMBlock *block;
1172 ram_addr_t last = 0;
1174 QTAILQ_FOREACH(block, &ram_list.blocks, next)
1175 last = MAX(last, block->offset + block->length);
1177 return last;
1180 static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1182 int ret;
1184     /* Use MADV_DONTDUMP if the user doesn't want the guest memory in the core dump */
1185 if (!qemu_opt_get_bool(qemu_get_machine_opts(),
1186 "dump-guest-core", true)) {
1187 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1188 if (ret) {
1189 perror("qemu_madvise");
1190 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1191 "but dump_guest_core=off specified\n");
1196 void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
1198 RAMBlock *new_block, *block;
1200 new_block = NULL;
1201 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1202 if (block->offset == addr) {
1203 new_block = block;
1204 break;
1207 assert(new_block);
1208 assert(!new_block->idstr[0]);
1210 if (dev) {
1211 char *id = qdev_get_dev_path(dev);
1212 if (id) {
1213 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
1214 g_free(id);
1217 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1219 /* This assumes the iothread lock is taken here too. */
1220 qemu_mutex_lock_ramlist();
1221 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1222 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
1223 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1224 new_block->idstr);
1225 abort();
1228 qemu_mutex_unlock_ramlist();
1231 static int memory_try_enable_merging(void *addr, size_t len)
1233 if (!qemu_opt_get_bool(qemu_get_machine_opts(), "mem-merge", true)) {
1234 /* disabled by the user */
1235 return 0;
1238 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1241 ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
1242 MemoryRegion *mr)
1244 RAMBlock *block, *new_block;
1245 ram_addr_t old_ram_size, new_ram_size;
1247 old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
1249 size = TARGET_PAGE_ALIGN(size);
1250 new_block = g_malloc0(sizeof(*new_block));
1251 new_block->fd = -1;
1253 /* This assumes the iothread lock is taken here too. */
1254 qemu_mutex_lock_ramlist();
1255 new_block->mr = mr;
1256 new_block->offset = find_ram_offset(size);
1257 if (host) {
1258 new_block->host = host;
1259 new_block->flags |= RAM_PREALLOC_MASK;
1260 } else if (xen_enabled()) {
1261 if (mem_path) {
1262 fprintf(stderr, "-mem-path not supported with Xen\n");
1263 exit(1);
1265 xen_ram_alloc(new_block->offset, size, mr);
1266 } else {
1267 if (mem_path) {
1268 if (phys_mem_alloc != qemu_anon_ram_alloc) {
1270 * file_ram_alloc() needs to allocate just like
1271 * phys_mem_alloc, but we haven't bothered to provide
1272 * a hook there.
1274 fprintf(stderr,
1275 "-mem-path not supported with this accelerator\n");
1276 exit(1);
1278 new_block->host = file_ram_alloc(new_block, size, mem_path);
1280 if (!new_block->host) {
1281 new_block->host = phys_mem_alloc(size);
1282 if (!new_block->host) {
1283 fprintf(stderr, "Cannot set up guest memory '%s': %s\n",
1284 new_block->mr->name, strerror(errno));
1285 exit(1);
1287 memory_try_enable_merging(new_block->host, size);
1290 new_block->length = size;
1292 /* Keep the list sorted from biggest to smallest block. */
1293 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1294 if (block->length < new_block->length) {
1295 break;
1298 if (block) {
1299 QTAILQ_INSERT_BEFORE(block, new_block, next);
1300 } else {
1301 QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
1303 ram_list.mru_block = NULL;
1305 ram_list.version++;
1306 qemu_mutex_unlock_ramlist();
1308 new_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
1310 if (new_ram_size > old_ram_size) {
1311 int i;
1312 for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
1313 ram_list.dirty_memory[i] =
1314 bitmap_zero_extend(ram_list.dirty_memory[i],
1315 old_ram_size, new_ram_size);
1318 cpu_physical_memory_set_dirty_range(new_block->offset, size);
1320 qemu_ram_setup_dump(new_block->host, size);
1321 qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);
1322 qemu_madvise(new_block->host, size, QEMU_MADV_DONTFORK);
1324 if (kvm_enabled())
1325 kvm_setup_guest_memory(new_block->host, size);
1327 return new_block->offset;
1330 ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
1332 return qemu_ram_alloc_from_ptr(size, NULL, mr);
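/*
 * Editor's note: hypothetical sketch, not part of exec.c.  Board code
 * normally reaches qemu_ram_alloc() through memory_region_init_ram(); the
 * direct call below only illustrates how the returned ram_addr_t relates to
 * the host pointer.  The helper name is an assumption.
 */
static void example_alloc_and_clear(MemoryRegion *mr, ram_addr_t size)
{
    ram_addr_t offset = qemu_ram_alloc(size, mr);
    uint8_t *host = qemu_get_ram_ptr(offset);

    memset(host, 0, size);   /* the block is ordinary host memory */
}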
1335 void qemu_ram_free_from_ptr(ram_addr_t addr)
1337 RAMBlock *block;
1339 /* This assumes the iothread lock is taken here too. */
1340 qemu_mutex_lock_ramlist();
1341 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1342 if (addr == block->offset) {
1343 QTAILQ_REMOVE(&ram_list.blocks, block, next);
1344 ram_list.mru_block = NULL;
1345 ram_list.version++;
1346 g_free(block);
1347 break;
1350 qemu_mutex_unlock_ramlist();
1353 void qemu_ram_free(ram_addr_t addr)
1355 RAMBlock *block;
1357 /* This assumes the iothread lock is taken here too. */
1358 qemu_mutex_lock_ramlist();
1359 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1360 if (addr == block->offset) {
1361 QTAILQ_REMOVE(&ram_list.blocks, block, next);
1362 ram_list.mru_block = NULL;
1363 ram_list.version++;
1364 if (block->flags & RAM_PREALLOC_MASK) {
1366 } else if (xen_enabled()) {
1367 xen_invalidate_map_cache_entry(block->host);
1368 #ifndef _WIN32
1369 } else if (block->fd >= 0) {
1370 munmap(block->host, block->length);
1371 close(block->fd);
1372 #endif
1373 } else {
1374 qemu_anon_ram_free(block->host, block->length);
1376 g_free(block);
1377 break;
1380 qemu_mutex_unlock_ramlist();
1384 #ifndef _WIN32
1385 void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1387 RAMBlock *block;
1388 ram_addr_t offset;
1389 int flags;
1390 void *area, *vaddr;
1392 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1393 offset = addr - block->offset;
1394 if (offset < block->length) {
1395 vaddr = block->host + offset;
1396 if (block->flags & RAM_PREALLOC_MASK) {
1398 } else if (xen_enabled()) {
1399 abort();
1400 } else {
1401 flags = MAP_FIXED;
1402 munmap(vaddr, length);
1403 if (block->fd >= 0) {
1404 #ifdef MAP_POPULATE
1405 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
1406 MAP_PRIVATE;
1407 #else
1408 flags |= MAP_PRIVATE;
1409 #endif
1410 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1411 flags, block->fd, offset);
1412 } else {
1414 * Remap needs to match alloc. Accelerators that
1415 * set phys_mem_alloc never remap. If they did,
1416 * we'd need a remap hook here.
1418 assert(phys_mem_alloc == qemu_anon_ram_alloc);
1420 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1421 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1422 flags, -1, 0);
1424 if (area != vaddr) {
1425 fprintf(stderr, "Could not remap addr: "
1426 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
1427 length, addr);
1428 exit(1);
1430 memory_try_enable_merging(vaddr, length);
1431 qemu_ram_setup_dump(vaddr, length);
1433 return;
1437 #endif /* !_WIN32 */
1439 /* Return a host pointer to ram allocated with qemu_ram_alloc.
1440 With the exception of the softmmu code in this file, this should
1441 only be used for local memory (e.g. video ram) that the device owns,
1442 and knows it isn't going to access beyond the end of the block.
1444 It should not be used for general purpose DMA.
1445 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
1447 void *qemu_get_ram_ptr(ram_addr_t addr)
1449 RAMBlock *block = qemu_get_ram_block(addr);
1451 if (xen_enabled()) {
1452 /* We need to check if the requested address is in the RAM
1453 * because we don't want to map the entire memory in QEMU.
1454 * In that case just map until the end of the page.
1456 if (block->offset == 0) {
1457 return xen_map_cache(addr, 0, 0);
1458 } else if (block->host == NULL) {
1459 block->host =
1460 xen_map_cache(block->offset, block->length, 1);
1463 return block->host + (addr - block->offset);
1466 /* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
1467 * but takes a size argument */
1468 static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
1470 if (*size == 0) {
1471 return NULL;
1473 if (xen_enabled()) {
1474 return xen_map_cache(addr, *size, 1);
1475 } else {
1476 RAMBlock *block;
1478 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1479 if (addr - block->offset < block->length) {
1480 if (addr - block->offset + *size > block->length)
1481 *size = block->length - addr + block->offset;
1482 return block->host + (addr - block->offset);
1486 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1487 abort();
1491 /* Some of the softmmu routines need to translate from a host pointer
1492 (typically a TLB entry) back to a ram offset. */
1493 MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
1495 RAMBlock *block;
1496 uint8_t *host = ptr;
1498 if (xen_enabled()) {
1499 *ram_addr = xen_ram_addr_from_mapcache(ptr);
1500 return qemu_get_ram_block(*ram_addr)->mr;
1503 block = ram_list.mru_block;
1504 if (block && block->host && host - block->host < block->length) {
1505 goto found;
1508 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1509         /* This case appears when the block is not mapped. */
1510 if (block->host == NULL) {
1511 continue;
1513 if (host - block->host < block->length) {
1514 goto found;
1518 return NULL;
1520 found:
1521 *ram_addr = block->offset + (host - block->host);
1522 return block->mr;
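/*
 * Editor's note: hypothetical sketch, not part of exec.c.  The softmmu slow
 * path takes a host pointer from a TLB entry and turns it back into a
 * ram_addr_t like this before updating the dirty bitmaps.
 */
static ram_addr_t example_host_to_ram_addr(void *host)
{
    ram_addr_t ram_addr;
    MemoryRegion *mr = qemu_ram_addr_from_host(host, &ram_addr);

    assert(mr != NULL);   /* host must point into guest RAM */
    return ram_addr;
}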
1525 static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
1526 uint64_t val, unsigned size)
1528 if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
1529 tb_invalidate_phys_page_fast(ram_addr, size);
1531 switch (size) {
1532 case 1:
1533 stb_p(qemu_get_ram_ptr(ram_addr), val);
1534 break;
1535 case 2:
1536 stw_p(qemu_get_ram_ptr(ram_addr), val);
1537 break;
1538 case 4:
1539 stl_p(qemu_get_ram_ptr(ram_addr), val);
1540 break;
1541 default:
1542 abort();
1544 cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_MIGRATION);
1545 cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_VGA);
1546 /* we remove the notdirty callback only if the code has been
1547 flushed */
1548 if (!cpu_physical_memory_is_clean(ram_addr)) {
1549 CPUArchState *env = current_cpu->env_ptr;
1550 tlb_set_dirty(env, env->mem_io_vaddr);
1554 static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
1555 unsigned size, bool is_write)
1557 return is_write;
1560 static const MemoryRegionOps notdirty_mem_ops = {
1561 .write = notdirty_mem_write,
1562 .valid.accepts = notdirty_mem_accepts,
1563 .endianness = DEVICE_NATIVE_ENDIAN,
1566 /* Generate a debug exception if a watchpoint has been hit. */
1567 static void check_watchpoint(int offset, int len_mask, int flags)
1569 CPUArchState *env = current_cpu->env_ptr;
1570 target_ulong pc, cs_base;
1571 target_ulong vaddr;
1572 CPUWatchpoint *wp;
1573 int cpu_flags;
1575 if (env->watchpoint_hit) {
1576 /* We re-entered the check after replacing the TB. Now raise
1577          * the debug interrupt so that it will trigger after the
1578 * current instruction. */
1579 cpu_interrupt(ENV_GET_CPU(env), CPU_INTERRUPT_DEBUG);
1580 return;
1582 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
1583 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1584 if ((vaddr == (wp->vaddr & len_mask) ||
1585 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
1586 wp->flags |= BP_WATCHPOINT_HIT;
1587 if (!env->watchpoint_hit) {
1588 env->watchpoint_hit = wp;
1589 tb_check_watchpoint(env);
1590 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
1591 env->exception_index = EXCP_DEBUG;
1592 cpu_loop_exit(env);
1593 } else {
1594 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
1595 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
1596 cpu_resume_from_signal(env, NULL);
1599 } else {
1600 wp->flags &= ~BP_WATCHPOINT_HIT;
1605 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
1606 so these check for a hit then pass through to the normal out-of-line
1607 phys routines. */
1608 static uint64_t watch_mem_read(void *opaque, hwaddr addr,
1609 unsigned size)
1611 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
1612 switch (size) {
1613 case 1: return ldub_phys(&address_space_memory, addr);
1614 case 2: return lduw_phys(&address_space_memory, addr);
1615 case 4: return ldl_phys(&address_space_memory, addr);
1616 default: abort();
1620 static void watch_mem_write(void *opaque, hwaddr addr,
1621 uint64_t val, unsigned size)
1623 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
1624 switch (size) {
1625 case 1:
1626 stb_phys(&address_space_memory, addr, val);
1627 break;
1628 case 2:
1629 stw_phys(&address_space_memory, addr, val);
1630 break;
1631 case 4:
1632 stl_phys(&address_space_memory, addr, val);
1633 break;
1634 default: abort();
1638 static const MemoryRegionOps watch_mem_ops = {
1639 .read = watch_mem_read,
1640 .write = watch_mem_write,
1641 .endianness = DEVICE_NATIVE_ENDIAN,
1644 static uint64_t subpage_read(void *opaque, hwaddr addr,
1645 unsigned len)
1647 subpage_t *subpage = opaque;
1648 uint8_t buf[4];
1650 #if defined(DEBUG_SUBPAGE)
1651 printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
1652 subpage, len, addr);
1653 #endif
1654 address_space_read(subpage->as, addr + subpage->base, buf, len);
1655 switch (len) {
1656 case 1:
1657 return ldub_p(buf);
1658 case 2:
1659 return lduw_p(buf);
1660 case 4:
1661 return ldl_p(buf);
1662 default:
1663 abort();
1667 static void subpage_write(void *opaque, hwaddr addr,
1668 uint64_t value, unsigned len)
1670 subpage_t *subpage = opaque;
1671 uint8_t buf[4];
1673 #if defined(DEBUG_SUBPAGE)
1674 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
1675 " value %"PRIx64"\n",
1676 __func__, subpage, len, addr, value);
1677 #endif
1678 switch (len) {
1679 case 1:
1680 stb_p(buf, value);
1681 break;
1682 case 2:
1683 stw_p(buf, value);
1684 break;
1685 case 4:
1686 stl_p(buf, value);
1687 break;
1688 default:
1689 abort();
1691 address_space_write(subpage->as, addr + subpage->base, buf, len);
1694 static bool subpage_accepts(void *opaque, hwaddr addr,
1695 unsigned len, bool is_write)
1697 subpage_t *subpage = opaque;
1698 #if defined(DEBUG_SUBPAGE)
1699 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
1700 __func__, subpage, is_write ? 'w' : 'r', len, addr);
1701 #endif
1703 return address_space_access_valid(subpage->as, addr + subpage->base,
1704 len, is_write);
1707 static const MemoryRegionOps subpage_ops = {
1708 .read = subpage_read,
1709 .write = subpage_write,
1710 .valid.accepts = subpage_accepts,
1711 .endianness = DEVICE_NATIVE_ENDIAN,
1714 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
1715 uint16_t section)
1717 int idx, eidx;
1719 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
1720 return -1;
1721 idx = SUBPAGE_IDX(start);
1722 eidx = SUBPAGE_IDX(end);
1723 #if defined(DEBUG_SUBPAGE)
1724 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
1725 __func__, mmio, start, end, idx, eidx, section);
1726 #endif
1727 for (; idx <= eidx; idx++) {
1728 mmio->sub_section[idx] = section;
1731 return 0;
1734 static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
1736 subpage_t *mmio;
1738 mmio = g_malloc0(sizeof(subpage_t));
1740 mmio->as = as;
1741 mmio->base = base;
1742 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
1743 "subpage", TARGET_PAGE_SIZE);
1744 mmio->iomem.subpage = true;
1745 #if defined(DEBUG_SUBPAGE)
1746 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
1747 mmio, base, TARGET_PAGE_SIZE);
1748 #endif
1749 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
1751 return mmio;
1754 static uint16_t dummy_section(PhysPageMap *map, MemoryRegion *mr)
1756 MemoryRegionSection section = {
1757 .address_space = &address_space_memory,
1758 .mr = mr,
1759 .offset_within_address_space = 0,
1760 .offset_within_region = 0,
1761 .size = int128_2_64(),
1764 return phys_section_add(map, &section);
1767 MemoryRegion *iotlb_to_region(AddressSpace *as, hwaddr index)
1769 return as->dispatch->map.sections[index & ~TARGET_PAGE_MASK].mr;
1772 static void io_mem_init(void)
1774 memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, "rom", UINT64_MAX);
1775 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
1776 "unassigned", UINT64_MAX);
1777 memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
1778 "notdirty", UINT64_MAX);
1779 memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
1780 "watch", UINT64_MAX);
1783 static void mem_begin(MemoryListener *listener)
1785 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
1786 AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
1787 uint16_t n;
1789 n = dummy_section(&d->map, &io_mem_unassigned);
1790 assert(n == PHYS_SECTION_UNASSIGNED);
1791 n = dummy_section(&d->map, &io_mem_notdirty);
1792 assert(n == PHYS_SECTION_NOTDIRTY);
1793 n = dummy_section(&d->map, &io_mem_rom);
1794 assert(n == PHYS_SECTION_ROM);
1795 n = dummy_section(&d->map, &io_mem_watch);
1796 assert(n == PHYS_SECTION_WATCH);
1798 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
1799 d->as = as;
1800 as->next_dispatch = d;
1803 static void mem_commit(MemoryListener *listener)
1805 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
1806 AddressSpaceDispatch *cur = as->dispatch;
1807 AddressSpaceDispatch *next = as->next_dispatch;
1809 phys_page_compact_all(next, next->map.nodes_nb);
1811 as->dispatch = next;
1813 if (cur) {
1814 phys_sections_free(&cur->map);
1815 g_free(cur);
1819 static void tcg_commit(MemoryListener *listener)
1821 CPUState *cpu;
1823 /* since each CPU stores ram addresses in its TLB cache, we must
1824 reset the modified entries */
1825 /* XXX: slow ! */
1826 CPU_FOREACH(cpu) {
1827 CPUArchState *env = cpu->env_ptr;
1829         /* FIXME: Disentangle the cpu.h circular file dependencies so we can
1830 directly get the right CPU from listener. */
1831 if (cpu->tcg_as_listener != listener) {
1832 continue;
1834 tlb_flush(env, 1);
1838 static void core_log_global_start(MemoryListener *listener)
1840 cpu_physical_memory_set_dirty_tracking(true);
1843 static void core_log_global_stop(MemoryListener *listener)
1845 cpu_physical_memory_set_dirty_tracking(false);
1848 static MemoryListener core_memory_listener = {
1849 .log_global_start = core_log_global_start,
1850 .log_global_stop = core_log_global_stop,
1851 .priority = 1,
1854 void address_space_init_dispatch(AddressSpace *as)
1856 as->dispatch = NULL;
1857 as->dispatch_listener = (MemoryListener) {
1858 .begin = mem_begin,
1859 .commit = mem_commit,
1860 .region_add = mem_add,
1861 .region_nop = mem_add,
1862 .priority = 0,
1864 memory_listener_register(&as->dispatch_listener, as);
1867 void address_space_destroy_dispatch(AddressSpace *as)
1869 AddressSpaceDispatch *d = as->dispatch;
1871 memory_listener_unregister(&as->dispatch_listener);
1872 g_free(d);
1873 as->dispatch = NULL;
1876 static void memory_map_init(void)
1878 system_memory = g_malloc(sizeof(*system_memory));
1880 memory_region_init(system_memory, NULL, "system", UINT64_MAX);
1881 address_space_init(&address_space_memory, system_memory, "memory");
1883 system_io = g_malloc(sizeof(*system_io));
1884 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
1885 65536);
1886 address_space_init(&address_space_io, system_io, "I/O");
1888 memory_listener_register(&core_memory_listener, &address_space_memory);
1891 MemoryRegion *get_system_memory(void)
1893 return system_memory;
1896 MemoryRegion *get_system_io(void)
1898 return system_io;
1901 #endif /* !defined(CONFIG_USER_ONLY) */
1903 /* physical memory access (slow version, mainly for debug) */
1904 #if defined(CONFIG_USER_ONLY)
1905 int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
1906 uint8_t *buf, int len, int is_write)
1908 int l, flags;
1909 target_ulong page;
1910 void * p;
1912 while (len > 0) {
1913 page = addr & TARGET_PAGE_MASK;
1914 l = (page + TARGET_PAGE_SIZE) - addr;
1915 if (l > len)
1916 l = len;
1917 flags = page_get_flags(page);
1918 if (!(flags & PAGE_VALID))
1919 return -1;
1920 if (is_write) {
1921 if (!(flags & PAGE_WRITE))
1922 return -1;
1923 /* XXX: this code should not depend on lock_user */
1924 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
1925 return -1;
1926 memcpy(p, buf, l);
1927 unlock_user(p, addr, l);
1928 } else {
1929 if (!(flags & PAGE_READ))
1930 return -1;
1931 /* XXX: this code should not depend on lock_user */
1932 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
1933 return -1;
1934 memcpy(buf, p, l);
1935 unlock_user(p, addr, 0);
1937 len -= l;
1938 buf += l;
1939 addr += l;
1941 return 0;
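/*
 * Editor's note: hypothetical usage sketch, not part of exec.c.  The gdbstub
 * goes through this helper to read guest memory on behalf of the debugger;
 * the wrapper name is an assumption.
 */
static int example_debug_read(CPUState *cpu, target_ulong addr,
                              uint8_t *buf, int len)
{
    return cpu_memory_rw_debug(cpu, addr, buf, len, 0 /* read */);
}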
1944 #else
1946 static void invalidate_and_set_dirty(hwaddr addr,
1947 hwaddr length)
1949 if (cpu_physical_memory_is_clean(addr)) {
1950 /* invalidate code */
1951 tb_invalidate_phys_page_range(addr, addr + length, 0);
1952 /* set dirty bit */
1953 cpu_physical_memory_set_dirty_flag(addr, DIRTY_MEMORY_VGA);
1954 cpu_physical_memory_set_dirty_flag(addr, DIRTY_MEMORY_MIGRATION);
1956 xen_modified_memory(addr, length);
1959 static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
1961 unsigned access_size_max = mr->ops->valid.max_access_size;
1963 /* Regions are assumed to support 1-4 byte accesses unless
1964 otherwise specified. */
1965 if (access_size_max == 0) {
1966 access_size_max = 4;
1969 /* Bound the maximum access by the alignment of the address. */
1970 if (!mr->ops->impl.unaligned) {
1971 unsigned align_size_max = addr & -addr;
1972 if (align_size_max != 0 && align_size_max < access_size_max) {
1973 access_size_max = align_size_max;
1977 /* Don't attempt accesses larger than the maximum. */
1978 if (l > access_size_max) {
1979 l = access_size_max;
1981 if (l & (l - 1)) {
1982 l = 1 << (qemu_fls(l) - 1);
1985 return l;
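/*
 * Editor's note: worked example, not part of exec.c, of the alignment bound
 * applied above: addr & -addr isolates the lowest set bit of the address,
 * which is its largest natural alignment.  The helper name is hypothetical.
 */
static unsigned example_align_bound(hwaddr addr, unsigned max)
{
    hwaddr align = addr & -addr;   /* e.g. addr == 0x1006 gives align == 2 */

    if (align != 0 && align < max) {
        return align;
    }
    return max;                    /* addr == 0 is aligned for any size */
}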
1988 bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
1989 int len, bool is_write)
1991 hwaddr l;
1992 uint8_t *ptr;
1993 uint64_t val;
1994 hwaddr addr1;
1995 MemoryRegion *mr;
1996 bool error = false;
1998 while (len > 0) {
1999 l = len;
2000 mr = address_space_translate(as, addr, &addr1, &l, is_write);
2002 if (is_write) {
2003 if (!memory_access_is_direct(mr, is_write)) {
2004 l = memory_access_size(mr, l, addr1);
2005 /* XXX: could force current_cpu to NULL to avoid
2006 potential bugs */
2007 switch (l) {
2008 case 8:
2009 /* 64 bit write access */
2010 val = ldq_p(buf);
2011 error |= io_mem_write(mr, addr1, val, 8);
2012 break;
2013 case 4:
2014 /* 32 bit write access */
2015 val = ldl_p(buf);
2016 error |= io_mem_write(mr, addr1, val, 4);
2017 break;
2018 case 2:
2019 /* 16 bit write access */
2020 val = lduw_p(buf);
2021 error |= io_mem_write(mr, addr1, val, 2);
2022 break;
2023 case 1:
2024 /* 8 bit write access */
2025 val = ldub_p(buf);
2026 error |= io_mem_write(mr, addr1, val, 1);
2027 break;
2028 default:
2029 abort();
2031 } else {
2032 addr1 += memory_region_get_ram_addr(mr);
2033 /* RAM case */
2034 ptr = qemu_get_ram_ptr(addr1);
2035 memcpy(ptr, buf, l);
2036 invalidate_and_set_dirty(addr1, l);
2038 } else {
2039 if (!memory_access_is_direct(mr, is_write)) {
2040 /* I/O case */
2041 l = memory_access_size(mr, l, addr1);
2042 switch (l) {
2043 case 8:
2044 /* 64 bit read access */
2045 error |= io_mem_read(mr, addr1, &val, 8);
2046 stq_p(buf, val);
2047 break;
2048 case 4:
2049 /* 32 bit read access */
2050 error |= io_mem_read(mr, addr1, &val, 4);
2051 stl_p(buf, val);
2052 break;
2053 case 2:
2054 /* 16 bit read access */
2055 error |= io_mem_read(mr, addr1, &val, 2);
2056 stw_p(buf, val);
2057 break;
2058 case 1:
2059 /* 8 bit read access */
2060 error |= io_mem_read(mr, addr1, &val, 1);
2061 stb_p(buf, val);
2062 break;
2063 default:
2064 abort();
2066 } else {
2067 /* RAM case */
2068 ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
2069 memcpy(buf, ptr, l);
2072 len -= l;
2073 buf += l;
2074 addr += l;
2077 return error;
2080 bool address_space_write(AddressSpace *as, hwaddr addr,
2081 const uint8_t *buf, int len)
2083 return address_space_rw(as, addr, (uint8_t *)buf, len, true);
2086 bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
2088 return address_space_rw(as, addr, buf, len, false);
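/*
 * Editor's note: hypothetical usage sketch, not part of exec.c: read a
 * 32-bit value from guest-physical memory through the system address space,
 * decoding the bytes with the same ldl_p helper the I/O path above uses.
 */
static uint32_t example_read_u32(hwaddr addr)
{
    uint8_t buf[4];

    address_space_read(&address_space_memory, addr, buf, sizeof(buf));
    return ldl_p(buf);
}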
2092 void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
2093 int len, int is_write)
2095 address_space_rw(&address_space_memory, addr, buf, len, is_write);
2098 enum write_rom_type {
2099 WRITE_DATA,
2100 FLUSH_CACHE,
2103 static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
2104 hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
2106 hwaddr l;
2107 uint8_t *ptr;
2108 hwaddr addr1;
2109 MemoryRegion *mr;
2111 while (len > 0) {
2112 l = len;
2113 mr = address_space_translate(as, addr, &addr1, &l, true);
2115 if (!(memory_region_is_ram(mr) ||
2116 memory_region_is_romd(mr))) {
2117 /* do nothing */
2118 } else {
2119 addr1 += memory_region_get_ram_addr(mr);
2120 /* ROM/RAM case */
2121 ptr = qemu_get_ram_ptr(addr1);
2122 switch (type) {
2123 case WRITE_DATA:
2124 memcpy(ptr, buf, l);
2125 invalidate_and_set_dirty(addr1, l);
2126 break;
2127 case FLUSH_CACHE:
2128 flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
2129 break;
2132 len -= l;
2133 buf += l;
2134 addr += l;
2138 /* used for ROM loading : can write in RAM and ROM */
2139 void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
2140 const uint8_t *buf, int len)
2142 cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
2145 void cpu_flush_icache_range(hwaddr start, int len)
2148 * This function should do the same thing as an icache flush that was
2149 * triggered from within the guest. For TCG we are always cache coherent,
2150 * so there is no need to flush anything. For KVM / Xen we need to flush
2151 * the host's instruction cache at least.
2153 if (tcg_enabled()) {
2154 return;
2157 cpu_physical_memory_write_rom_internal(&address_space_memory,
2158 start, NULL, len, FLUSH_CACHE);
2161 typedef struct {
2162 MemoryRegion *mr;
2163 void *buffer;
2164 hwaddr addr;
2165 hwaddr len;
2166 } BounceBuffer;
2168 static BounceBuffer bounce;
2170 typedef struct MapClient {
2171 void *opaque;
2172 void (*callback)(void *opaque);
2173 QLIST_ENTRY(MapClient) link;
2174 } MapClient;
2176 static QLIST_HEAD(map_client_list, MapClient) map_client_list
2177 = QLIST_HEAD_INITIALIZER(map_client_list);
2179 void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
2181 MapClient *client = g_malloc(sizeof(*client));
2183 client->opaque = opaque;
2184 client->callback = callback;
2185 QLIST_INSERT_HEAD(&map_client_list, client, link);
2186 return client;
2189 static void cpu_unregister_map_client(void *_client)
2191 MapClient *client = (MapClient *)_client;
2193 QLIST_REMOVE(client, link);
2194 g_free(client);
2197 static void cpu_notify_map_clients(void)
2199 MapClient *client;
2201 while (!QLIST_EMPTY(&map_client_list)) {
2202 client = QLIST_FIRST(&map_client_list);
2203 client->callback(client->opaque);
2204 cpu_unregister_map_client(client);
2208 bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2210 MemoryRegion *mr;
2211 hwaddr l, xlat;
2213 while (len > 0) {
2214 l = len;
2215 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2216 if (!memory_access_is_direct(mr, is_write)) {
2217 l = memory_access_size(mr, l, addr);
2218 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
2219 return false;
2220 }
2221 }
2223 len -= l;
2224 addr += l;
2225 }
2226 return true;
2227 }
2229 /* Map a physical memory region into a host virtual address.
2230 * May map a subset of the requested range, given by and returned in *plen.
2231 * May return NULL if resources needed to perform the mapping are exhausted.
2232 * Use only for reads OR writes - not for read-modify-write operations.
2233 * Use cpu_register_map_client() to know when retrying the map operation is
2234 * likely to succeed.
2235 */
2236 void *address_space_map(AddressSpace *as,
2237 hwaddr addr,
2238 hwaddr *plen,
2239 bool is_write)
2240 {
2241 hwaddr len = *plen;
2242 hwaddr done = 0;
2243 hwaddr l, xlat, base;
2244 MemoryRegion *mr, *this_mr;
2245 ram_addr_t raddr;
2247 if (len == 0) {
2248 return NULL;
2249 }
2251 l = len;
2252 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2253 if (!memory_access_is_direct(mr, is_write)) {
2254 if (bounce.buffer) {
2255 return NULL;
2256 }
2257 /* Avoid unbounded allocations */
2258 l = MIN(l, TARGET_PAGE_SIZE);
2259 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
2260 bounce.addr = addr;
2261 bounce.len = l;
2263 memory_region_ref(mr);
2264 bounce.mr = mr;
2265 if (!is_write) {
2266 address_space_read(as, addr, bounce.buffer, l);
2267 }
2269 *plen = l;
2270 return bounce.buffer;
2271 }
2273 base = xlat;
2274 raddr = memory_region_get_ram_addr(mr);
2276 for (;;) {
2277 len -= l;
2278 addr += l;
2279 done += l;
2280 if (len == 0) {
2281 break;
2282 }
2284 l = len;
2285 this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
2286 if (this_mr != mr || xlat != base + done) {
2287 break;
2288 }
2289 }
2291 memory_region_ref(mr);
2292 *plen = done;
2293 return qemu_ram_ptr_length(raddr + base, plen);
2294 }
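/* Illustrative sketch (not from the original source): a typical read-only
 * mapping of guest memory, with hypothetical "gpa" and "size" values:
 *
 *     hwaddr plen = size;
 *     void *p = address_space_map(&address_space_memory, gpa, &plen, false);
 *     if (!p) {
 *         // resources exhausted; retry via cpu_register_map_client()
 *     } else {
 *         // use at most plen bytes at p, then release the mapping
 *         address_space_unmap(&address_space_memory, p, plen, false, plen);
 *     }
 */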
2296 /* Unmaps a memory region previously mapped by address_space_map().
2297 * Will also mark the memory as dirty if is_write == 1. access_len gives
2298 * the amount of memory that was actually read or written by the caller.
2299 */
2300 void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2301 int is_write, hwaddr access_len)
2302 {
2303 if (buffer != bounce.buffer) {
2304 MemoryRegion *mr;
2305 ram_addr_t addr1;
2307 mr = qemu_ram_addr_from_host(buffer, &addr1);
2308 assert(mr != NULL);
2309 if (is_write) {
2310 while (access_len) {
2311 unsigned l;
2312 l = TARGET_PAGE_SIZE;
2313 if (l > access_len)
2314 l = access_len;
2315 invalidate_and_set_dirty(addr1, l);
2316 addr1 += l;
2317 access_len -= l;
2318 }
2319 }
2320 if (xen_enabled()) {
2321 xen_invalidate_map_cache_entry(buffer);
2322 }
2323 memory_region_unref(mr);
2324 return;
2325 }
2326 if (is_write) {
2327 address_space_write(as, bounce.addr, bounce.buffer, access_len);
2328 }
2329 qemu_vfree(bounce.buffer);
2330 bounce.buffer = NULL;
2331 memory_region_unref(bounce.mr);
2332 cpu_notify_map_clients();
2333 }
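/* Illustrative sketch (not from the original source): on the write side,
 * access_len should be the number of bytes actually produced so that only
 * those pages are dirtied (and, for a bounce buffer, written back);
 * "gpa", "size" and "fill_buffer" are hypothetical:
 *
 *     hwaddr plen = size;
 *     uint8_t *p = address_space_map(&address_space_memory, gpa, &plen, true);
 *     if (p) {
 *         size_t written = fill_buffer(p, plen);
 *         address_space_unmap(&address_space_memory, p, plen, true, written);
 *     }
 */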
2335 void *cpu_physical_memory_map(hwaddr addr,
2336 hwaddr *plen,
2337 int is_write)
2338 {
2339 return address_space_map(&address_space_memory, addr, plen, is_write);
2340 }
2342 void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2343 int is_write, hwaddr access_len)
2344 {
2345 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2346 }
2348 /* warning: addr must be aligned */
2349 static inline uint32_t ldl_phys_internal(AddressSpace *as, hwaddr addr,
2350 enum device_endian endian)
2351 {
2352 uint8_t *ptr;
2353 uint64_t val;
2354 MemoryRegion *mr;
2355 hwaddr l = 4;
2356 hwaddr addr1;
2358 mr = address_space_translate(as, addr, &addr1, &l, false);
2359 if (l < 4 || !memory_access_is_direct(mr, false)) {
2360 /* I/O case */
2361 io_mem_read(mr, addr1, &val, 4);
2362 #if defined(TARGET_WORDS_BIGENDIAN)
2363 if (endian == DEVICE_LITTLE_ENDIAN) {
2364 val = bswap32(val);
2365 }
2366 #else
2367 if (endian == DEVICE_BIG_ENDIAN) {
2368 val = bswap32(val);
2369 }
2370 #endif
2371 } else {
2372 /* RAM case */
2373 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
2374 & TARGET_PAGE_MASK)
2375 + addr1);
2376 switch (endian) {
2377 case DEVICE_LITTLE_ENDIAN:
2378 val = ldl_le_p(ptr);
2379 break;
2380 case DEVICE_BIG_ENDIAN:
2381 val = ldl_be_p(ptr);
2382 break;
2383 default:
2384 val = ldl_p(ptr);
2385 break;
2386 }
2387 }
2388 return val;
2389 }
2391 uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
2392 {
2393 return ldl_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
2394 }
2396 uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
2397 {
2398 return ldl_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
2399 }
2401 uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
2402 {
2403 return ldl_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
2404 }
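/* Illustrative sketch (not from the original source): device or board code
 * reading a naturally aligned 32-bit little-endian value at a hypothetical
 * guest physical address "reg_gpa" would use the explicit-endian variant so
 * the result does not depend on the target's byte order:
 *
 *     uint32_t v = ldl_le_phys(&address_space_memory, reg_gpa);
 */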
2406 /* warning: addr must be aligned */
2407 static inline uint64_t ldq_phys_internal(AddressSpace *as, hwaddr addr,
2408 enum device_endian endian)
2409 {
2410 uint8_t *ptr;
2411 uint64_t val;
2412 MemoryRegion *mr;
2413 hwaddr l = 8;
2414 hwaddr addr1;
2416 mr = address_space_translate(as, addr, &addr1, &l,
2417 false);
2418 if (l < 8 || !memory_access_is_direct(mr, false)) {
2419 /* I/O case */
2420 io_mem_read(mr, addr1, &val, 8);
2421 #if defined(TARGET_WORDS_BIGENDIAN)
2422 if (endian == DEVICE_LITTLE_ENDIAN) {
2423 val = bswap64(val);
2424 }
2425 #else
2426 if (endian == DEVICE_BIG_ENDIAN) {
2427 val = bswap64(val);
2428 }
2429 #endif
2430 } else {
2431 /* RAM case */
2432 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
2433 & TARGET_PAGE_MASK)
2434 + addr1);
2435 switch (endian) {
2436 case DEVICE_LITTLE_ENDIAN:
2437 val = ldq_le_p(ptr);
2438 break;
2439 case DEVICE_BIG_ENDIAN:
2440 val = ldq_be_p(ptr);
2441 break;
2442 default:
2443 val = ldq_p(ptr);
2444 break;
2445 }
2446 }
2447 return val;
2448 }
2450 uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
2451 {
2452 return ldq_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
2453 }
2455 uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
2456 {
2457 return ldq_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
2458 }
2460 uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
2461 {
2462 return ldq_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
2463 }
2465 /* XXX: optimize */
2466 uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
2467 {
2468 uint8_t val;
2469 address_space_rw(as, addr, &val, 1, 0);
2470 return val;
2471 }
2473 /* warning: addr must be aligned */
2474 static inline uint32_t lduw_phys_internal(AddressSpace *as, hwaddr addr,
2475 enum device_endian endian)
2476 {
2477 uint8_t *ptr;
2478 uint64_t val;
2479 MemoryRegion *mr;
2480 hwaddr l = 2;
2481 hwaddr addr1;
2483 mr = address_space_translate(as, addr, &addr1, &l,
2484 false);
2485 if (l < 2 || !memory_access_is_direct(mr, false)) {
2486 /* I/O case */
2487 io_mem_read(mr, addr1, &val, 2);
2488 #if defined(TARGET_WORDS_BIGENDIAN)
2489 if (endian == DEVICE_LITTLE_ENDIAN) {
2490 val = bswap16(val);
2491 }
2492 #else
2493 if (endian == DEVICE_BIG_ENDIAN) {
2494 val = bswap16(val);
2495 }
2496 #endif
2497 } else {
2498 /* RAM case */
2499 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
2500 & TARGET_PAGE_MASK)
2501 + addr1);
2502 switch (endian) {
2503 case DEVICE_LITTLE_ENDIAN:
2504 val = lduw_le_p(ptr);
2505 break;
2506 case DEVICE_BIG_ENDIAN:
2507 val = lduw_be_p(ptr);
2508 break;
2509 default:
2510 val = lduw_p(ptr);
2511 break;
2512 }
2513 }
2514 return val;
2515 }
2517 uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
2518 {
2519 return lduw_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
2520 }
2522 uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
2523 {
2524 return lduw_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
2525 }
2527 uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
2528 {
2529 return lduw_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
2530 }
2532 /* warning: addr must be aligned. The ram page is not masked as dirty
2533 and the code inside is not invalidated. It is useful if the dirty
2534 bits are used to track modified PTEs */
2535 void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
2536 {
2537 uint8_t *ptr;
2538 MemoryRegion *mr;
2539 hwaddr l = 4;
2540 hwaddr addr1;
2542 mr = address_space_translate(as, addr, &addr1, &l,
2543 true);
2544 if (l < 4 || !memory_access_is_direct(mr, true)) {
2545 io_mem_write(mr, addr1, val, 4);
2546 } else {
2547 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
2548 ptr = qemu_get_ram_ptr(addr1);
2549 stl_p(ptr, val);
2551 if (unlikely(in_migration)) {
2552 if (cpu_physical_memory_is_clean(addr1)) {
2553 /* invalidate code */
2554 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2555 /* set dirty bit */
2556 cpu_physical_memory_set_dirty_flag(addr1,
2557 DIRTY_MEMORY_MIGRATION);
2558 cpu_physical_memory_set_dirty_flag(addr1, DIRTY_MEMORY_VGA);
2559 }
2560 }
2561 }
2562 }
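/* Illustrative sketch (not from the original source): target MMU emulation
 * updating a page-table entry at a hypothetical guest physical address
 * "pte_addr" can use the notdirty variant so its own bookkeeping store does
 * not set the dirty bits it relies on to track modified PTEs:
 *
 *     stl_phys_notdirty(&address_space_memory, pte_addr, new_pte);
 */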
2564 /* warning: addr must be aligned */
2565 static inline void stl_phys_internal(AddressSpace *as,
2566 hwaddr addr, uint32_t val,
2567 enum device_endian endian)
2568 {
2569 uint8_t *ptr;
2570 MemoryRegion *mr;
2571 hwaddr l = 4;
2572 hwaddr addr1;
2574 mr = address_space_translate(as, addr, &addr1, &l,
2575 true);
2576 if (l < 4 || !memory_access_is_direct(mr, true)) {
2577 #if defined(TARGET_WORDS_BIGENDIAN)
2578 if (endian == DEVICE_LITTLE_ENDIAN) {
2579 val = bswap32(val);
2580 }
2581 #else
2582 if (endian == DEVICE_BIG_ENDIAN) {
2583 val = bswap32(val);
2584 }
2585 #endif
2586 io_mem_write(mr, addr1, val, 4);
2587 } else {
2588 /* RAM case */
2589 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
2590 ptr = qemu_get_ram_ptr(addr1);
2591 switch (endian) {
2592 case DEVICE_LITTLE_ENDIAN:
2593 stl_le_p(ptr, val);
2594 break;
2595 case DEVICE_BIG_ENDIAN:
2596 stl_be_p(ptr, val);
2597 break;
2598 default:
2599 stl_p(ptr, val);
2600 break;
2601 }
2602 invalidate_and_set_dirty(addr1, 4);
2603 }
2604 }
2606 void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
2607 {
2608 stl_phys_internal(as, addr, val, DEVICE_NATIVE_ENDIAN);
2609 }
2611 void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
2612 {
2613 stl_phys_internal(as, addr, val, DEVICE_LITTLE_ENDIAN);
2614 }
2616 void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
2617 {
2618 stl_phys_internal(as, addr, val, DEVICE_BIG_ENDIAN);
2619 }
2621 /* XXX: optimize */
2622 void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
2623 {
2624 uint8_t v = val;
2625 address_space_rw(as, addr, &v, 1, 1);
2626 }
2628 /* warning: addr must be aligned */
2629 static inline void stw_phys_internal(AddressSpace *as,
2630 hwaddr addr, uint32_t val,
2631 enum device_endian endian)
2632 {
2633 uint8_t *ptr;
2634 MemoryRegion *mr;
2635 hwaddr l = 2;
2636 hwaddr addr1;
2638 mr = address_space_translate(as, addr, &addr1, &l, true);
2639 if (l < 2 || !memory_access_is_direct(mr, true)) {
2640 #if defined(TARGET_WORDS_BIGENDIAN)
2641 if (endian == DEVICE_LITTLE_ENDIAN) {
2642 val = bswap16(val);
2643 }
2644 #else
2645 if (endian == DEVICE_BIG_ENDIAN) {
2646 val = bswap16(val);
2647 }
2648 #endif
2649 io_mem_write(mr, addr1, val, 2);
2650 } else {
2651 /* RAM case */
2652 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
2653 ptr = qemu_get_ram_ptr(addr1);
2654 switch (endian) {
2655 case DEVICE_LITTLE_ENDIAN:
2656 stw_le_p(ptr, val);
2657 break;
2658 case DEVICE_BIG_ENDIAN:
2659 stw_be_p(ptr, val);
2660 break;
2661 default:
2662 stw_p(ptr, val);
2663 break;
2664 }
2665 invalidate_and_set_dirty(addr1, 2);
2666 }
2667 }
2669 void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
2670 {
2671 stw_phys_internal(as, addr, val, DEVICE_NATIVE_ENDIAN);
2672 }
2674 void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
2675 {
2676 stw_phys_internal(as, addr, val, DEVICE_LITTLE_ENDIAN);
2677 }
2679 void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
2680 {
2681 stw_phys_internal(as, addr, val, DEVICE_BIG_ENDIAN);
2682 }
2684 /* XXX: optimize */
2685 void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
2686 {
2687 val = tswap64(val);
2688 address_space_rw(as, addr, (void *) &val, 8, 1);
2689 }
2691 void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
2692 {
2693 val = cpu_to_le64(val);
2694 address_space_rw(as, addr, (void *) &val, 8, 1);
2695 }
2697 void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
2698 {
2699 val = cpu_to_be64(val);
2700 address_space_rw(as, addr, (void *) &val, 8, 1);
2701 }
2703 /* virtual memory access for debug (includes writing to ROM) */
2704 int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
2705 uint8_t *buf, int len, int is_write)
2706 {
2707 int l;
2708 hwaddr phys_addr;
2709 target_ulong page;
2711 while (len > 0) {
2712 page = addr & TARGET_PAGE_MASK;
2713 phys_addr = cpu_get_phys_page_debug(cpu, page);
2714 /* if no physical page mapped, return an error */
2715 if (phys_addr == -1)
2716 return -1;
2717 l = (page + TARGET_PAGE_SIZE) - addr;
2718 if (l > len)
2719 l = len;
2720 phys_addr += (addr & ~TARGET_PAGE_MASK);
2721 if (is_write) {
2722 cpu_physical_memory_write_rom(cpu->as, phys_addr, buf, l);
2723 } else {
2724 address_space_rw(cpu->as, phys_addr, buf, l, 0);
2725 }
2726 len -= l;
2727 buf += l;
2728 addr += l;
2729 }
2730 return 0;
2731 }
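/* Illustrative sketch (not from the original source): a debug stub reading
 * guest memory through the CPU's virtual address space; "cpu" and "vaddr"
 * are caller-provided:
 *
 *     uint8_t buf[16];
 *     if (cpu_memory_rw_debug(cpu, vaddr, buf, sizeof(buf), 0) < 0) {
 *         // no physical page mapped at vaddr
 *     }
 */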
2732 #endif
2734 #if !defined(CONFIG_USER_ONLY)
2736 /*
2737 * A helper function for the _utterly broken_ virtio device model to find out if
2738 * it's running on a big endian machine. Don't do this at home kids!
2739 */
2740 bool virtio_is_big_endian(void);
2741 bool virtio_is_big_endian(void)
2742 {
2743 #if defined(TARGET_WORDS_BIGENDIAN)
2744 return true;
2745 #else
2746 return false;
2747 #endif
2748 }
2750 #endif
2752 #ifndef CONFIG_USER_ONLY
2753 bool cpu_physical_memory_is_io(hwaddr phys_addr)
2754 {
2755 MemoryRegion *mr;
2756 hwaddr l = 1;
2758 mr = address_space_translate(&address_space_memory,
2759 phys_addr, &phys_addr, &l, false);
2761 return !(memory_region_is_ram(mr) ||
2762 memory_region_is_romd(mr));
2763 }
2765 void qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
2766 {
2767 RAMBlock *block;
2769 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
2770 func(block->host, block->offset, block->length, opaque);
2771 }
2772 }
2773 #endif