roms: Flush icache when writing roms to guest memory
[qemu/ar7.git] / exec.c
blob 896f7b83fc23fb116dd35059af148946c5bbc2ba
1 /*
2 * Virtual page mapping
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 #include "config.h"
20 #ifdef _WIN32
21 #include <windows.h>
22 #else
23 #include <sys/types.h>
24 #include <sys/mman.h>
25 #endif
27 #include "qemu-common.h"
28 #include "cpu.h"
29 #include "tcg.h"
30 #include "hw/hw.h"
31 #include "hw/qdev.h"
32 #include "qemu/osdep.h"
33 #include "sysemu/kvm.h"
34 #include "sysemu/sysemu.h"
35 #include "hw/xen/xen.h"
36 #include "qemu/timer.h"
37 #include "qemu/config-file.h"
38 #include "exec/memory.h"
39 #include "sysemu/dma.h"
40 #include "exec/address-spaces.h"
41 #if defined(CONFIG_USER_ONLY)
42 #include <qemu.h>
43 #else /* !CONFIG_USER_ONLY */
44 #include "sysemu/xen-mapcache.h"
45 #include "trace.h"
46 #endif
47 #include "exec/cpu-all.h"
49 #include "exec/cputlb.h"
50 #include "translate-all.h"
52 #include "exec/memory-internal.h"
53 #include "qemu/cache-utils.h"
55 //#define DEBUG_SUBPAGE
57 #if !defined(CONFIG_USER_ONLY)
58 static int in_migration;
60 RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };
62 static MemoryRegion *system_memory;
63 static MemoryRegion *system_io;
65 AddressSpace address_space_io;
66 AddressSpace address_space_memory;
68 MemoryRegion io_mem_rom, io_mem_notdirty;
69 static MemoryRegion io_mem_unassigned;
71 #endif
73 struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
74 /* current CPU in the current thread. It is only valid inside
75 cpu_exec() */
76 DEFINE_TLS(CPUState *, current_cpu);
77 /* 0 = Do not count executed instructions.
78 1 = Precise instruction counting.
79 2 = Adaptive rate instruction counting. */
80 int use_icount;
82 #if !defined(CONFIG_USER_ONLY)
84 typedef struct PhysPageEntry PhysPageEntry;
86 struct PhysPageEntry {
87 uint16_t is_leaf : 1;
88 /* index into phys_sections (is_leaf) or phys_map_nodes (!is_leaf) */
89 uint16_t ptr : 15;
92 typedef PhysPageEntry Node[L2_SIZE];
94 struct AddressSpaceDispatch {
95 /* This is a multi-level map on the physical address space.
96 * The bottom level has pointers to MemoryRegionSections.
98 PhysPageEntry phys_map;
99 Node *nodes;
100 MemoryRegionSection *sections;
101 AddressSpace *as;
104 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
105 typedef struct subpage_t {
106 MemoryRegion iomem;
107 AddressSpace *as;
108 hwaddr base;
109 uint16_t sub_section[TARGET_PAGE_SIZE];
110 } subpage_t;
112 #define PHYS_SECTION_UNASSIGNED 0
113 #define PHYS_SECTION_NOTDIRTY 1
114 #define PHYS_SECTION_ROM 2
115 #define PHYS_SECTION_WATCH 3
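/* These four indices are fixed: core_begin() registers a dummy section for
 * io_mem_unassigned, io_mem_notdirty, io_mem_rom and io_mem_watch in exactly
 * this order and asserts that phys_section_add() returns the matching
 * constant, so iotlb values can encode them directly in their low bits. */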
117 typedef struct PhysPageMap {
118 unsigned sections_nb;
119 unsigned sections_nb_alloc;
120 unsigned nodes_nb;
121 unsigned nodes_nb_alloc;
122 Node *nodes;
123 MemoryRegionSection *sections;
124 } PhysPageMap;
126 static PhysPageMap *prev_map;
127 static PhysPageMap next_map;
129 #define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)
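/* PhysPageEntry.ptr is a 15-bit field, so PHYS_MAP_NODE_NIL (0x7fff) is the
 * sentinel meaning "no node allocated yet" for a non-leaf entry. */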
131 static void io_mem_init(void);
132 static void memory_map_init(void);
134 static MemoryRegion io_mem_watch;
135 #endif
137 #if !defined(CONFIG_USER_ONLY)
139 static void phys_map_node_reserve(unsigned nodes)
141 if (next_map.nodes_nb + nodes > next_map.nodes_nb_alloc) {
142 next_map.nodes_nb_alloc = MAX(next_map.nodes_nb_alloc * 2,
143 16);
144 next_map.nodes_nb_alloc = MAX(next_map.nodes_nb_alloc,
145 next_map.nodes_nb + nodes);
146 next_map.nodes = g_renew(Node, next_map.nodes,
147 next_map.nodes_nb_alloc);
151 static uint16_t phys_map_node_alloc(void)
153 unsigned i;
154 uint16_t ret;
156 ret = next_map.nodes_nb++;
157 assert(ret != PHYS_MAP_NODE_NIL);
158 assert(ret != next_map.nodes_nb_alloc);
159 for (i = 0; i < L2_SIZE; ++i) {
160 next_map.nodes[ret][i].is_leaf = 0;
161 next_map.nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
163 return ret;
166 static void phys_page_set_level(PhysPageEntry *lp, hwaddr *index,
167 hwaddr *nb, uint16_t leaf,
168 int level)
170 PhysPageEntry *p;
171 int i;
172 hwaddr step = (hwaddr)1 << (level * L2_BITS);
174 if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
175 lp->ptr = phys_map_node_alloc();
176 p = next_map.nodes[lp->ptr];
177 if (level == 0) {
178 for (i = 0; i < L2_SIZE; i++) {
179 p[i].is_leaf = 1;
180 p[i].ptr = PHYS_SECTION_UNASSIGNED;
183 } else {
184 p = next_map.nodes[lp->ptr];
186 lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];
188 while (*nb && lp < &p[L2_SIZE]) {
189 if ((*index & (step - 1)) == 0 && *nb >= step) {
190 lp->is_leaf = true;
191 lp->ptr = leaf;
192 *index += step;
193 *nb -= step;
194 } else {
195 phys_page_set_level(lp, index, nb, leaf, level - 1);
197 ++lp;
201 static void phys_page_set(AddressSpaceDispatch *d,
202 hwaddr index, hwaddr nb,
203 uint16_t leaf)
205 /* Wildly overreserve - it doesn't matter much. */
206 phys_map_node_reserve(3 * P_L2_LEVELS);
208 phys_page_set_level(&d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
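/* Lookup walks the radix tree from the root, consuming L2_BITS of the page
 * index per level.  Hitting PHYS_MAP_NODE_NIL on the way down means the
 * range was never populated, so the result falls back to the section at
 * PHYS_SECTION_UNASSIGNED. */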
211 static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr index,
212 Node *nodes, MemoryRegionSection *sections)
214 PhysPageEntry *p;
215 int i;
217 for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
218 if (lp.ptr == PHYS_MAP_NODE_NIL) {
219 return &sections[PHYS_SECTION_UNASSIGNED];
221 p = nodes[lp.ptr];
222 lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
224 return &sections[lp.ptr];
227 bool memory_region_is_unassigned(MemoryRegion *mr)
229 return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
230 && mr != &io_mem_watch;
233 static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
234 hwaddr addr,
235 bool resolve_subpage)
237 MemoryRegionSection *section;
238 subpage_t *subpage;
240 section = phys_page_find(d->phys_map, addr >> TARGET_PAGE_BITS,
241 d->nodes, d->sections);
242 if (resolve_subpage && section->mr->subpage) {
243 subpage = container_of(section->mr, subpage_t, iomem);
244 section = &d->sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
246 return section;
249 static MemoryRegionSection *
250 address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
251 hwaddr *plen, bool resolve_subpage)
253 MemoryRegionSection *section;
254 Int128 diff;
256 section = address_space_lookup_region(d, addr, resolve_subpage);
257 /* Compute offset within MemoryRegionSection */
258 addr -= section->offset_within_address_space;
260 /* Compute offset within MemoryRegion */
261 *xlat = addr + section->offset_within_region;
263 diff = int128_sub(section->mr->size, int128_make64(addr));
264 *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
265 return section;
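/* Translate a guest physical address, following any IOMMU regions layered
 * in between.  The loop re-enters the target address space of each IOMMU
 * translation and clamps *plen so the caller never crosses a region
 * boundary; a failed permission check resolves to io_mem_unassigned. */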
268 MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
269 hwaddr *xlat, hwaddr *plen,
270 bool is_write)
272 IOMMUTLBEntry iotlb;
273 MemoryRegionSection *section;
274 MemoryRegion *mr;
275 hwaddr len = *plen;
277 for (;;) {
278 section = address_space_translate_internal(as->dispatch, addr, &addr, plen, true);
279 mr = section->mr;
281 if (!mr->iommu_ops) {
282 break;
285 iotlb = mr->iommu_ops->translate(mr, addr);
286 addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
287 | (addr & iotlb.addr_mask));
288 len = MIN(len, (addr | iotlb.addr_mask) - addr + 1);
289 if (!(iotlb.perm & (1 << is_write))) {
290 mr = &io_mem_unassigned;
291 break;
294 as = iotlb.target_as;
297 *plen = len;
298 *xlat = addr;
299 return mr;
302 MemoryRegionSection *
303 address_space_translate_for_iotlb(AddressSpace *as, hwaddr addr, hwaddr *xlat,
304 hwaddr *plen)
306 MemoryRegionSection *section;
307 section = address_space_translate_internal(as->dispatch, addr, xlat, plen, false);
309 assert(!section->mr->iommu_ops);
310 return section;
312 #endif
314 void cpu_exec_init_all(void)
316 #if !defined(CONFIG_USER_ONLY)
317 qemu_mutex_init(&ram_list.mutex);
318 memory_map_init();
319 io_mem_init();
320 #endif
323 #if !defined(CONFIG_USER_ONLY)
325 static int cpu_common_post_load(void *opaque, int version_id)
327 CPUState *cpu = opaque;
329 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
330 version_id is increased. */
331 cpu->interrupt_request &= ~0x01;
332 tlb_flush(cpu->env_ptr, 1);
334 return 0;
337 const VMStateDescription vmstate_cpu_common = {
338 .name = "cpu_common",
339 .version_id = 1,
340 .minimum_version_id = 1,
341 .minimum_version_id_old = 1,
342 .post_load = cpu_common_post_load,
343 .fields = (VMStateField []) {
344 VMSTATE_UINT32(halted, CPUState),
345 VMSTATE_UINT32(interrupt_request, CPUState),
346 VMSTATE_END_OF_LIST()
350 #endif
352 CPUState *qemu_get_cpu(int index)
354 CPUState *cpu;
356 CPU_FOREACH(cpu) {
357 if (cpu->cpu_index == index) {
358 return cpu;
362 return NULL;
365 void cpu_exec_init(CPUArchState *env)
367 CPUState *cpu = ENV_GET_CPU(env);
368 CPUClass *cc = CPU_GET_CLASS(cpu);
369 CPUState *some_cpu;
370 int cpu_index;
372 #if defined(CONFIG_USER_ONLY)
373 cpu_list_lock();
374 #endif
375 cpu_index = 0;
376 CPU_FOREACH(some_cpu) {
377 cpu_index++;
379 cpu->cpu_index = cpu_index;
380 cpu->numa_node = 0;
381 QTAILQ_INIT(&env->breakpoints);
382 QTAILQ_INIT(&env->watchpoints);
383 #ifndef CONFIG_USER_ONLY
384 cpu->thread_id = qemu_get_thread_id();
385 #endif
386 QTAILQ_INSERT_TAIL(&cpus, cpu, node);
387 #if defined(CONFIG_USER_ONLY)
388 cpu_list_unlock();
389 #endif
390 if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
391 vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
393 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
394 register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
395 cpu_save, cpu_load, env);
396 assert(cc->vmsd == NULL);
397 assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
398 #endif
399 if (cc->vmsd != NULL) {
400 vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
404 #if defined(TARGET_HAS_ICE)
405 #if defined(CONFIG_USER_ONLY)
406 static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
408 tb_invalidate_phys_page_range(pc, pc + 1, 0);
410 #else
411 static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
413 hwaddr phys = cpu_get_phys_page_debug(cpu, pc);
414 if (phys != -1) {
415 tb_invalidate_phys_addr(phys | (pc & ~TARGET_PAGE_MASK));
418 #endif
419 #endif /* TARGET_HAS_ICE */
421 #if defined(CONFIG_USER_ONLY)
422 void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
427 int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
428 int flags, CPUWatchpoint **watchpoint)
430 return -ENOSYS;
432 #else
433 /* Add a watchpoint. */
434 int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
435 int flags, CPUWatchpoint **watchpoint)
437 target_ulong len_mask = ~(len - 1);
438 CPUWatchpoint *wp;
440 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
441 if ((len & (len - 1)) || (addr & ~len_mask) ||
442 len == 0 || len > TARGET_PAGE_SIZE) {
443 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
444 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
445 return -EINVAL;
447 wp = g_malloc(sizeof(*wp));
449 wp->vaddr = addr;
450 wp->len_mask = len_mask;
451 wp->flags = flags;
453 /* keep all GDB-injected watchpoints in front */
454 if (flags & BP_GDB)
455 QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
456 else
457 QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
459 tlb_flush_page(env, addr);
461 if (watchpoint)
462 *watchpoint = wp;
463 return 0;
466 /* Remove a specific watchpoint. */
467 int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
468 int flags)
470 target_ulong len_mask = ~(len - 1);
471 CPUWatchpoint *wp;
473 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
474 if (addr == wp->vaddr && len_mask == wp->len_mask
475 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
476 cpu_watchpoint_remove_by_ref(env, wp);
477 return 0;
480 return -ENOENT;
483 /* Remove a specific watchpoint by reference. */
484 void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
486 QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
488 tlb_flush_page(env, watchpoint->vaddr);
490 g_free(watchpoint);
493 /* Remove all matching watchpoints. */
494 void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
496 CPUWatchpoint *wp, *next;
498 QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
499 if (wp->flags & mask)
500 cpu_watchpoint_remove_by_ref(env, wp);
503 #endif
505 /* Add a breakpoint. */
506 int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
507 CPUBreakpoint **breakpoint)
509 #if defined(TARGET_HAS_ICE)
510 CPUBreakpoint *bp;
512 bp = g_malloc(sizeof(*bp));
514 bp->pc = pc;
515 bp->flags = flags;
517 /* keep all GDB-injected breakpoints in front */
518 if (flags & BP_GDB) {
519 QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
520 } else {
521 QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
524 breakpoint_invalidate(ENV_GET_CPU(env), pc);
526 if (breakpoint) {
527 *breakpoint = bp;
529 return 0;
530 #else
531 return -ENOSYS;
532 #endif
535 /* Remove a specific breakpoint. */
536 int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
538 #if defined(TARGET_HAS_ICE)
539 CPUBreakpoint *bp;
541 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
542 if (bp->pc == pc && bp->flags == flags) {
543 cpu_breakpoint_remove_by_ref(env, bp);
544 return 0;
547 return -ENOENT;
548 #else
549 return -ENOSYS;
550 #endif
553 /* Remove a specific breakpoint by reference. */
554 void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
556 #if defined(TARGET_HAS_ICE)
557 QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
559 breakpoint_invalidate(ENV_GET_CPU(env), breakpoint->pc);
561 g_free(breakpoint);
562 #endif
565 /* Remove all matching breakpoints. */
566 void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
568 #if defined(TARGET_HAS_ICE)
569 CPUBreakpoint *bp, *next;
571 QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
572 if (bp->flags & mask)
573 cpu_breakpoint_remove_by_ref(env, bp);
575 #endif
578 /* enable or disable single step mode. EXCP_DEBUG is returned by the
579 CPU loop after each instruction */
580 void cpu_single_step(CPUState *cpu, int enabled)
582 #if defined(TARGET_HAS_ICE)
583 if (cpu->singlestep_enabled != enabled) {
584 cpu->singlestep_enabled = enabled;
585 if (kvm_enabled()) {
586 kvm_update_guest_debug(cpu, 0);
587 } else {
588 /* must flush all the translated code to avoid inconsistencies */
589 /* XXX: only flush what is necessary */
590 CPUArchState *env = cpu->env_ptr;
591 tb_flush(env);
594 #endif
597 void cpu_abort(CPUArchState *env, const char *fmt, ...)
599 CPUState *cpu = ENV_GET_CPU(env);
600 va_list ap;
601 va_list ap2;
603 va_start(ap, fmt);
604 va_copy(ap2, ap);
605 fprintf(stderr, "qemu: fatal: ");
606 vfprintf(stderr, fmt, ap);
607 fprintf(stderr, "\n");
608 cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
609 if (qemu_log_enabled()) {
610 qemu_log("qemu: fatal: ");
611 qemu_log_vprintf(fmt, ap2);
612 qemu_log("\n");
613 log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
614 qemu_log_flush();
615 qemu_log_close();
617 va_end(ap2);
618 va_end(ap);
619 #if defined(CONFIG_USER_ONLY)
621 struct sigaction act;
622 sigfillset(&act.sa_mask);
623 act.sa_handler = SIG_DFL;
624 sigaction(SIGABRT, &act, NULL);
626 #endif
627 abort();
630 #if !defined(CONFIG_USER_ONLY)
631 static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
633 RAMBlock *block;
635 /* The list is protected by the iothread lock here. */
636 block = ram_list.mru_block;
637 if (block && addr - block->offset < block->length) {
638 goto found;
640 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
641 if (addr - block->offset < block->length) {
642 goto found;
646 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
647 abort();
649 found:
650 ram_list.mru_block = block;
651 return block;
654 static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
655 uintptr_t length)
657 RAMBlock *block;
658 ram_addr_t start1;
660 block = qemu_get_ram_block(start);
661 assert(block == qemu_get_ram_block(end - 1));
662 start1 = (uintptr_t)block->host + (start - block->offset);
663 cpu_tlb_reset_dirty_all(start1, length);
666 /* Note: start and end must be within the same ram block. */
667 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
668 int dirty_flags)
670 uintptr_t length;
672 start &= TARGET_PAGE_MASK;
673 end = TARGET_PAGE_ALIGN(end);
675 length = end - start;
676 if (length == 0)
677 return;
678 cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
680 if (tcg_enabled()) {
681 tlb_reset_dirty_range_all(start, end, length);
685 static int cpu_physical_memory_set_dirty_tracking(int enable)
687 int ret = 0;
688 in_migration = enable;
689 return ret;
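/* The iotlb value is an encoding trick: for RAM it is the page-aligned
 * ram_addr plus the offset, with PHYS_SECTION_NOTDIRTY or PHYS_SECTION_ROM
 * ORed into the low bits; for MMIO it is the index of the
 * MemoryRegionSection plus the offset into it.  iotlb_to_region() below
 * recovers the section again by masking with ~TARGET_PAGE_MASK. */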
692 hwaddr memory_region_section_get_iotlb(CPUArchState *env,
693 MemoryRegionSection *section,
694 target_ulong vaddr,
695 hwaddr paddr, hwaddr xlat,
696 int prot,
697 target_ulong *address)
699 hwaddr iotlb;
700 CPUWatchpoint *wp;
702 if (memory_region_is_ram(section->mr)) {
703 /* Normal RAM. */
704 iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
705 + xlat;
706 if (!section->readonly) {
707 iotlb |= PHYS_SECTION_NOTDIRTY;
708 } else {
709 iotlb |= PHYS_SECTION_ROM;
711 } else {
712 iotlb = section - address_space_memory.dispatch->sections;
713 iotlb += xlat;
716 /* Make accesses to pages with watchpoints go via the
717 watchpoint trap routines. */
718 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
719 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
720 /* Avoid trapping reads of pages with a write breakpoint. */
721 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
722 iotlb = PHYS_SECTION_WATCH + paddr;
723 *address |= TLB_MMIO;
724 break;
729 return iotlb;
731 #endif /* defined(CONFIG_USER_ONLY) */
733 #if !defined(CONFIG_USER_ONLY)
735 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
736 uint16_t section);
737 static subpage_t *subpage_init(AddressSpace *as, hwaddr base);
739 static void *(*phys_mem_alloc)(size_t size) = qemu_anon_ram_alloc;
742  * Set a custom physical guest memory allocator.
743 * Accelerators with unusual needs may need this. Hopefully, we can
744 * get rid of it eventually.
746 void phys_mem_set_alloc(void *(*alloc)(size_t))
748 phys_mem_alloc = alloc;
751 static uint16_t phys_section_add(MemoryRegionSection *section)
753 /* The physical section number is ORed with a page-aligned
754 * pointer to produce the iotlb entries. Thus it should
755 * never overflow into the page-aligned value.
757 assert(next_map.sections_nb < TARGET_PAGE_SIZE);
759 if (next_map.sections_nb == next_map.sections_nb_alloc) {
760 next_map.sections_nb_alloc = MAX(next_map.sections_nb_alloc * 2,
761 16);
762 next_map.sections = g_renew(MemoryRegionSection, next_map.sections,
763 next_map.sections_nb_alloc);
765 next_map.sections[next_map.sections_nb] = *section;
766 memory_region_ref(section->mr);
767 return next_map.sections_nb++;
770 static void phys_section_destroy(MemoryRegion *mr)
772 memory_region_unref(mr);
774 if (mr->subpage) {
775 subpage_t *subpage = container_of(mr, subpage_t, iomem);
776 memory_region_destroy(&subpage->iomem);
777 g_free(subpage);
781 static void phys_sections_free(PhysPageMap *map)
783 while (map->sections_nb > 0) {
784 MemoryRegionSection *section = &map->sections[--map->sections_nb];
785 phys_section_destroy(section->mr);
787 g_free(map->sections);
788 g_free(map->nodes);
789 g_free(map);
792 static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
794 subpage_t *subpage;
795 hwaddr base = section->offset_within_address_space
796 & TARGET_PAGE_MASK;
797 MemoryRegionSection *existing = phys_page_find(d->phys_map, base >> TARGET_PAGE_BITS,
798 next_map.nodes, next_map.sections);
799 MemoryRegionSection subsection = {
800 .offset_within_address_space = base,
801 .size = int128_make64(TARGET_PAGE_SIZE),
803 hwaddr start, end;
805 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
807 if (!(existing->mr->subpage)) {
808 subpage = subpage_init(d->as, base);
809 subsection.mr = &subpage->iomem;
810 phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
811 phys_section_add(&subsection));
812 } else {
813 subpage = container_of(existing->mr, subpage_t, iomem);
815 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
816 end = start + int128_get64(section->size) - 1;
817 subpage_register(subpage, start, end, phys_section_add(section));
821 static void register_multipage(AddressSpaceDispatch *d,
822 MemoryRegionSection *section)
824 hwaddr start_addr = section->offset_within_address_space;
825 uint16_t section_index = phys_section_add(section);
826 uint64_t num_pages = int128_get64(int128_rshift(section->size,
827 TARGET_PAGE_BITS));
829 assert(num_pages);
830 phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
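/* mem_add() splits an incoming MemoryRegionSection into an unaligned head,
 * a run of whole target pages, and an unaligned tail: the head and tail are
 * registered as subpages, the aligned middle via register_multipage(). */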
833 static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
835 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
836 AddressSpaceDispatch *d = as->next_dispatch;
837 MemoryRegionSection now = *section, remain = *section;
838 Int128 page_size = int128_make64(TARGET_PAGE_SIZE);
840 if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
841 uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
842 - now.offset_within_address_space;
844 now.size = int128_min(int128_make64(left), now.size);
845 register_subpage(d, &now);
846 } else {
847 now.size = int128_zero();
849 while (int128_ne(remain.size, now.size)) {
850 remain.size = int128_sub(remain.size, now.size);
851 remain.offset_within_address_space += int128_get64(now.size);
852 remain.offset_within_region += int128_get64(now.size);
853 now = remain;
854 if (int128_lt(remain.size, page_size)) {
855 register_subpage(d, &now);
856 } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
857 now.size = page_size;
858 register_subpage(d, &now);
859 } else {
860 now.size = int128_and(now.size, int128_neg(page_size));
861 register_multipage(d, &now);
866 void qemu_flush_coalesced_mmio_buffer(void)
868 if (kvm_enabled())
869 kvm_flush_coalesced_mmio_buffer();
872 void qemu_mutex_lock_ramlist(void)
874 qemu_mutex_lock(&ram_list.mutex);
877 void qemu_mutex_unlock_ramlist(void)
879 qemu_mutex_unlock(&ram_list.mutex);
882 #ifdef __linux__
884 #include <sys/vfs.h>
886 #define HUGETLBFS_MAGIC 0x958458f6
888 static long gethugepagesize(const char *path)
890 struct statfs fs;
891 int ret;
893 do {
894 ret = statfs(path, &fs);
895 } while (ret != 0 && errno == EINTR);
897 if (ret != 0) {
898 perror(path);
899 return 0;
902 if (fs.f_type != HUGETLBFS_MAGIC)
903 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
905 return fs.f_bsize;
908 static sigjmp_buf sigjump;
910 static void sigbus_handler(int signal)
912 siglongjmp(sigjump, 1);
915 static void *file_ram_alloc(RAMBlock *block,
916 ram_addr_t memory,
917 const char *path)
919 char *filename;
920 char *sanitized_name;
921 char *c;
922 void *area;
923 int fd;
924 unsigned long hpagesize;
926 hpagesize = gethugepagesize(path);
927 if (!hpagesize) {
928 return NULL;
931 if (memory < hpagesize) {
932 return NULL;
935 if (kvm_enabled() && !kvm_has_sync_mmu()) {
936 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
937 return NULL;
940 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
941 sanitized_name = g_strdup(block->mr->name);
942 for (c = sanitized_name; *c != '\0'; c++) {
943 if (*c == '/')
944 *c = '_';
947 filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
948 sanitized_name);
949 g_free(sanitized_name);
951 fd = mkstemp(filename);
952 if (fd < 0) {
953 perror("unable to create backing store for hugepages");
954 g_free(filename);
955 return NULL;
957 unlink(filename);
958 g_free(filename);
960 memory = (memory+hpagesize-1) & ~(hpagesize-1);
963 * ftruncate is not supported by hugetlbfs in older
964 * hosts, so don't bother bailing out on errors.
965 * If anything goes wrong with it under other filesystems,
966 * mmap will fail.
968 if (ftruncate(fd, memory))
969 perror("ftruncate");
971 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
972 if (area == MAP_FAILED) {
973 perror("file_ram_alloc: can't mmap RAM pages");
974 close(fd);
975 return (NULL);
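/* Preallocation path: touch the huge pages up front so the kernel really
 * backs the mapping now.  A SIGBUS (e.g. hugetlbfs running out of pages)
 * longjmps back via sigjump and turns into a clean fatal error instead of
 * a crash at some later guest access. */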
978 if (mem_prealloc) {
979 int ret, i;
980 struct sigaction act, oldact;
981 sigset_t set, oldset;
983 memset(&act, 0, sizeof(act));
984 act.sa_handler = &sigbus_handler;
985 act.sa_flags = 0;
987 ret = sigaction(SIGBUS, &act, &oldact);
988 if (ret) {
989 perror("file_ram_alloc: failed to install signal handler");
990 exit(1);
993 /* unblock SIGBUS */
994 sigemptyset(&set);
995 sigaddset(&set, SIGBUS);
996 pthread_sigmask(SIG_UNBLOCK, &set, &oldset);
998 if (sigsetjmp(sigjump, 1)) {
999 fprintf(stderr, "file_ram_alloc: failed to preallocate pages\n");
1000 exit(1);
1003 /* MAP_POPULATE silently ignores failures */
1004 for (i = 0; i < (memory/hpagesize)-1; i++) {
1005 memset(area + (hpagesize*i), 0, 1);
1008 ret = sigaction(SIGBUS, &oldact, NULL);
1009 if (ret) {
1010 perror("file_ram_alloc: failed to reinstall signal handler");
1011 exit(1);
1014 pthread_sigmask(SIG_SETMASK, &oldset, NULL);
1017 block->fd = fd;
1018 return area;
1020 #else
1021 static void *file_ram_alloc(RAMBlock *block,
1022 ram_addr_t memory,
1023 const char *path)
1025 fprintf(stderr, "-mem-path not supported on this host\n");
1026 exit(1);
1028 #endif
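/* Best-fit search over the RAM block list: for every existing block, find
 * the distance to the next block above it and remember the smallest gap
 * that still fits the requested size. */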
1030 static ram_addr_t find_ram_offset(ram_addr_t size)
1032 RAMBlock *block, *next_block;
1033 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
1035     assert(size != 0); /* it would hand out the same offset multiple times */
1037 if (QTAILQ_EMPTY(&ram_list.blocks))
1038 return 0;
1040 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1041 ram_addr_t end, next = RAM_ADDR_MAX;
1043 end = block->offset + block->length;
1045 QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
1046 if (next_block->offset >= end) {
1047 next = MIN(next, next_block->offset);
1050 if (next - end >= size && next - end < mingap) {
1051 offset = end;
1052 mingap = next - end;
1056 if (offset == RAM_ADDR_MAX) {
1057 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
1058 (uint64_t)size);
1059 abort();
1062 return offset;
1065 ram_addr_t last_ram_offset(void)
1067 RAMBlock *block;
1068 ram_addr_t last = 0;
1070 QTAILQ_FOREACH(block, &ram_list.blocks, next)
1071 last = MAX(last, block->offset + block->length);
1073 return last;
1076 static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1078 int ret;
1080     /* Use MADV_DONTDUMP if the user doesn't want the guest memory in the core dump */
1081 if (!qemu_opt_get_bool(qemu_get_machine_opts(),
1082 "dump-guest-core", true)) {
1083 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1084 if (ret) {
1085 perror("qemu_madvise");
1086 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1087 "but dump_guest_core=off specified\n");
1092 void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
1094 RAMBlock *new_block, *block;
1096 new_block = NULL;
1097 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1098 if (block->offset == addr) {
1099 new_block = block;
1100 break;
1103 assert(new_block);
1104 assert(!new_block->idstr[0]);
1106 if (dev) {
1107 char *id = qdev_get_dev_path(dev);
1108 if (id) {
1109 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
1110 g_free(id);
1113 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1115 /* This assumes the iothread lock is taken here too. */
1116 qemu_mutex_lock_ramlist();
1117 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1118 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
1119 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1120 new_block->idstr);
1121 abort();
1124 qemu_mutex_unlock_ramlist();
1127 static int memory_try_enable_merging(void *addr, size_t len)
1129 if (!qemu_opt_get_bool(qemu_get_machine_opts(), "mem-merge", true)) {
1130 /* disabled by the user */
1131 return 0;
1134 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
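/* RAM block allocation picks one of four backing strategies: a host pointer
 * supplied by the caller (RAM_PREALLOC_MASK), Xen's own allocator, a
 * hugetlbfs-backed file when -mem-path is given, or plain phys_mem_alloc().
 * The resulting block is kept on a list sorted from biggest to smallest. */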
1137 ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
1138 MemoryRegion *mr)
1140 RAMBlock *block, *new_block;
1142 size = TARGET_PAGE_ALIGN(size);
1143 new_block = g_malloc0(sizeof(*new_block));
1144 new_block->fd = -1;
1146 /* This assumes the iothread lock is taken here too. */
1147 qemu_mutex_lock_ramlist();
1148 new_block->mr = mr;
1149 new_block->offset = find_ram_offset(size);
1150 if (host) {
1151 new_block->host = host;
1152 new_block->flags |= RAM_PREALLOC_MASK;
1153 } else if (xen_enabled()) {
1154 if (mem_path) {
1155 fprintf(stderr, "-mem-path not supported with Xen\n");
1156 exit(1);
1158 xen_ram_alloc(new_block->offset, size, mr);
1159 } else {
1160 if (mem_path) {
1161 if (phys_mem_alloc != qemu_anon_ram_alloc) {
1163 * file_ram_alloc() needs to allocate just like
1164 * phys_mem_alloc, but we haven't bothered to provide
1165 * a hook there.
1167 fprintf(stderr,
1168 "-mem-path not supported with this accelerator\n");
1169 exit(1);
1171 new_block->host = file_ram_alloc(new_block, size, mem_path);
1173 if (!new_block->host) {
1174 new_block->host = phys_mem_alloc(size);
1175 if (!new_block->host) {
1176 fprintf(stderr, "Cannot set up guest memory '%s': %s\n",
1177 new_block->mr->name, strerror(errno));
1178 exit(1);
1180 memory_try_enable_merging(new_block->host, size);
1183 new_block->length = size;
1185 /* Keep the list sorted from biggest to smallest block. */
1186 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1187 if (block->length < new_block->length) {
1188 break;
1191 if (block) {
1192 QTAILQ_INSERT_BEFORE(block, new_block, next);
1193 } else {
1194 QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
1196 ram_list.mru_block = NULL;
1198 ram_list.version++;
1199 qemu_mutex_unlock_ramlist();
1201 ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
1202 last_ram_offset() >> TARGET_PAGE_BITS);
1203 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
1204 0, size >> TARGET_PAGE_BITS);
1205 cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);
1207 qemu_ram_setup_dump(new_block->host, size);
1208 qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);
1209 qemu_madvise(new_block->host, size, QEMU_MADV_DONTFORK);
1211 if (kvm_enabled())
1212 kvm_setup_guest_memory(new_block->host, size);
1214 return new_block->offset;
1217 ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
1219 return qemu_ram_alloc_from_ptr(size, NULL, mr);
1222 void qemu_ram_free_from_ptr(ram_addr_t addr)
1224 RAMBlock *block;
1226 /* This assumes the iothread lock is taken here too. */
1227 qemu_mutex_lock_ramlist();
1228 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1229 if (addr == block->offset) {
1230 QTAILQ_REMOVE(&ram_list.blocks, block, next);
1231 ram_list.mru_block = NULL;
1232 ram_list.version++;
1233 g_free(block);
1234 break;
1237 qemu_mutex_unlock_ramlist();
1240 void qemu_ram_free(ram_addr_t addr)
1242 RAMBlock *block;
1244 /* This assumes the iothread lock is taken here too. */
1245 qemu_mutex_lock_ramlist();
1246 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1247 if (addr == block->offset) {
1248 QTAILQ_REMOVE(&ram_list.blocks, block, next);
1249 ram_list.mru_block = NULL;
1250 ram_list.version++;
1251 if (block->flags & RAM_PREALLOC_MASK) {
1253 } else if (xen_enabled()) {
1254 xen_invalidate_map_cache_entry(block->host);
1255 #ifndef _WIN32
1256 } else if (block->fd >= 0) {
1257 munmap(block->host, block->length);
1258 close(block->fd);
1259 #endif
1260 } else {
1261 qemu_anon_ram_free(block->host, block->length);
1263 g_free(block);
1264 break;
1267 qemu_mutex_unlock_ramlist();
1271 #ifndef _WIN32
1272 void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1274 RAMBlock *block;
1275 ram_addr_t offset;
1276 int flags;
1277 void *area, *vaddr;
1279 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1280 offset = addr - block->offset;
1281 if (offset < block->length) {
1282 vaddr = block->host + offset;
1283 if (block->flags & RAM_PREALLOC_MASK) {
1285 } else if (xen_enabled()) {
1286 abort();
1287 } else {
1288 flags = MAP_FIXED;
1289 munmap(vaddr, length);
1290 if (block->fd >= 0) {
1291 #ifdef MAP_POPULATE
1292 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
1293 MAP_PRIVATE;
1294 #else
1295 flags |= MAP_PRIVATE;
1296 #endif
1297 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1298 flags, block->fd, offset);
1299 } else {
1301 * Remap needs to match alloc. Accelerators that
1302 * set phys_mem_alloc never remap. If they did,
1303 * we'd need a remap hook here.
1305 assert(phys_mem_alloc == qemu_anon_ram_alloc);
1307 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1308 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1309 flags, -1, 0);
1311 if (area != vaddr) {
1312 fprintf(stderr, "Could not remap addr: "
1313 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
1314 length, addr);
1315 exit(1);
1317 memory_try_enable_merging(vaddr, length);
1318 qemu_ram_setup_dump(vaddr, length);
1320 return;
1324 #endif /* !_WIN32 */
1326 /* Return a host pointer to ram allocated with qemu_ram_alloc.
1327 With the exception of the softmmu code in this file, this should
1328 only be used for local memory (e.g. video ram) that the device owns,
1329 and knows it isn't going to access beyond the end of the block.
1331 It should not be used for general purpose DMA.
1332 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
1334 void *qemu_get_ram_ptr(ram_addr_t addr)
1336 RAMBlock *block = qemu_get_ram_block(addr);
1338 if (xen_enabled()) {
1339 /* We need to check if the requested address is in the RAM
1340 * because we don't want to map the entire memory in QEMU.
1341 * In that case just map until the end of the page.
1343 if (block->offset == 0) {
1344 return xen_map_cache(addr, 0, 0);
1345 } else if (block->host == NULL) {
1346 block->host =
1347 xen_map_cache(block->offset, block->length, 1);
1350 return block->host + (addr - block->offset);
1353 /* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
1354 * but takes a size argument */
1355 static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
1357 if (*size == 0) {
1358 return NULL;
1360 if (xen_enabled()) {
1361 return xen_map_cache(addr, *size, 1);
1362 } else {
1363 RAMBlock *block;
1365 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1366 if (addr - block->offset < block->length) {
1367 if (addr - block->offset + *size > block->length)
1368 *size = block->length - addr + block->offset;
1369 return block->host + (addr - block->offset);
1373 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1374 abort();
1378 /* Some of the softmmu routines need to translate from a host pointer
1379 (typically a TLB entry) back to a ram offset. */
1380 MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
1382 RAMBlock *block;
1383 uint8_t *host = ptr;
1385 if (xen_enabled()) {
1386 *ram_addr = xen_ram_addr_from_mapcache(ptr);
1387 return qemu_get_ram_block(*ram_addr)->mr;
1390 block = ram_list.mru_block;
1391 if (block && block->host && host - block->host < block->length) {
1392 goto found;
1395 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1396         /* This case appears when the block is not mapped. */
1397 if (block->host == NULL) {
1398 continue;
1400 if (host - block->host < block->length) {
1401 goto found;
1405 return NULL;
1407 found:
1408 *ram_addr = block->offset + (host - block->host);
1409 return block->mr;
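/* io_mem_notdirty backs TLB entries for pages that still contain translated
 * code.  A write first invalidates the affected TBs, is then performed on
 * the underlying RAM, and once the page is fully dirty the TLB entry is
 * reset so later writes can go straight to RAM again. */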
1412 static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
1413 uint64_t val, unsigned size)
1415 int dirty_flags;
1416 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
1417 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
1418 tb_invalidate_phys_page_fast(ram_addr, size);
1419 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
1421 switch (size) {
1422 case 1:
1423 stb_p(qemu_get_ram_ptr(ram_addr), val);
1424 break;
1425 case 2:
1426 stw_p(qemu_get_ram_ptr(ram_addr), val);
1427 break;
1428 case 4:
1429 stl_p(qemu_get_ram_ptr(ram_addr), val);
1430 break;
1431 default:
1432 abort();
1434 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
1435 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
1436 /* we remove the notdirty callback only if the code has been
1437 flushed */
1438 if (dirty_flags == 0xff) {
1439 CPUArchState *env = current_cpu->env_ptr;
1440 tlb_set_dirty(env, env->mem_io_vaddr);
1444 static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
1445 unsigned size, bool is_write)
1447 return is_write;
1450 static const MemoryRegionOps notdirty_mem_ops = {
1451 .write = notdirty_mem_write,
1452 .valid.accepts = notdirty_mem_accepts,
1453 .endianness = DEVICE_NATIVE_ENDIAN,
1456 /* Generate a debug exception if a watchpoint has been hit. */
1457 static void check_watchpoint(int offset, int len_mask, int flags)
1459 CPUArchState *env = current_cpu->env_ptr;
1460 target_ulong pc, cs_base;
1461 target_ulong vaddr;
1462 CPUWatchpoint *wp;
1463 int cpu_flags;
1465 if (env->watchpoint_hit) {
1466 /* We re-entered the check after replacing the TB. Now raise
1467          * the debug interrupt so that it will trigger after the
1468 * current instruction. */
1469 cpu_interrupt(ENV_GET_CPU(env), CPU_INTERRUPT_DEBUG);
1470 return;
1472 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
1473 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1474 if ((vaddr == (wp->vaddr & len_mask) ||
1475 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
1476 wp->flags |= BP_WATCHPOINT_HIT;
1477 if (!env->watchpoint_hit) {
1478 env->watchpoint_hit = wp;
1479 tb_check_watchpoint(env);
1480 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
1481 env->exception_index = EXCP_DEBUG;
1482 cpu_loop_exit(env);
1483 } else {
1484 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
1485 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
1486 cpu_resume_from_signal(env, NULL);
1489 } else {
1490 wp->flags &= ~BP_WATCHPOINT_HIT;
1495 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
1496 so these check for a hit then pass through to the normal out-of-line
1497 phys routines. */
1498 static uint64_t watch_mem_read(void *opaque, hwaddr addr,
1499 unsigned size)
1501 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
1502 switch (size) {
1503 case 1: return ldub_phys(addr);
1504 case 2: return lduw_phys(addr);
1505 case 4: return ldl_phys(addr);
1506 default: abort();
1510 static void watch_mem_write(void *opaque, hwaddr addr,
1511 uint64_t val, unsigned size)
1513 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
1514 switch (size) {
1515 case 1:
1516 stb_phys(addr, val);
1517 break;
1518 case 2:
1519 stw_phys(addr, val);
1520 break;
1521 case 4:
1522 stl_phys(addr, val);
1523 break;
1524 default: abort();
1528 static const MemoryRegionOps watch_mem_ops = {
1529 .read = watch_mem_read,
1530 .write = watch_mem_write,
1531 .endianness = DEVICE_NATIVE_ENDIAN,
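/* Subpage handling: when a target page is covered by more than one
 * MemoryRegionSection, a subpage_t stands in for the whole page.  Its
 * sub_section[] table records which section owns each byte offset, and the
 * read/write ops below simply re-dispatch through the owning address space
 * at base + offset. */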
1534 static uint64_t subpage_read(void *opaque, hwaddr addr,
1535 unsigned len)
1537 subpage_t *subpage = opaque;
1538 uint8_t buf[4];
1540 #if defined(DEBUG_SUBPAGE)
1541 printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
1542 subpage, len, addr);
1543 #endif
1544 address_space_read(subpage->as, addr + subpage->base, buf, len);
1545 switch (len) {
1546 case 1:
1547 return ldub_p(buf);
1548 case 2:
1549 return lduw_p(buf);
1550 case 4:
1551 return ldl_p(buf);
1552 default:
1553 abort();
1557 static void subpage_write(void *opaque, hwaddr addr,
1558 uint64_t value, unsigned len)
1560 subpage_t *subpage = opaque;
1561 uint8_t buf[4];
1563 #if defined(DEBUG_SUBPAGE)
1564 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
1565 " value %"PRIx64"\n",
1566 __func__, subpage, len, addr, value);
1567 #endif
1568 switch (len) {
1569 case 1:
1570 stb_p(buf, value);
1571 break;
1572 case 2:
1573 stw_p(buf, value);
1574 break;
1575 case 4:
1576 stl_p(buf, value);
1577 break;
1578 default:
1579 abort();
1581 address_space_write(subpage->as, addr + subpage->base, buf, len);
1584 static bool subpage_accepts(void *opaque, hwaddr addr,
1585 unsigned len, bool is_write)
1587 subpage_t *subpage = opaque;
1588 #if defined(DEBUG_SUBPAGE)
1589 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
1590 __func__, subpage, is_write ? 'w' : 'r', len, addr);
1591 #endif
1593 return address_space_access_valid(subpage->as, addr + subpage->base,
1594 len, is_write);
1597 static const MemoryRegionOps subpage_ops = {
1598 .read = subpage_read,
1599 .write = subpage_write,
1600 .valid.accepts = subpage_accepts,
1601 .endianness = DEVICE_NATIVE_ENDIAN,
1604 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
1605 uint16_t section)
1607 int idx, eidx;
1609 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
1610 return -1;
1611 idx = SUBPAGE_IDX(start);
1612 eidx = SUBPAGE_IDX(end);
1613 #if defined(DEBUG_SUBPAGE)
1614 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
1615 __func__, mmio, start, end, idx, eidx, section);
1616 #endif
1617 for (; idx <= eidx; idx++) {
1618 mmio->sub_section[idx] = section;
1621 return 0;
1624 static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
1626 subpage_t *mmio;
1628 mmio = g_malloc0(sizeof(subpage_t));
1630 mmio->as = as;
1631 mmio->base = base;
1632 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
1633 "subpage", TARGET_PAGE_SIZE);
1634 mmio->iomem.subpage = true;
1635 #if defined(DEBUG_SUBPAGE)
1636 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
1637 mmio, base, TARGET_PAGE_SIZE);
1638 #endif
1639 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
1641 return mmio;
1644 static uint16_t dummy_section(MemoryRegion *mr)
1646 MemoryRegionSection section = {
1647 .mr = mr,
1648 .offset_within_address_space = 0,
1649 .offset_within_region = 0,
1650 .size = int128_2_64(),
1653 return phys_section_add(&section);
1656 MemoryRegion *iotlb_to_region(hwaddr index)
1658 return address_space_memory.dispatch->sections[index & ~TARGET_PAGE_MASK].mr;
1661 static void io_mem_init(void)
1663 memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, "rom", UINT64_MAX);
1664 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
1665 "unassigned", UINT64_MAX);
1666 memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
1667 "notdirty", UINT64_MAX);
1668 memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
1669 "watch", UINT64_MAX);
1672 static void mem_begin(MemoryListener *listener)
1674 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
1675 AddressSpaceDispatch *d = g_new(AddressSpaceDispatch, 1);
1677 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };
1678 d->as = as;
1679 as->next_dispatch = d;
1682 static void mem_commit(MemoryListener *listener)
1684 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
1685 AddressSpaceDispatch *cur = as->dispatch;
1686 AddressSpaceDispatch *next = as->next_dispatch;
1688 next->nodes = next_map.nodes;
1689 next->sections = next_map.sections;
1691 as->dispatch = next;
1692 g_free(cur);
1695 static void core_begin(MemoryListener *listener)
1697 uint16_t n;
1699 prev_map = g_new(PhysPageMap, 1);
1700 *prev_map = next_map;
1702 memset(&next_map, 0, sizeof(next_map));
1703 n = dummy_section(&io_mem_unassigned);
1704 assert(n == PHYS_SECTION_UNASSIGNED);
1705 n = dummy_section(&io_mem_notdirty);
1706 assert(n == PHYS_SECTION_NOTDIRTY);
1707 n = dummy_section(&io_mem_rom);
1708 assert(n == PHYS_SECTION_ROM);
1709 n = dummy_section(&io_mem_watch);
1710 assert(n == PHYS_SECTION_WATCH);
1713 /* This listener's commit runs after the other AddressSpaceDispatch listeners'.
1714 * All AddressSpaceDispatch instances have switched to the next map.
1716 static void core_commit(MemoryListener *listener)
1718 phys_sections_free(prev_map);
1721 static void tcg_commit(MemoryListener *listener)
1723 CPUState *cpu;
1725 /* since each CPU stores ram addresses in its TLB cache, we must
1726 reset the modified entries */
1727 /* XXX: slow ! */
1728 CPU_FOREACH(cpu) {
1729 CPUArchState *env = cpu->env_ptr;
1731 tlb_flush(env, 1);
1735 static void core_log_global_start(MemoryListener *listener)
1737 cpu_physical_memory_set_dirty_tracking(1);
1740 static void core_log_global_stop(MemoryListener *listener)
1742 cpu_physical_memory_set_dirty_tracking(0);
1745 static MemoryListener core_memory_listener = {
1746 .begin = core_begin,
1747 .commit = core_commit,
1748 .log_global_start = core_log_global_start,
1749 .log_global_stop = core_log_global_stop,
1750 .priority = 1,
1753 static MemoryListener tcg_memory_listener = {
1754 .commit = tcg_commit,
1757 void address_space_init_dispatch(AddressSpace *as)
1759 as->dispatch = NULL;
1760 as->dispatch_listener = (MemoryListener) {
1761 .begin = mem_begin,
1762 .commit = mem_commit,
1763 .region_add = mem_add,
1764 .region_nop = mem_add,
1765 .priority = 0,
1767 memory_listener_register(&as->dispatch_listener, as);
1770 void address_space_destroy_dispatch(AddressSpace *as)
1772 AddressSpaceDispatch *d = as->dispatch;
1774 memory_listener_unregister(&as->dispatch_listener);
1775 g_free(d);
1776 as->dispatch = NULL;
1779 static void memory_map_init(void)
1781 system_memory = g_malloc(sizeof(*system_memory));
1782 memory_region_init(system_memory, NULL, "system", INT64_MAX);
1783 address_space_init(&address_space_memory, system_memory, "memory");
1785 system_io = g_malloc(sizeof(*system_io));
1786 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
1787 65536);
1788 address_space_init(&address_space_io, system_io, "I/O");
1790 memory_listener_register(&core_memory_listener, &address_space_memory);
1791 if (tcg_enabled()) {
1792 memory_listener_register(&tcg_memory_listener, &address_space_memory);
1796 MemoryRegion *get_system_memory(void)
1798 return system_memory;
1801 MemoryRegion *get_system_io(void)
1803 return system_io;
1806 #endif /* !defined(CONFIG_USER_ONLY) */
1808 /* physical memory access (slow version, mainly for debug) */
1809 #if defined(CONFIG_USER_ONLY)
1810 int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
1811 uint8_t *buf, int len, int is_write)
1813 int l, flags;
1814 target_ulong page;
1815 void * p;
1817 while (len > 0) {
1818 page = addr & TARGET_PAGE_MASK;
1819 l = (page + TARGET_PAGE_SIZE) - addr;
1820 if (l > len)
1821 l = len;
1822 flags = page_get_flags(page);
1823 if (!(flags & PAGE_VALID))
1824 return -1;
1825 if (is_write) {
1826 if (!(flags & PAGE_WRITE))
1827 return -1;
1828 /* XXX: this code should not depend on lock_user */
1829 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
1830 return -1;
1831 memcpy(p, buf, l);
1832 unlock_user(p, addr, l);
1833 } else {
1834 if (!(flags & PAGE_READ))
1835 return -1;
1836 /* XXX: this code should not depend on lock_user */
1837 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
1838 return -1;
1839 memcpy(buf, p, l);
1840 unlock_user(p, addr, 0);
1842 len -= l;
1843 buf += l;
1844 addr += l;
1846 return 0;
1849 #else
1851 static void invalidate_and_set_dirty(hwaddr addr,
1852 hwaddr length)
1854 if (!cpu_physical_memory_is_dirty(addr)) {
1855 /* invalidate code */
1856 tb_invalidate_phys_page_range(addr, addr + length, 0);
1857 /* set dirty bit */
1858 cpu_physical_memory_set_dirty_flags(addr, (0xff & ~CODE_DIRTY_FLAG));
1860 xen_modified_memory(addr, length);
1863 static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
1865 if (memory_region_is_ram(mr)) {
1866 return !(is_write && mr->readonly);
1868 if (memory_region_is_romd(mr)) {
1869 return !is_write;
1872 return false;
1875 static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
1877 unsigned access_size_max = mr->ops->valid.max_access_size;
1879 /* Regions are assumed to support 1-4 byte accesses unless
1880 otherwise specified. */
1881 if (access_size_max == 0) {
1882 access_size_max = 4;
1885 /* Bound the maximum access by the alignment of the address. */
1886 if (!mr->ops->impl.unaligned) {
1887 unsigned align_size_max = addr & -addr;
1888 if (align_size_max != 0 && align_size_max < access_size_max) {
1889 access_size_max = align_size_max;
1893 /* Don't attempt accesses larger than the maximum. */
1894 if (l > access_size_max) {
1895 l = access_size_max;
1897 if (l & (l - 1)) {
1898 l = 1 << (qemu_fls(l) - 1);
1901 return l;
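/* Main slow-path copy loop: translate each chunk, then either memcpy to or
 * from host RAM directly, or split the access into 1/2/4/8 byte MMIO
 * transactions sized by memory_access_size(). */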
1904 bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
1905 int len, bool is_write)
1907 hwaddr l;
1908 uint8_t *ptr;
1909 uint64_t val;
1910 hwaddr addr1;
1911 MemoryRegion *mr;
1912 bool error = false;
1914 while (len > 0) {
1915 l = len;
1916 mr = address_space_translate(as, addr, &addr1, &l, is_write);
1918 if (is_write) {
1919 if (!memory_access_is_direct(mr, is_write)) {
1920 l = memory_access_size(mr, l, addr1);
1921 /* XXX: could force current_cpu to NULL to avoid
1922 potential bugs */
1923 switch (l) {
1924 case 8:
1925 /* 64 bit write access */
1926 val = ldq_p(buf);
1927 error |= io_mem_write(mr, addr1, val, 8);
1928 break;
1929 case 4:
1930 /* 32 bit write access */
1931 val = ldl_p(buf);
1932 error |= io_mem_write(mr, addr1, val, 4);
1933 break;
1934 case 2:
1935 /* 16 bit write access */
1936 val = lduw_p(buf);
1937 error |= io_mem_write(mr, addr1, val, 2);
1938 break;
1939 case 1:
1940 /* 8 bit write access */
1941 val = ldub_p(buf);
1942 error |= io_mem_write(mr, addr1, val, 1);
1943 break;
1944 default:
1945 abort();
1947 } else {
1948 addr1 += memory_region_get_ram_addr(mr);
1949 /* RAM case */
1950 ptr = qemu_get_ram_ptr(addr1);
1951 memcpy(ptr, buf, l);
1952 invalidate_and_set_dirty(addr1, l);
1954 } else {
1955 if (!memory_access_is_direct(mr, is_write)) {
1956 /* I/O case */
1957 l = memory_access_size(mr, l, addr1);
1958 switch (l) {
1959 case 8:
1960 /* 64 bit read access */
1961 error |= io_mem_read(mr, addr1, &val, 8);
1962 stq_p(buf, val);
1963 break;
1964 case 4:
1965 /* 32 bit read access */
1966 error |= io_mem_read(mr, addr1, &val, 4);
1967 stl_p(buf, val);
1968 break;
1969 case 2:
1970 /* 16 bit read access */
1971 error |= io_mem_read(mr, addr1, &val, 2);
1972 stw_p(buf, val);
1973 break;
1974 case 1:
1975 /* 8 bit read access */
1976 error |= io_mem_read(mr, addr1, &val, 1);
1977 stb_p(buf, val);
1978 break;
1979 default:
1980 abort();
1982 } else {
1983 /* RAM case */
1984 ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
1985 memcpy(buf, ptr, l);
1988 len -= l;
1989 buf += l;
1990 addr += l;
1993 return error;
1996 bool address_space_write(AddressSpace *as, hwaddr addr,
1997 const uint8_t *buf, int len)
1999 return address_space_rw(as, addr, (uint8_t *)buf, len, true);
2002 bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
2004 return address_space_rw(as, addr, buf, len, false);
2008 void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
2009 int len, int is_write)
2011 address_space_rw(&address_space_memory, addr, buf, len, is_write);
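/* ROM loading and icache flushing share one walker over guest memory:
 * WRITE_DATA copies the ROM contents into RAM/ROMD regions, FLUSH_CACHE
 * flushes the host instruction cache over the same range.  The latter is
 * what cpu_flush_icache_range() uses for KVM/Xen, where the host icache is
 * not kept coherent automatically the way TCG's is. */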
2014 enum write_rom_type {
2015 WRITE_DATA,
2016 FLUSH_CACHE,
2019 static inline void cpu_physical_memory_write_rom_internal(
2020 hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
2022 hwaddr l;
2023 uint8_t *ptr;
2024 hwaddr addr1;
2025 MemoryRegion *mr;
2027 while (len > 0) {
2028 l = len;
2029 mr = address_space_translate(&address_space_memory,
2030 addr, &addr1, &l, true);
2032 if (!(memory_region_is_ram(mr) ||
2033 memory_region_is_romd(mr))) {
2034 /* do nothing */
2035 } else {
2036 addr1 += memory_region_get_ram_addr(mr);
2037 /* ROM/RAM case */
2038 ptr = qemu_get_ram_ptr(addr1);
2039 switch (type) {
2040 case WRITE_DATA:
2041 memcpy(ptr, buf, l);
2042 invalidate_and_set_dirty(addr1, l);
2043 break;
2044 case FLUSH_CACHE:
2045 flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
2046 break;
2049 len -= l;
2050 buf += l;
2051 addr += l;
2055 /* used for ROM loading: can write in RAM and ROM */
2056 void cpu_physical_memory_write_rom(hwaddr addr,
2057 const uint8_t *buf, int len)
2059 cpu_physical_memory_write_rom_internal(addr, buf, len, WRITE_DATA);
2062 void cpu_flush_icache_range(hwaddr start, int len)
2065 * This function should do the same thing as an icache flush that was
2066 * triggered from within the guest. For TCG we are always cache coherent,
2067 * so there is no need to flush anything. For KVM / Xen we need to flush
2068 * the host's instruction cache at least.
2070 if (tcg_enabled()) {
2071 return;
2074 cpu_physical_memory_write_rom_internal(start, NULL, len, FLUSH_CACHE);
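/* Bounce buffer: address_space_map() can only hand out a direct pointer for
 * RAM.  For MMIO it falls back to this single global buffer (at most one
 * mapping at a time, capped at TARGET_PAGE_SIZE); registered map clients
 * are notified from address_space_unmap() once the buffer is free again. */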
2077 typedef struct {
2078 MemoryRegion *mr;
2079 void *buffer;
2080 hwaddr addr;
2081 hwaddr len;
2082 } BounceBuffer;
2084 static BounceBuffer bounce;
2086 typedef struct MapClient {
2087 void *opaque;
2088 void (*callback)(void *opaque);
2089 QLIST_ENTRY(MapClient) link;
2090 } MapClient;
2092 static QLIST_HEAD(map_client_list, MapClient) map_client_list
2093 = QLIST_HEAD_INITIALIZER(map_client_list);
2095 void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
2097 MapClient *client = g_malloc(sizeof(*client));
2099 client->opaque = opaque;
2100 client->callback = callback;
2101 QLIST_INSERT_HEAD(&map_client_list, client, link);
2102 return client;
2105 static void cpu_unregister_map_client(void *_client)
2107 MapClient *client = (MapClient *)_client;
2109 QLIST_REMOVE(client, link);
2110 g_free(client);
2113 static void cpu_notify_map_clients(void)
2115 MapClient *client;
2117 while (!QLIST_EMPTY(&map_client_list)) {
2118 client = QLIST_FIRST(&map_client_list);
2119 client->callback(client->opaque);
2120 cpu_unregister_map_client(client);
2124 bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2126 MemoryRegion *mr;
2127 hwaddr l, xlat;
2129 while (len > 0) {
2130 l = len;
2131 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2132 if (!memory_access_is_direct(mr, is_write)) {
2133 l = memory_access_size(mr, l, addr);
2134 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
2135 return false;
2139 len -= l;
2140 addr += l;
2142 return true;
2145 /* Map a physical memory region into a host virtual address.
2146 * May map a subset of the requested range, given by and returned in *plen.
2147 * May return NULL if resources needed to perform the mapping are exhausted.
2148 * Use only for reads OR writes - not for read-modify-write operations.
2149 * Use cpu_register_map_client() to know when retrying the map operation is
2150 * likely to succeed.
2152 void *address_space_map(AddressSpace *as,
2153 hwaddr addr,
2154 hwaddr *plen,
2155 bool is_write)
2157 hwaddr len = *plen;
2158 hwaddr done = 0;
2159 hwaddr l, xlat, base;
2160 MemoryRegion *mr, *this_mr;
2161 ram_addr_t raddr;
2163 if (len == 0) {
2164 return NULL;
2167 l = len;
2168 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2169 if (!memory_access_is_direct(mr, is_write)) {
2170 if (bounce.buffer) {
2171 return NULL;
2173 /* Avoid unbounded allocations */
2174 l = MIN(l, TARGET_PAGE_SIZE);
2175 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
2176 bounce.addr = addr;
2177 bounce.len = l;
2179 memory_region_ref(mr);
2180 bounce.mr = mr;
2181 if (!is_write) {
2182 address_space_read(as, addr, bounce.buffer, l);
2185 *plen = l;
2186 return bounce.buffer;
2189 base = xlat;
2190 raddr = memory_region_get_ram_addr(mr);
2192 for (;;) {
2193 len -= l;
2194 addr += l;
2195 done += l;
2196 if (len == 0) {
2197 break;
2200 l = len;
2201 this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
2202 if (this_mr != mr || xlat != base + done) {
2203 break;
2207 memory_region_ref(mr);
2208 *plen = done;
2209 return qemu_ram_ptr_length(raddr + base, plen);
/* Unmaps a memory region previously mapped by address_space_map().
 * Will also mark the memory as dirty if is_write == 1. access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         int is_write, hwaddr access_len)
{
    if (buffer != bounce.buffer) {
        MemoryRegion *mr;
        ram_addr_t addr1;

        mr = qemu_ram_addr_from_host(buffer, &addr1);
        assert(mr != NULL);
        if (is_write) {
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                invalidate_and_set_dirty(addr1, l);
                addr1 += l;
                access_len -= l;
            }
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        memory_region_unref(mr);
        return;
    }
    if (is_write) {
        address_space_write(as, bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    memory_region_unref(bounce.mr);
    cpu_notify_map_clients();
}
void *cpu_physical_memory_map(hwaddr addr,
                              hwaddr *plen,
                              int is_write)
{
    return address_space_map(&address_space_memory, addr, plen, is_write);
}

void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                               int is_write, hwaddr access_len)
{
    return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
}
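
/* Illustrative sketch of the map/use/unmap cycle.  A NULL return means the
 * range is not directly addressable RAM and the single bounce buffer is
 * already in use; callers that can wait would register a map client (see
 * cpu_register_map_client() above) and retry from the callback.
 * example_fill_guest_buffer is a hypothetical helper.
 */
int example_fill_guest_buffer(hwaddr gpa, hwaddr want);
int example_fill_guest_buffer(hwaddr gpa, hwaddr want)
{
    hwaddr mapped = want;
    uint8_t *host = cpu_physical_memory_map(gpa, &mapped, true);

    if (!host) {
        return -1;                  /* resources exhausted, retry later */
    }
    /* The mapping may be shorter than requested. */
    memset(host, 0, mapped);
    /* Unmapping with is_write == 1 marks what was actually written as dirty. */
    cpu_physical_memory_unmap(host, mapped, true, mapped);
    return mapped == want ? 0 : -1;
}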
/* warning: addr must be aligned */
static inline uint32_t ldl_phys_internal(hwaddr addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;

    mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                 false);
    if (l < 4 || !memory_access_is_direct(mr, false)) {
        /* I/O case */
        io_mem_read(mr, addr1, &val, 4);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t ldl_phys(hwaddr addr)
{
    return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t ldl_le_phys(hwaddr addr)
{
    return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t ldl_be_phys(hwaddr addr)
{
    return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
}
/* warning: addr must be aligned */
static inline uint64_t ldq_phys_internal(hwaddr addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 8;
    hwaddr addr1;

    mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                 false);
    if (l < 8 || !memory_access_is_direct(mr, false)) {
        /* I/O case */
        io_mem_read(mr, addr1, &val, 8);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap64(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap64(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
    }
    return val;
}

uint64_t ldq_phys(hwaddr addr)
{
    return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint64_t ldq_le_phys(hwaddr addr)
{
    return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint64_t ldq_be_phys(hwaddr addr)
{
    return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
}
/* XXX: optimize */
uint32_t ldub_phys(hwaddr addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}
/* warning: addr must be aligned */
static inline uint32_t lduw_phys_internal(hwaddr addr,
                                          enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;

    mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                 false);
    if (l < 2 || !memory_access_is_direct(mr, false)) {
        /* I/O case */
        io_mem_read(mr, addr1, &val, 2);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t lduw_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t lduw_le_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t lduw_be_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
}
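
/* Illustrative sketch: the _le/_be variants hide both host and target byte
 * order, so device emulation can read guest structures with a fixed layout.
 * example_read_desc_flags and the field offset are hypothetical.
 */
uint32_t example_read_desc_flags(hwaddr desc_base);
uint32_t example_read_desc_flags(hwaddr desc_base)
{
    /* 16-bit flags field stored little-endian at offset 4 of a
     * hypothetical descriptor. */
    return lduw_le_phys(desc_base + 4);
}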
/* warning: addr must be aligned. The RAM page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(hwaddr addr, uint32_t val)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;

    mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        io_mem_write(mr, addr1, val, 4);
    } else {
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}
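
/* Illustrative sketch of the intended use: target MMU helpers update page
 * table entries in guest RAM with stl_phys_notdirty() so that setting an
 * accessed/dirty bit does not force retranslation of code on that page.
 * example_set_pte_accessed and its bit argument are hypothetical.
 */
void example_set_pte_accessed(hwaddr pte_addr, uint32_t accessed_bit);
void example_set_pte_accessed(hwaddr pte_addr, uint32_t accessed_bit)
{
    uint32_t pte = ldl_phys(pte_addr);

    if (!(pte & accessed_bit)) {
        stl_phys_notdirty(pte_addr, pte | accessed_bit);
    }
}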
/* warning: addr must be aligned */
static inline void stl_phys_internal(hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;

    mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        io_mem_write(mr, addr1, val, 4);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 4);
    }
}

void stl_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stl_le_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stl_be_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}
/* XXX: optimize */
void stb_phys(hwaddr addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}
/* warning: addr must be aligned */
static inline void stw_phys_internal(hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;

    mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                 true);
    if (l < 2 || !memory_access_is_direct(mr, true)) {
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        io_mem_write(mr, addr1, val, 2);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 2);
    }
}

void stw_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stw_le_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stw_be_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}
/* XXX: optimize */
void stq_phys(hwaddr addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_le_phys(hwaddr addr, uint64_t val)
{
    val = cpu_to_le64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_be_phys(hwaddr addr, uint64_t val)
{
    val = cpu_to_be64(val);
    cpu_physical_memory_write(addr, &val, 8);
}
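
/* Illustrative sketch: writing a guest-visible record with an explicit byte
 * order, so the layout does not depend on the host or on
 * TARGET_WORDS_BIGENDIAN.  example_write_be_record and its layout are
 * hypothetical.
 */
void example_write_be_record(hwaddr base, uint64_t id, uint32_t status);
void example_write_be_record(hwaddr base, uint64_t id, uint32_t status)
{
    stq_be_phys(base, id);           /* 8-byte big-endian field */
    stl_be_phys(base + 8, status);   /* 4-byte big-endian field */
    stb_phys(base + 12, 1);          /* single "valid" byte, no byte order */
}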
/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(cpu, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
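
/* Illustrative sketch: how a debugger-style caller (gdb stub, monitor) reads
 * guest *virtual* memory; translation goes through cpu_get_phys_page_debug()
 * and writes land even in ROM.  example_debug_read_u32 is a hypothetical
 * helper.
 */
int example_debug_read_u32(CPUState *cpu, target_ulong vaddr, uint32_t *out);
int example_debug_read_u32(CPUState *cpu, target_ulong vaddr, uint32_t *out)
{
    uint8_t buf[4];

    if (cpu_memory_rw_debug(cpu, vaddr, buf, sizeof(buf), 0) < 0) {
        return -1;                /* page not mapped in the guest MMU */
    }
    *out = ldl_p(buf);            /* interpret in target byte order */
    return 0;
}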
#endif
#if !defined(CONFIG_USER_ONLY)

/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool virtio_is_big_endian(void);
bool virtio_is_big_endian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}

#endif
#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegion *mr;
    hwaddr l = 1;

    mr = address_space_translate(&address_space_memory,
                                 phys_addr, &phys_addr, &l, false);

    return !(memory_region_is_ram(mr) ||
             memory_region_is_romd(mr));
}
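
/* Illustrative sketch: debug or dump code can use this check to avoid
 * reading through device (MMIO) regions with side effects.
 * example_read_if_ram is a hypothetical helper.
 */
uint32_t example_read_if_ram(hwaddr gpa);
uint32_t example_read_if_ram(hwaddr gpa)
{
    if (cpu_physical_memory_is_io(gpa)) {
        return 0;   /* don't touch device registers from debug code */
    }
    return ldl_phys(gpa);
}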
void qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
{
    RAMBlock *block;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        func(block->host, block->offset, block->length, opaque);
    }
}
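
/* Illustrative sketch: a RAMBlockIterFunc callback plus a caller that sums
 * the size of every RAM block.  The example_* names are hypothetical.
 */
static void example_count_ram(void *host, ram_addr_t offset,
                              ram_addr_t length, void *opaque)
{
    uint64_t *total = opaque;

    *total += length;
}

uint64_t example_total_ram_bytes(void);
uint64_t example_total_ram_bytes(void)
{
    uint64_t total = 0;

    qemu_ram_foreach_block(example_count_ram, &total);
    return total;
}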
#endif