[qemu/ar7.git] / exec.c
1 /*
2 * Virtual page mapping
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 #include "config.h"
20 #ifdef _WIN32
21 #include <windows.h>
22 #else
23 #include <sys/types.h>
24 #include <sys/mman.h>
25 #endif
27 #include "qemu-common.h"
28 #include "cpu.h"
29 #include "tcg.h"
30 #include "hw/hw.h"
31 #include "hw/qdev.h"
32 #include "qemu/osdep.h"
33 #include "sysemu/kvm.h"
34 #include "sysemu/sysemu.h"
35 #include "hw/xen/xen.h"
36 #include "qemu/timer.h"
37 #include "qemu/config-file.h"
38 #include "exec/memory.h"
39 #include "sysemu/dma.h"
40 #include "exec/address-spaces.h"
41 #if defined(CONFIG_USER_ONLY)
42 #include <qemu.h>
43 #else /* !CONFIG_USER_ONLY */
44 #include "sysemu/xen-mapcache.h"
45 #include "trace.h"
46 #endif
47 #include "exec/cpu-all.h"
49 #include "exec/cputlb.h"
50 #include "translate-all.h"
52 #include "exec/memory-internal.h"
54 #include "qemu/range.h"
56 //#define DEBUG_SUBPAGE
58 #if !defined(CONFIG_USER_ONLY)
59 static int in_migration;
61 RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };
63 static MemoryRegion *system_memory;
64 static MemoryRegion *system_io;
66 AddressSpace address_space_io;
67 AddressSpace address_space_memory;
69 MemoryRegion io_mem_rom, io_mem_notdirty;
70 static MemoryRegion io_mem_unassigned;
72 #endif
74 struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
75 /* current CPU in the current thread. It is only valid inside
76 cpu_exec() */
77 DEFINE_TLS(CPUState *, current_cpu);
78 /* 0 = Do not count executed instructions.
79 1 = Precise instruction counting.
80 2 = Adaptive rate instruction counting. */
81 int use_icount;
83 #if !defined(CONFIG_USER_ONLY)
85 typedef struct PhysPageEntry PhysPageEntry;
87 struct PhysPageEntry {
88 /* How many bits to skip to the next level (in units of L2_SIZE). 0 for a leaf. */
89 uint32_t skip : 6;
90 /* index into phys_sections (!skip) or phys_map_nodes (skip) */
91 uint32_t ptr : 26;
94 #define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)
96 /* Size of the L2 (and L3, etc) page tables. */
97 #define ADDR_SPACE_BITS 64
99 #define P_L2_BITS 9
100 #define P_L2_SIZE (1 << P_L2_BITS)
102 #define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
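/* Illustration (assuming TARGET_PAGE_BITS == 12, i.e. 4 KiB target pages):
 * P_L2_LEVELS = ((64 - 12 - 1) / 9) + 1 = 6, so a lookup walks at most six
 * levels, each indexed by 9 bits of the physical page number into a table of
 * P_L2_SIZE == 512 PhysPageEntry slots. A different TARGET_PAGE_BITS changes
 * the level count accordingly. */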
104 typedef PhysPageEntry Node[P_L2_SIZE];
106 typedef struct PhysPageMap {
107 unsigned sections_nb;
108 unsigned sections_nb_alloc;
109 unsigned nodes_nb;
110 unsigned nodes_nb_alloc;
111 Node *nodes;
112 MemoryRegionSection *sections;
113 } PhysPageMap;
115 struct AddressSpaceDispatch {
116 /* This is a multi-level map on the physical address space.
117 * The bottom level has pointers to MemoryRegionSections.
119 PhysPageEntry phys_map;
120 PhysPageMap map;
121 AddressSpace *as;
124 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
125 typedef struct subpage_t {
126 MemoryRegion iomem;
127 AddressSpace *as;
128 hwaddr base;
129 uint16_t sub_section[TARGET_PAGE_SIZE];
130 } subpage_t;
132 #define PHYS_SECTION_UNASSIGNED 0
133 #define PHYS_SECTION_NOTDIRTY 1
134 #define PHYS_SECTION_ROM 2
135 #define PHYS_SECTION_WATCH 3
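/* These fixed indices rely on mem_begin() below registering the dummy
 * sections in exactly this order (unassigned, notdirty, rom, watch); the
 * asserts there check that the numbering stays in sync. */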
137 static void io_mem_init(void);
138 static void memory_map_init(void);
140 static MemoryRegion io_mem_watch;
141 #endif
143 #if !defined(CONFIG_USER_ONLY)
145 static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
147 if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
148 map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
149 map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
150 map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
154 static uint32_t phys_map_node_alloc(PhysPageMap *map)
156 unsigned i;
157 uint32_t ret;
159 ret = map->nodes_nb++;
160 assert(ret != PHYS_MAP_NODE_NIL);
161 assert(ret != map->nodes_nb_alloc);
162 for (i = 0; i < P_L2_SIZE; ++i) {
163 map->nodes[ret][i].skip = 1;
164 map->nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
166 return ret;
169 static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
170 hwaddr *index, hwaddr *nb, uint16_t leaf,
171 int level)
173 PhysPageEntry *p;
174 int i;
175 hwaddr step = (hwaddr)1 << (level * P_L2_BITS);
177 if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
178 lp->ptr = phys_map_node_alloc(map);
179 p = map->nodes[lp->ptr];
180 if (level == 0) {
181 for (i = 0; i < P_L2_SIZE; i++) {
182 p[i].skip = 0;
183 p[i].ptr = PHYS_SECTION_UNASSIGNED;
186 } else {
187 p = map->nodes[lp->ptr];
189 lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];
191 while (*nb && lp < &p[P_L2_SIZE]) {
192 if ((*index & (step - 1)) == 0 && *nb >= step) {
193 lp->skip = 0;
194 lp->ptr = leaf;
195 *index += step;
196 *nb -= step;
197 } else {
198 phys_page_set_level(map, lp, index, nb, leaf, level - 1);
200 ++lp;
204 static void phys_page_set(AddressSpaceDispatch *d,
205 hwaddr index, hwaddr nb,
206 uint16_t leaf)
208 /* Wildly overreserve - it doesn't matter much. */
209 phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);
211 phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
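/* Usage sketch (values illustrative): register_multipage() below installs a
 * section covering N whole pages starting at guest physical address "base"
 * roughly as
 *     phys_page_set(d, base >> TARGET_PAGE_BITS, N, section_index);
 * phys_page_set_level() then fills leaves top-down, taking the large-step
 * branch whenever an aligned run of "step" pages can share one entry. */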
214 /* Compact a non-leaf page entry: detect that the entry has a single child,
215 * and update our entry so we can skip it and go directly to the destination.
217 static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
219 unsigned valid_ptr = P_L2_SIZE;
220 int valid = 0;
221 PhysPageEntry *p;
222 int i;
224 if (lp->ptr == PHYS_MAP_NODE_NIL) {
225 return;
228 p = nodes[lp->ptr];
229 for (i = 0; i < P_L2_SIZE; i++) {
230 if (p[i].ptr == PHYS_MAP_NODE_NIL) {
231 continue;
234 valid_ptr = i;
235 valid++;
236 if (p[i].skip) {
237 phys_page_compact(&p[i], nodes, compacted);
241 /* We can only compress if there's only one child. */
242 if (valid != 1) {
243 return;
246 assert(valid_ptr < P_L2_SIZE);
248 /* Don't compress if it won't fit in the # of bits we have. */
249 if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
250 return;
253 lp->ptr = p[valid_ptr].ptr;
254 if (!p[valid_ptr].skip) {
255 /* If our only child is a leaf, make this a leaf. */
256 /* By design, we should have made this node a leaf to begin with so we
257 * should never reach here.
258 * But since it's so simple to handle this, let's do it just in case we
259 * change this rule.
261 lp->skip = 0;
262 } else {
263 lp->skip += p[valid_ptr].skip;
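/* Example of the effect (illustrative skip values): a node with skip == 1
 * whose only child has skip == 2 takes over the child's ptr and ends up with
 * skip == 3, so phys_page_find() below subtracts 3 from its level counter and
 * drops straight past the intermediate table in one step. */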
267 static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
269 DECLARE_BITMAP(compacted, nodes_nb);
271 if (d->phys_map.skip) {
272 phys_page_compact(&d->phys_map, d->map.nodes, compacted);
276 static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
277 Node *nodes, MemoryRegionSection *sections)
279 PhysPageEntry *p;
280 hwaddr index = addr >> TARGET_PAGE_BITS;
281 int i;
283 for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
284 if (lp.ptr == PHYS_MAP_NODE_NIL) {
285 return &sections[PHYS_SECTION_UNASSIGNED];
287 p = nodes[lp.ptr];
288 lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
291 if (sections[lp.ptr].size.hi ||
292 range_covers_byte(sections[lp.ptr].offset_within_address_space,
293 sections[lp.ptr].size.lo, addr)) {
294 return &sections[lp.ptr];
295 } else {
296 return &sections[PHYS_SECTION_UNASSIGNED];
300 bool memory_region_is_unassigned(MemoryRegion *mr)
302 return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
303 && mr != &io_mem_watch;
306 static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
307 hwaddr addr,
308 bool resolve_subpage)
310 MemoryRegionSection *section;
311 subpage_t *subpage;
313 section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
314 if (resolve_subpage && section->mr->subpage) {
315 subpage = container_of(section->mr, subpage_t, iomem);
316 section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
318 return section;
321 static MemoryRegionSection *
322 address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
323 hwaddr *plen, bool resolve_subpage)
325 MemoryRegionSection *section;
326 Int128 diff;
328 section = address_space_lookup_region(d, addr, resolve_subpage);
329 /* Compute offset within MemoryRegionSection */
330 addr -= section->offset_within_address_space;
332 /* Compute offset within MemoryRegion */
333 *xlat = addr + section->offset_within_region;
335 diff = int128_sub(section->mr->size, int128_make64(addr));
336 *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
337 return section;
340 MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
341 hwaddr *xlat, hwaddr *plen,
342 bool is_write)
344 IOMMUTLBEntry iotlb;
345 MemoryRegionSection *section;
346 MemoryRegion *mr;
347 hwaddr len = *plen;
349 for (;;) {
350 section = address_space_translate_internal(as->dispatch, addr, &addr, plen, true);
351 mr = section->mr;
353 if (!mr->iommu_ops) {
354 break;
357 iotlb = mr->iommu_ops->translate(mr, addr);
358 addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
359 | (addr & iotlb.addr_mask));
360 len = MIN(len, (addr | iotlb.addr_mask) - addr + 1);
361 if (!(iotlb.perm & (1 << is_write))) {
362 mr = &io_mem_unassigned;
363 break;
366 as = iotlb.target_as;
369 *plen = len;
370 *xlat = addr;
371 return mr;
374 MemoryRegionSection *
375 address_space_translate_for_iotlb(AddressSpace *as, hwaddr addr, hwaddr *xlat,
376 hwaddr *plen)
378 MemoryRegionSection *section;
379 section = address_space_translate_internal(as->dispatch, addr, xlat, plen, false);
381 assert(!section->mr->iommu_ops);
382 return section;
384 #endif
386 void cpu_exec_init_all(void)
388 #if !defined(CONFIG_USER_ONLY)
389 qemu_mutex_init(&ram_list.mutex);
390 memory_map_init();
391 io_mem_init();
392 #endif
395 #if !defined(CONFIG_USER_ONLY)
397 static int cpu_common_post_load(void *opaque, int version_id)
399 CPUState *cpu = opaque;
401 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
402 version_id is increased. */
403 cpu->interrupt_request &= ~0x01;
404 tlb_flush(cpu->env_ptr, 1);
406 return 0;
409 const VMStateDescription vmstate_cpu_common = {
410 .name = "cpu_common",
411 .version_id = 1,
412 .minimum_version_id = 1,
413 .minimum_version_id_old = 1,
414 .post_load = cpu_common_post_load,
415 .fields = (VMStateField []) {
416 VMSTATE_UINT32(halted, CPUState),
417 VMSTATE_UINT32(interrupt_request, CPUState),
418 VMSTATE_END_OF_LIST()
422 #endif
424 CPUState *qemu_get_cpu(int index)
426 CPUState *cpu;
428 CPU_FOREACH(cpu) {
429 if (cpu->cpu_index == index) {
430 return cpu;
434 return NULL;
437 void cpu_exec_init(CPUArchState *env)
439 CPUState *cpu = ENV_GET_CPU(env);
440 CPUClass *cc = CPU_GET_CLASS(cpu);
441 CPUState *some_cpu;
442 int cpu_index;
444 #if defined(CONFIG_USER_ONLY)
445 cpu_list_lock();
446 #endif
447 cpu_index = 0;
448 CPU_FOREACH(some_cpu) {
449 cpu_index++;
451 cpu->cpu_index = cpu_index;
452 cpu->numa_node = 0;
453 QTAILQ_INIT(&env->breakpoints);
454 QTAILQ_INIT(&env->watchpoints);
455 #ifndef CONFIG_USER_ONLY
456 cpu->thread_id = qemu_get_thread_id();
457 #endif
458 QTAILQ_INSERT_TAIL(&cpus, cpu, node);
459 #if defined(CONFIG_USER_ONLY)
460 cpu_list_unlock();
461 #endif
462 if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
463 vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
465 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
466 register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
467 cpu_save, cpu_load, env);
468 assert(cc->vmsd == NULL);
469 assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
470 #endif
471 if (cc->vmsd != NULL) {
472 vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
476 #if defined(TARGET_HAS_ICE)
477 #if defined(CONFIG_USER_ONLY)
478 static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
480 tb_invalidate_phys_page_range(pc, pc + 1, 0);
482 #else
483 static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
485 hwaddr phys = cpu_get_phys_page_debug(cpu, pc);
486 if (phys != -1) {
487 tb_invalidate_phys_addr(phys | (pc & ~TARGET_PAGE_MASK));
490 #endif
491 #endif /* TARGET_HAS_ICE */
493 #if defined(CONFIG_USER_ONLY)
494 void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
499 int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
500 int flags, CPUWatchpoint **watchpoint)
502 return -ENOSYS;
504 #else
505 /* Add a watchpoint. */
506 int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
507 int flags, CPUWatchpoint **watchpoint)
509 target_ulong len_mask = ~(len - 1);
510 CPUWatchpoint *wp;
512 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
513 if ((len & (len - 1)) || (addr & ~len_mask) ||
514 len == 0 || len > TARGET_PAGE_SIZE) {
515 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
516 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
517 return -EINVAL;
519 wp = g_malloc(sizeof(*wp));
521 wp->vaddr = addr;
522 wp->len_mask = len_mask;
523 wp->flags = flags;
525 /* keep all GDB-injected watchpoints in front */
526 if (flags & BP_GDB)
527 QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
528 else
529 QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
531 tlb_flush_page(env, addr);
533 if (watchpoint)
534 *watchpoint = wp;
535 return 0;
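/* Illustration (hypothetical values): a 4-byte watchpoint at a 4-aligned
 * address gets len_mask == ~(target_ulong)3, so check_watchpoint() below
 * reports a hit whenever the accessed address falls inside that aligned
 * 4-byte window, i.e. when (vaddr & wp->len_mask) == wp->vaddr. */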
538 /* Remove a specific watchpoint. */
539 int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
540 int flags)
542 target_ulong len_mask = ~(len - 1);
543 CPUWatchpoint *wp;
545 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
546 if (addr == wp->vaddr && len_mask == wp->len_mask
547 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
548 cpu_watchpoint_remove_by_ref(env, wp);
549 return 0;
552 return -ENOENT;
555 /* Remove a specific watchpoint by reference. */
556 void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
558 QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
560 tlb_flush_page(env, watchpoint->vaddr);
562 g_free(watchpoint);
565 /* Remove all matching watchpoints. */
566 void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
568 CPUWatchpoint *wp, *next;
570 QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
571 if (wp->flags & mask)
572 cpu_watchpoint_remove_by_ref(env, wp);
575 #endif
577 /* Add a breakpoint. */
578 int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
579 CPUBreakpoint **breakpoint)
581 #if defined(TARGET_HAS_ICE)
582 CPUBreakpoint *bp;
584 bp = g_malloc(sizeof(*bp));
586 bp->pc = pc;
587 bp->flags = flags;
589 /* keep all GDB-injected breakpoints in front */
590 if (flags & BP_GDB) {
591 QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
592 } else {
593 QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
596 breakpoint_invalidate(ENV_GET_CPU(env), pc);
598 if (breakpoint) {
599 *breakpoint = bp;
601 return 0;
602 #else
603 return -ENOSYS;
604 #endif
607 /* Remove a specific breakpoint. */
608 int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
610 #if defined(TARGET_HAS_ICE)
611 CPUBreakpoint *bp;
613 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
614 if (bp->pc == pc && bp->flags == flags) {
615 cpu_breakpoint_remove_by_ref(env, bp);
616 return 0;
619 return -ENOENT;
620 #else
621 return -ENOSYS;
622 #endif
625 /* Remove a specific breakpoint by reference. */
626 void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
628 #if defined(TARGET_HAS_ICE)
629 QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
631 breakpoint_invalidate(ENV_GET_CPU(env), breakpoint->pc);
633 g_free(breakpoint);
634 #endif
637 /* Remove all matching breakpoints. */
638 void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
640 #if defined(TARGET_HAS_ICE)
641 CPUBreakpoint *bp, *next;
643 QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
644 if (bp->flags & mask)
645 cpu_breakpoint_remove_by_ref(env, bp);
647 #endif
650 /* enable or disable single step mode. EXCP_DEBUG is returned by the
651 CPU loop after each instruction */
652 void cpu_single_step(CPUState *cpu, int enabled)
654 #if defined(TARGET_HAS_ICE)
655 if (cpu->singlestep_enabled != enabled) {
656 cpu->singlestep_enabled = enabled;
657 if (kvm_enabled()) {
658 kvm_update_guest_debug(cpu, 0);
659 } else {
660 /* must flush all the translated code to avoid inconsistencies */
661 /* XXX: only flush what is necessary */
662 CPUArchState *env = cpu->env_ptr;
663 tb_flush(env);
666 #endif
669 void cpu_abort(CPUArchState *env, const char *fmt, ...)
671 CPUState *cpu = ENV_GET_CPU(env);
672 va_list ap;
673 va_list ap2;
675 va_start(ap, fmt);
676 va_copy(ap2, ap);
677 fprintf(stderr, "qemu: fatal: ");
678 vfprintf(stderr, fmt, ap);
679 fprintf(stderr, "\n");
680 cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
681 if (qemu_log_enabled()) {
682 qemu_log("qemu: fatal: ");
683 qemu_log_vprintf(fmt, ap2);
684 qemu_log("\n");
685 log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
686 qemu_log_flush();
687 qemu_log_close();
689 va_end(ap2);
690 va_end(ap);
691 #if defined(CONFIG_USER_ONLY)
693 struct sigaction act;
694 sigfillset(&act.sa_mask);
695 act.sa_handler = SIG_DFL;
696 sigaction(SIGABRT, &act, NULL);
698 #endif
699 abort();
702 #if !defined(CONFIG_USER_ONLY)
703 static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
705 RAMBlock *block;
707 /* The list is protected by the iothread lock here. */
708 block = ram_list.mru_block;
709 if (block && addr - block->offset < block->length) {
710 goto found;
712 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
713 if (addr - block->offset < block->length) {
714 goto found;
718 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
719 abort();
721 found:
722 ram_list.mru_block = block;
723 return block;
726 static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
727 uintptr_t length)
729 RAMBlock *block;
730 ram_addr_t start1;
732 block = qemu_get_ram_block(start);
733 assert(block == qemu_get_ram_block(end - 1));
734 start1 = (uintptr_t)block->host + (start - block->offset);
735 cpu_tlb_reset_dirty_all(start1, length);
738 /* Note: start and end must be within the same ram block. */
739 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
740 int dirty_flags)
742 uintptr_t length;
744 start &= TARGET_PAGE_MASK;
745 end = TARGET_PAGE_ALIGN(end);
747 length = end - start;
748 if (length == 0)
749 return;
750 cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
752 if (tcg_enabled()) {
753 tlb_reset_dirty_range_all(start, end, length);
757 static int cpu_physical_memory_set_dirty_tracking(int enable)
759 int ret = 0;
760 in_migration = enable;
761 return ret;
764 hwaddr memory_region_section_get_iotlb(CPUArchState *env,
765 MemoryRegionSection *section,
766 target_ulong vaddr,
767 hwaddr paddr, hwaddr xlat,
768 int prot,
769 target_ulong *address)
771 hwaddr iotlb;
772 CPUWatchpoint *wp;
774 if (memory_region_is_ram(section->mr)) {
775 /* Normal RAM. */
776 iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
777 + xlat;
778 if (!section->readonly) {
779 iotlb |= PHYS_SECTION_NOTDIRTY;
780 } else {
781 iotlb |= PHYS_SECTION_ROM;
783 } else {
784 iotlb = section - address_space_memory.dispatch->map.sections;
785 iotlb += xlat;
788 /* Make accesses to pages with watchpoints go via the
789 watchpoint trap routines. */
790 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
791 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
792 /* Avoid trapping reads of pages with a write breakpoint. */
793 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
794 iotlb = PHYS_SECTION_WATCH + paddr;
795 *address |= TLB_MMIO;
796 break;
801 return iotlb;
803 #endif /* defined(CONFIG_USER_ONLY) */
805 #if !defined(CONFIG_USER_ONLY)
807 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
808 uint16_t section);
809 static subpage_t *subpage_init(AddressSpace *as, hwaddr base);
811 static void *(*phys_mem_alloc)(size_t size) = qemu_anon_ram_alloc;
814  * Set a custom physical guest memory allocator.
815 * Accelerators with unusual needs may need this. Hopefully, we can
816 * get rid of it eventually.
818 void phys_mem_set_alloc(void *(*alloc)(size_t))
820 phys_mem_alloc = alloc;
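/* Minimal usage sketch (hypothetical accelerator code, names invented):
 *
 *     static void *my_ram_alloc(size_t size)
 *     {
 *         return my_accel_alloc_aligned(size);   // hypothetical helper
 *     }
 *     ...
 *     phys_mem_set_alloc(my_ram_alloc);
 *
 * The hook needs to be installed before RAM blocks are created, since
 * qemu_ram_alloc_from_ptr() below calls phys_mem_alloc(size) directly and
 * qemu_ram_remap() expects remapping to match the original allocation. */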
823 static uint16_t phys_section_add(PhysPageMap *map,
824 MemoryRegionSection *section)
826 /* The physical section number is ORed with a page-aligned
827 * pointer to produce the iotlb entries. Thus it should
828 * never overflow into the page-aligned value.
830 assert(map->sections_nb < TARGET_PAGE_SIZE);
832 if (map->sections_nb == map->sections_nb_alloc) {
833 map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
834 map->sections = g_renew(MemoryRegionSection, map->sections,
835 map->sections_nb_alloc);
837 map->sections[map->sections_nb] = *section;
838 memory_region_ref(section->mr);
839 return map->sections_nb++;
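/* The returned index ends up in the low bits of iotlb entries (see
 * memory_region_section_get_iotlb() above); iotlb_to_region() below recovers
 * the MemoryRegion with "index & ~TARGET_PAGE_MASK", which is why the assert
 * above keeps sections_nb below TARGET_PAGE_SIZE. */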
842 static void phys_section_destroy(MemoryRegion *mr)
844 memory_region_unref(mr);
846 if (mr->subpage) {
847 subpage_t *subpage = container_of(mr, subpage_t, iomem);
848 memory_region_destroy(&subpage->iomem);
849 g_free(subpage);
853 static void phys_sections_free(PhysPageMap *map)
855 while (map->sections_nb > 0) {
856 MemoryRegionSection *section = &map->sections[--map->sections_nb];
857 phys_section_destroy(section->mr);
859 g_free(map->sections);
860 g_free(map->nodes);
863 static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
865 subpage_t *subpage;
866 hwaddr base = section->offset_within_address_space
867 & TARGET_PAGE_MASK;
868 MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
869 d->map.nodes, d->map.sections);
870 MemoryRegionSection subsection = {
871 .offset_within_address_space = base,
872 .size = int128_make64(TARGET_PAGE_SIZE),
874 hwaddr start, end;
876 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
878 if (!(existing->mr->subpage)) {
879 subpage = subpage_init(d->as, base);
880 subsection.mr = &subpage->iomem;
881 phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
882 phys_section_add(&d->map, &subsection));
883 } else {
884 subpage = container_of(existing->mr, subpage_t, iomem);
886 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
887 end = start + int128_get64(section->size) - 1;
888 subpage_register(subpage, start, end,
889 phys_section_add(&d->map, section));
893 static void register_multipage(AddressSpaceDispatch *d,
894 MemoryRegionSection *section)
896 hwaddr start_addr = section->offset_within_address_space;
897 uint16_t section_index = phys_section_add(&d->map, section);
898 uint64_t num_pages = int128_get64(int128_rshift(section->size,
899 TARGET_PAGE_BITS));
901 assert(num_pages);
902 phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
905 static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
907 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
908 AddressSpaceDispatch *d = as->next_dispatch;
909 MemoryRegionSection now = *section, remain = *section;
910 Int128 page_size = int128_make64(TARGET_PAGE_SIZE);
912 if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
913 uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
914 - now.offset_within_address_space;
916 now.size = int128_min(int128_make64(left), now.size);
917 register_subpage(d, &now);
918 } else {
919 now.size = int128_zero();
921 while (int128_ne(remain.size, now.size)) {
922 remain.size = int128_sub(remain.size, now.size);
923 remain.offset_within_address_space += int128_get64(now.size);
924 remain.offset_within_region += int128_get64(now.size);
925 now = remain;
926 if (int128_lt(remain.size, page_size)) {
927 register_subpage(d, &now);
928 } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
929 now.size = page_size;
930 register_subpage(d, &now);
931 } else {
932 now.size = int128_and(now.size, int128_neg(page_size));
933 register_multipage(d, &now);
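/* Illustrative split, assuming 4 KiB target pages (values made up): a section
 * starting 0x800 bytes into a page with size 0x2c00 is registered as a
 * 0x800-byte subpage head, then a 0x2000-byte (two-page) multipage run, then
 * a 0x400-byte subpage tail. */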
938 void qemu_flush_coalesced_mmio_buffer(void)
940 if (kvm_enabled())
941 kvm_flush_coalesced_mmio_buffer();
944 void qemu_mutex_lock_ramlist(void)
946 qemu_mutex_lock(&ram_list.mutex);
949 void qemu_mutex_unlock_ramlist(void)
951 qemu_mutex_unlock(&ram_list.mutex);
954 #ifdef __linux__
956 #include <sys/vfs.h>
958 #define HUGETLBFS_MAGIC 0x958458f6
960 static long gethugepagesize(const char *path)
962 struct statfs fs;
963 int ret;
965 do {
966 ret = statfs(path, &fs);
967 } while (ret != 0 && errno == EINTR);
969 if (ret != 0) {
970 perror(path);
971 return 0;
974 if (fs.f_type != HUGETLBFS_MAGIC)
975 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
977 return fs.f_bsize;
980 static sigjmp_buf sigjump;
982 static void sigbus_handler(int signal)
984 siglongjmp(sigjump, 1);
987 static void *file_ram_alloc(RAMBlock *block,
988 ram_addr_t memory,
989 const char *path)
991 char *filename;
992 char *sanitized_name;
993 char *c;
994 void *area;
995 int fd;
996 unsigned long hpagesize;
998 hpagesize = gethugepagesize(path);
999 if (!hpagesize) {
1000 return NULL;
1003 if (memory < hpagesize) {
1004 return NULL;
1007 if (kvm_enabled() && !kvm_has_sync_mmu()) {
1008 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
1009 return NULL;
1012 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
1013 sanitized_name = g_strdup(block->mr->name);
1014 for (c = sanitized_name; *c != '\0'; c++) {
1015 if (*c == '/')
1016 *c = '_';
1019 filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
1020 sanitized_name);
1021 g_free(sanitized_name);
1023 fd = mkstemp(filename);
1024 if (fd < 0) {
1025 perror("unable to create backing store for hugepages");
1026 g_free(filename);
1027 return NULL;
1029 unlink(filename);
1030 g_free(filename);
1032 memory = (memory+hpagesize-1) & ~(hpagesize-1);
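/* e.g. with 2 MiB huge pages, a 5 MiB request is rounded up here to 6 MiB
 * (illustrative figures; hpagesize comes from gethugepagesize() above). */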
1035 * ftruncate is not supported by hugetlbfs in older
1036 * hosts, so don't bother bailing out on errors.
1037 * If anything goes wrong with it under other filesystems,
1038 * mmap will fail.
1040 if (ftruncate(fd, memory))
1041 perror("ftruncate");
1043 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
1044 if (area == MAP_FAILED) {
1045 perror("file_ram_alloc: can't mmap RAM pages");
1046 close(fd);
1047 return (NULL);
1050 if (mem_prealloc) {
1051 int ret, i;
1052 struct sigaction act, oldact;
1053 sigset_t set, oldset;
1055 memset(&act, 0, sizeof(act));
1056 act.sa_handler = &sigbus_handler;
1057 act.sa_flags = 0;
1059 ret = sigaction(SIGBUS, &act, &oldact);
1060 if (ret) {
1061 perror("file_ram_alloc: failed to install signal handler");
1062 exit(1);
1065 /* unblock SIGBUS */
1066 sigemptyset(&set);
1067 sigaddset(&set, SIGBUS);
1068 pthread_sigmask(SIG_UNBLOCK, &set, &oldset);
1070 if (sigsetjmp(sigjump, 1)) {
1071 fprintf(stderr, "file_ram_alloc: failed to preallocate pages\n");
1072 exit(1);
1075 /* MAP_POPULATE silently ignores failures */
1076 for (i = 0; i < (memory/hpagesize)-1; i++) {
1077 memset(area + (hpagesize*i), 0, 1);
1080 ret = sigaction(SIGBUS, &oldact, NULL);
1081 if (ret) {
1082 perror("file_ram_alloc: failed to reinstall signal handler");
1083 exit(1);
1086 pthread_sigmask(SIG_SETMASK, &oldset, NULL);
1089 block->fd = fd;
1090 return area;
1092 #else
1093 static void *file_ram_alloc(RAMBlock *block,
1094 ram_addr_t memory,
1095 const char *path)
1097 fprintf(stderr, "-mem-path not supported on this host\n");
1098 exit(1);
1100 #endif
1102 static ram_addr_t find_ram_offset(ram_addr_t size)
1104 RAMBlock *block, *next_block;
1105 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
1107     assert(size != 0); /* it would hand out the same offset multiple times */
1109 if (QTAILQ_EMPTY(&ram_list.blocks))
1110 return 0;
1112 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1113 ram_addr_t end, next = RAM_ADDR_MAX;
1115 end = block->offset + block->length;
1117 QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
1118 if (next_block->offset >= end) {
1119 next = MIN(next, next_block->offset);
1122 if (next - end >= size && next - end < mingap) {
1123 offset = end;
1124 mingap = next - end;
1128 if (offset == RAM_ADDR_MAX) {
1129 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
1130 (uint64_t)size);
1131 abort();
1134 return offset;
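/* Best-fit illustration (made-up layout): with blocks occupying [0, 4M) and
 * [8M, 12M), a 2M request returns offset 4M, because the 4M hole after the
 * first block is the smallest gap that still fits the request. */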
1137 ram_addr_t last_ram_offset(void)
1139 RAMBlock *block;
1140 ram_addr_t last = 0;
1142 QTAILQ_FOREACH(block, &ram_list.blocks, next)
1143 last = MAX(last, block->offset + block->length);
1145 return last;
1148 static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1150 int ret;
1152     /* Use MADV_DONTDUMP if the user doesn't want the guest memory in the core dump */
1153 if (!qemu_opt_get_bool(qemu_get_machine_opts(),
1154 "dump-guest-core", true)) {
1155 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1156 if (ret) {
1157 perror("qemu_madvise");
1158 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1159 "but dump_guest_core=off specified\n");
1164 void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
1166 RAMBlock *new_block, *block;
1168 new_block = NULL;
1169 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1170 if (block->offset == addr) {
1171 new_block = block;
1172 break;
1175 assert(new_block);
1176 assert(!new_block->idstr[0]);
1178 if (dev) {
1179 char *id = qdev_get_dev_path(dev);
1180 if (id) {
1181 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
1182 g_free(id);
1185 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1187 /* This assumes the iothread lock is taken here too. */
1188 qemu_mutex_lock_ramlist();
1189 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1190 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
1191 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1192 new_block->idstr);
1193 abort();
1196 qemu_mutex_unlock_ramlist();
1199 static int memory_try_enable_merging(void *addr, size_t len)
1201 if (!qemu_opt_get_bool(qemu_get_machine_opts(), "mem-merge", true)) {
1202 /* disabled by the user */
1203 return 0;
1206 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1209 ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
1210 MemoryRegion *mr)
1212 RAMBlock *block, *new_block;
1214 size = TARGET_PAGE_ALIGN(size);
1215 new_block = g_malloc0(sizeof(*new_block));
1216 new_block->fd = -1;
1218 /* This assumes the iothread lock is taken here too. */
1219 qemu_mutex_lock_ramlist();
1220 new_block->mr = mr;
1221 new_block->offset = find_ram_offset(size);
1222 if (host) {
1223 new_block->host = host;
1224 new_block->flags |= RAM_PREALLOC_MASK;
1225 } else if (xen_enabled()) {
1226 if (mem_path) {
1227 fprintf(stderr, "-mem-path not supported with Xen\n");
1228 exit(1);
1230 xen_ram_alloc(new_block->offset, size, mr);
1231 } else {
1232 if (mem_path) {
1233 if (phys_mem_alloc != qemu_anon_ram_alloc) {
1235 * file_ram_alloc() needs to allocate just like
1236 * phys_mem_alloc, but we haven't bothered to provide
1237 * a hook there.
1239 fprintf(stderr,
1240 "-mem-path not supported with this accelerator\n");
1241 exit(1);
1243 new_block->host = file_ram_alloc(new_block, size, mem_path);
1245 if (!new_block->host) {
1246 new_block->host = phys_mem_alloc(size);
1247 if (!new_block->host) {
1248 fprintf(stderr, "Cannot set up guest memory '%s': %s\n",
1249 new_block->mr->name, strerror(errno));
1250 exit(1);
1252 memory_try_enable_merging(new_block->host, size);
1255 new_block->length = size;
1257 /* Keep the list sorted from biggest to smallest block. */
1258 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1259 if (block->length < new_block->length) {
1260 break;
1263 if (block) {
1264 QTAILQ_INSERT_BEFORE(block, new_block, next);
1265 } else {
1266 QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
1268 ram_list.mru_block = NULL;
1270 ram_list.version++;
1271 qemu_mutex_unlock_ramlist();
1273 ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
1274 last_ram_offset() >> TARGET_PAGE_BITS);
1275 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
1276 0, size >> TARGET_PAGE_BITS);
1277 cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);
1279 qemu_ram_setup_dump(new_block->host, size);
1280 qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);
1281 qemu_madvise(new_block->host, size, QEMU_MADV_DONTFORK);
1283 if (kvm_enabled())
1284 kvm_setup_guest_memory(new_block->host, size);
1286 return new_block->offset;
1289 ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
1291 return qemu_ram_alloc_from_ptr(size, NULL, mr);
1294 void qemu_ram_free_from_ptr(ram_addr_t addr)
1296 RAMBlock *block;
1298 /* This assumes the iothread lock is taken here too. */
1299 qemu_mutex_lock_ramlist();
1300 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1301 if (addr == block->offset) {
1302 QTAILQ_REMOVE(&ram_list.blocks, block, next);
1303 ram_list.mru_block = NULL;
1304 ram_list.version++;
1305 g_free(block);
1306 break;
1309 qemu_mutex_unlock_ramlist();
1312 void qemu_ram_free(ram_addr_t addr)
1314 RAMBlock *block;
1316 /* This assumes the iothread lock is taken here too. */
1317 qemu_mutex_lock_ramlist();
1318 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1319 if (addr == block->offset) {
1320 QTAILQ_REMOVE(&ram_list.blocks, block, next);
1321 ram_list.mru_block = NULL;
1322 ram_list.version++;
1323 if (block->flags & RAM_PREALLOC_MASK) {
1325 } else if (xen_enabled()) {
1326 xen_invalidate_map_cache_entry(block->host);
1327 #ifndef _WIN32
1328 } else if (block->fd >= 0) {
1329 munmap(block->host, block->length);
1330 close(block->fd);
1331 #endif
1332 } else {
1333 qemu_anon_ram_free(block->host, block->length);
1335 g_free(block);
1336 break;
1339 qemu_mutex_unlock_ramlist();
1343 #ifndef _WIN32
1344 void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1346 RAMBlock *block;
1347 ram_addr_t offset;
1348 int flags;
1349 void *area, *vaddr;
1351 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1352 offset = addr - block->offset;
1353 if (offset < block->length) {
1354 vaddr = block->host + offset;
1355 if (block->flags & RAM_PREALLOC_MASK) {
1357 } else if (xen_enabled()) {
1358 abort();
1359 } else {
1360 flags = MAP_FIXED;
1361 munmap(vaddr, length);
1362 if (block->fd >= 0) {
1363 #ifdef MAP_POPULATE
1364 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
1365 MAP_PRIVATE;
1366 #else
1367 flags |= MAP_PRIVATE;
1368 #endif
1369 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1370 flags, block->fd, offset);
1371 } else {
1373 * Remap needs to match alloc. Accelerators that
1374 * set phys_mem_alloc never remap. If they did,
1375 * we'd need a remap hook here.
1377 assert(phys_mem_alloc == qemu_anon_ram_alloc);
1379 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1380 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1381 flags, -1, 0);
1383 if (area != vaddr) {
1384 fprintf(stderr, "Could not remap addr: "
1385 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
1386 length, addr);
1387 exit(1);
1389 memory_try_enable_merging(vaddr, length);
1390 qemu_ram_setup_dump(vaddr, length);
1392 return;
1396 #endif /* !_WIN32 */
1398 /* Return a host pointer to ram allocated with qemu_ram_alloc.
1399 With the exception of the softmmu code in this file, this should
1400 only be used for local memory (e.g. video ram) that the device owns,
1401 and knows it isn't going to access beyond the end of the block.
1403 It should not be used for general purpose DMA.
1404 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
1406 void *qemu_get_ram_ptr(ram_addr_t addr)
1408 RAMBlock *block = qemu_get_ram_block(addr);
1410 if (xen_enabled()) {
1411 /* We need to check if the requested address is in the RAM
1412 * because we don't want to map the entire memory in QEMU.
1413 * In that case just map until the end of the page.
1415 if (block->offset == 0) {
1416 return xen_map_cache(addr, 0, 0);
1417 } else if (block->host == NULL) {
1418 block->host =
1419 xen_map_cache(block->offset, block->length, 1);
1422 return block->host + (addr - block->offset);
1425 /* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
1426 * but takes a size argument */
1427 static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
1429 if (*size == 0) {
1430 return NULL;
1432 if (xen_enabled()) {
1433 return xen_map_cache(addr, *size, 1);
1434 } else {
1435 RAMBlock *block;
1437 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1438 if (addr - block->offset < block->length) {
1439 if (addr - block->offset + *size > block->length)
1440 *size = block->length - addr + block->offset;
1441 return block->host + (addr - block->offset);
1445 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1446 abort();
1450 /* Some of the softmmu routines need to translate from a host pointer
1451 (typically a TLB entry) back to a ram offset. */
1452 MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
1454 RAMBlock *block;
1455 uint8_t *host = ptr;
1457 if (xen_enabled()) {
1458 *ram_addr = xen_ram_addr_from_mapcache(ptr);
1459 return qemu_get_ram_block(*ram_addr)->mr;
1462 block = ram_list.mru_block;
1463 if (block && block->host && host - block->host < block->length) {
1464 goto found;
1467 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1468         /* This case appears when the block is not mapped. */
1469 if (block->host == NULL) {
1470 continue;
1472 if (host - block->host < block->length) {
1473 goto found;
1477 return NULL;
1479 found:
1480 *ram_addr = block->offset + (host - block->host);
1481 return block->mr;
1484 static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
1485 uint64_t val, unsigned size)
1487 int dirty_flags;
1488 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
1489 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
1490 tb_invalidate_phys_page_fast(ram_addr, size);
1491 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
1493 switch (size) {
1494 case 1:
1495 stb_p(qemu_get_ram_ptr(ram_addr), val);
1496 break;
1497 case 2:
1498 stw_p(qemu_get_ram_ptr(ram_addr), val);
1499 break;
1500 case 4:
1501 stl_p(qemu_get_ram_ptr(ram_addr), val);
1502 break;
1503 default:
1504 abort();
1506 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
1507 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
1508 /* we remove the notdirty callback only if the code has been
1509 flushed */
1510 if (dirty_flags == 0xff) {
1511 CPUArchState *env = current_cpu->env_ptr;
1512 tlb_set_dirty(env, env->mem_io_vaddr);
1516 static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
1517 unsigned size, bool is_write)
1519 return is_write;
1522 static const MemoryRegionOps notdirty_mem_ops = {
1523 .write = notdirty_mem_write,
1524 .valid.accepts = notdirty_mem_accepts,
1525 .endianness = DEVICE_NATIVE_ENDIAN,
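/* How this is reached: memory_region_section_get_iotlb() above tags writable
 * RAM whose dirty bits are clear with PHYS_SECTION_NOTDIRTY, so guest writes
 * land in notdirty_mem_write(); it invalidates any TBs on the page, performs
 * the store, sets the dirty bits, and once the page is fully dirty lets
 * tlb_set_dirty() restore direct (fast-path) writes. */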
1528 /* Generate a debug exception if a watchpoint has been hit. */
1529 static void check_watchpoint(int offset, int len_mask, int flags)
1531 CPUArchState *env = current_cpu->env_ptr;
1532 target_ulong pc, cs_base;
1533 target_ulong vaddr;
1534 CPUWatchpoint *wp;
1535 int cpu_flags;
1537 if (env->watchpoint_hit) {
1538 /* We re-entered the check after replacing the TB. Now raise
1539          * the debug interrupt so that it will trigger after the
1540 * current instruction. */
1541 cpu_interrupt(ENV_GET_CPU(env), CPU_INTERRUPT_DEBUG);
1542 return;
1544 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
1545 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1546 if ((vaddr == (wp->vaddr & len_mask) ||
1547 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
1548 wp->flags |= BP_WATCHPOINT_HIT;
1549 if (!env->watchpoint_hit) {
1550 env->watchpoint_hit = wp;
1551 tb_check_watchpoint(env);
1552 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
1553 env->exception_index = EXCP_DEBUG;
1554 cpu_loop_exit(env);
1555 } else {
1556 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
1557 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
1558 cpu_resume_from_signal(env, NULL);
1561 } else {
1562 wp->flags &= ~BP_WATCHPOINT_HIT;
1567 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
1568 so these check for a hit then pass through to the normal out-of-line
1569 phys routines. */
1570 static uint64_t watch_mem_read(void *opaque, hwaddr addr,
1571 unsigned size)
1573 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
1574 switch (size) {
1575 case 1: return ldub_phys(addr);
1576 case 2: return lduw_phys(addr);
1577 case 4: return ldl_phys(addr);
1578 default: abort();
1582 static void watch_mem_write(void *opaque, hwaddr addr,
1583 uint64_t val, unsigned size)
1585 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
1586 switch (size) {
1587 case 1:
1588 stb_phys(addr, val);
1589 break;
1590 case 2:
1591 stw_phys(addr, val);
1592 break;
1593 case 4:
1594 stl_phys(addr, val);
1595 break;
1596 default: abort();
1600 static const MemoryRegionOps watch_mem_ops = {
1601 .read = watch_mem_read,
1602 .write = watch_mem_write,
1603 .endianness = DEVICE_NATIVE_ENDIAN,
1606 static uint64_t subpage_read(void *opaque, hwaddr addr,
1607 unsigned len)
1609 subpage_t *subpage = opaque;
1610 uint8_t buf[4];
1612 #if defined(DEBUG_SUBPAGE)
1613 printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
1614 subpage, len, addr);
1615 #endif
1616 address_space_read(subpage->as, addr + subpage->base, buf, len);
1617 switch (len) {
1618 case 1:
1619 return ldub_p(buf);
1620 case 2:
1621 return lduw_p(buf);
1622 case 4:
1623 return ldl_p(buf);
1624 default:
1625 abort();
1629 static void subpage_write(void *opaque, hwaddr addr,
1630 uint64_t value, unsigned len)
1632 subpage_t *subpage = opaque;
1633 uint8_t buf[4];
1635 #if defined(DEBUG_SUBPAGE)
1636 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
1637 " value %"PRIx64"\n",
1638 __func__, subpage, len, addr, value);
1639 #endif
1640 switch (len) {
1641 case 1:
1642 stb_p(buf, value);
1643 break;
1644 case 2:
1645 stw_p(buf, value);
1646 break;
1647 case 4:
1648 stl_p(buf, value);
1649 break;
1650 default:
1651 abort();
1653 address_space_write(subpage->as, addr + subpage->base, buf, len);
1656 static bool subpage_accepts(void *opaque, hwaddr addr,
1657 unsigned len, bool is_write)
1659 subpage_t *subpage = opaque;
1660 #if defined(DEBUG_SUBPAGE)
1661 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
1662 __func__, subpage, is_write ? 'w' : 'r', len, addr);
1663 #endif
1665 return address_space_access_valid(subpage->as, addr + subpage->base,
1666 len, is_write);
1669 static const MemoryRegionOps subpage_ops = {
1670 .read = subpage_read,
1671 .write = subpage_write,
1672 .valid.accepts = subpage_accepts,
1673 .endianness = DEVICE_NATIVE_ENDIAN,
1676 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
1677 uint16_t section)
1679 int idx, eidx;
1681 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
1682 return -1;
1683 idx = SUBPAGE_IDX(start);
1684 eidx = SUBPAGE_IDX(end);
1685 #if defined(DEBUG_SUBPAGE)
1686 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
1687 __func__, mmio, start, end, idx, eidx, section);
1688 #endif
1689 for (; idx <= eidx; idx++) {
1690 mmio->sub_section[idx] = section;
1693 return 0;
1696 static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
1698 subpage_t *mmio;
1700 mmio = g_malloc0(sizeof(subpage_t));
1702 mmio->as = as;
1703 mmio->base = base;
1704 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
1705 "subpage", TARGET_PAGE_SIZE);
1706 mmio->iomem.subpage = true;
1707 #if defined(DEBUG_SUBPAGE)
1708 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
1709 mmio, base, TARGET_PAGE_SIZE);
1710 #endif
1711 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
1713 return mmio;
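/* Subpage dispatch in short: a page shared by more than one section gets a
 * subpage_t whose sub_section[] maps every byte offset in the page to a
 * section index (see subpage_register() and register_subpage() above); reads
 * and writes are then re-issued through address_space_read()/write() on the
 * owning AddressSpace with subpage->base added back in. */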
1716 static uint16_t dummy_section(PhysPageMap *map, MemoryRegion *mr)
1718 MemoryRegionSection section = {
1719 .mr = mr,
1720 .offset_within_address_space = 0,
1721 .offset_within_region = 0,
1722 .size = int128_2_64(),
1725 return phys_section_add(map, &section);
1728 MemoryRegion *iotlb_to_region(hwaddr index)
1730 return address_space_memory.dispatch->map.sections[
1731 index & ~TARGET_PAGE_MASK].mr;
1734 static void io_mem_init(void)
1736 memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, "rom", UINT64_MAX);
1737 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
1738 "unassigned", UINT64_MAX);
1739 memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
1740 "notdirty", UINT64_MAX);
1741 memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
1742 "watch", UINT64_MAX);
1745 static void mem_begin(MemoryListener *listener)
1747 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
1748 AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
1749 uint16_t n;
1751 n = dummy_section(&d->map, &io_mem_unassigned);
1752 assert(n == PHYS_SECTION_UNASSIGNED);
1753 n = dummy_section(&d->map, &io_mem_notdirty);
1754 assert(n == PHYS_SECTION_NOTDIRTY);
1755 n = dummy_section(&d->map, &io_mem_rom);
1756 assert(n == PHYS_SECTION_ROM);
1757 n = dummy_section(&d->map, &io_mem_watch);
1758 assert(n == PHYS_SECTION_WATCH);
1760 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
1761 d->as = as;
1762 as->next_dispatch = d;
1765 static void mem_commit(MemoryListener *listener)
1767 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
1768 AddressSpaceDispatch *cur = as->dispatch;
1769 AddressSpaceDispatch *next = as->next_dispatch;
1771 phys_page_compact_all(next, next->map.nodes_nb);
1773 as->dispatch = next;
1775 if (cur) {
1776 phys_sections_free(&cur->map);
1777 g_free(cur);
1781 static void tcg_commit(MemoryListener *listener)
1783 CPUState *cpu;
1785 /* since each CPU stores ram addresses in its TLB cache, we must
1786 reset the modified entries */
1787     /* XXX: slow! */
1788 CPU_FOREACH(cpu) {
1789 CPUArchState *env = cpu->env_ptr;
1791 tlb_flush(env, 1);
1795 static void core_log_global_start(MemoryListener *listener)
1797 cpu_physical_memory_set_dirty_tracking(1);
1800 static void core_log_global_stop(MemoryListener *listener)
1802 cpu_physical_memory_set_dirty_tracking(0);
1805 static MemoryListener core_memory_listener = {
1806 .log_global_start = core_log_global_start,
1807 .log_global_stop = core_log_global_stop,
1808 .priority = 1,
1811 static MemoryListener tcg_memory_listener = {
1812 .commit = tcg_commit,
1815 void address_space_init_dispatch(AddressSpace *as)
1817 as->dispatch = NULL;
1818 as->dispatch_listener = (MemoryListener) {
1819 .begin = mem_begin,
1820 .commit = mem_commit,
1821 .region_add = mem_add,
1822 .region_nop = mem_add,
1823 .priority = 0,
1825 memory_listener_register(&as->dispatch_listener, as);
1828 void address_space_destroy_dispatch(AddressSpace *as)
1830 AddressSpaceDispatch *d = as->dispatch;
1832 memory_listener_unregister(&as->dispatch_listener);
1833 g_free(d);
1834 as->dispatch = NULL;
1837 static void memory_map_init(void)
1839 system_memory = g_malloc(sizeof(*system_memory));
1841 memory_region_init(system_memory, NULL, "system", UINT64_MAX);
1842 address_space_init(&address_space_memory, system_memory, "memory");
1844 system_io = g_malloc(sizeof(*system_io));
1845 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
1846 65536);
1847 address_space_init(&address_space_io, system_io, "I/O");
1849 memory_listener_register(&core_memory_listener, &address_space_memory);
1850 if (tcg_enabled()) {
1851 memory_listener_register(&tcg_memory_listener, &address_space_memory);
1855 MemoryRegion *get_system_memory(void)
1857 return system_memory;
1860 MemoryRegion *get_system_io(void)
1862 return system_io;
1865 #endif /* !defined(CONFIG_USER_ONLY) */
1867 /* physical memory access (slow version, mainly for debug) */
1868 #if defined(CONFIG_USER_ONLY)
1869 int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
1870 uint8_t *buf, int len, int is_write)
1872 int l, flags;
1873 target_ulong page;
1874 void * p;
1876 while (len > 0) {
1877 page = addr & TARGET_PAGE_MASK;
1878 l = (page + TARGET_PAGE_SIZE) - addr;
1879 if (l > len)
1880 l = len;
1881 flags = page_get_flags(page);
1882 if (!(flags & PAGE_VALID))
1883 return -1;
1884 if (is_write) {
1885 if (!(flags & PAGE_WRITE))
1886 return -1;
1887 /* XXX: this code should not depend on lock_user */
1888 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
1889 return -1;
1890 memcpy(p, buf, l);
1891 unlock_user(p, addr, l);
1892 } else {
1893 if (!(flags & PAGE_READ))
1894 return -1;
1895 /* XXX: this code should not depend on lock_user */
1896 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
1897 return -1;
1898 memcpy(buf, p, l);
1899 unlock_user(p, addr, 0);
1901 len -= l;
1902 buf += l;
1903 addr += l;
1905 return 0;
1908 #else
1910 static void invalidate_and_set_dirty(hwaddr addr,
1911 hwaddr length)
1913 if (!cpu_physical_memory_is_dirty(addr)) {
1914 /* invalidate code */
1915 tb_invalidate_phys_page_range(addr, addr + length, 0);
1916 /* set dirty bit */
1917 cpu_physical_memory_set_dirty_flags(addr, (0xff & ~CODE_DIRTY_FLAG));
1919 xen_modified_memory(addr, length);
1922 static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
1924 if (memory_region_is_ram(mr)) {
1925 return !(is_write && mr->readonly);
1927 if (memory_region_is_romd(mr)) {
1928 return !is_write;
1931 return false;
1934 static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
1936 unsigned access_size_max = mr->ops->valid.max_access_size;
1938 /* Regions are assumed to support 1-4 byte accesses unless
1939 otherwise specified. */
1940 if (access_size_max == 0) {
1941 access_size_max = 4;
1944 /* Bound the maximum access by the alignment of the address. */
1945 if (!mr->ops->impl.unaligned) {
1946 unsigned align_size_max = addr & -addr;
1947 if (align_size_max != 0 && align_size_max < access_size_max) {
1948 access_size_max = align_size_max;
1952 /* Don't attempt accesses larger than the maximum. */
1953 if (l > access_size_max) {
1954 l = access_size_max;
1956 if (l & (l - 1)) {
1957 l = 1 << (qemu_fls(l) - 1);
1960 return l;
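/* Illustration (hypothetical MMIO region with the default 4-byte maximum): an
 * 8-byte access at an address ending in ...6 is limited by both the 4-byte
 * maximum and the alignment bound (addr & -addr == 2), so it is issued as a
 * 2-byte access; address_space_rw() below loops until the full length has
 * been transferred. */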
1963 bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
1964 int len, bool is_write)
1966 hwaddr l;
1967 uint8_t *ptr;
1968 uint64_t val;
1969 hwaddr addr1;
1970 MemoryRegion *mr;
1971 bool error = false;
1973 while (len > 0) {
1974 l = len;
1975 mr = address_space_translate(as, addr, &addr1, &l, is_write);
1977 if (is_write) {
1978 if (!memory_access_is_direct(mr, is_write)) {
1979 l = memory_access_size(mr, l, addr1);
1980 /* XXX: could force current_cpu to NULL to avoid
1981 potential bugs */
1982 switch (l) {
1983 case 8:
1984 /* 64 bit write access */
1985 val = ldq_p(buf);
1986 error |= io_mem_write(mr, addr1, val, 8);
1987 break;
1988 case 4:
1989 /* 32 bit write access */
1990 val = ldl_p(buf);
1991 error |= io_mem_write(mr, addr1, val, 4);
1992 break;
1993 case 2:
1994 /* 16 bit write access */
1995 val = lduw_p(buf);
1996 error |= io_mem_write(mr, addr1, val, 2);
1997 break;
1998 case 1:
1999 /* 8 bit write access */
2000 val = ldub_p(buf);
2001 error |= io_mem_write(mr, addr1, val, 1);
2002 break;
2003 default:
2004 abort();
2006 } else {
2007 addr1 += memory_region_get_ram_addr(mr);
2008 /* RAM case */
2009 ptr = qemu_get_ram_ptr(addr1);
2010 memcpy(ptr, buf, l);
2011 invalidate_and_set_dirty(addr1, l);
2013 } else {
2014 if (!memory_access_is_direct(mr, is_write)) {
2015 /* I/O case */
2016 l = memory_access_size(mr, l, addr1);
2017 switch (l) {
2018 case 8:
2019 /* 64 bit read access */
2020 error |= io_mem_read(mr, addr1, &val, 8);
2021 stq_p(buf, val);
2022 break;
2023 case 4:
2024 /* 32 bit read access */
2025 error |= io_mem_read(mr, addr1, &val, 4);
2026 stl_p(buf, val);
2027 break;
2028 case 2:
2029 /* 16 bit read access */
2030 error |= io_mem_read(mr, addr1, &val, 2);
2031 stw_p(buf, val);
2032 break;
2033 case 1:
2034 /* 8 bit read access */
2035 error |= io_mem_read(mr, addr1, &val, 1);
2036 stb_p(buf, val);
2037 break;
2038 default:
2039 abort();
2041 } else {
2042 /* RAM case */
2043 ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
2044 memcpy(buf, ptr, l);
2047 len -= l;
2048 buf += l;
2049 addr += l;
2052 return error;
2055 bool address_space_write(AddressSpace *as, hwaddr addr,
2056 const uint8_t *buf, int len)
2058 return address_space_rw(as, addr, (uint8_t *)buf, len, true);
2061 bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
2063 return address_space_rw(as, addr, buf, len, false);
2067 void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
2068 int len, int is_write)
2070 address_space_rw(&address_space_memory, addr, buf, len, is_write);
2073 /* used for ROM loading : can write in RAM and ROM */
2074 void cpu_physical_memory_write_rom(hwaddr addr,
2075 const uint8_t *buf, int len)
2077 hwaddr l;
2078 uint8_t *ptr;
2079 hwaddr addr1;
2080 MemoryRegion *mr;
2082 while (len > 0) {
2083 l = len;
2084 mr = address_space_translate(&address_space_memory,
2085 addr, &addr1, &l, true);
2087 if (!(memory_region_is_ram(mr) ||
2088 memory_region_is_romd(mr))) {
2089 /* do nothing */
2090 } else {
2091 addr1 += memory_region_get_ram_addr(mr);
2092 /* ROM/RAM case */
2093 ptr = qemu_get_ram_ptr(addr1);
2094 memcpy(ptr, buf, l);
2095 invalidate_and_set_dirty(addr1, l);
2097 len -= l;
2098 buf += l;
2099 addr += l;
2103 typedef struct {
2104 MemoryRegion *mr;
2105 void *buffer;
2106 hwaddr addr;
2107 hwaddr len;
2108 } BounceBuffer;
2110 static BounceBuffer bounce;
2112 typedef struct MapClient {
2113 void *opaque;
2114 void (*callback)(void *opaque);
2115 QLIST_ENTRY(MapClient) link;
2116 } MapClient;
2118 static QLIST_HEAD(map_client_list, MapClient) map_client_list
2119 = QLIST_HEAD_INITIALIZER(map_client_list);
2121 void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
2123 MapClient *client = g_malloc(sizeof(*client));
2125 client->opaque = opaque;
2126 client->callback = callback;
2127 QLIST_INSERT_HEAD(&map_client_list, client, link);
2128 return client;
2131 static void cpu_unregister_map_client(void *_client)
2133 MapClient *client = (MapClient *)_client;
2135 QLIST_REMOVE(client, link);
2136 g_free(client);
2139 static void cpu_notify_map_clients(void)
2141 MapClient *client;
2143 while (!QLIST_EMPTY(&map_client_list)) {
2144 client = QLIST_FIRST(&map_client_list);
2145 client->callback(client->opaque);
2146 cpu_unregister_map_client(client);
2150 bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2152 MemoryRegion *mr;
2153 hwaddr l, xlat;
2155 while (len > 0) {
2156 l = len;
2157 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2158 if (!memory_access_is_direct(mr, is_write)) {
2159 l = memory_access_size(mr, l, addr);
2160 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
2161 return false;
2165 len -= l;
2166 addr += l;
2168 return true;
2171 /* Map a physical memory region into a host virtual address.
2172 * May map a subset of the requested range, given by and returned in *plen.
2173 * May return NULL if resources needed to perform the mapping are exhausted.
2174 * Use only for reads OR writes - not for read-modify-write operations.
2175 * Use cpu_register_map_client() to know when retrying the map operation is
2176 * likely to succeed.
2178 void *address_space_map(AddressSpace *as,
2179 hwaddr addr,
2180 hwaddr *plen,
2181 bool is_write)
2183 hwaddr len = *plen;
2184 hwaddr done = 0;
2185 hwaddr l, xlat, base;
2186 MemoryRegion *mr, *this_mr;
2187 ram_addr_t raddr;
2189 if (len == 0) {
2190 return NULL;
2193 l = len;
2194 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2195 if (!memory_access_is_direct(mr, is_write)) {
2196 if (bounce.buffer) {
2197 return NULL;
2199 /* Avoid unbounded allocations */
2200 l = MIN(l, TARGET_PAGE_SIZE);
2201 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
2202 bounce.addr = addr;
2203 bounce.len = l;
2205 memory_region_ref(mr);
2206 bounce.mr = mr;
2207 if (!is_write) {
2208 address_space_read(as, addr, bounce.buffer, l);
2211 *plen = l;
2212 return bounce.buffer;
2215 base = xlat;
2216 raddr = memory_region_get_ram_addr(mr);
2218 for (;;) {
2219 len -= l;
2220 addr += l;
2221 done += l;
2222 if (len == 0) {
2223 break;
2226 l = len;
2227 this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
2228 if (this_mr != mr || xlat != base + done) {
2229 break;
2233 memory_region_ref(mr);
2234 *plen = done;
2235 return qemu_ram_ptr_length(raddr + base, plen);
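/* Editor's note (illustrative sketch, not built): address_space_map() used on
 * an explicit AddressSpace.  The access_len argument of address_space_unmap()
 * reports how much of the mapping was really written, so only that part is
 * marked dirty.  example_fill_partial is an invented name. */
#if 0
static hwaddr example_fill_partial(AddressSpace *as, hwaddr addr, hwaddr want)
{
    hwaddr plen = want;
    uint8_t *ptr = address_space_map(as, addr, &plen, true);
    hwaddr produced;

    if (!ptr) {
        return 0;
    }
    /* Suppose the producer only delivered half of the mapped window. */
    produced = plen / 2;
    memset(ptr, 0xab, produced);
    address_space_unmap(as, ptr, plen, true, produced);
    return produced;
}
#endif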
2238 /* Unmaps a memory region previously mapped by address_space_map().
2239 * Will also mark the memory as dirty if is_write == 1. access_len gives
2240 * the amount of memory that was actually read or written by the caller.
2242 void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2243 int is_write, hwaddr access_len)
2245 if (buffer != bounce.buffer) {
2246 MemoryRegion *mr;
2247 ram_addr_t addr1;
2249 mr = qemu_ram_addr_from_host(buffer, &addr1);
2250 assert(mr != NULL);
2251 if (is_write) {
2252 while (access_len) {
2253 unsigned l;
2254 l = TARGET_PAGE_SIZE;
2255 if (l > access_len)
2256 l = access_len;
2257 invalidate_and_set_dirty(addr1, l);
2258 addr1 += l;
2259 access_len -= l;
2262 if (xen_enabled()) {
2263 xen_invalidate_map_cache_entry(buffer);
2265 memory_region_unref(mr);
2266 return;
2268 if (is_write) {
2269 address_space_write(as, bounce.addr, bounce.buffer, access_len);
2271 qemu_vfree(bounce.buffer);
2272 bounce.buffer = NULL;
2273 memory_region_unref(bounce.mr);
2274 cpu_notify_map_clients();
2277 void *cpu_physical_memory_map(hwaddr addr,
2278 hwaddr *plen,
2279 int is_write)
2281 return address_space_map(&address_space_memory, addr, plen, is_write);
2284 void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2285 int is_write, hwaddr access_len)
2287 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
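/* Editor's note (illustrative sketch, not built): a map/modify/unmap loop over
 * a guest-physical range.  The mapping may cover less than was asked for
 * (*plen is shrunk at region boundaries, and indirect memory falls back to a
 * single page-sized bounce buffer), so callers iterate until the whole range
 * is done.  example_zero_guest_range is an invented name. */
#if 0
static void example_zero_guest_range(hwaddr addr, hwaddr len)
{
    while (len > 0) {
        hwaddr plen = len;
        void *ptr = cpu_physical_memory_map(addr, &plen, 1 /* is_write */);

        if (!ptr) {
            break;                      /* mapping resources exhausted */
        }
        memset(ptr, 0, plen);
        /* access_len == plen: the whole mapping was written. */
        cpu_physical_memory_unmap(ptr, plen, 1, plen);
        addr += plen;
        len -= plen;
    }
}
#endif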
2290 /* warning: addr must be aligned */
2291 static inline uint32_t ldl_phys_internal(hwaddr addr,
2292 enum device_endian endian)
2294 uint8_t *ptr;
2295 uint64_t val;
2296 MemoryRegion *mr;
2297 hwaddr l = 4;
2298 hwaddr addr1;
2300 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2301 false);
2302 if (l < 4 || !memory_access_is_direct(mr, false)) {
2303 /* I/O case */
2304 io_mem_read(mr, addr1, &val, 4);
2305 #if defined(TARGET_WORDS_BIGENDIAN)
2306 if (endian == DEVICE_LITTLE_ENDIAN) {
2307 val = bswap32(val);
2309 #else
2310 if (endian == DEVICE_BIG_ENDIAN) {
2311 val = bswap32(val);
2313 #endif
2314 } else {
2315 /* RAM case */
2316 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
2317 & TARGET_PAGE_MASK)
2318 + addr1);
2319 switch (endian) {
2320 case DEVICE_LITTLE_ENDIAN:
2321 val = ldl_le_p(ptr);
2322 break;
2323 case DEVICE_BIG_ENDIAN:
2324 val = ldl_be_p(ptr);
2325 break;
2326 default:
2327 val = ldl_p(ptr);
2328 break;
2331 return val;
2334 uint32_t ldl_phys(hwaddr addr)
2336 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2339 uint32_t ldl_le_phys(hwaddr addr)
2341 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2344 uint32_t ldl_be_phys(hwaddr addr)
2346 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
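/* Editor's note (illustrative sketch, not built): the _le/_be variants above
 * resolve the guest-visible byte order regardless of host or target
 * endianness, so a device model can read a field that its specification
 * defines as little-endian without an explicit bswap.  The function name is
 * invented. */
#if 0
static uint32_t example_read_le_ring_index(hwaddr ring_base)
{
    /* First 32-bit field of a hypothetical little-endian ring descriptor;
     * ring_base is assumed to be 4-byte aligned. */
    return ldl_le_phys(ring_base);
}
#endif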
2349 /* warning: addr must be aligned */
2350 static inline uint64_t ldq_phys_internal(hwaddr addr,
2351 enum device_endian endian)
2353 uint8_t *ptr;
2354 uint64_t val;
2355 MemoryRegion *mr;
2356 hwaddr l = 8;
2357 hwaddr addr1;
2359 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2360 false);
2361 if (l < 8 || !memory_access_is_direct(mr, false)) {
2362 /* I/O case */
2363 io_mem_read(mr, addr1, &val, 8);
2364 #if defined(TARGET_WORDS_BIGENDIAN)
2365 if (endian == DEVICE_LITTLE_ENDIAN) {
2366 val = bswap64(val);
2368 #else
2369 if (endian == DEVICE_BIG_ENDIAN) {
2370 val = bswap64(val);
2372 #endif
2373 } else {
2374 /* RAM case */
2375 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
2376 & TARGET_PAGE_MASK)
2377 + addr1);
2378 switch (endian) {
2379 case DEVICE_LITTLE_ENDIAN:
2380 val = ldq_le_p(ptr);
2381 break;
2382 case DEVICE_BIG_ENDIAN:
2383 val = ldq_be_p(ptr);
2384 break;
2385 default:
2386 val = ldq_p(ptr);
2387 break;
2390 return val;
2393 uint64_t ldq_phys(hwaddr addr)
2395 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2398 uint64_t ldq_le_phys(hwaddr addr)
2400 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2403 uint64_t ldq_be_phys(hwaddr addr)
2405 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
2408 /* XXX: optimize */
2409 uint32_t ldub_phys(hwaddr addr)
2411 uint8_t val;
2412 cpu_physical_memory_read(addr, &val, 1);
2413 return val;
2416 /* warning: addr must be aligned */
2417 static inline uint32_t lduw_phys_internal(hwaddr addr,
2418 enum device_endian endian)
2420 uint8_t *ptr;
2421 uint64_t val;
2422 MemoryRegion *mr;
2423 hwaddr l = 2;
2424 hwaddr addr1;
2426 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2427 false);
2428 if (l < 2 || !memory_access_is_direct(mr, false)) {
2429 /* I/O case */
2430 io_mem_read(mr, addr1, &val, 2);
2431 #if defined(TARGET_WORDS_BIGENDIAN)
2432 if (endian == DEVICE_LITTLE_ENDIAN) {
2433 val = bswap16(val);
2435 #else
2436 if (endian == DEVICE_BIG_ENDIAN) {
2437 val = bswap16(val);
2439 #endif
2440 } else {
2441 /* RAM case */
2442 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
2443 & TARGET_PAGE_MASK)
2444 + addr1);
2445 switch (endian) {
2446 case DEVICE_LITTLE_ENDIAN:
2447 val = lduw_le_p(ptr);
2448 break;
2449 case DEVICE_BIG_ENDIAN:
2450 val = lduw_be_p(ptr);
2451 break;
2452 default:
2453 val = lduw_p(ptr);
2454 break;
2457 return val;
2460 uint32_t lduw_phys(hwaddr addr)
2462 return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2465 uint32_t lduw_le_phys(hwaddr addr)
2467 return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2470 uint32_t lduw_be_phys(hwaddr addr)
2472 return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
2475 /* warning: addr must be aligned. The RAM page is not marked as dirty
2476 and the code inside is not invalidated. It is useful if the dirty
2477 bits are used to track modified PTEs */
2478 void stl_phys_notdirty(hwaddr addr, uint32_t val)
2480 uint8_t *ptr;
2481 MemoryRegion *mr;
2482 hwaddr l = 4;
2483 hwaddr addr1;
2485 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2486 true);
2487 if (l < 4 || !memory_access_is_direct(mr, true)) {
2488 io_mem_write(mr, addr1, val, 4);
2489 } else {
2490 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
2491 ptr = qemu_get_ram_ptr(addr1);
2492 stl_p(ptr, val);
2494 if (unlikely(in_migration)) {
2495 if (!cpu_physical_memory_is_dirty(addr1)) {
2496 /* invalidate code */
2497 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2498 /* set dirty bit */
2499 cpu_physical_memory_set_dirty_flags(
2500 addr1, (0xff & ~CODE_DIRTY_FLAG));
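/* Editor's note (illustrative sketch, not built): the typical caller of
 * stl_phys_notdirty() is a target MMU that sets accessed/dirty bits in a
 * guest page-table entry during a software page-table walk; using the
 * notdirty variant keeps that bookkeeping write from being recorded as a
 * guest modification of the page holding the PTE.  EXAMPLE_PTE_ACCESSED and
 * the function name are invented. */
#if 0
#define EXAMPLE_PTE_ACCESSED 0x20

static void example_set_pte_accessed(hwaddr pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);  /* pte_addr assumed 4-byte aligned */

    if (!(pte & EXAMPLE_PTE_ACCESSED)) {
        stl_phys_notdirty(pte_addr, pte | EXAMPLE_PTE_ACCESSED);
    }
}
#endif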
2506 /* warning: addr must be aligned */
2507 static inline void stl_phys_internal(hwaddr addr, uint32_t val,
2508 enum device_endian endian)
2510 uint8_t *ptr;
2511 MemoryRegion *mr;
2512 hwaddr l = 4;
2513 hwaddr addr1;
2515 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2516 true);
2517 if (l < 4 || !memory_access_is_direct(mr, true)) {
2518 #if defined(TARGET_WORDS_BIGENDIAN)
2519 if (endian == DEVICE_LITTLE_ENDIAN) {
2520 val = bswap32(val);
2522 #else
2523 if (endian == DEVICE_BIG_ENDIAN) {
2524 val = bswap32(val);
2526 #endif
2527 io_mem_write(mr, addr1, val, 4);
2528 } else {
2529 /* RAM case */
2530 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
2531 ptr = qemu_get_ram_ptr(addr1);
2532 switch (endian) {
2533 case DEVICE_LITTLE_ENDIAN:
2534 stl_le_p(ptr, val);
2535 break;
2536 case DEVICE_BIG_ENDIAN:
2537 stl_be_p(ptr, val);
2538 break;
2539 default:
2540 stl_p(ptr, val);
2541 break;
2543 invalidate_and_set_dirty(addr1, 4);
2547 void stl_phys(hwaddr addr, uint32_t val)
2549 stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
2552 void stl_le_phys(hwaddr addr, uint32_t val)
2554 stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
2557 void stl_be_phys(hwaddr addr, uint32_t val)
2559 stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
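/* Editor's note (illustrative sketch, not built): pairing the load and store
 * helpers for a read-modify-write of a guest-visible 32-bit word that is
 * little-endian by specification.  example_set_status_bit is an invented
 * name. */
#if 0
static void example_set_status_bit(hwaddr status_addr, uint32_t bit)
{
    uint32_t status = ldl_le_phys(status_addr);

    stl_le_phys(status_addr, status | bit);
}
#endif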
2562 /* XXX: optimize */
2563 void stb_phys(hwaddr addr, uint32_t val)
2565 uint8_t v = val;
2566 cpu_physical_memory_write(addr, &v, 1);
2569 /* warning: addr must be aligned */
2570 static inline void stw_phys_internal(hwaddr addr, uint32_t val,
2571 enum device_endian endian)
2573 uint8_t *ptr;
2574 MemoryRegion *mr;
2575 hwaddr l = 2;
2576 hwaddr addr1;
2578 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2579 true);
2580 if (l < 2 || !memory_access_is_direct(mr, true)) {
2581 #if defined(TARGET_WORDS_BIGENDIAN)
2582 if (endian == DEVICE_LITTLE_ENDIAN) {
2583 val = bswap16(val);
2585 #else
2586 if (endian == DEVICE_BIG_ENDIAN) {
2587 val = bswap16(val);
2589 #endif
2590 io_mem_write(mr, addr1, val, 2);
2591 } else {
2592 /* RAM case */
2593 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
2594 ptr = qemu_get_ram_ptr(addr1);
2595 switch (endian) {
2596 case DEVICE_LITTLE_ENDIAN:
2597 stw_le_p(ptr, val);
2598 break;
2599 case DEVICE_BIG_ENDIAN:
2600 stw_be_p(ptr, val);
2601 break;
2602 default:
2603 stw_p(ptr, val);
2604 break;
2606 invalidate_and_set_dirty(addr1, 2);
2610 void stw_phys(hwaddr addr, uint32_t val)
2612 stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
2615 void stw_le_phys(hwaddr addr, uint32_t val)
2617 stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
2620 void stw_be_phys(hwaddr addr, uint32_t val)
2622 stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
2625 /* XXX: optimize */
2626 void stq_phys(hwaddr addr, uint64_t val)
2628 val = tswap64(val);
2629 cpu_physical_memory_write(addr, &val, 8);
2632 void stq_le_phys(hwaddr addr, uint64_t val)
2634 val = cpu_to_le64(val);
2635 cpu_physical_memory_write(addr, &val, 8);
2638 void stq_be_phys(hwaddr addr, uint64_t val)
2640 val = cpu_to_be64(val);
2641 cpu_physical_memory_write(addr, &val, 8);
2644 /* virtual memory access for debug (includes writing to ROM) */
2645 int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
2646 uint8_t *buf, int len, int is_write)
2648 int l;
2649 hwaddr phys_addr;
2650 target_ulong page;
2652 while (len > 0) {
2653 page = addr & TARGET_PAGE_MASK;
2654 phys_addr = cpu_get_phys_page_debug(cpu, page);
2655 /* if no physical page is mapped, return an error */
2656 if (phys_addr == -1)
2657 return -1;
2658 l = (page + TARGET_PAGE_SIZE) - addr;
2659 if (l > len)
2660 l = len;
2661 phys_addr += (addr & ~TARGET_PAGE_MASK);
2662 if (is_write)
2663 cpu_physical_memory_write_rom(phys_addr, buf, l);
2664 else
2665 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
2666 len -= l;
2667 buf += l;
2668 addr += l;
2670 return 0;
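/* Editor's note (illustrative sketch, not built): callers such as the gdb
 * stub and the monitor's memory dump go through cpu_memory_rw_debug() because
 * it translates one guest page at a time and writes through the ROM path.
 * example_read_guest_buf is an invented wrapper. */
#if 0
static int example_read_guest_buf(CPUState *cpu, target_ulong vaddr,
                                  uint8_t *buf, int size)
{
    /* Returns 0 on success, -1 if any page in the range is unmapped. */
    return cpu_memory_rw_debug(cpu, vaddr, buf, size, 0 /* read */);
}
#endif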
2672 #endif
2674 #if !defined(CONFIG_USER_ONLY)
2677 * A helper function for the _utterly broken_ virtio device model to find out if
2678  * it's running on a big-endian machine. Don't do this at home, kids!
2680 bool virtio_is_big_endian(void);
2681 bool virtio_is_big_endian(void)
2683 #if defined(TARGET_WORDS_BIGENDIAN)
2684 return true;
2685 #else
2686 return false;
2687 #endif
2690 #endif
2692 #ifndef CONFIG_USER_ONLY
2693 bool cpu_physical_memory_is_io(hwaddr phys_addr)
2695 MemoryRegion *mr;
2696 hwaddr l = 1;
2698 mr = address_space_translate(&address_space_memory,
2699 phys_addr, &phys_addr, &l, false);
2701 return !(memory_region_is_ram(mr) ||
2702 memory_region_is_romd(mr));
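/* Editor's note (illustrative sketch, not built): cpu_physical_memory_is_io()
 * lets debug code skip MMIO so a dump does not trigger device side effects.
 * example_dump_word is an invented name; HWADDR_PRIx is the standard hwaddr
 * formatting macro. */
#if 0
static void example_dump_word(hwaddr addr)
{
    if (cpu_physical_memory_is_io(addr)) {
        printf("0x%" HWADDR_PRIx ": <I/O, skipped>\n", addr);
    } else {
        /* addr is assumed to be 4-byte aligned for ldl_phys(). */
        printf("0x%" HWADDR_PRIx ": 0x%08x\n", addr, ldl_phys(addr));
    }
}
#endif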
2705 void qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
2707 RAMBlock *block;
2709 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
2710 func(block->host, block->offset, block->length, opaque);
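/* Editor's note (illustrative sketch, not built): a callback matching the
 * func(host, offset, length, opaque) invocation above, used here to total the
 * registered RAM.  The example_* names are invented. */
#if 0
static void example_count_block(void *host, ram_addr_t offset,
                                ram_addr_t length, void *opaque)
{
    ram_addr_t *total = opaque;

    *total += length;
}

static ram_addr_t example_total_ram(void)
{
    ram_addr_t total = 0;

    qemu_ram_foreach_block(example_count_block, &total);
    return total;
}
#endif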
2713 #endif