exec: pass hw address to phys_page_find
[qemu/cris-port.git] / exec.c
blob b528dad76a6d129cc9f07470a243c3678ecdf584
1 /*
2 * Virtual page mapping
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 #include "config.h"
20 #ifdef _WIN32
21 #include <windows.h>
22 #else
23 #include <sys/types.h>
24 #include <sys/mman.h>
25 #endif
27 #include "qemu-common.h"
28 #include "cpu.h"
29 #include "tcg.h"
30 #include "hw/hw.h"
31 #include "hw/qdev.h"
32 #include "qemu/osdep.h"
33 #include "sysemu/kvm.h"
34 #include "sysemu/sysemu.h"
35 #include "hw/xen/xen.h"
36 #include "qemu/timer.h"
37 #include "qemu/config-file.h"
38 #include "exec/memory.h"
39 #include "sysemu/dma.h"
40 #include "exec/address-spaces.h"
41 #if defined(CONFIG_USER_ONLY)
42 #include <qemu.h>
43 #else /* !CONFIG_USER_ONLY */
44 #include "sysemu/xen-mapcache.h"
45 #include "trace.h"
46 #endif
47 #include "exec/cpu-all.h"
49 #include "exec/cputlb.h"
50 #include "translate-all.h"
52 #include "exec/memory-internal.h"
54 //#define DEBUG_SUBPAGE
56 #if !defined(CONFIG_USER_ONLY)
57 static int in_migration;
59 RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };
61 static MemoryRegion *system_memory;
62 static MemoryRegion *system_io;
64 AddressSpace address_space_io;
65 AddressSpace address_space_memory;
67 MemoryRegion io_mem_rom, io_mem_notdirty;
68 static MemoryRegion io_mem_unassigned;
70 #endif
72 struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
73 /* current CPU in the current thread. It is only valid inside
74 cpu_exec() */
75 DEFINE_TLS(CPUState *, current_cpu);
76 /* 0 = Do not count executed instructions.
77 1 = Precise instruction counting.
78 2 = Adaptive rate instruction counting. */
79 int use_icount;
81 #if !defined(CONFIG_USER_ONLY)
83 typedef struct PhysPageEntry PhysPageEntry;
85 struct PhysPageEntry {
86 /* How many bits to skip to the next level (in units of L2_SIZE). 0 for a leaf. */
87 uint32_t skip : 6;
88 /* index into phys_sections (!skip) or phys_map_nodes (skip) */
89 uint32_t ptr : 26;
92 #define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)
94 /* Size of the L2 (and L3, etc) page tables. */
95 #define ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS
97 #define P_L2_BITS 10
98 #define P_L2_SIZE (1 << P_L2_BITS)
100 #define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
102 typedef PhysPageEntry Node[P_L2_SIZE];
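/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * how a physical address is decomposed when walking this radix tree,
 * mirroring the arithmetic used by phys_page_set_level() and
 * phys_page_find() below.  'addr' and 'level' are hypothetical locals.
 *
 *   hwaddr index = addr >> TARGET_PAGE_BITS;
 *   // slot within a given tree level (level P_L2_LEVELS - 1 is the root):
 *   unsigned slot = (index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1);
 *   // a leaf entry (skip == 0) stores a phys_sections index in .ptr;
 *   // an inner entry (skip != 0) stores a phys_map_nodes index.
 */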
104 struct AddressSpaceDispatch {
105 /* This is a multi-level map on the physical address space.
106 * The bottom level has pointers to MemoryRegionSections.
108 PhysPageEntry phys_map;
109 Node *nodes;
110 MemoryRegionSection *sections;
111 AddressSpace *as;
114 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
115 typedef struct subpage_t {
116 MemoryRegion iomem;
117 AddressSpace *as;
118 hwaddr base;
119 uint16_t sub_section[TARGET_PAGE_SIZE];
120 } subpage_t;
122 #define PHYS_SECTION_UNASSIGNED 0
123 #define PHYS_SECTION_NOTDIRTY 1
124 #define PHYS_SECTION_ROM 2
125 #define PHYS_SECTION_WATCH 3
127 typedef struct PhysPageMap {
128 unsigned sections_nb;
129 unsigned sections_nb_alloc;
130 unsigned nodes_nb;
131 unsigned nodes_nb_alloc;
132 Node *nodes;
133 MemoryRegionSection *sections;
134 } PhysPageMap;
136 static PhysPageMap *prev_map;
137 static PhysPageMap next_map;
139 static void io_mem_init(void);
140 static void memory_map_init(void);
142 static MemoryRegion io_mem_watch;
143 #endif
145 #if !defined(CONFIG_USER_ONLY)
147 static void phys_map_node_reserve(unsigned nodes)
149 if (next_map.nodes_nb + nodes > next_map.nodes_nb_alloc) {
150 next_map.nodes_nb_alloc = MAX(next_map.nodes_nb_alloc * 2,
151 16);
152 next_map.nodes_nb_alloc = MAX(next_map.nodes_nb_alloc,
153 next_map.nodes_nb + nodes);
154 next_map.nodes = g_renew(Node, next_map.nodes,
155 next_map.nodes_nb_alloc);
159 static uint32_t phys_map_node_alloc(void)
161 unsigned i;
162 uint32_t ret;
164 ret = next_map.nodes_nb++;
165 assert(ret != PHYS_MAP_NODE_NIL);
166 assert(ret != next_map.nodes_nb_alloc);
167 for (i = 0; i < P_L2_SIZE; ++i) {
168 next_map.nodes[ret][i].skip = 1;
169 next_map.nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
171 return ret;
174 static void phys_page_set_level(PhysPageEntry *lp, hwaddr *index,
175 hwaddr *nb, uint16_t leaf,
176 int level)
178 PhysPageEntry *p;
179 int i;
180 hwaddr step = (hwaddr)1 << (level * P_L2_BITS);
182 if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
183 lp->ptr = phys_map_node_alloc();
184 p = next_map.nodes[lp->ptr];
185 if (level == 0) {
186 for (i = 0; i < P_L2_SIZE; i++) {
187 p[i].skip = 0;
188 p[i].ptr = PHYS_SECTION_UNASSIGNED;
191 } else {
192 p = next_map.nodes[lp->ptr];
194 lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];
196 while (*nb && lp < &p[P_L2_SIZE]) {
197 if ((*index & (step - 1)) == 0 && *nb >= step) {
198 lp->skip = 0;
199 lp->ptr = leaf;
200 *index += step;
201 *nb -= step;
202 } else {
203 phys_page_set_level(lp, index, nb, leaf, level - 1);
205 ++lp;
209 static void phys_page_set(AddressSpaceDispatch *d,
210 hwaddr index, hwaddr nb,
211 uint16_t leaf)
213 /* Wildly overreserve - it doesn't matter much. */
214 phys_map_node_reserve(3 * P_L2_LEVELS);
216 phys_page_set_level(&d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
219 static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
220 Node *nodes, MemoryRegionSection *sections)
222 PhysPageEntry *p;
223 hwaddr index = addr >> TARGET_PAGE_BITS;
224 int i;
226 for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
227 if (lp.ptr == PHYS_MAP_NODE_NIL) {
228 return &sections[PHYS_SECTION_UNASSIGNED];
230 p = nodes[lp.ptr];
231 lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
233 return &sections[lp.ptr];
236 bool memory_region_is_unassigned(MemoryRegion *mr)
238 return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
239 && mr != &io_mem_watch;
242 static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
243 hwaddr addr,
244 bool resolve_subpage)
246 MemoryRegionSection *section;
247 subpage_t *subpage;
249 section = phys_page_find(d->phys_map, addr, d->nodes, d->sections);
250 if (resolve_subpage && section->mr->subpage) {
251 subpage = container_of(section->mr, subpage_t, iomem);
252 section = &d->sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
254 return section;
257 static MemoryRegionSection *
258 address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
259 hwaddr *plen, bool resolve_subpage)
261 MemoryRegionSection *section;
262 Int128 diff;
264 section = address_space_lookup_region(d, addr, resolve_subpage);
265 /* Compute offset within MemoryRegionSection */
266 addr -= section->offset_within_address_space;
268 /* Compute offset within MemoryRegion */
269 *xlat = addr + section->offset_within_region;
271 diff = int128_sub(section->mr->size, int128_make64(addr));
272 *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
273 return section;
276 MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
277 hwaddr *xlat, hwaddr *plen,
278 bool is_write)
280 IOMMUTLBEntry iotlb;
281 MemoryRegionSection *section;
282 MemoryRegion *mr;
283 hwaddr len = *plen;
285 for (;;) {
286 section = address_space_translate_internal(as->dispatch, addr, &addr, plen, true);
287 mr = section->mr;
289 if (!mr->iommu_ops) {
290 break;
293 iotlb = mr->iommu_ops->translate(mr, addr);
294 addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
295 | (addr & iotlb.addr_mask));
296 len = MIN(len, (addr | iotlb.addr_mask) - addr + 1);
297 if (!(iotlb.perm & (1 << is_write))) {
298 mr = &io_mem_unassigned;
299 break;
302 as = iotlb.target_as;
305 *plen = len;
306 *xlat = addr;
307 return mr;
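/*
 * Usage sketch (editor's illustration, assuming a 4-byte read on the
 * system memory address space; it mirrors ldl_phys_internal() further
 * down in this file):
 *
 *   hwaddr xlat, plen = 4;
 *   MemoryRegion *mr = address_space_translate(&address_space_memory,
 *                                              addr, &xlat, &plen, false);
 *   if (plen == 4 && memory_access_is_direct(mr, false)) {
 *       // RAM/ROMD: access the host mapping via qemu_get_ram_ptr()
 *   } else {
 *       // MMIO: go through io_mem_read() on the region
 *   }
 */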
310 MemoryRegionSection *
311 address_space_translate_for_iotlb(AddressSpace *as, hwaddr addr, hwaddr *xlat,
312 hwaddr *plen)
314 MemoryRegionSection *section;
315 section = address_space_translate_internal(as->dispatch, addr, xlat, plen, false);
317 assert(!section->mr->iommu_ops);
318 return section;
320 #endif
322 void cpu_exec_init_all(void)
324 #if !defined(CONFIG_USER_ONLY)
325 qemu_mutex_init(&ram_list.mutex);
326 memory_map_init();
327 io_mem_init();
328 #endif
331 #if !defined(CONFIG_USER_ONLY)
333 static int cpu_common_post_load(void *opaque, int version_id)
335 CPUState *cpu = opaque;
337 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
338 version_id is increased. */
339 cpu->interrupt_request &= ~0x01;
340 tlb_flush(cpu->env_ptr, 1);
342 return 0;
345 const VMStateDescription vmstate_cpu_common = {
346 .name = "cpu_common",
347 .version_id = 1,
348 .minimum_version_id = 1,
349 .minimum_version_id_old = 1,
350 .post_load = cpu_common_post_load,
351 .fields = (VMStateField []) {
352 VMSTATE_UINT32(halted, CPUState),
353 VMSTATE_UINT32(interrupt_request, CPUState),
354 VMSTATE_END_OF_LIST()
358 #endif
360 CPUState *qemu_get_cpu(int index)
362 CPUState *cpu;
364 CPU_FOREACH(cpu) {
365 if (cpu->cpu_index == index) {
366 return cpu;
370 return NULL;
373 void cpu_exec_init(CPUArchState *env)
375 CPUState *cpu = ENV_GET_CPU(env);
376 CPUClass *cc = CPU_GET_CLASS(cpu);
377 CPUState *some_cpu;
378 int cpu_index;
380 #if defined(CONFIG_USER_ONLY)
381 cpu_list_lock();
382 #endif
383 cpu_index = 0;
384 CPU_FOREACH(some_cpu) {
385 cpu_index++;
387 cpu->cpu_index = cpu_index;
388 cpu->numa_node = 0;
389 QTAILQ_INIT(&env->breakpoints);
390 QTAILQ_INIT(&env->watchpoints);
391 #ifndef CONFIG_USER_ONLY
392 cpu->thread_id = qemu_get_thread_id();
393 #endif
394 QTAILQ_INSERT_TAIL(&cpus, cpu, node);
395 #if defined(CONFIG_USER_ONLY)
396 cpu_list_unlock();
397 #endif
398 if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
399 vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
401 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
402 register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
403 cpu_save, cpu_load, env);
404 assert(cc->vmsd == NULL);
405 assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
406 #endif
407 if (cc->vmsd != NULL) {
408 vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
412 #if defined(TARGET_HAS_ICE)
413 #if defined(CONFIG_USER_ONLY)
414 static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
416 tb_invalidate_phys_page_range(pc, pc + 1, 0);
418 #else
419 static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
421 hwaddr phys = cpu_get_phys_page_debug(cpu, pc);
422 if (phys != -1) {
423 tb_invalidate_phys_addr(phys | (pc & ~TARGET_PAGE_MASK));
426 #endif
427 #endif /* TARGET_HAS_ICE */
429 #if defined(CONFIG_USER_ONLY)
430 void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
435 int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
436 int flags, CPUWatchpoint **watchpoint)
438 return -ENOSYS;
440 #else
441 /* Add a watchpoint. */
442 int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
443 int flags, CPUWatchpoint **watchpoint)
445 target_ulong len_mask = ~(len - 1);
446 CPUWatchpoint *wp;
448 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
449 if ((len & (len - 1)) || (addr & ~len_mask) ||
450 len == 0 || len > TARGET_PAGE_SIZE) {
451 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
452 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
453 return -EINVAL;
455 wp = g_malloc(sizeof(*wp));
457 wp->vaddr = addr;
458 wp->len_mask = len_mask;
459 wp->flags = flags;
461 /* keep all GDB-injected watchpoints in front */
462 if (flags & BP_GDB)
463 QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
464 else
465 QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
467 tlb_flush_page(env, addr);
469 if (watchpoint)
470 *watchpoint = wp;
471 return 0;
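/*
 * Usage sketch (editor's illustration): len must be a power of two, no
 * larger than TARGET_PAGE_SIZE, and addr must be len-aligned, per the
 * sanity check above.  'env' is the caller's CPUArchState.
 *
 *   CPUWatchpoint *wp;
 *   if (cpu_watchpoint_insert(env, addr, 4, BP_MEM_WRITE, &wp) < 0) {
 *       // invalid length or alignment
 *   }
 */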
474 /* Remove a specific watchpoint. */
475 int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
476 int flags)
478 target_ulong len_mask = ~(len - 1);
479 CPUWatchpoint *wp;
481 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
482 if (addr == wp->vaddr && len_mask == wp->len_mask
483 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
484 cpu_watchpoint_remove_by_ref(env, wp);
485 return 0;
488 return -ENOENT;
491 /* Remove a specific watchpoint by reference. */
492 void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
494 QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
496 tlb_flush_page(env, watchpoint->vaddr);
498 g_free(watchpoint);
501 /* Remove all matching watchpoints. */
502 void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
504 CPUWatchpoint *wp, *next;
506 QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
507 if (wp->flags & mask)
508 cpu_watchpoint_remove_by_ref(env, wp);
511 #endif
513 /* Add a breakpoint. */
514 int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
515 CPUBreakpoint **breakpoint)
517 #if defined(TARGET_HAS_ICE)
518 CPUBreakpoint *bp;
520 bp = g_malloc(sizeof(*bp));
522 bp->pc = pc;
523 bp->flags = flags;
525 /* keep all GDB-injected breakpoints in front */
526 if (flags & BP_GDB) {
527 QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
528 } else {
529 QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
532 breakpoint_invalidate(ENV_GET_CPU(env), pc);
534 if (breakpoint) {
535 *breakpoint = bp;
537 return 0;
538 #else
539 return -ENOSYS;
540 #endif
543 /* Remove a specific breakpoint. */
544 int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
546 #if defined(TARGET_HAS_ICE)
547 CPUBreakpoint *bp;
549 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
550 if (bp->pc == pc && bp->flags == flags) {
551 cpu_breakpoint_remove_by_ref(env, bp);
552 return 0;
555 return -ENOENT;
556 #else
557 return -ENOSYS;
558 #endif
561 /* Remove a specific breakpoint by reference. */
562 void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
564 #if defined(TARGET_HAS_ICE)
565 QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
567 breakpoint_invalidate(ENV_GET_CPU(env), breakpoint->pc);
569 g_free(breakpoint);
570 #endif
573 /* Remove all matching breakpoints. */
574 void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
576 #if defined(TARGET_HAS_ICE)
577 CPUBreakpoint *bp, *next;
579 QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
580 if (bp->flags & mask)
581 cpu_breakpoint_remove_by_ref(env, bp);
583 #endif
586 /* enable or disable single step mode. EXCP_DEBUG is returned by the
587 CPU loop after each instruction */
588 void cpu_single_step(CPUState *cpu, int enabled)
590 #if defined(TARGET_HAS_ICE)
591 if (cpu->singlestep_enabled != enabled) {
592 cpu->singlestep_enabled = enabled;
593 if (kvm_enabled()) {
594 kvm_update_guest_debug(cpu, 0);
595 } else {
596 /* must flush all the translated code to avoid inconsistencies */
597 /* XXX: only flush what is necessary */
598 CPUArchState *env = cpu->env_ptr;
599 tb_flush(env);
602 #endif
605 void cpu_abort(CPUArchState *env, const char *fmt, ...)
607 CPUState *cpu = ENV_GET_CPU(env);
608 va_list ap;
609 va_list ap2;
611 va_start(ap, fmt);
612 va_copy(ap2, ap);
613 fprintf(stderr, "qemu: fatal: ");
614 vfprintf(stderr, fmt, ap);
615 fprintf(stderr, "\n");
616 cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
617 if (qemu_log_enabled()) {
618 qemu_log("qemu: fatal: ");
619 qemu_log_vprintf(fmt, ap2);
620 qemu_log("\n");
621 log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
622 qemu_log_flush();
623 qemu_log_close();
625 va_end(ap2);
626 va_end(ap);
627 #if defined(CONFIG_USER_ONLY)
629 struct sigaction act;
630 sigfillset(&act.sa_mask);
631 act.sa_handler = SIG_DFL;
632 sigaction(SIGABRT, &act, NULL);
634 #endif
635 abort();
638 #if !defined(CONFIG_USER_ONLY)
639 static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
641 RAMBlock *block;
643 /* The list is protected by the iothread lock here. */
644 block = ram_list.mru_block;
645 if (block && addr - block->offset < block->length) {
646 goto found;
648 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
649 if (addr - block->offset < block->length) {
650 goto found;
654 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
655 abort();
657 found:
658 ram_list.mru_block = block;
659 return block;
662 static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
663 uintptr_t length)
665 RAMBlock *block;
666 ram_addr_t start1;
668 block = qemu_get_ram_block(start);
669 assert(block == qemu_get_ram_block(end - 1));
670 start1 = (uintptr_t)block->host + (start - block->offset);
671 cpu_tlb_reset_dirty_all(start1, length);
674 /* Note: start and end must be within the same ram block. */
675 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
676 int dirty_flags)
678 uintptr_t length;
680 start &= TARGET_PAGE_MASK;
681 end = TARGET_PAGE_ALIGN(end);
683 length = end - start;
684 if (length == 0)
685 return;
686 cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
688 if (tcg_enabled()) {
689 tlb_reset_dirty_range_all(start, end, length);
693 static int cpu_physical_memory_set_dirty_tracking(int enable)
695 int ret = 0;
696 in_migration = enable;
697 return ret;
700 hwaddr memory_region_section_get_iotlb(CPUArchState *env,
701 MemoryRegionSection *section,
702 target_ulong vaddr,
703 hwaddr paddr, hwaddr xlat,
704 int prot,
705 target_ulong *address)
707 hwaddr iotlb;
708 CPUWatchpoint *wp;
710 if (memory_region_is_ram(section->mr)) {
711 /* Normal RAM. */
712 iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
713 + xlat;
714 if (!section->readonly) {
715 iotlb |= PHYS_SECTION_NOTDIRTY;
716 } else {
717 iotlb |= PHYS_SECTION_ROM;
719 } else {
720 iotlb = section - address_space_memory.dispatch->sections;
721 iotlb += xlat;
724 /* Make accesses to pages with watchpoints go via the
725 watchpoint trap routines. */
726 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
727 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
728 /* Avoid trapping reads of pages with a write breakpoint. */
729 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
730 iotlb = PHYS_SECTION_WATCH + paddr;
731 *address |= TLB_MMIO;
732 break;
737 return iotlb;
739 #endif /* defined(CONFIG_USER_ONLY) */
741 #if !defined(CONFIG_USER_ONLY)
743 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
744 uint16_t section);
745 static subpage_t *subpage_init(AddressSpace *as, hwaddr base);
747 static void *(*phys_mem_alloc)(size_t size) = qemu_anon_ram_alloc;
750 * Set a custom physical guest memory allocator.
751 * Accelerators with unusual needs may need this. Hopefully, we can
752 * get rid of it eventually.
754 void phys_mem_set_alloc(void *(*alloc)(size_t))
756 phys_mem_alloc = alloc;
759 static uint16_t phys_section_add(MemoryRegionSection *section)
761 /* The physical section number is ORed with a page-aligned
762 * pointer to produce the iotlb entries. Thus it should
763 * never overflow into the page-aligned value.
765 assert(next_map.sections_nb < TARGET_PAGE_SIZE);
767 if (next_map.sections_nb == next_map.sections_nb_alloc) {
768 next_map.sections_nb_alloc = MAX(next_map.sections_nb_alloc * 2,
769 16);
770 next_map.sections = g_renew(MemoryRegionSection, next_map.sections,
771 next_map.sections_nb_alloc);
773 next_map.sections[next_map.sections_nb] = *section;
774 memory_region_ref(section->mr);
775 return next_map.sections_nb++;
778 static void phys_section_destroy(MemoryRegion *mr)
780 memory_region_unref(mr);
782 if (mr->subpage) {
783 subpage_t *subpage = container_of(mr, subpage_t, iomem);
784 memory_region_destroy(&subpage->iomem);
785 g_free(subpage);
789 static void phys_sections_free(PhysPageMap *map)
791 while (map->sections_nb > 0) {
792 MemoryRegionSection *section = &map->sections[--map->sections_nb];
793 phys_section_destroy(section->mr);
795 g_free(map->sections);
796 g_free(map->nodes);
797 g_free(map);
800 static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
802 subpage_t *subpage;
803 hwaddr base = section->offset_within_address_space
804 & TARGET_PAGE_MASK;
805 MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
806 next_map.nodes, next_map.sections);
807 MemoryRegionSection subsection = {
808 .offset_within_address_space = base,
809 .size = int128_make64(TARGET_PAGE_SIZE),
811 hwaddr start, end;
813 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
815 if (!(existing->mr->subpage)) {
816 subpage = subpage_init(d->as, base);
817 subsection.mr = &subpage->iomem;
818 phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
819 phys_section_add(&subsection));
820 } else {
821 subpage = container_of(existing->mr, subpage_t, iomem);
823 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
824 end = start + int128_get64(section->size) - 1;
825 subpage_register(subpage, start, end, phys_section_add(section));
829 static void register_multipage(AddressSpaceDispatch *d,
830 MemoryRegionSection *section)
832 hwaddr start_addr = section->offset_within_address_space;
833 uint16_t section_index = phys_section_add(section);
834 uint64_t num_pages = int128_get64(int128_rshift(section->size,
835 TARGET_PAGE_BITS));
837 assert(num_pages);
838 phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
841 static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
843 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
844 AddressSpaceDispatch *d = as->next_dispatch;
845 MemoryRegionSection now = *section, remain = *section;
846 Int128 page_size = int128_make64(TARGET_PAGE_SIZE);
848 if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
849 uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
850 - now.offset_within_address_space;
852 now.size = int128_min(int128_make64(left), now.size);
853 register_subpage(d, &now);
854 } else {
855 now.size = int128_zero();
857 while (int128_ne(remain.size, now.size)) {
858 remain.size = int128_sub(remain.size, now.size);
859 remain.offset_within_address_space += int128_get64(now.size);
860 remain.offset_within_region += int128_get64(now.size);
861 now = remain;
862 if (int128_lt(remain.size, page_size)) {
863 register_subpage(d, &now);
864 } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
865 now.size = page_size;
866 register_subpage(d, &now);
867 } else {
868 now.size = int128_and(now.size, int128_neg(page_size));
869 register_multipage(d, &now);
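/*
 * Worked example (editor's illustration, assuming 0x1000-byte target
 * pages): a section at offset 0x1800 with size 0x3000 is registered as a
 * subpage head [0x1800, 0x2000), one full-page run [0x2000, 0x4000) via
 * register_multipage(), and a subpage tail [0x4000, 0x4800).
 */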
874 void qemu_flush_coalesced_mmio_buffer(void)
876 if (kvm_enabled())
877 kvm_flush_coalesced_mmio_buffer();
880 void qemu_mutex_lock_ramlist(void)
882 qemu_mutex_lock(&ram_list.mutex);
885 void qemu_mutex_unlock_ramlist(void)
887 qemu_mutex_unlock(&ram_list.mutex);
890 #ifdef __linux__
892 #include <sys/vfs.h>
894 #define HUGETLBFS_MAGIC 0x958458f6
896 static long gethugepagesize(const char *path)
898 struct statfs fs;
899 int ret;
901 do {
902 ret = statfs(path, &fs);
903 } while (ret != 0 && errno == EINTR);
905 if (ret != 0) {
906 perror(path);
907 return 0;
910 if (fs.f_type != HUGETLBFS_MAGIC)
911 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
913 return fs.f_bsize;
916 static sigjmp_buf sigjump;
918 static void sigbus_handler(int signal)
920 siglongjmp(sigjump, 1);
923 static void *file_ram_alloc(RAMBlock *block,
924 ram_addr_t memory,
925 const char *path)
927 char *filename;
928 char *sanitized_name;
929 char *c;
930 void *area;
931 int fd;
932 unsigned long hpagesize;
934 hpagesize = gethugepagesize(path);
935 if (!hpagesize) {
936 return NULL;
939 if (memory < hpagesize) {
940 return NULL;
943 if (kvm_enabled() && !kvm_has_sync_mmu()) {
944 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
945 return NULL;
948 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
949 sanitized_name = g_strdup(block->mr->name);
950 for (c = sanitized_name; *c != '\0'; c++) {
951 if (*c == '/')
952 *c = '_';
955 filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
956 sanitized_name);
957 g_free(sanitized_name);
959 fd = mkstemp(filename);
960 if (fd < 0) {
961 perror("unable to create backing store for hugepages");
962 g_free(filename);
963 return NULL;
965 unlink(filename);
966 g_free(filename);
968 memory = (memory+hpagesize-1) & ~(hpagesize-1);
971 * ftruncate is not supported by hugetlbfs in older
972 * hosts, so don't bother bailing out on errors.
973 * If anything goes wrong with it under other filesystems,
974 * mmap will fail.
976 if (ftruncate(fd, memory))
977 perror("ftruncate");
979 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
980 if (area == MAP_FAILED) {
981 perror("file_ram_alloc: can't mmap RAM pages");
982 close(fd);
983 return (NULL);
986 if (mem_prealloc) {
987 int ret, i;
988 struct sigaction act, oldact;
989 sigset_t set, oldset;
991 memset(&act, 0, sizeof(act));
992 act.sa_handler = &sigbus_handler;
993 act.sa_flags = 0;
995 ret = sigaction(SIGBUS, &act, &oldact);
996 if (ret) {
997 perror("file_ram_alloc: failed to install signal handler");
998 exit(1);
1001 /* unblock SIGBUS */
1002 sigemptyset(&set);
1003 sigaddset(&set, SIGBUS);
1004 pthread_sigmask(SIG_UNBLOCK, &set, &oldset);
1006 if (sigsetjmp(sigjump, 1)) {
1007 fprintf(stderr, "file_ram_alloc: failed to preallocate pages\n");
1008 exit(1);
1011 /* MAP_POPULATE silently ignores failures */
1012 for (i = 0; i < (memory/hpagesize)-1; i++) {
1013 memset(area + (hpagesize*i), 0, 1);
1016 ret = sigaction(SIGBUS, &oldact, NULL);
1017 if (ret) {
1018 perror("file_ram_alloc: failed to reinstall signal handler");
1019 exit(1);
1022 pthread_sigmask(SIG_SETMASK, &oldset, NULL);
1025 block->fd = fd;
1026 return area;
1028 #else
1029 static void *file_ram_alloc(RAMBlock *block,
1030 ram_addr_t memory,
1031 const char *path)
1033 fprintf(stderr, "-mem-path not supported on this host\n");
1034 exit(1);
1036 #endif
1038 static ram_addr_t find_ram_offset(ram_addr_t size)
1040 RAMBlock *block, *next_block;
1041 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
1043 assert(size != 0); /* it would hand out same offset multiple times */
1045 if (QTAILQ_EMPTY(&ram_list.blocks))
1046 return 0;
1048 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1049 ram_addr_t end, next = RAM_ADDR_MAX;
1051 end = block->offset + block->length;
1053 QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
1054 if (next_block->offset >= end) {
1055 next = MIN(next, next_block->offset);
1058 if (next - end >= size && next - end < mingap) {
1059 offset = end;
1060 mingap = next - end;
1064 if (offset == RAM_ADDR_MAX) {
1065 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
1066 (uint64_t)size);
1067 abort();
1070 return offset;
1073 ram_addr_t last_ram_offset(void)
1075 RAMBlock *block;
1076 ram_addr_t last = 0;
1078 QTAILQ_FOREACH(block, &ram_list.blocks, next)
1079 last = MAX(last, block->offset + block->length);
1081 return last;
1084 static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1086 int ret;
1088 /* Use MADV_DONTDUMP if the user doesn't want the guest memory in the core dump */
1089 if (!qemu_opt_get_bool(qemu_get_machine_opts(),
1090 "dump-guest-core", true)) {
1091 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1092 if (ret) {
1093 perror("qemu_madvise");
1094 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1095 "but dump_guest_core=off specified\n");
1100 void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
1102 RAMBlock *new_block, *block;
1104 new_block = NULL;
1105 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1106 if (block->offset == addr) {
1107 new_block = block;
1108 break;
1111 assert(new_block);
1112 assert(!new_block->idstr[0]);
1114 if (dev) {
1115 char *id = qdev_get_dev_path(dev);
1116 if (id) {
1117 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
1118 g_free(id);
1121 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1123 /* This assumes the iothread lock is taken here too. */
1124 qemu_mutex_lock_ramlist();
1125 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1126 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
1127 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1128 new_block->idstr);
1129 abort();
1132 qemu_mutex_unlock_ramlist();
1135 static int memory_try_enable_merging(void *addr, size_t len)
1137 if (!qemu_opt_get_bool(qemu_get_machine_opts(), "mem-merge", true)) {
1138 /* disabled by the user */
1139 return 0;
1142 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1145 ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
1146 MemoryRegion *mr)
1148 RAMBlock *block, *new_block;
1150 size = TARGET_PAGE_ALIGN(size);
1151 new_block = g_malloc0(sizeof(*new_block));
1152 new_block->fd = -1;
1154 /* This assumes the iothread lock is taken here too. */
1155 qemu_mutex_lock_ramlist();
1156 new_block->mr = mr;
1157 new_block->offset = find_ram_offset(size);
1158 if (host) {
1159 new_block->host = host;
1160 new_block->flags |= RAM_PREALLOC_MASK;
1161 } else if (xen_enabled()) {
1162 if (mem_path) {
1163 fprintf(stderr, "-mem-path not supported with Xen\n");
1164 exit(1);
1166 xen_ram_alloc(new_block->offset, size, mr);
1167 } else {
1168 if (mem_path) {
1169 if (phys_mem_alloc != qemu_anon_ram_alloc) {
1171 * file_ram_alloc() needs to allocate just like
1172 * phys_mem_alloc, but we haven't bothered to provide
1173 * a hook there.
1175 fprintf(stderr,
1176 "-mem-path not supported with this accelerator\n");
1177 exit(1);
1179 new_block->host = file_ram_alloc(new_block, size, mem_path);
1181 if (!new_block->host) {
1182 new_block->host = phys_mem_alloc(size);
1183 if (!new_block->host) {
1184 fprintf(stderr, "Cannot set up guest memory '%s': %s\n",
1185 new_block->mr->name, strerror(errno));
1186 exit(1);
1188 memory_try_enable_merging(new_block->host, size);
1191 new_block->length = size;
1193 /* Keep the list sorted from biggest to smallest block. */
1194 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1195 if (block->length < new_block->length) {
1196 break;
1199 if (block) {
1200 QTAILQ_INSERT_BEFORE(block, new_block, next);
1201 } else {
1202 QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
1204 ram_list.mru_block = NULL;
1206 ram_list.version++;
1207 qemu_mutex_unlock_ramlist();
1209 ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
1210 last_ram_offset() >> TARGET_PAGE_BITS);
1211 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
1212 0, size >> TARGET_PAGE_BITS);
1213 cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);
1215 qemu_ram_setup_dump(new_block->host, size);
1216 qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);
1217 qemu_madvise(new_block->host, size, QEMU_MADV_DONTFORK);
1219 if (kvm_enabled())
1220 kvm_setup_guest_memory(new_block->host, size);
1222 return new_block->offset;
1225 ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
1227 return qemu_ram_alloc_from_ptr(size, NULL, mr);
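/*
 * Usage sketch (editor's illustration; 'mr' is a MemoryRegion already
 * initialized by the caller, and the 64 MiB size is arbitrary):
 *
 *   ram_addr_t offset = qemu_ram_alloc(64 * 1024 * 1024, mr);
 *   void *host = qemu_get_ram_ptr(offset);   // host mapping of the block
 */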
1230 void qemu_ram_free_from_ptr(ram_addr_t addr)
1232 RAMBlock *block;
1234 /* This assumes the iothread lock is taken here too. */
1235 qemu_mutex_lock_ramlist();
1236 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1237 if (addr == block->offset) {
1238 QTAILQ_REMOVE(&ram_list.blocks, block, next);
1239 ram_list.mru_block = NULL;
1240 ram_list.version++;
1241 g_free(block);
1242 break;
1245 qemu_mutex_unlock_ramlist();
1248 void qemu_ram_free(ram_addr_t addr)
1250 RAMBlock *block;
1252 /* This assumes the iothread lock is taken here too. */
1253 qemu_mutex_lock_ramlist();
1254 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1255 if (addr == block->offset) {
1256 QTAILQ_REMOVE(&ram_list.blocks, block, next);
1257 ram_list.mru_block = NULL;
1258 ram_list.version++;
1259 if (block->flags & RAM_PREALLOC_MASK) {
1261 } else if (xen_enabled()) {
1262 xen_invalidate_map_cache_entry(block->host);
1263 #ifndef _WIN32
1264 } else if (block->fd >= 0) {
1265 munmap(block->host, block->length);
1266 close(block->fd);
1267 #endif
1268 } else {
1269 qemu_anon_ram_free(block->host, block->length);
1271 g_free(block);
1272 break;
1275 qemu_mutex_unlock_ramlist();
1279 #ifndef _WIN32
1280 void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1282 RAMBlock *block;
1283 ram_addr_t offset;
1284 int flags;
1285 void *area, *vaddr;
1287 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1288 offset = addr - block->offset;
1289 if (offset < block->length) {
1290 vaddr = block->host + offset;
1291 if (block->flags & RAM_PREALLOC_MASK) {
1293 } else if (xen_enabled()) {
1294 abort();
1295 } else {
1296 flags = MAP_FIXED;
1297 munmap(vaddr, length);
1298 if (block->fd >= 0) {
1299 #ifdef MAP_POPULATE
1300 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
1301 MAP_PRIVATE;
1302 #else
1303 flags |= MAP_PRIVATE;
1304 #endif
1305 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1306 flags, block->fd, offset);
1307 } else {
1309 * Remap needs to match alloc. Accelerators that
1310 * set phys_mem_alloc never remap. If they did,
1311 * we'd need a remap hook here.
1313 assert(phys_mem_alloc == qemu_anon_ram_alloc);
1315 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1316 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1317 flags, -1, 0);
1319 if (area != vaddr) {
1320 fprintf(stderr, "Could not remap addr: "
1321 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
1322 length, addr);
1323 exit(1);
1325 memory_try_enable_merging(vaddr, length);
1326 qemu_ram_setup_dump(vaddr, length);
1328 return;
1332 #endif /* !_WIN32 */
1334 /* Return a host pointer to ram allocated with qemu_ram_alloc.
1335 With the exception of the softmmu code in this file, this should
1336 only be used for local memory (e.g. video ram) that the device owns,
1337 and knows it isn't going to access beyond the end of the block.
1339 It should not be used for general purpose DMA.
1340 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
1342 void *qemu_get_ram_ptr(ram_addr_t addr)
1344 RAMBlock *block = qemu_get_ram_block(addr);
1346 if (xen_enabled()) {
1347 /* We need to check if the requested address is in the RAM
1348 * because we don't want to map the entire memory in QEMU.
1349 * In that case just map until the end of the page.
1351 if (block->offset == 0) {
1352 return xen_map_cache(addr, 0, 0);
1353 } else if (block->host == NULL) {
1354 block->host =
1355 xen_map_cache(block->offset, block->length, 1);
1358 return block->host + (addr - block->offset);
1361 /* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
1362 * but takes a size argument */
1363 static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
1365 if (*size == 0) {
1366 return NULL;
1368 if (xen_enabled()) {
1369 return xen_map_cache(addr, *size, 1);
1370 } else {
1371 RAMBlock *block;
1373 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1374 if (addr - block->offset < block->length) {
1375 if (addr - block->offset + *size > block->length)
1376 *size = block->length - addr + block->offset;
1377 return block->host + (addr - block->offset);
1381 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1382 abort();
1386 /* Some of the softmmu routines need to translate from a host pointer
1387 (typically a TLB entry) back to a ram offset. */
1388 MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
1390 RAMBlock *block;
1391 uint8_t *host = ptr;
1393 if (xen_enabled()) {
1394 *ram_addr = xen_ram_addr_from_mapcache(ptr);
1395 return qemu_get_ram_block(*ram_addr)->mr;
1398 block = ram_list.mru_block;
1399 if (block && block->host && host - block->host < block->length) {
1400 goto found;
1403 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1404 /* This case happens when the block is not mapped. */
1405 if (block->host == NULL) {
1406 continue;
1408 if (host - block->host < block->length) {
1409 goto found;
1413 return NULL;
1415 found:
1416 *ram_addr = block->offset + (host - block->host);
1417 return block->mr;
1420 static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
1421 uint64_t val, unsigned size)
1423 int dirty_flags;
1424 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
1425 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
1426 tb_invalidate_phys_page_fast(ram_addr, size);
1427 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
1429 switch (size) {
1430 case 1:
1431 stb_p(qemu_get_ram_ptr(ram_addr), val);
1432 break;
1433 case 2:
1434 stw_p(qemu_get_ram_ptr(ram_addr), val);
1435 break;
1436 case 4:
1437 stl_p(qemu_get_ram_ptr(ram_addr), val);
1438 break;
1439 default:
1440 abort();
1442 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
1443 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
1444 /* we remove the notdirty callback only if the code has been
1445 flushed */
1446 if (dirty_flags == 0xff) {
1447 CPUArchState *env = current_cpu->env_ptr;
1448 tlb_set_dirty(env, env->mem_io_vaddr);
1452 static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
1453 unsigned size, bool is_write)
1455 return is_write;
1458 static const MemoryRegionOps notdirty_mem_ops = {
1459 .write = notdirty_mem_write,
1460 .valid.accepts = notdirty_mem_accepts,
1461 .endianness = DEVICE_NATIVE_ENDIAN,
1464 /* Generate a debug exception if a watchpoint has been hit. */
1465 static void check_watchpoint(int offset, int len_mask, int flags)
1467 CPUArchState *env = current_cpu->env_ptr;
1468 target_ulong pc, cs_base;
1469 target_ulong vaddr;
1470 CPUWatchpoint *wp;
1471 int cpu_flags;
1473 if (env->watchpoint_hit) {
1474 /* We re-entered the check after replacing the TB. Now raise
1475 * the debug interrupt so that it will trigger after the
1476 * current instruction. */
1477 cpu_interrupt(ENV_GET_CPU(env), CPU_INTERRUPT_DEBUG);
1478 return;
1480 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
1481 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1482 if ((vaddr == (wp->vaddr & len_mask) ||
1483 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
1484 wp->flags |= BP_WATCHPOINT_HIT;
1485 if (!env->watchpoint_hit) {
1486 env->watchpoint_hit = wp;
1487 tb_check_watchpoint(env);
1488 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
1489 env->exception_index = EXCP_DEBUG;
1490 cpu_loop_exit(env);
1491 } else {
1492 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
1493 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
1494 cpu_resume_from_signal(env, NULL);
1497 } else {
1498 wp->flags &= ~BP_WATCHPOINT_HIT;
1503 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
1504 so these check for a hit then pass through to the normal out-of-line
1505 phys routines. */
1506 static uint64_t watch_mem_read(void *opaque, hwaddr addr,
1507 unsigned size)
1509 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
1510 switch (size) {
1511 case 1: return ldub_phys(addr);
1512 case 2: return lduw_phys(addr);
1513 case 4: return ldl_phys(addr);
1514 default: abort();
1518 static void watch_mem_write(void *opaque, hwaddr addr,
1519 uint64_t val, unsigned size)
1521 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
1522 switch (size) {
1523 case 1:
1524 stb_phys(addr, val);
1525 break;
1526 case 2:
1527 stw_phys(addr, val);
1528 break;
1529 case 4:
1530 stl_phys(addr, val);
1531 break;
1532 default: abort();
1536 static const MemoryRegionOps watch_mem_ops = {
1537 .read = watch_mem_read,
1538 .write = watch_mem_write,
1539 .endianness = DEVICE_NATIVE_ENDIAN,
1542 static uint64_t subpage_read(void *opaque, hwaddr addr,
1543 unsigned len)
1545 subpage_t *subpage = opaque;
1546 uint8_t buf[4];
1548 #if defined(DEBUG_SUBPAGE)
1549 printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
1550 subpage, len, addr);
1551 #endif
1552 address_space_read(subpage->as, addr + subpage->base, buf, len);
1553 switch (len) {
1554 case 1:
1555 return ldub_p(buf);
1556 case 2:
1557 return lduw_p(buf);
1558 case 4:
1559 return ldl_p(buf);
1560 default:
1561 abort();
1565 static void subpage_write(void *opaque, hwaddr addr,
1566 uint64_t value, unsigned len)
1568 subpage_t *subpage = opaque;
1569 uint8_t buf[4];
1571 #if defined(DEBUG_SUBPAGE)
1572 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
1573 " value %"PRIx64"\n",
1574 __func__, subpage, len, addr, value);
1575 #endif
1576 switch (len) {
1577 case 1:
1578 stb_p(buf, value);
1579 break;
1580 case 2:
1581 stw_p(buf, value);
1582 break;
1583 case 4:
1584 stl_p(buf, value);
1585 break;
1586 default:
1587 abort();
1589 address_space_write(subpage->as, addr + subpage->base, buf, len);
1592 static bool subpage_accepts(void *opaque, hwaddr addr,
1593 unsigned len, bool is_write)
1595 subpage_t *subpage = opaque;
1596 #if defined(DEBUG_SUBPAGE)
1597 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
1598 __func__, subpage, is_write ? 'w' : 'r', len, addr);
1599 #endif
1601 return address_space_access_valid(subpage->as, addr + subpage->base,
1602 len, is_write);
1605 static const MemoryRegionOps subpage_ops = {
1606 .read = subpage_read,
1607 .write = subpage_write,
1608 .valid.accepts = subpage_accepts,
1609 .endianness = DEVICE_NATIVE_ENDIAN,
1612 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
1613 uint16_t section)
1615 int idx, eidx;
1617 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
1618 return -1;
1619 idx = SUBPAGE_IDX(start);
1620 eidx = SUBPAGE_IDX(end);
1621 #if defined(DEBUG_SUBPAGE)
1622 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
1623 __func__, mmio, start, end, idx, eidx, section);
1624 #endif
1625 for (; idx <= eidx; idx++) {
1626 mmio->sub_section[idx] = section;
1629 return 0;
1632 static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
1634 subpage_t *mmio;
1636 mmio = g_malloc0(sizeof(subpage_t));
1638 mmio->as = as;
1639 mmio->base = base;
1640 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
1641 "subpage", TARGET_PAGE_SIZE);
1642 mmio->iomem.subpage = true;
1643 #if defined(DEBUG_SUBPAGE)
1644 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
1645 mmio, base, TARGET_PAGE_SIZE);
1646 #endif
1647 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
1649 return mmio;
1652 static uint16_t dummy_section(MemoryRegion *mr)
1654 MemoryRegionSection section = {
1655 .mr = mr,
1656 .offset_within_address_space = 0,
1657 .offset_within_region = 0,
1658 .size = int128_2_64(),
1661 return phys_section_add(&section);
1664 MemoryRegion *iotlb_to_region(hwaddr index)
1666 return address_space_memory.dispatch->sections[index & ~TARGET_PAGE_MASK].mr;
1669 static void io_mem_init(void)
1671 memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, "rom", UINT64_MAX);
1672 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
1673 "unassigned", UINT64_MAX);
1674 memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
1675 "notdirty", UINT64_MAX);
1676 memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
1677 "watch", UINT64_MAX);
1680 static void mem_begin(MemoryListener *listener)
1682 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
1683 AddressSpaceDispatch *d = g_new(AddressSpaceDispatch, 1);
1685 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
1686 d->as = as;
1687 as->next_dispatch = d;
1690 static void mem_commit(MemoryListener *listener)
1692 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
1693 AddressSpaceDispatch *cur = as->dispatch;
1694 AddressSpaceDispatch *next = as->next_dispatch;
1696 next->nodes = next_map.nodes;
1697 next->sections = next_map.sections;
1699 as->dispatch = next;
1700 g_free(cur);
1703 static void core_begin(MemoryListener *listener)
1705 uint16_t n;
1707 prev_map = g_new(PhysPageMap, 1);
1708 *prev_map = next_map;
1710 memset(&next_map, 0, sizeof(next_map));
1711 n = dummy_section(&io_mem_unassigned);
1712 assert(n == PHYS_SECTION_UNASSIGNED);
1713 n = dummy_section(&io_mem_notdirty);
1714 assert(n == PHYS_SECTION_NOTDIRTY);
1715 n = dummy_section(&io_mem_rom);
1716 assert(n == PHYS_SECTION_ROM);
1717 n = dummy_section(&io_mem_watch);
1718 assert(n == PHYS_SECTION_WATCH);
1721 /* This listener's commit runs after the other AddressSpaceDispatch listeners'.
1722 * All AddressSpaceDispatch instances have switched to the next map.
1724 static void core_commit(MemoryListener *listener)
1726 phys_sections_free(prev_map);
1729 static void tcg_commit(MemoryListener *listener)
1731 CPUState *cpu;
1733 /* since each CPU stores ram addresses in its TLB cache, we must
1734 reset the modified entries */
1735 /* XXX: slow ! */
1736 CPU_FOREACH(cpu) {
1737 CPUArchState *env = cpu->env_ptr;
1739 tlb_flush(env, 1);
1743 static void core_log_global_start(MemoryListener *listener)
1745 cpu_physical_memory_set_dirty_tracking(1);
1748 static void core_log_global_stop(MemoryListener *listener)
1750 cpu_physical_memory_set_dirty_tracking(0);
1753 static MemoryListener core_memory_listener = {
1754 .begin = core_begin,
1755 .commit = core_commit,
1756 .log_global_start = core_log_global_start,
1757 .log_global_stop = core_log_global_stop,
1758 .priority = 1,
1761 static MemoryListener tcg_memory_listener = {
1762 .commit = tcg_commit,
1765 void address_space_init_dispatch(AddressSpace *as)
1767 as->dispatch = NULL;
1768 as->dispatch_listener = (MemoryListener) {
1769 .begin = mem_begin,
1770 .commit = mem_commit,
1771 .region_add = mem_add,
1772 .region_nop = mem_add,
1773 .priority = 0,
1775 memory_listener_register(&as->dispatch_listener, as);
1778 void address_space_destroy_dispatch(AddressSpace *as)
1780 AddressSpaceDispatch *d = as->dispatch;
1782 memory_listener_unregister(&as->dispatch_listener);
1783 g_free(d);
1784 as->dispatch = NULL;
1787 static void memory_map_init(void)
1789 system_memory = g_malloc(sizeof(*system_memory));
1791 assert(ADDR_SPACE_BITS <= 64);
1793 memory_region_init(system_memory, NULL, "system",
1794 ADDR_SPACE_BITS == 64 ?
1795 UINT64_MAX : (0x1ULL << ADDR_SPACE_BITS));
1796 address_space_init(&address_space_memory, system_memory, "memory");
1798 system_io = g_malloc(sizeof(*system_io));
1799 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
1800 65536);
1801 address_space_init(&address_space_io, system_io, "I/O");
1803 memory_listener_register(&core_memory_listener, &address_space_memory);
1804 if (tcg_enabled()) {
1805 memory_listener_register(&tcg_memory_listener, &address_space_memory);
1809 MemoryRegion *get_system_memory(void)
1811 return system_memory;
1814 MemoryRegion *get_system_io(void)
1816 return system_io;
1819 #endif /* !defined(CONFIG_USER_ONLY) */
1821 /* physical memory access (slow version, mainly for debug) */
1822 #if defined(CONFIG_USER_ONLY)
1823 int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
1824 uint8_t *buf, int len, int is_write)
1826 int l, flags;
1827 target_ulong page;
1828 void * p;
1830 while (len > 0) {
1831 page = addr & TARGET_PAGE_MASK;
1832 l = (page + TARGET_PAGE_SIZE) - addr;
1833 if (l > len)
1834 l = len;
1835 flags = page_get_flags(page);
1836 if (!(flags & PAGE_VALID))
1837 return -1;
1838 if (is_write) {
1839 if (!(flags & PAGE_WRITE))
1840 return -1;
1841 /* XXX: this code should not depend on lock_user */
1842 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
1843 return -1;
1844 memcpy(p, buf, l);
1845 unlock_user(p, addr, l);
1846 } else {
1847 if (!(flags & PAGE_READ))
1848 return -1;
1849 /* XXX: this code should not depend on lock_user */
1850 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
1851 return -1;
1852 memcpy(buf, p, l);
1853 unlock_user(p, addr, 0);
1855 len -= l;
1856 buf += l;
1857 addr += l;
1859 return 0;
1862 #else
1864 static void invalidate_and_set_dirty(hwaddr addr,
1865 hwaddr length)
1867 if (!cpu_physical_memory_is_dirty(addr)) {
1868 /* invalidate code */
1869 tb_invalidate_phys_page_range(addr, addr + length, 0);
1870 /* set dirty bit */
1871 cpu_physical_memory_set_dirty_flags(addr, (0xff & ~CODE_DIRTY_FLAG));
1873 xen_modified_memory(addr, length);
1876 static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
1878 if (memory_region_is_ram(mr)) {
1879 return !(is_write && mr->readonly);
1881 if (memory_region_is_romd(mr)) {
1882 return !is_write;
1885 return false;
1888 static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
1890 unsigned access_size_max = mr->ops->valid.max_access_size;
1892 /* Regions are assumed to support 1-4 byte accesses unless
1893 otherwise specified. */
1894 if (access_size_max == 0) {
1895 access_size_max = 4;
1898 /* Bound the maximum access by the alignment of the address. */
1899 if (!mr->ops->impl.unaligned) {
1900 unsigned align_size_max = addr & -addr;
1901 if (align_size_max != 0 && align_size_max < access_size_max) {
1902 access_size_max = align_size_max;
1906 /* Don't attempt accesses larger than the maximum. */
1907 if (l > access_size_max) {
1908 l = access_size_max;
1910 if (l & (l - 1)) {
1911 l = 1 << (qemu_fls(l) - 1);
1914 return l;
1917 bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
1918 int len, bool is_write)
1920 hwaddr l;
1921 uint8_t *ptr;
1922 uint64_t val;
1923 hwaddr addr1;
1924 MemoryRegion *mr;
1925 bool error = false;
1927 while (len > 0) {
1928 l = len;
1929 mr = address_space_translate(as, addr, &addr1, &l, is_write);
1931 if (is_write) {
1932 if (!memory_access_is_direct(mr, is_write)) {
1933 l = memory_access_size(mr, l, addr1);
1934 /* XXX: could force current_cpu to NULL to avoid
1935 potential bugs */
1936 switch (l) {
1937 case 8:
1938 /* 64 bit write access */
1939 val = ldq_p(buf);
1940 error |= io_mem_write(mr, addr1, val, 8);
1941 break;
1942 case 4:
1943 /* 32 bit write access */
1944 val = ldl_p(buf);
1945 error |= io_mem_write(mr, addr1, val, 4);
1946 break;
1947 case 2:
1948 /* 16 bit write access */
1949 val = lduw_p(buf);
1950 error |= io_mem_write(mr, addr1, val, 2);
1951 break;
1952 case 1:
1953 /* 8 bit write access */
1954 val = ldub_p(buf);
1955 error |= io_mem_write(mr, addr1, val, 1);
1956 break;
1957 default:
1958 abort();
1960 } else {
1961 addr1 += memory_region_get_ram_addr(mr);
1962 /* RAM case */
1963 ptr = qemu_get_ram_ptr(addr1);
1964 memcpy(ptr, buf, l);
1965 invalidate_and_set_dirty(addr1, l);
1967 } else {
1968 if (!memory_access_is_direct(mr, is_write)) {
1969 /* I/O case */
1970 l = memory_access_size(mr, l, addr1);
1971 switch (l) {
1972 case 8:
1973 /* 64 bit read access */
1974 error |= io_mem_read(mr, addr1, &val, 8);
1975 stq_p(buf, val);
1976 break;
1977 case 4:
1978 /* 32 bit read access */
1979 error |= io_mem_read(mr, addr1, &val, 4);
1980 stl_p(buf, val);
1981 break;
1982 case 2:
1983 /* 16 bit read access */
1984 error |= io_mem_read(mr, addr1, &val, 2);
1985 stw_p(buf, val);
1986 break;
1987 case 1:
1988 /* 8 bit read access */
1989 error |= io_mem_read(mr, addr1, &val, 1);
1990 stb_p(buf, val);
1991 break;
1992 default:
1993 abort();
1995 } else {
1996 /* RAM case */
1997 ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
1998 memcpy(buf, ptr, l);
2001 len -= l;
2002 buf += l;
2003 addr += l;
2006 return error;
2009 bool address_space_write(AddressSpace *as, hwaddr addr,
2010 const uint8_t *buf, int len)
2012 return address_space_rw(as, addr, (uint8_t *)buf, len, true);
2015 bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
2017 return address_space_rw(as, addr, buf, len, false);
2021 void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
2022 int len, int is_write)
2024 address_space_rw(&address_space_memory, addr, buf, len, is_write);
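/*
 * Usage sketch (editor's illustration): copy a buffer into guest
 * physical memory and read it back.  'gpa' is a hypothetical guest
 * physical address; the int is_write argument follows the prototype
 * above (1 = write, 0 = read).
 *
 *   uint8_t out[16] = { 0 }, in[16];
 *   cpu_physical_memory_rw(gpa, out, sizeof(out), 1);   // write to guest
 *   cpu_physical_memory_rw(gpa, in, sizeof(in), 0);     // read it back
 */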
2027 /* used for ROM loading: can write in RAM and ROM */
2028 void cpu_physical_memory_write_rom(hwaddr addr,
2029 const uint8_t *buf, int len)
2031 hwaddr l;
2032 uint8_t *ptr;
2033 hwaddr addr1;
2034 MemoryRegion *mr;
2036 while (len > 0) {
2037 l = len;
2038 mr = address_space_translate(&address_space_memory,
2039 addr, &addr1, &l, true);
2041 if (!(memory_region_is_ram(mr) ||
2042 memory_region_is_romd(mr))) {
2043 /* do nothing */
2044 } else {
2045 addr1 += memory_region_get_ram_addr(mr);
2046 /* ROM/RAM case */
2047 ptr = qemu_get_ram_ptr(addr1);
2048 memcpy(ptr, buf, l);
2049 invalidate_and_set_dirty(addr1, l);
2051 len -= l;
2052 buf += l;
2053 addr += l;
2057 typedef struct {
2058 MemoryRegion *mr;
2059 void *buffer;
2060 hwaddr addr;
2061 hwaddr len;
2062 } BounceBuffer;
2064 static BounceBuffer bounce;
2066 typedef struct MapClient {
2067 void *opaque;
2068 void (*callback)(void *opaque);
2069 QLIST_ENTRY(MapClient) link;
2070 } MapClient;
2072 static QLIST_HEAD(map_client_list, MapClient) map_client_list
2073 = QLIST_HEAD_INITIALIZER(map_client_list);
2075 void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
2077 MapClient *client = g_malloc(sizeof(*client));
2079 client->opaque = opaque;
2080 client->callback = callback;
2081 QLIST_INSERT_HEAD(&map_client_list, client, link);
2082 return client;
2085 static void cpu_unregister_map_client(void *_client)
2087 MapClient *client = (MapClient *)_client;
2089 QLIST_REMOVE(client, link);
2090 g_free(client);
2093 static void cpu_notify_map_clients(void)
2095 MapClient *client;
2097 while (!QLIST_EMPTY(&map_client_list)) {
2098 client = QLIST_FIRST(&map_client_list);
2099 client->callback(client->opaque);
2100 cpu_unregister_map_client(client);
2104 bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2106 MemoryRegion *mr;
2107 hwaddr l, xlat;
2109 while (len > 0) {
2110 l = len;
2111 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2112 if (!memory_access_is_direct(mr, is_write)) {
2113 l = memory_access_size(mr, l, addr);
2114 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
2115 return false;
2119 len -= l;
2120 addr += l;
2122 return true;
2125 /* Map a physical memory region into a host virtual address.
2126 * May map a subset of the requested range, given by and returned in *plen.
2127 * May return NULL if resources needed to perform the mapping are exhausted.
2128 * Use only for reads OR writes - not for read-modify-write operations.
2129 * Use cpu_register_map_client() to know when retrying the map operation is
2130 * likely to succeed.
2132 void *address_space_map(AddressSpace *as,
2133 hwaddr addr,
2134 hwaddr *plen,
2135 bool is_write)
2137 hwaddr len = *plen;
2138 hwaddr done = 0;
2139 hwaddr l, xlat, base;
2140 MemoryRegion *mr, *this_mr;
2141 ram_addr_t raddr;
2143 if (len == 0) {
2144 return NULL;
2147 l = len;
2148 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2149 if (!memory_access_is_direct(mr, is_write)) {
2150 if (bounce.buffer) {
2151 return NULL;
2153 /* Avoid unbounded allocations */
2154 l = MIN(l, TARGET_PAGE_SIZE);
2155 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
2156 bounce.addr = addr;
2157 bounce.len = l;
2159 memory_region_ref(mr);
2160 bounce.mr = mr;
2161 if (!is_write) {
2162 address_space_read(as, addr, bounce.buffer, l);
2165 *plen = l;
2166 return bounce.buffer;
2169 base = xlat;
2170 raddr = memory_region_get_ram_addr(mr);
2172 for (;;) {
2173 len -= l;
2174 addr += l;
2175 done += l;
2176 if (len == 0) {
2177 break;
2180 l = len;
2181 this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
2182 if (this_mr != mr || xlat != base + done) {
2183 break;
2187 memory_region_ref(mr);
2188 *plen = done;
2189 return qemu_ram_ptr_length(raddr + base, plen);
2192 /* Unmaps a memory region previously mapped by address_space_map().
2193 * Will also mark the memory as dirty if is_write == 1. access_len gives
2194 * the amount of memory that was actually read or written by the caller.
2196 void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2197 int is_write, hwaddr access_len)
2199 if (buffer != bounce.buffer) {
2200 MemoryRegion *mr;
2201 ram_addr_t addr1;
2203 mr = qemu_ram_addr_from_host(buffer, &addr1);
2204 assert(mr != NULL);
2205 if (is_write) {
2206 while (access_len) {
2207 unsigned l;
2208 l = TARGET_PAGE_SIZE;
2209 if (l > access_len)
2210 l = access_len;
2211 invalidate_and_set_dirty(addr1, l);
2212 addr1 += l;
2213 access_len -= l;
2216 if (xen_enabled()) {
2217 xen_invalidate_map_cache_entry(buffer);
2219 memory_region_unref(mr);
2220 return;
2222 if (is_write) {
2223 address_space_write(as, bounce.addr, bounce.buffer, access_len);
2225 qemu_vfree(bounce.buffer);
2226 bounce.buffer = NULL;
2227 memory_region_unref(bounce.mr);
2228 cpu_notify_map_clients();

void *cpu_physical_memory_map(hwaddr addr,
                              hwaddr *plen,
                              int is_write)
{
    return address_space_map(&address_space_memory, addr, plen, is_write);
}

void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                               int is_write, hwaddr access_len)
{
    return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
}
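
/* Illustrative sketch, not part of the original file: the usual pattern for
 * zero-copy access through the map/unmap API.  The mapping may come back
 * shorter than requested (or NULL when only the single bounce buffer is
 * available), so callers have to loop or fall back.  The function name and
 * the zero-fill payload are hypothetical.
 */
static int example_fill_guest_buffer(hwaddr guest_addr, hwaddr size)
{
    while (size > 0) {
        hwaddr plen = size;
        void *host = cpu_physical_memory_map(guest_addr, &plen, 1);

        if (!host || plen == 0) {
            return -1;          /* resources exhausted; retry via map client */
        }
        memset(host, 0, plen);  /* produce data directly in guest RAM */
        cpu_physical_memory_unmap(host, plen, 1, plen);

        guest_addr += plen;
        size -= plen;
    }
    return 0;
}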

/* warning: addr must be aligned */
static inline uint32_t ldl_phys_internal(hwaddr addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;

    mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                 false);
    if (l < 4 || !memory_access_is_direct(mr, false)) {
        /* I/O case */
        io_mem_read(mr, addr1, &val, 4);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t ldl_phys(hwaddr addr)
{
    return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t ldl_le_phys(hwaddr addr)
{
    return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t ldl_be_phys(hwaddr addr)
{
    return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* warning: addr must be aligned */
static inline uint64_t ldq_phys_internal(hwaddr addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 8;
    hwaddr addr1;

    mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                 false);
    if (l < 8 || !memory_access_is_direct(mr, false)) {
        /* I/O case */
        io_mem_read(mr, addr1, &val, 8);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap64(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap64(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
    }
    return val;
}

uint64_t ldq_phys(hwaddr addr)
{
    return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint64_t ldq_le_phys(hwaddr addr)
{
    return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint64_t ldq_be_phys(hwaddr addr)
{
    return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
uint32_t ldub_phys(hwaddr addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* warning: addr must be aligned */
static inline uint32_t lduw_phys_internal(hwaddr addr,
                                          enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;

    mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                 false);
    if (l < 2 || !memory_access_is_direct(mr, false)) {
        /* I/O case */
        io_mem_read(mr, addr1, &val, 2);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t lduw_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t lduw_le_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t lduw_be_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
}
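
/* Illustrative sketch, not part of the original file: device models with a
 * fixed guest-visible byte order use the explicit _le/_be accessors so the
 * result does not depend on TARGET_WORDS_BIGENDIAN.  The descriptor layout,
 * field offsets and function name below are hypothetical.
 */
static void example_read_le_descriptor(hwaddr desc_addr,
                                       uint64_t *buf_addr,
                                       uint32_t *buf_len,
                                       uint16_t *flags)
{
    *buf_addr = ldq_le_phys(desc_addr);        /* 64-bit LE pointer */
    *buf_len  = ldl_le_phys(desc_addr + 8);    /* 32-bit LE length  */
    *flags    = lduw_le_phys(desc_addr + 12);  /* 16-bit LE flags   */
}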

/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(hwaddr addr, uint32_t val)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;

    mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        io_mem_write(mr, addr1, val, 4);
    } else {
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}
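
/* Illustrative sketch, not part of the original file: a target MMU helper
 * updating a status bit in a guest page-table entry.  stl_phys_notdirty() is
 * used precisely so that rewriting the PTE does not mark the page dirty or
 * invalidate translated code, keeping the dirty bitmap usable for tracking
 * guest-modified PTEs.  The function name and bit value are hypothetical.
 */
static void example_pte_set_accessed(hwaddr pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);

    if (!(pte & 0x20)) {        /* hypothetical "accessed" bit */
        stl_phys_notdirty(pte_addr, pte | 0x20);
    }
}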

/* warning: addr must be aligned */
static inline void stl_phys_internal(hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;

    mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        io_mem_write(mr, addr1, val, 4);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 4);
    }
}

void stl_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stl_le_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stl_be_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stb_phys(hwaddr addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* warning: addr must be aligned */
static inline void stw_phys_internal(hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;

    mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                 true);
    if (l < 2 || !memory_access_is_direct(mr, true)) {
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        io_mem_write(mr, addr1, val, 2);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 2);
    }
}

void stw_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stw_le_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stw_be_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stq_phys(hwaddr addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_le_phys(hwaddr addr, uint64_t val)
{
    val = cpu_to_le64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_be_phys(hwaddr addr, uint64_t val)
{
    val = cpu_to_be64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(cpu, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1) {
            return -1;
        }
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len) {
            l = len;
        }
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write) {
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        } else {
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
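
/* Illustrative sketch, not part of the original file: a debug/monitor-style
 * helper that pulls a chunk of guest *virtual* memory through
 * cpu_memory_rw_debug(), which walks the guest page tables and also allows
 * writes into ROM.  The function name is hypothetical.
 */
static int example_dump_guest_vaddr(CPUState *cpu, target_ulong vaddr,
                                    uint8_t *out, int size)
{
    if (cpu_memory_rw_debug(cpu, vaddr, out, size, 0) < 0) {
        return -1;              /* some page in the range is unmapped */
    }
    return size;
}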

#endif

#if !defined(CONFIG_USER_ONLY)

/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool virtio_is_big_endian(void);
bool virtio_is_big_endian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}

#endif

#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegion *mr;
    hwaddr l = 1;

    mr = address_space_translate(&address_space_memory,
                                 phys_addr, &phys_addr, &l, false);

    return !(memory_region_is_ram(mr) ||
             memory_region_is_romd(mr));
}
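
/* Illustrative sketch, not part of the original file: guarding a speculative
 * read so that memory-mapped I/O is never touched as a side effect - only
 * RAM/ROMD-backed addresses are loaded.  The function name is hypothetical.
 */
static bool example_peek_ram_long(hwaddr addr, uint32_t *val)
{
    if (cpu_physical_memory_is_io(addr)) {
        return false;           /* reading would trigger device side effects */
    }
    *val = ldl_phys(addr);
    return true;
}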

void qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
{
    RAMBlock *block;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        func(block->host, block->offset, block->length, opaque);
    }
}
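
/* Illustrative sketch, not part of the original file: a RAMBlockIterFunc
 * callback that sums the size of every RAM block, returning the total
 * through the opaque pointer.  The function names are hypothetical.
 */
static void example_count_ram_cb(void *host_addr, ram_addr_t offset,
                                 ram_addr_t length, void *opaque)
{
    *(ram_addr_t *)opaque += length;
}

static ram_addr_t example_total_ram(void)
{
    ram_addr_t total = 0;

    qemu_ram_foreach_block(example_count_ram_cb, &total);
    return total;
}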

#endif