exec: Implement subpage_read/write via address_space_rw
[qemu/ar7.git] / exec.c
1 /*
2 * Virtual page mapping
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 #include "config.h"
20 #ifdef _WIN32
21 #include <windows.h>
22 #else
23 #include <sys/types.h>
24 #include <sys/mman.h>
25 #endif
27 #include "qemu-common.h"
28 #include "cpu.h"
29 #include "tcg.h"
30 #include "hw/hw.h"
31 #include "hw/qdev.h"
32 #include "qemu/osdep.h"
33 #include "sysemu/kvm.h"
34 #include "hw/xen/xen.h"
35 #include "qemu/timer.h"
36 #include "qemu/config-file.h"
37 #include "exec/memory.h"
38 #include "sysemu/dma.h"
39 #include "exec/address-spaces.h"
40 #if defined(CONFIG_USER_ONLY)
41 #include <qemu.h>
42 #else /* !CONFIG_USER_ONLY */
43 #include "sysemu/xen-mapcache.h"
44 #include "trace.h"
45 #endif
46 #include "exec/cpu-all.h"
48 #include "exec/cputlb.h"
49 #include "translate-all.h"
51 #include "exec/memory-internal.h"
53 //#define DEBUG_SUBPAGE
55 #if !defined(CONFIG_USER_ONLY)
56 int phys_ram_fd;
57 static int in_migration;
59 RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };
61 static MemoryRegion *system_memory;
62 static MemoryRegion *system_io;
64 AddressSpace address_space_io;
65 AddressSpace address_space_memory;
66 DMAContext dma_context_memory;
68 MemoryRegion io_mem_rom, io_mem_notdirty;
69 static MemoryRegion io_mem_unassigned;
71 #endif
73 CPUArchState *first_cpu;
74 /* current CPU in the current thread. It is only valid inside
75 cpu_exec() */
76 DEFINE_TLS(CPUArchState *,cpu_single_env);
77 /* 0 = Do not count executed instructions.
78 1 = Precise instruction counting.
79 2 = Adaptive rate instruction counting. */
80 int use_icount;
82 #if !defined(CONFIG_USER_ONLY)
84 typedef struct PhysPageEntry PhysPageEntry;
86 struct PhysPageEntry {
87 uint16_t is_leaf : 1;
88 /* index into phys_sections (is_leaf) or phys_map_nodes (!is_leaf) */
89 uint16_t ptr : 15;
92 struct AddressSpaceDispatch {
93 /* This is a multi-level map on the physical address space.
94 * The bottom level has pointers to MemoryRegionSections.
96 PhysPageEntry phys_map;
97 MemoryListener listener;
98 AddressSpace *as;
101 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
102 typedef struct subpage_t {
103 MemoryRegion iomem;
104 AddressSpace *as;
105 hwaddr base;
106 uint16_t sub_section[TARGET_PAGE_SIZE];
107 } subpage_t;
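/* A subpage_t models a single target page whose TARGET_PAGE_SIZE bytes are
 * backed by more than one MemoryRegionSection.  sub_section[] has one entry
 * per byte of the page and is indexed by the offset within it, so (an
 * illustrative sketch, assuming 4 KiB target pages) the byte at physical
 * address 0x1234, inside the subpage covering 0x1000..0x1fff, resolves as:
 *
 *     uint16_t sec = subpage->sub_section[SUBPAGE_IDX(0x1234)];   // idx 0x234
 *     MemoryRegionSection *s = &phys_sections[sec];
 */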
109 static MemoryRegionSection *phys_sections;
110 static unsigned phys_sections_nb, phys_sections_nb_alloc;
111 static uint16_t phys_section_unassigned;
112 static uint16_t phys_section_notdirty;
113 static uint16_t phys_section_rom;
114 static uint16_t phys_section_watch;
116 /* Simple allocator for PhysPageEntry nodes */
117 static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
118 static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;
120 #define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)
122 static void io_mem_init(void);
123 static void memory_map_init(void);
124 static void *qemu_safe_ram_ptr(ram_addr_t addr);
126 static MemoryRegion io_mem_watch;
127 #endif
129 #if !defined(CONFIG_USER_ONLY)
131 static void phys_map_node_reserve(unsigned nodes)
133 if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
134 typedef PhysPageEntry Node[L2_SIZE];
135 phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
136 phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
137 phys_map_nodes_nb + nodes);
138 phys_map_nodes = g_renew(Node, phys_map_nodes,
139 phys_map_nodes_nb_alloc);
143 static uint16_t phys_map_node_alloc(void)
145 unsigned i;
146 uint16_t ret;
148 ret = phys_map_nodes_nb++;
149 assert(ret != PHYS_MAP_NODE_NIL);
150 assert(ret != phys_map_nodes_nb_alloc);
151 for (i = 0; i < L2_SIZE; ++i) {
152 phys_map_nodes[ret][i].is_leaf = 0;
153 phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
155 return ret;
158 static void phys_map_nodes_reset(void)
160 phys_map_nodes_nb = 0;
164 static void phys_page_set_level(PhysPageEntry *lp, hwaddr *index,
165 hwaddr *nb, uint16_t leaf,
166 int level)
168 PhysPageEntry *p;
169 int i;
170 hwaddr step = (hwaddr)1 << (level * L2_BITS);
172 if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
173 lp->ptr = phys_map_node_alloc();
174 p = phys_map_nodes[lp->ptr];
175 if (level == 0) {
176 for (i = 0; i < L2_SIZE; i++) {
177 p[i].is_leaf = 1;
178 p[i].ptr = phys_section_unassigned;
181 } else {
182 p = phys_map_nodes[lp->ptr];
184 lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];
186 while (*nb && lp < &p[L2_SIZE]) {
187 if ((*index & (step - 1)) == 0 && *nb >= step) {
188 lp->is_leaf = true;
189 lp->ptr = leaf;
190 *index += step;
191 *nb -= step;
192 } else {
193 phys_page_set_level(lp, index, nb, leaf, level - 1);
195 ++lp;
199 static void phys_page_set(AddressSpaceDispatch *d,
200 hwaddr index, hwaddr nb,
201 uint16_t leaf)
203 /* Wildly overreserve - it doesn't matter much. */
204 phys_map_node_reserve(3 * P_L2_LEVELS);
206 phys_page_set_level(&d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
209 static MemoryRegionSection *phys_page_find(AddressSpaceDispatch *d, hwaddr index)
211 PhysPageEntry lp = d->phys_map;
212 PhysPageEntry *p;
213 int i;
215 for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
216 if (lp.ptr == PHYS_MAP_NODE_NIL) {
217 return &phys_sections[phys_section_unassigned];
219 p = phys_map_nodes[lp.ptr];
220 lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
222 return &phys_sections[lp.ptr];
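/* Illustrative walk of the map above, using made-up constants (say L2_BITS
 * of 10 and three levels): the page index is consumed 10 bits at a time,
 * the highest bits selecting an entry in the top-level node, the next 10
 * bits an entry one level down, and the lowest 10 bits the leaf, which
 * holds an index into phys_sections[].  Any PHYS_MAP_NODE_NIL pointer met
 * on the way resolves to phys_section_unassigned.
 */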
225 bool memory_region_is_unassigned(MemoryRegion *mr)
227 return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
228 && mr != &io_mem_watch;
231 static MemoryRegionSection *address_space_lookup_region(AddressSpace *as,
232 hwaddr addr,
233 bool resolve_subpage)
235 MemoryRegionSection *section;
236 subpage_t *subpage;
238 section = phys_page_find(as->dispatch, addr >> TARGET_PAGE_BITS);
239 if (resolve_subpage && section->mr->subpage) {
240 subpage = container_of(section->mr, subpage_t, iomem);
241 section = &phys_sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
243 return section;
246 static MemoryRegionSection *
247 address_space_translate_internal(AddressSpace *as, hwaddr addr, hwaddr *xlat,
248 hwaddr *plen, bool resolve_subpage)
250 MemoryRegionSection *section;
251 Int128 diff;
253 section = address_space_lookup_region(as, addr, resolve_subpage);
254 /* Compute offset within MemoryRegionSection */
255 addr -= section->offset_within_address_space;
257 /* Compute offset within MemoryRegion */
258 *xlat = addr + section->offset_within_region;
260 diff = int128_sub(section->mr->size, int128_make64(addr));
261 *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
262 return section;
265 MemoryRegionSection *address_space_translate(AddressSpace *as, hwaddr addr,
266 hwaddr *xlat, hwaddr *plen,
267 bool is_write)
269 return address_space_translate_internal(as, addr, xlat, plen, true);
272 MemoryRegionSection *
273 address_space_translate_for_iotlb(AddressSpace *as, hwaddr addr, hwaddr *xlat,
274 hwaddr *plen)
276 return address_space_translate_internal(as, addr, xlat, plen, false);
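/* Typical use of the translate helpers (a sketch; "as", "addr" and the
 * 4-byte length are illustrative):
 *
 *     hwaddr xlat, l = 4;
 *     MemoryRegionSection *s = address_space_translate(as, addr, &xlat, &l,
 *                                                       false);
 *     // s->mr covers addr, xlat is the offset into that region, and l has
 *     // been clamped so the access does not run past the region's end.
 *
 * The _for_iotlb variant does not resolve subpage containers; subpage
 * resolution for TLB-driven accesses happens later, at access time, through
 * the subpage read/write callbacks below.
 */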
278 #endif
280 void cpu_exec_init_all(void)
282 #if !defined(CONFIG_USER_ONLY)
283 qemu_mutex_init(&ram_list.mutex);
284 memory_map_init();
285 io_mem_init();
286 #endif
289 #if !defined(CONFIG_USER_ONLY)
291 static int cpu_common_post_load(void *opaque, int version_id)
293 CPUState *cpu = opaque;
295 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
296 version_id is increased. */
297 cpu->interrupt_request &= ~0x01;
298 tlb_flush(cpu->env_ptr, 1);
300 return 0;
303 static const VMStateDescription vmstate_cpu_common = {
304 .name = "cpu_common",
305 .version_id = 1,
306 .minimum_version_id = 1,
307 .minimum_version_id_old = 1,
308 .post_load = cpu_common_post_load,
309 .fields = (VMStateField []) {
310 VMSTATE_UINT32(halted, CPUState),
311 VMSTATE_UINT32(interrupt_request, CPUState),
312 VMSTATE_END_OF_LIST()
315 #else
316 #define vmstate_cpu_common vmstate_dummy
317 #endif
319 CPUState *qemu_get_cpu(int index)
321 CPUArchState *env = first_cpu;
322 CPUState *cpu = NULL;
324 while (env) {
325 cpu = ENV_GET_CPU(env);
326 if (cpu->cpu_index == index) {
327 break;
329 env = env->next_cpu;
332 return env ? cpu : NULL;
335 void qemu_for_each_cpu(void (*func)(CPUState *cpu, void *data), void *data)
337 CPUArchState *env = first_cpu;
339 while (env) {
340 func(ENV_GET_CPU(env), data);
341 env = env->next_cpu;
345 void cpu_exec_init(CPUArchState *env)
347 CPUState *cpu = ENV_GET_CPU(env);
348 CPUClass *cc = CPU_GET_CLASS(cpu);
349 CPUArchState **penv;
350 int cpu_index;
352 #if defined(CONFIG_USER_ONLY)
353 cpu_list_lock();
354 #endif
355 env->next_cpu = NULL;
356 penv = &first_cpu;
357 cpu_index = 0;
358 while (*penv != NULL) {
359 penv = &(*penv)->next_cpu;
360 cpu_index++;
362 cpu->cpu_index = cpu_index;
363 cpu->numa_node = 0;
364 QTAILQ_INIT(&env->breakpoints);
365 QTAILQ_INIT(&env->watchpoints);
366 #ifndef CONFIG_USER_ONLY
367 cpu->thread_id = qemu_get_thread_id();
368 #endif
369 *penv = env;
370 #if defined(CONFIG_USER_ONLY)
371 cpu_list_unlock();
372 #endif
373 vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
374 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
375 register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
376 cpu_save, cpu_load, env);
377 assert(cc->vmsd == NULL);
378 #endif
379 if (cc->vmsd != NULL) {
380 vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
384 #if defined(TARGET_HAS_ICE)
385 #if defined(CONFIG_USER_ONLY)
386 static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
388 tb_invalidate_phys_page_range(pc, pc + 1, 0);
390 #else
391 static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
393 tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc) |
394 (pc & ~TARGET_PAGE_MASK));
396 #endif
397 #endif /* TARGET_HAS_ICE */
399 #if defined(CONFIG_USER_ONLY)
400 void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
405 int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
406 int flags, CPUWatchpoint **watchpoint)
408 return -ENOSYS;
410 #else
411 /* Add a watchpoint. */
412 int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
413 int flags, CPUWatchpoint **watchpoint)
415 target_ulong len_mask = ~(len - 1);
416 CPUWatchpoint *wp;
418 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
419 if ((len & (len - 1)) || (addr & ~len_mask) ||
420 len == 0 || len > TARGET_PAGE_SIZE) {
421 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
422 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
423 return -EINVAL;
425 wp = g_malloc(sizeof(*wp));
427 wp->vaddr = addr;
428 wp->len_mask = len_mask;
429 wp->flags = flags;
431 /* keep all GDB-injected watchpoints in front */
432 if (flags & BP_GDB)
433 QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
434 else
435 QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
437 tlb_flush_page(env, addr);
439 if (watchpoint)
440 *watchpoint = wp;
441 return 0;
444 /* Remove a specific watchpoint. */
445 int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
446 int flags)
448 target_ulong len_mask = ~(len - 1);
449 CPUWatchpoint *wp;
451 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
452 if (addr == wp->vaddr && len_mask == wp->len_mask
453 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
454 cpu_watchpoint_remove_by_ref(env, wp);
455 return 0;
458 return -ENOENT;
461 /* Remove a specific watchpoint by reference. */
462 void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
464 QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
466 tlb_flush_page(env, watchpoint->vaddr);
468 g_free(watchpoint);
471 /* Remove all matching watchpoints. */
472 void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
474 CPUWatchpoint *wp, *next;
476 QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
477 if (wp->flags & mask)
478 cpu_watchpoint_remove_by_ref(env, wp);
481 #endif
483 /* Add a breakpoint. */
484 int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
485 CPUBreakpoint **breakpoint)
487 #if defined(TARGET_HAS_ICE)
488 CPUBreakpoint *bp;
490 bp = g_malloc(sizeof(*bp));
492 bp->pc = pc;
493 bp->flags = flags;
495 /* keep all GDB-injected breakpoints in front */
496 if (flags & BP_GDB)
497 QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
498 else
499 QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
501 breakpoint_invalidate(env, pc);
503 if (breakpoint)
504 *breakpoint = bp;
505 return 0;
506 #else
507 return -ENOSYS;
508 #endif
511 /* Remove a specific breakpoint. */
512 int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
514 #if defined(TARGET_HAS_ICE)
515 CPUBreakpoint *bp;
517 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
518 if (bp->pc == pc && bp->flags == flags) {
519 cpu_breakpoint_remove_by_ref(env, bp);
520 return 0;
523 return -ENOENT;
524 #else
525 return -ENOSYS;
526 #endif
529 /* Remove a specific breakpoint by reference. */
530 void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
532 #if defined(TARGET_HAS_ICE)
533 QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
535 breakpoint_invalidate(env, breakpoint->pc);
537 g_free(breakpoint);
538 #endif
541 /* Remove all matching breakpoints. */
542 void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
544 #if defined(TARGET_HAS_ICE)
545 CPUBreakpoint *bp, *next;
547 QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
548 if (bp->flags & mask)
549 cpu_breakpoint_remove_by_ref(env, bp);
551 #endif
554 /* enable or disable single step mode. EXCP_DEBUG is returned by the
555 CPU loop after each instruction */
556 void cpu_single_step(CPUArchState *env, int enabled)
558 #if defined(TARGET_HAS_ICE)
559 if (env->singlestep_enabled != enabled) {
560 env->singlestep_enabled = enabled;
561 if (kvm_enabled())
562 kvm_update_guest_debug(env, 0);
563 else {
564 /* must flush all the translated code to avoid inconsistencies */
565 /* XXX: only flush what is necessary */
566 tb_flush(env);
569 #endif
572 void cpu_exit(CPUArchState *env)
574 CPUState *cpu = ENV_GET_CPU(env);
576 cpu->exit_request = 1;
577 cpu->tcg_exit_req = 1;
580 void cpu_abort(CPUArchState *env, const char *fmt, ...)
582 va_list ap;
583 va_list ap2;
585 va_start(ap, fmt);
586 va_copy(ap2, ap);
587 fprintf(stderr, "qemu: fatal: ");
588 vfprintf(stderr, fmt, ap);
589 fprintf(stderr, "\n");
590 cpu_dump_state(env, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
591 if (qemu_log_enabled()) {
592 qemu_log("qemu: fatal: ");
593 qemu_log_vprintf(fmt, ap2);
594 qemu_log("\n");
595 log_cpu_state(env, CPU_DUMP_FPU | CPU_DUMP_CCOP);
596 qemu_log_flush();
597 qemu_log_close();
599 va_end(ap2);
600 va_end(ap);
601 #if defined(CONFIG_USER_ONLY)
603 struct sigaction act;
604 sigfillset(&act.sa_mask);
605 act.sa_handler = SIG_DFL;
606 sigaction(SIGABRT, &act, NULL);
608 #endif
609 abort();
612 CPUArchState *cpu_copy(CPUArchState *env)
614 CPUArchState *new_env = cpu_init(env->cpu_model_str);
615 CPUArchState *next_cpu = new_env->next_cpu;
616 #if defined(TARGET_HAS_ICE)
617 CPUBreakpoint *bp;
618 CPUWatchpoint *wp;
619 #endif
621 memcpy(new_env, env, sizeof(CPUArchState));
623 /* Preserve chaining. */
624 new_env->next_cpu = next_cpu;
626 /* Clone all break/watchpoints.
627 Note: Once we support ptrace with hw-debug register access, make sure
628 BP_CPU break/watchpoints are handled correctly on clone. */
629 QTAILQ_INIT(&env->breakpoints);
630 QTAILQ_INIT(&env->watchpoints);
631 #if defined(TARGET_HAS_ICE)
632 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
633 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
635 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
636 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
637 wp->flags, NULL);
639 #endif
641 return new_env;
644 #if !defined(CONFIG_USER_ONLY)
645 static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
646 uintptr_t length)
648 uintptr_t start1;
650 /* we modify the TLB cache so that the dirty bit will be set again
651 when accessing the range */
652 start1 = (uintptr_t)qemu_safe_ram_ptr(start);
653 /* Check that we don't span multiple blocks - this breaks the
654 address comparisons below. */
655 if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
656 != (end - 1) - start) {
657 abort();
659 cpu_tlb_reset_dirty_all(start1, length);
663 /* Note: start and end must be within the same ram block. */
664 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
665 int dirty_flags)
667 uintptr_t length;
669 start &= TARGET_PAGE_MASK;
670 end = TARGET_PAGE_ALIGN(end);
672 length = end - start;
673 if (length == 0)
674 return;
675 cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
677 if (tcg_enabled()) {
678 tlb_reset_dirty_range_all(start, end, length);
682 static int cpu_physical_memory_set_dirty_tracking(int enable)
684 int ret = 0;
685 in_migration = enable;
686 return ret;
689 hwaddr memory_region_section_get_iotlb(CPUArchState *env,
690 MemoryRegionSection *section,
691 target_ulong vaddr,
692 hwaddr paddr, hwaddr xlat,
693 int prot,
694 target_ulong *address)
696 hwaddr iotlb;
697 CPUWatchpoint *wp;
699 if (memory_region_is_ram(section->mr)) {
700 /* Normal RAM. */
701 iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
702 + xlat;
703 if (!section->readonly) {
704 iotlb |= phys_section_notdirty;
705 } else {
706 iotlb |= phys_section_rom;
708 } else {
709 iotlb = section - phys_sections;
710 iotlb += xlat;
713 /* Make accesses to pages with watchpoints go via the
714 watchpoint trap routines. */
715 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
716 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
717 /* Avoid trapping reads of pages with a write breakpoint. */
718 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
719 iotlb = phys_section_watch + paddr;
720 *address |= TLB_MMIO;
721 break;
726 return iotlb;
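/* The resulting iotlb value is decoded later by iotlb_to_region(): for MMIO
 * the bits below TARGET_PAGE_SIZE are the phys_sections[] index, roughly
 *
 *     MemoryRegion *mr = phys_sections[iotlb & ~TARGET_PAGE_MASK].mr;
 *
 * while for RAM the value is a ram_addr_t with phys_section_notdirty or
 * phys_section_rom ORed into the low bits.  (A sketch; see iotlb_to_region()
 * further down for the authoritative decoding.)
 */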
728 #endif /* defined(CONFIG_USER_ONLY) */
730 #if !defined(CONFIG_USER_ONLY)
732 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
733 uint16_t section);
734 static subpage_t *subpage_init(AddressSpace *as, hwaddr base);
735 static void destroy_page_desc(uint16_t section_index)
737 MemoryRegionSection *section = &phys_sections[section_index];
738 MemoryRegion *mr = section->mr;
740 if (mr->subpage) {
741 subpage_t *subpage = container_of(mr, subpage_t, iomem);
742 memory_region_destroy(&subpage->iomem);
743 g_free(subpage);
747 static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
749 unsigned i;
750 PhysPageEntry *p;
752 if (lp->ptr == PHYS_MAP_NODE_NIL) {
753 return;
756 p = phys_map_nodes[lp->ptr];
757 for (i = 0; i < L2_SIZE; ++i) {
758 if (!p[i].is_leaf) {
759 destroy_l2_mapping(&p[i], level - 1);
760 } else {
761 destroy_page_desc(p[i].ptr);
764 lp->is_leaf = 0;
765 lp->ptr = PHYS_MAP_NODE_NIL;
768 static void destroy_all_mappings(AddressSpaceDispatch *d)
770 destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
771 phys_map_nodes_reset();
774 static uint16_t phys_section_add(MemoryRegionSection *section)
776 /* The physical section number is ORed with a page-aligned
777 * pointer to produce the iotlb entries. Thus it should
778 * never overflow into the page-aligned value.
780 assert(phys_sections_nb < TARGET_PAGE_SIZE);
782 if (phys_sections_nb == phys_sections_nb_alloc) {
783 phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
784 phys_sections = g_renew(MemoryRegionSection, phys_sections,
785 phys_sections_nb_alloc);
787 phys_sections[phys_sections_nb] = *section;
788 return phys_sections_nb++;
791 static void phys_sections_clear(void)
793 phys_sections_nb = 0;
796 static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
798 subpage_t *subpage;
799 hwaddr base = section->offset_within_address_space
800 & TARGET_PAGE_MASK;
801 MemoryRegionSection *existing = phys_page_find(d, base >> TARGET_PAGE_BITS);
802 MemoryRegionSection subsection = {
803 .offset_within_address_space = base,
804 .size = TARGET_PAGE_SIZE,
806 hwaddr start, end;
808 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
810 if (!(existing->mr->subpage)) {
811 subpage = subpage_init(d->as, base);
812 subsection.mr = &subpage->iomem;
813 phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
814 phys_section_add(&subsection));
815 } else {
816 subpage = container_of(existing->mr, subpage_t, iomem);
818 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
819 end = start + section->size - 1;
820 subpage_register(subpage, start, end, phys_section_add(section));
824 static void register_multipage(AddressSpaceDispatch *d, MemoryRegionSection *section)
826 hwaddr start_addr = section->offset_within_address_space;
827 ram_addr_t size = section->size;
828 hwaddr addr;
829 uint16_t section_index = phys_section_add(section);
831 assert(size);
833 addr = start_addr;
834 phys_page_set(d, addr >> TARGET_PAGE_BITS, size >> TARGET_PAGE_BITS,
835 section_index);
838 QEMU_BUILD_BUG_ON(TARGET_PHYS_ADDR_SPACE_BITS > MAX_PHYS_ADDR_SPACE_BITS)
840 static MemoryRegionSection limit(MemoryRegionSection section)
842 section.size = MIN(section.offset_within_address_space + section.size,
843 MAX_PHYS_ADDR + 1)
844 - section.offset_within_address_space;
846 return section;
849 static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
851 AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
852 MemoryRegionSection now = limit(*section), remain = limit(*section);
854 if ((now.offset_within_address_space & ~TARGET_PAGE_MASK)
855 || (now.size < TARGET_PAGE_SIZE)) {
856 now.size = MIN(TARGET_PAGE_ALIGN(now.offset_within_address_space)
857 - now.offset_within_address_space,
858 now.size);
859 register_subpage(d, &now);
860 remain.size -= now.size;
861 remain.offset_within_address_space += now.size;
862 remain.offset_within_region += now.size;
864 while (remain.size >= TARGET_PAGE_SIZE) {
865 now = remain;
866 if (remain.offset_within_region & ~TARGET_PAGE_MASK) {
867 now.size = TARGET_PAGE_SIZE;
868 register_subpage(d, &now);
869 } else {
870 now.size &= TARGET_PAGE_MASK;
871 register_multipage(d, &now);
873 remain.size -= now.size;
874 remain.offset_within_address_space += now.size;
875 remain.offset_within_region += now.size;
877 now = remain;
878 if (now.size) {
879 register_subpage(d, &now);
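/* Example of the splitting above (a sketch with made-up numbers, assuming
 * 4 KiB target pages): a section covering guest physical 0x1800..0x47ff is
 * registered as a subpage for the head [0x1800, 0x2000), as ordinary full
 * pages for [0x2000, 0x4000) via register_multipage() (provided
 * offset_within_region is page-aligned there as well), and as a subpage
 * again for the tail [0x4000, 0x4800).
 */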
883 void qemu_flush_coalesced_mmio_buffer(void)
885 if (kvm_enabled())
886 kvm_flush_coalesced_mmio_buffer();
889 void qemu_mutex_lock_ramlist(void)
891 qemu_mutex_lock(&ram_list.mutex);
894 void qemu_mutex_unlock_ramlist(void)
896 qemu_mutex_unlock(&ram_list.mutex);
899 #if defined(__linux__) && !defined(TARGET_S390X)
901 #include <sys/vfs.h>
903 #define HUGETLBFS_MAGIC 0x958458f6
905 static long gethugepagesize(const char *path)
907 struct statfs fs;
908 int ret;
910 do {
911 ret = statfs(path, &fs);
912 } while (ret != 0 && errno == EINTR);
914 if (ret != 0) {
915 perror(path);
916 return 0;
919 if (fs.f_type != HUGETLBFS_MAGIC)
920 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
922 return fs.f_bsize;
925 static void *file_ram_alloc(RAMBlock *block,
926 ram_addr_t memory,
927 const char *path)
929 char *filename;
930 char *sanitized_name;
931 char *c;
932 void *area;
933 int fd;
934 #ifdef MAP_POPULATE
935 int flags;
936 #endif
937 unsigned long hpagesize;
939 hpagesize = gethugepagesize(path);
940 if (!hpagesize) {
941 return NULL;
944 if (memory < hpagesize) {
945 return NULL;
948 if (kvm_enabled() && !kvm_has_sync_mmu()) {
949 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
950 return NULL;
953 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
954 sanitized_name = g_strdup(block->mr->name);
955 for (c = sanitized_name; *c != '\0'; c++) {
956 if (*c == '/')
957 *c = '_';
960 filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
961 sanitized_name);
962 g_free(sanitized_name);
964 fd = mkstemp(filename);
965 if (fd < 0) {
966 perror("unable to create backing store for hugepages");
967 g_free(filename);
968 return NULL;
970 unlink(filename);
971 g_free(filename);
973 memory = (memory+hpagesize-1) & ~(hpagesize-1);
976 * ftruncate is not supported by hugetlbfs in older
977 * hosts, so don't bother bailing out on errors.
978 * If anything goes wrong with it under other filesystems,
979 * mmap will fail.
981 if (ftruncate(fd, memory))
982 perror("ftruncate");
984 #ifdef MAP_POPULATE
985 /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
986 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
987 * to sidestep this quirk.
989 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
990 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
991 #else
992 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
993 #endif
994 if (area == MAP_FAILED) {
995 perror("file_ram_alloc: can't mmap RAM pages");
996 close(fd);
997 return (NULL);
999 block->fd = fd;
1000 return area;
1002 #endif
1004 static ram_addr_t find_ram_offset(ram_addr_t size)
1006 RAMBlock *block, *next_block;
1007 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
1009 assert(size != 0); /* it would hand out same offset multiple times */
1011 if (QTAILQ_EMPTY(&ram_list.blocks))
1012 return 0;
1014 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1015 ram_addr_t end, next = RAM_ADDR_MAX;
1017 end = block->offset + block->length;
1019 QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
1020 if (next_block->offset >= end) {
1021 next = MIN(next, next_block->offset);
1024 if (next - end >= size && next - end < mingap) {
1025 offset = end;
1026 mingap = next - end;
1030 if (offset == RAM_ADDR_MAX) {
1031 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
1032 (uint64_t)size);
1033 abort();
1036 return offset;
1039 ram_addr_t last_ram_offset(void)
1041 RAMBlock *block;
1042 ram_addr_t last = 0;
1044 QTAILQ_FOREACH(block, &ram_list.blocks, next)
1045 last = MAX(last, block->offset + block->length);
1047 return last;
1050 static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1052 int ret;
1053 QemuOpts *machine_opts;
1055 /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
1056 machine_opts = qemu_opts_find(qemu_find_opts("machine"), 0);
1057 if (machine_opts &&
1058 !qemu_opt_get_bool(machine_opts, "dump-guest-core", true)) {
1059 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1060 if (ret) {
1061 perror("qemu_madvise");
1062 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1063 "but dump_guest_core=off specified\n");
1068 void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
1070 RAMBlock *new_block, *block;
1072 new_block = NULL;
1073 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1074 if (block->offset == addr) {
1075 new_block = block;
1076 break;
1079 assert(new_block);
1080 assert(!new_block->idstr[0]);
1082 if (dev) {
1083 char *id = qdev_get_dev_path(dev);
1084 if (id) {
1085 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
1086 g_free(id);
1089 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1091 /* This assumes the iothread lock is taken here too. */
1092 qemu_mutex_lock_ramlist();
1093 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1094 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
1095 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1096 new_block->idstr);
1097 abort();
1100 qemu_mutex_unlock_ramlist();
1103 static int memory_try_enable_merging(void *addr, size_t len)
1105 QemuOpts *opts;
1107 opts = qemu_opts_find(qemu_find_opts("machine"), 0);
1108 if (opts && !qemu_opt_get_bool(opts, "mem-merge", true)) {
1109 /* disabled by the user */
1110 return 0;
1113 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1116 ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
1117 MemoryRegion *mr)
1119 RAMBlock *block, *new_block;
1121 size = TARGET_PAGE_ALIGN(size);
1122 new_block = g_malloc0(sizeof(*new_block));
1124 /* This assumes the iothread lock is taken here too. */
1125 qemu_mutex_lock_ramlist();
1126 new_block->mr = mr;
1127 new_block->offset = find_ram_offset(size);
1128 if (host) {
1129 new_block->host = host;
1130 new_block->flags |= RAM_PREALLOC_MASK;
1131 } else {
1132 if (mem_path) {
1133 #if defined (__linux__) && !defined(TARGET_S390X)
1134 new_block->host = file_ram_alloc(new_block, size, mem_path);
1135 if (!new_block->host) {
1136 new_block->host = qemu_anon_ram_alloc(size);
1137 memory_try_enable_merging(new_block->host, size);
1139 #else
1140 fprintf(stderr, "-mem-path option unsupported\n");
1141 exit(1);
1142 #endif
1143 } else {
1144 if (xen_enabled()) {
1145 xen_ram_alloc(new_block->offset, size, mr);
1146 } else if (kvm_enabled()) {
1147 /* some s390/kvm configurations have special constraints */
1148 new_block->host = kvm_ram_alloc(size);
1149 } else {
1150 new_block->host = qemu_anon_ram_alloc(size);
1152 memory_try_enable_merging(new_block->host, size);
1155 new_block->length = size;
1157 /* Keep the list sorted from biggest to smallest block. */
1158 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1159 if (block->length < new_block->length) {
1160 break;
1163 if (block) {
1164 QTAILQ_INSERT_BEFORE(block, new_block, next);
1165 } else {
1166 QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
1168 ram_list.mru_block = NULL;
1170 ram_list.version++;
1171 qemu_mutex_unlock_ramlist();
1173 ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
1174 last_ram_offset() >> TARGET_PAGE_BITS);
1175 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
1176 0, size >> TARGET_PAGE_BITS);
1177 cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);
1179 qemu_ram_setup_dump(new_block->host, size);
1180 qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);
1182 if (kvm_enabled())
1183 kvm_setup_guest_memory(new_block->host, size);
1185 return new_block->offset;
1188 ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
1190 return qemu_ram_alloc_from_ptr(size, NULL, mr);
1193 void qemu_ram_free_from_ptr(ram_addr_t addr)
1195 RAMBlock *block;
1197 /* This assumes the iothread lock is taken here too. */
1198 qemu_mutex_lock_ramlist();
1199 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1200 if (addr == block->offset) {
1201 QTAILQ_REMOVE(&ram_list.blocks, block, next);
1202 ram_list.mru_block = NULL;
1203 ram_list.version++;
1204 g_free(block);
1205 break;
1208 qemu_mutex_unlock_ramlist();
1211 void qemu_ram_free(ram_addr_t addr)
1213 RAMBlock *block;
1215 /* This assumes the iothread lock is taken here too. */
1216 qemu_mutex_lock_ramlist();
1217 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1218 if (addr == block->offset) {
1219 QTAILQ_REMOVE(&ram_list.blocks, block, next);
1220 ram_list.mru_block = NULL;
1221 ram_list.version++;
1222 if (block->flags & RAM_PREALLOC_MASK) {
1224 } else if (mem_path) {
1225 #if defined (__linux__) && !defined(TARGET_S390X)
1226 if (block->fd) {
1227 munmap(block->host, block->length);
1228 close(block->fd);
1229 } else {
1230 qemu_anon_ram_free(block->host, block->length);
1232 #else
1233 abort();
1234 #endif
1235 } else {
1236 if (xen_enabled()) {
1237 xen_invalidate_map_cache_entry(block->host);
1238 } else {
1239 qemu_anon_ram_free(block->host, block->length);
1242 g_free(block);
1243 break;
1246 qemu_mutex_unlock_ramlist();
1250 #ifndef _WIN32
1251 void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1253 RAMBlock *block;
1254 ram_addr_t offset;
1255 int flags;
1256 void *area, *vaddr;
1258 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1259 offset = addr - block->offset;
1260 if (offset < block->length) {
1261 vaddr = block->host + offset;
1262 if (block->flags & RAM_PREALLOC_MASK) {
1264 } else {
1265 flags = MAP_FIXED;
1266 munmap(vaddr, length);
1267 if (mem_path) {
1268 #if defined(__linux__) && !defined(TARGET_S390X)
1269 if (block->fd) {
1270 #ifdef MAP_POPULATE
1271 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
1272 MAP_PRIVATE;
1273 #else
1274 flags |= MAP_PRIVATE;
1275 #endif
1276 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1277 flags, block->fd, offset);
1278 } else {
1279 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1280 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1281 flags, -1, 0);
1283 #else
1284 abort();
1285 #endif
1286 } else {
1287 #if defined(TARGET_S390X) && defined(CONFIG_KVM)
1288 flags |= MAP_SHARED | MAP_ANONYMOUS;
1289 area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
1290 flags, -1, 0);
1291 #else
1292 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1293 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1294 flags, -1, 0);
1295 #endif
1297 if (area != vaddr) {
1298 fprintf(stderr, "Could not remap addr: "
1299 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
1300 length, addr);
1301 exit(1);
1303 memory_try_enable_merging(vaddr, length);
1304 qemu_ram_setup_dump(vaddr, length);
1306 return;
1310 #endif /* !_WIN32 */
1312 /* Return a host pointer to ram allocated with qemu_ram_alloc.
1313 With the exception of the softmmu code in this file, this should
1314 only be used for local memory (e.g. video ram) that the device owns,
1315 and knows it isn't going to access beyond the end of the block.
1317 It should not be used for general purpose DMA.
1318 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
1320 void *qemu_get_ram_ptr(ram_addr_t addr)
1322 RAMBlock *block;
1324 /* The list is protected by the iothread lock here. */
1325 block = ram_list.mru_block;
1326 if (block && addr - block->offset < block->length) {
1327 goto found;
1329 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1330 if (addr - block->offset < block->length) {
1331 goto found;
1335 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1336 abort();
1338 found:
1339 ram_list.mru_block = block;
1340 if (xen_enabled()) {
1341 /* We need to check if the requested address is in the RAM
1342 * because we don't want to map the entire memory in QEMU.
1343 * In that case just map until the end of the page.
1345 if (block->offset == 0) {
1346 return xen_map_cache(addr, 0, 0);
1347 } else if (block->host == NULL) {
1348 block->host =
1349 xen_map_cache(block->offset, block->length, 1);
1352 return block->host + (addr - block->offset);
1355 /* Return a host pointer to ram allocated with qemu_ram_alloc. Same as
1356 * qemu_get_ram_ptr but do not touch ram_list.mru_block.
1358 * ??? Is this still necessary?
1360 static void *qemu_safe_ram_ptr(ram_addr_t addr)
1362 RAMBlock *block;
1364 /* The list is protected by the iothread lock here. */
1365 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1366 if (addr - block->offset < block->length) {
1367 if (xen_enabled()) {
1368 /* We need to check if the requested address is in the RAM
1369 * because we don't want to map the entire memory in QEMU.
1370 * In that case just map until the end of the page.
1372 if (block->offset == 0) {
1373 return xen_map_cache(addr, 0, 0);
1374 } else if (block->host == NULL) {
1375 block->host =
1376 xen_map_cache(block->offset, block->length, 1);
1379 return block->host + (addr - block->offset);
1383 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1384 abort();
1386 return NULL;
1389 /* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
1390 * but takes a size argument */
1391 static void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
1393 if (*size == 0) {
1394 return NULL;
1396 if (xen_enabled()) {
1397 return xen_map_cache(addr, *size, 1);
1398 } else {
1399 RAMBlock *block;
1401 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1402 if (addr - block->offset < block->length) {
1403 if (addr - block->offset + *size > block->length)
1404 *size = block->length - addr + block->offset;
1405 return block->host + (addr - block->offset);
1409 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1410 abort();
1414 int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
1416 RAMBlock *block;
1417 uint8_t *host = ptr;
1419 if (xen_enabled()) {
1420 *ram_addr = xen_ram_addr_from_mapcache(ptr);
1421 return 0;
1424 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1425 /* This case can happen when the block is not mapped. */
1426 if (block->host == NULL) {
1427 continue;
1429 if (host - block->host < block->length) {
1430 *ram_addr = block->offset + (host - block->host);
1431 return 0;
1435 return -1;
1438 /* Some of the softmmu routines need to translate from a host pointer
1439 (typically a TLB entry) back to a ram offset. */
1440 ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
1442 ram_addr_t ram_addr;
1444 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
1445 fprintf(stderr, "Bad ram pointer %p\n", ptr);
1446 abort();
1448 return ram_addr;
1451 static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
1452 uint64_t val, unsigned size)
1454 int dirty_flags;
1455 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
1456 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
1457 tb_invalidate_phys_page_fast(ram_addr, size);
1458 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
1460 switch (size) {
1461 case 1:
1462 stb_p(qemu_get_ram_ptr(ram_addr), val);
1463 break;
1464 case 2:
1465 stw_p(qemu_get_ram_ptr(ram_addr), val);
1466 break;
1467 case 4:
1468 stl_p(qemu_get_ram_ptr(ram_addr), val);
1469 break;
1470 default:
1471 abort();
1473 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
1474 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
1475 /* we remove the notdirty callback only if the code has been
1476 flushed */
1477 if (dirty_flags == 0xff)
1478 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
1481 static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
1482 unsigned size, bool is_write)
1484 return is_write;
1487 static const MemoryRegionOps notdirty_mem_ops = {
1488 .write = notdirty_mem_write,
1489 .valid.accepts = notdirty_mem_accepts,
1490 .endianness = DEVICE_NATIVE_ENDIAN,
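/* Sketch of the dirty-tracking round trip: a RAM page that contains
 * translated code gets phys_section_notdirty ORed into its iotlb entry, so a
 * guest store to it is routed through notdirty_mem_write() instead of going
 * straight to RAM.  That handler invalidates the affected TBs, performs the
 * store, sets the dirty flags, and drops back to a plain RAM mapping via
 * tlb_set_dirty() once the code has been flushed (all dirty bits set), so
 * later stores bypass this slow path.
 */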
1493 /* Generate a debug exception if a watchpoint has been hit. */
1494 static void check_watchpoint(int offset, int len_mask, int flags)
1496 CPUArchState *env = cpu_single_env;
1497 target_ulong pc, cs_base;
1498 target_ulong vaddr;
1499 CPUWatchpoint *wp;
1500 int cpu_flags;
1502 if (env->watchpoint_hit) {
1503 /* We re-entered the check after replacing the TB. Now raise
1504 * the debug interrupt so that it will trigger after the
1505 * current instruction. */
1506 cpu_interrupt(ENV_GET_CPU(env), CPU_INTERRUPT_DEBUG);
1507 return;
1509 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
1510 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1511 if ((vaddr == (wp->vaddr & len_mask) ||
1512 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
1513 wp->flags |= BP_WATCHPOINT_HIT;
1514 if (!env->watchpoint_hit) {
1515 env->watchpoint_hit = wp;
1516 tb_check_watchpoint(env);
1517 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
1518 env->exception_index = EXCP_DEBUG;
1519 cpu_loop_exit(env);
1520 } else {
1521 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
1522 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
1523 cpu_resume_from_signal(env, NULL);
1526 } else {
1527 wp->flags &= ~BP_WATCHPOINT_HIT;
1532 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
1533 so these check for a hit then pass through to the normal out-of-line
1534 phys routines. */
1535 static uint64_t watch_mem_read(void *opaque, hwaddr addr,
1536 unsigned size)
1538 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
1539 switch (size) {
1540 case 1: return ldub_phys(addr);
1541 case 2: return lduw_phys(addr);
1542 case 4: return ldl_phys(addr);
1543 default: abort();
1547 static void watch_mem_write(void *opaque, hwaddr addr,
1548 uint64_t val, unsigned size)
1550 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
1551 switch (size) {
1552 case 1:
1553 stb_phys(addr, val);
1554 break;
1555 case 2:
1556 stw_phys(addr, val);
1557 break;
1558 case 4:
1559 stl_phys(addr, val);
1560 break;
1561 default: abort();
1565 static const MemoryRegionOps watch_mem_ops = {
1566 .read = watch_mem_read,
1567 .write = watch_mem_write,
1568 .endianness = DEVICE_NATIVE_ENDIAN,
1571 static uint64_t subpage_read(void *opaque, hwaddr addr,
1572 unsigned len)
1574 subpage_t *subpage = opaque;
1575 uint8_t buf[4];
1577 #if defined(DEBUG_SUBPAGE)
1578 printf("%s: subpage %p len %d addr " TARGET_FMT_plx "\n", __func__,
1579 subpage, len, addr);
1580 #endif
1581 address_space_read(subpage->as, addr + subpage->base, buf, len);
1582 switch (len) {
1583 case 1:
1584 return ldub_p(buf);
1585 case 2:
1586 return lduw_p(buf);
1587 case 4:
1588 return ldl_p(buf);
1589 default:
1590 abort();
1594 static void subpage_write(void *opaque, hwaddr addr,
1595 uint64_t value, unsigned len)
1597 subpage_t *subpage = opaque;
1598 uint8_t buf[4];
1600 #if defined(DEBUG_SUBPAGE)
1601 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
1602 " value %"PRIx64"\n",
1603 __func__, subpage, len, addr, value);
1604 #endif
1605 switch (len) {
1606 case 1:
1607 stb_p(buf, value);
1608 break;
1609 case 2:
1610 stw_p(buf, value);
1611 break;
1612 case 4:
1613 stl_p(buf, value);
1614 break;
1615 default:
1616 abort();
1618 address_space_write(subpage->as, addr + subpage->base, buf, len);
1621 static bool subpage_accepts(void *opaque, hwaddr addr,
1622 unsigned size, bool is_write)
1624 subpage_t *subpage = opaque;
1625 #if defined(DEBUG_SUBPAGE)
1626 printf("%s: subpage %p %c len %d addr " TARGET_FMT_plx "\n",
1627 __func__, subpage, is_write ? 'w' : 'r', len, addr);
1628 #endif
1630 return address_space_access_valid(subpage->as, addr + subpage->base,
1631 size, is_write);
1634 static const MemoryRegionOps subpage_ops = {
1635 .read = subpage_read,
1636 .write = subpage_write,
1637 .valid.accepts = subpage_accepts,
1638 .endianness = DEVICE_NATIVE_ENDIAN,
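/* How a subpage access is served now that subpage_read/write go through the
 * address space (a sketch with made-up numbers): a 2-byte guest store to
 * physical address subpage->base + 0x14 reaches subpage_write() with
 * addr == 0x14 and len == 2.  The value is packed into a small buffer with
 * stw_p() and then simply re-issued as
 *
 *     address_space_write(subpage->as, subpage->base + 0x14, buf, 2);
 *
 * which re-translates the address and dispatches to whichever section
 * subpage_register() installed for that sub-range, rather than the subpage
 * code calling into the registered section by hand.
 */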
1641 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
1642 uint16_t section)
1644 int idx, eidx;
1646 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
1647 return -1;
1648 idx = SUBPAGE_IDX(start);
1649 eidx = SUBPAGE_IDX(end);
1650 #if defined(DEBUG_SUBPAGE)
1651 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
1652 mmio, start, end, idx, eidx, memory);
1653 #endif
1654 for (; idx <= eidx; idx++) {
1655 mmio->sub_section[idx] = section;
1658 return 0;
1661 static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
1663 subpage_t *mmio;
1665 mmio = g_malloc0(sizeof(subpage_t));
1667 mmio->as = as;
1668 mmio->base = base;
1669 memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
1670 "subpage", TARGET_PAGE_SIZE);
1671 mmio->iomem.subpage = true;
1672 #if defined(DEBUG_SUBPAGE)
1673 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
1674 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
1675 #endif
1676 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned);
1678 return mmio;
1681 static uint16_t dummy_section(MemoryRegion *mr)
1683 MemoryRegionSection section = {
1684 .mr = mr,
1685 .offset_within_address_space = 0,
1686 .offset_within_region = 0,
1687 .size = UINT64_MAX,
1690 return phys_section_add(&section);
1693 MemoryRegion *iotlb_to_region(hwaddr index)
1695 return phys_sections[index & ~TARGET_PAGE_MASK].mr;
1698 static void io_mem_init(void)
1700 memory_region_init_io(&io_mem_rom, &unassigned_mem_ops, NULL, "rom", UINT64_MAX);
1701 memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
1702 "unassigned", UINT64_MAX);
1703 memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
1704 "notdirty", UINT64_MAX);
1705 memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
1706 "watch", UINT64_MAX);
1709 static void mem_begin(MemoryListener *listener)
1711 AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
1713 destroy_all_mappings(d);
1714 d->phys_map.ptr = PHYS_MAP_NODE_NIL;
1717 static void core_begin(MemoryListener *listener)
1719 phys_sections_clear();
1720 phys_section_unassigned = dummy_section(&io_mem_unassigned);
1721 phys_section_notdirty = dummy_section(&io_mem_notdirty);
1722 phys_section_rom = dummy_section(&io_mem_rom);
1723 phys_section_watch = dummy_section(&io_mem_watch);
1726 static void tcg_commit(MemoryListener *listener)
1728 CPUArchState *env;
1730 /* since each CPU stores ram addresses in its TLB cache, we must
1731 reset the modified entries */
1732 /* XXX: slow ! */
1733 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1734 tlb_flush(env, 1);
1738 static void core_log_global_start(MemoryListener *listener)
1740 cpu_physical_memory_set_dirty_tracking(1);
1743 static void core_log_global_stop(MemoryListener *listener)
1745 cpu_physical_memory_set_dirty_tracking(0);
1748 static void io_region_add(MemoryListener *listener,
1749 MemoryRegionSection *section)
1751 MemoryRegionIORange *mrio = g_new(MemoryRegionIORange, 1);
1753 mrio->mr = section->mr;
1754 mrio->offset = section->offset_within_region;
1755 iorange_init(&mrio->iorange, &memory_region_iorange_ops,
1756 section->offset_within_address_space, section->size);
1757 ioport_register(&mrio->iorange);
1760 static void io_region_del(MemoryListener *listener,
1761 MemoryRegionSection *section)
1763 isa_unassign_ioport(section->offset_within_address_space, section->size);
1766 static MemoryListener core_memory_listener = {
1767 .begin = core_begin,
1768 .log_global_start = core_log_global_start,
1769 .log_global_stop = core_log_global_stop,
1770 .priority = 1,
1773 static MemoryListener io_memory_listener = {
1774 .region_add = io_region_add,
1775 .region_del = io_region_del,
1776 .priority = 0,
1779 static MemoryListener tcg_memory_listener = {
1780 .commit = tcg_commit,
1783 void address_space_init_dispatch(AddressSpace *as)
1785 AddressSpaceDispatch *d = g_new(AddressSpaceDispatch, 1);
1787 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };
1788 d->listener = (MemoryListener) {
1789 .begin = mem_begin,
1790 .region_add = mem_add,
1791 .region_nop = mem_add,
1792 .priority = 0,
1794 d->as = as;
1795 as->dispatch = d;
1796 memory_listener_register(&d->listener, as);
1799 void address_space_destroy_dispatch(AddressSpace *as)
1801 AddressSpaceDispatch *d = as->dispatch;
1803 memory_listener_unregister(&d->listener);
1804 destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
1805 g_free(d);
1806 as->dispatch = NULL;
1809 static void memory_map_init(void)
1811 system_memory = g_malloc(sizeof(*system_memory));
1812 memory_region_init(system_memory, "system", INT64_MAX);
1813 address_space_init(&address_space_memory, system_memory);
1814 address_space_memory.name = "memory";
1816 system_io = g_malloc(sizeof(*system_io));
1817 memory_region_init(system_io, "io", 65536);
1818 address_space_init(&address_space_io, system_io);
1819 address_space_io.name = "I/O";
1821 memory_listener_register(&core_memory_listener, &address_space_memory);
1822 memory_listener_register(&io_memory_listener, &address_space_io);
1823 memory_listener_register(&tcg_memory_listener, &address_space_memory);
1825 dma_context_init(&dma_context_memory, &address_space_memory,
1826 NULL, NULL, NULL);
1829 MemoryRegion *get_system_memory(void)
1831 return system_memory;
1834 MemoryRegion *get_system_io(void)
1836 return system_io;
1839 #endif /* !defined(CONFIG_USER_ONLY) */
1841 /* physical memory access (slow version, mainly for debug) */
1842 #if defined(CONFIG_USER_ONLY)
1843 int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
1844 uint8_t *buf, int len, int is_write)
1846 int l, flags;
1847 target_ulong page;
1848 void * p;
1850 while (len > 0) {
1851 page = addr & TARGET_PAGE_MASK;
1852 l = (page + TARGET_PAGE_SIZE) - addr;
1853 if (l > len)
1854 l = len;
1855 flags = page_get_flags(page);
1856 if (!(flags & PAGE_VALID))
1857 return -1;
1858 if (is_write) {
1859 if (!(flags & PAGE_WRITE))
1860 return -1;
1861 /* XXX: this code should not depend on lock_user */
1862 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
1863 return -1;
1864 memcpy(p, buf, l);
1865 unlock_user(p, addr, l);
1866 } else {
1867 if (!(flags & PAGE_READ))
1868 return -1;
1869 /* XXX: this code should not depend on lock_user */
1870 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
1871 return -1;
1872 memcpy(buf, p, l);
1873 unlock_user(p, addr, 0);
1875 len -= l;
1876 buf += l;
1877 addr += l;
1879 return 0;
1882 #else
1884 static void invalidate_and_set_dirty(hwaddr addr,
1885 hwaddr length)
1887 if (!cpu_physical_memory_is_dirty(addr)) {
1888 /* invalidate code */
1889 tb_invalidate_phys_page_range(addr, addr + length, 0);
1890 /* set dirty bit */
1891 cpu_physical_memory_set_dirty_flags(addr, (0xff & ~CODE_DIRTY_FLAG));
1893 xen_modified_memory(addr, length);
1896 static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
1898 if (memory_region_is_ram(mr)) {
1899 return !(is_write && mr->readonly);
1901 if (memory_region_is_romd(mr)) {
1902 return !is_write;
1905 return false;
1908 static inline int memory_access_size(MemoryRegion *mr, int l, hwaddr addr)
1910 if (l >= 4 && (((addr & 3) == 0 || mr->ops->impl.unaligned))) {
1911 return 4;
1913 if (l >= 2 && (((addr & 1) == 0) || mr->ops->impl.unaligned)) {
1914 return 2;
1916 return 1;
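/* Illustration (made-up numbers): address_space_rw() below uses this helper
 * to chop an MMIO access into pieces the device model can accept.  An 8-byte
 * access at a 4-byte-aligned address of a device without .impl.unaligned is
 * issued as two 4-byte io_mem_read/write calls; if the address is only
 * 2-byte aligned the first piece shrinks to 2 bytes, and an odd address
 * starts with a single byte, after which the loop re-evaluates the
 * alignment of the advanced address.
 */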
1919 bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
1920 int len, bool is_write)
1922 hwaddr l;
1923 uint8_t *ptr;
1924 uint64_t val;
1925 hwaddr addr1;
1926 MemoryRegionSection *section;
1927 bool error = false;
1929 while (len > 0) {
1930 l = len;
1931 section = address_space_translate(as, addr, &addr1, &l, is_write);
1933 if (is_write) {
1934 if (!memory_access_is_direct(section->mr, is_write)) {
1935 l = memory_access_size(section->mr, l, addr1);
1936 /* XXX: could force cpu_single_env to NULL to avoid
1937 potential bugs */
1938 if (l == 4) {
1939 /* 32 bit write access */
1940 val = ldl_p(buf);
1941 error |= io_mem_write(section->mr, addr1, val, 4);
1942 } else if (l == 2) {
1943 /* 16 bit write access */
1944 val = lduw_p(buf);
1945 error |= io_mem_write(section->mr, addr1, val, 2);
1946 } else {
1947 /* 8 bit write access */
1948 val = ldub_p(buf);
1949 error |= io_mem_write(section->mr, addr1, val, 1);
1951 } else {
1952 addr1 += memory_region_get_ram_addr(section->mr);
1953 /* RAM case */
1954 ptr = qemu_get_ram_ptr(addr1);
1955 memcpy(ptr, buf, l);
1956 invalidate_and_set_dirty(addr1, l);
1958 } else {
1959 if (!memory_access_is_direct(section->mr, is_write)) {
1960 /* I/O case */
1961 l = memory_access_size(section->mr, l, addr1);
1962 if (l == 4) {
1963 /* 32 bit read access */
1964 error |= io_mem_read(section->mr, addr1, &val, 4);
1965 stl_p(buf, val);
1966 } else if (l == 2) {
1967 /* 16 bit read access */
1968 error |= io_mem_read(section->mr, addr1, &val, 2);
1969 stw_p(buf, val);
1970 } else {
1971 /* 8 bit read access */
1972 error |= io_mem_read(section->mr, addr1, &val, 1);
1973 stb_p(buf, val);
1975 } else {
1976 /* RAM case */
1977 ptr = qemu_get_ram_ptr(section->mr->ram_addr + addr1);
1978 memcpy(buf, ptr, l);
1981 len -= l;
1982 buf += l;
1983 addr += l;
1986 return error;
1989 bool address_space_write(AddressSpace *as, hwaddr addr,
1990 const uint8_t *buf, int len)
1992 return address_space_rw(as, addr, (uint8_t *)buf, len, true);
1995 bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
1997 return address_space_rw(as, addr, buf, len, false);
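/* Minimal usage sketch (the address and length are illustrative):
 *
 *     uint8_t buf[4];
 *     if (address_space_read(&address_space_memory, addr, buf, sizeof(buf))) {
 *         // at least one MMIO access in the range reported an error
 *     }
 *     uint32_t val = ldl_p(buf);   // assemble a 32-bit value from the buffer
 *
 * cpu_physical_memory_rw() below is the same operation hard-wired to
 * address_space_memory, discarding the error indication.
 */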
2001 void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
2002 int len, int is_write)
2004 address_space_rw(&address_space_memory, addr, buf, len, is_write);
2007 /* used for ROM loading : can write in RAM and ROM */
2008 void cpu_physical_memory_write_rom(hwaddr addr,
2009 const uint8_t *buf, int len)
2011 hwaddr l;
2012 uint8_t *ptr;
2013 hwaddr addr1;
2014 MemoryRegionSection *section;
2016 while (len > 0) {
2017 l = len;
2018 section = address_space_translate(&address_space_memory,
2019 addr, &addr1, &l, true);
2021 if (!(memory_region_is_ram(section->mr) ||
2022 memory_region_is_romd(section->mr))) {
2023 /* do nothing */
2024 } else {
2025 addr1 += memory_region_get_ram_addr(section->mr);
2026 /* ROM/RAM case */
2027 ptr = qemu_get_ram_ptr(addr1);
2028 memcpy(ptr, buf, l);
2029 invalidate_and_set_dirty(addr1, l);
2031 len -= l;
2032 buf += l;
2033 addr += l;
2037 typedef struct {
2038 void *buffer;
2039 hwaddr addr;
2040 hwaddr len;
2041 } BounceBuffer;
2043 static BounceBuffer bounce;
2045 typedef struct MapClient {
2046 void *opaque;
2047 void (*callback)(void *opaque);
2048 QLIST_ENTRY(MapClient) link;
2049 } MapClient;
2051 static QLIST_HEAD(map_client_list, MapClient) map_client_list
2052 = QLIST_HEAD_INITIALIZER(map_client_list);
2054 void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
2056 MapClient *client = g_malloc(sizeof(*client));
2058 client->opaque = opaque;
2059 client->callback = callback;
2060 QLIST_INSERT_HEAD(&map_client_list, client, link);
2061 return client;
2064 static void cpu_unregister_map_client(void *_client)
2066 MapClient *client = (MapClient *)_client;
2068 QLIST_REMOVE(client, link);
2069 g_free(client);
2072 static void cpu_notify_map_clients(void)
2074 MapClient *client;
2076 while (!QLIST_EMPTY(&map_client_list)) {
2077 client = QLIST_FIRST(&map_client_list);
2078 client->callback(client->opaque);
2079 cpu_unregister_map_client(client);
2083 bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2085 MemoryRegionSection *section;
2086 hwaddr l, xlat;
2088 while (len > 0) {
2089 l = len;
2090 section = address_space_translate(as, addr, &xlat, &l, is_write);
2091 if (!memory_access_is_direct(section->mr, is_write)) {
2092 l = memory_access_size(section->mr, l, addr);
2093 if (!memory_region_access_valid(section->mr, xlat, l, is_write)) {
2094 return false;
2098 len -= l;
2099 addr += l;
2101 return true;
2104 /* Map a physical memory region into a host virtual address.
2105 * May map a subset of the requested range, given by and returned in *plen.
2106 * May return NULL if resources needed to perform the mapping are exhausted.
2107 * Use only for reads OR writes - not for read-modify-write operations.
2108 * Use cpu_register_map_client() to know when retrying the map operation is
2109 * likely to succeed.
2111 void *address_space_map(AddressSpace *as,
2112 hwaddr addr,
2113 hwaddr *plen,
2114 bool is_write)
2116 hwaddr len = *plen;
2117 hwaddr todo = 0;
2118 hwaddr l, xlat;
2119 MemoryRegionSection *section;
2120 ram_addr_t raddr = RAM_ADDR_MAX;
2121 ram_addr_t rlen;
2122 void *ret;
2124 while (len > 0) {
2125 l = len;
2126 section = address_space_translate(as, addr, &xlat, &l, is_write);
2128 if (!memory_access_is_direct(section->mr, is_write)) {
2129 if (todo || bounce.buffer) {
2130 break;
2132 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
2133 bounce.addr = addr;
2134 bounce.len = l;
2135 if (!is_write) {
2136 address_space_read(as, addr, bounce.buffer, l);
2139 *plen = l;
2140 return bounce.buffer;
2142 if (!todo) {
2143 raddr = memory_region_get_ram_addr(section->mr) + xlat;
2144 } else {
2145 if (memory_region_get_ram_addr(section->mr) + xlat != raddr + todo) {
2146 break;
2150 len -= l;
2151 addr += l;
2152 todo += l;
2154 rlen = todo;
2155 ret = qemu_ram_ptr_length(raddr, &rlen);
2156 *plen = rlen;
2157 return ret;
/* Unmaps a memory region previously mapped by address_space_map().
 * Will also mark the memory as dirty if is_write == 1. access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         int is_write, hwaddr access_len)
{
    if (buffer != bounce.buffer) {
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len) {
                    l = access_len;
                }
                invalidate_and_set_dirty(addr1, l);
                addr1 += l;
                access_len -= l;
            }
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        return;
    }
    if (is_write) {
        address_space_write(as, bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}
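/*
 * Illustrative sketch (not part of the original file): the intended
 * map/unmap pattern.  Map as much of the range as possible, operate on the
 * returned host pointer, then unmap with the length actually accessed so the
 * dirty bitmap is updated (and the bounce buffer, if one was used, is
 * flushed and released).  The helper name and the memset() payload are
 * hypothetical.
 */
static inline void zero_guest_ram_sketch(AddressSpace *as, hwaddr addr,
                                         hwaddr size)
{
    while (size > 0) {
        hwaddr plen = size;
        void *host = address_space_map(as, addr, &plen, true);
        if (!host) {
            /* Resources exhausted (e.g. bounce buffer busy): a real caller
             * would use cpu_register_map_client() to be told when a retry is
             * likely to succeed. */
            return;
        }
        memset(host, 0, plen);
        address_space_unmap(as, host, plen, true, plen);
        addr += plen;
        size -= plen;
    }
}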
void *cpu_physical_memory_map(hwaddr addr,
                              hwaddr *plen,
                              int is_write)
{
    return address_space_map(&address_space_memory, addr, plen, is_write);
}

void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                               int is_write, hwaddr access_len)
{
    return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
}
/* warning: addr must be aligned */
static inline uint32_t ldl_phys_internal(hwaddr addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegionSection *section;
    hwaddr l = 4;
    hwaddr addr1;

    section = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                      false);
    if (l < 4 || !memory_access_is_direct(section->mr, false)) {
        /* I/O case */
        io_mem_read(section->mr, addr1, &val, 4);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t ldl_phys(hwaddr addr)
{
    return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t ldl_le_phys(hwaddr addr)
{
    return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t ldl_be_phys(hwaddr addr)
{
    return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
}
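/*
 * Illustrative sketch (not part of the original file): the public wrappers
 * differ only in how the loaded value is byte-swapped relative to the host.
 * Reading a hypothetical little-endian descriptor (address word followed by
 * a length word) with the _le_ accessors yields host-order values on any
 * target; the helper name and layout are made up for the example.
 */
static inline void read_le_desc_sketch(hwaddr desc, uint32_t *buf_lo,
                                       uint32_t *buf_len)
{
    /* Both fields are little-endian in this hypothetical layout. */
    *buf_lo  = ldl_le_phys(desc);
    *buf_len = ldl_le_phys(desc + 4);
}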
/* warning: addr must be aligned */
static inline uint64_t ldq_phys_internal(hwaddr addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegionSection *section;
    hwaddr l = 8;
    hwaddr addr1;

    section = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                      false);
    if (l < 8 || !memory_access_is_direct(section->mr, false)) {
        /* I/O case */
        io_mem_read(section->mr, addr1, &val, 8);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap64(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap64(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
    }
    return val;
}

uint64_t ldq_phys(hwaddr addr)
{
    return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint64_t ldq_le_phys(hwaddr addr)
{
    return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint64_t ldq_be_phys(hwaddr addr)
{
    return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
}
/* XXX: optimize */
uint32_t ldub_phys(hwaddr addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}
/* warning: addr must be aligned */
static inline uint32_t lduw_phys_internal(hwaddr addr,
                                          enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegionSection *section;
    hwaddr l = 2;
    hwaddr addr1;

    section = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                      false);
    if (l < 2 || !memory_access_is_direct(section->mr, false)) {
        /* I/O case */
        io_mem_read(section->mr, addr1, &val, 2);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t lduw_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t lduw_le_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t lduw_be_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
}
/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(hwaddr addr, uint32_t val)
{
    uint8_t *ptr;
    MemoryRegionSection *section;
    hwaddr l = 4;
    hwaddr addr1;

    section = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                      true);
    if (l < 4 || !memory_access_is_direct(section->mr, true)) {
        io_mem_write(section->mr, addr1, val, 4);
    } else {
        addr1 += memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}
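/*
 * Illustrative sketch (not part of the original file): contrast with
 * stl_phys() below.  When target MMU code updates an accessed/dirty bit in a
 * guest page table entry it can use stl_phys_notdirty(), so the store skips
 * the invalidate_and_set_dirty() bookkeeping that stl_phys() performs.  The
 * helper name, pte_addr and the bit mask are hypothetical.
 */
static inline void set_pte_accessed_sketch(hwaddr pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);

    stl_phys_notdirty(pte_addr, pte | 0x20);    /* hypothetical "accessed" bit */
}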
/* warning: addr must be aligned */
static inline void stl_phys_internal(hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegionSection *section;
    hwaddr l = 4;
    hwaddr addr1;

    section = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                      true);
    if (l < 4 || !memory_access_is_direct(section->mr, true)) {
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        io_mem_write(section->mr, addr1, val, 4);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 4);
    }
}

void stl_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stl_le_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stl_be_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}
/* XXX: optimize */
void stb_phys(hwaddr addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}
/* warning: addr must be aligned */
static inline void stw_phys_internal(hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegionSection *section;
    hwaddr l = 2;
    hwaddr addr1;

    section = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                      true);
    if (l < 2 || !memory_access_is_direct(section->mr, true)) {
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        io_mem_write(section->mr, addr1, val, 2);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 2);
    }
}

void stw_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stw_le_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stw_be_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}
/* XXX: optimize */
void stq_phys(hwaddr addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_le_phys(hwaddr addr, uint64_t val)
{
    val = cpu_to_le64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_be_phys(hwaddr addr, uint64_t val)
{
    val = cpu_to_be64(val);
    cpu_physical_memory_write(addr, &val, 8);
}
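/*
 * Illustrative sketch (not part of the original file): the _le_/_be_
 * store/load pairs round-trip a 64-bit value regardless of host or target
 * endianness, which is why device models use them for guest-visible
 * structures with a fixed byte order.  The helper name and address are
 * hypothetical.
 */
static inline bool stq_ldq_le_roundtrip_sketch(hwaddr addr, uint64_t val)
{
    stq_le_phys(addr, val);
    return ldq_le_phys(addr) == val;
}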
/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1) {
            return -1;
        }
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len) {
            l = len;
        }
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write) {
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        } else {
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
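/*
 * Illustrative sketch (not part of the original file): this is the kind of
 * call the gdbstub makes when it needs to peek at guest virtual memory.  The
 * helper name and the 32-bit read size are hypothetical.
 */
static inline uint32_t debug_read_u32_sketch(CPUArchState *env,
                                             target_ulong vaddr)
{
    uint8_t buf[4] = { 0 };

    /* cpu_memory_rw_debug() returns -1 if no physical page is mapped. */
    if (cpu_memory_rw_debug(env, vaddr, buf, sizeof(buf), 0) < 0) {
        return 0;
    }
    return ldl_p(buf);
}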
#endif
#if !defined(CONFIG_USER_ONLY)

/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool virtio_is_big_endian(void);
bool virtio_is_big_endian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}

#endif
#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegionSection *section;
    hwaddr l = 1;

    section = address_space_translate(&address_space_memory,
                                      phys_addr, &phys_addr, &l, false);

    return !(memory_region_is_ram(section->mr) ||
             memory_region_is_romd(section->mr));
}
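/*
 * Illustrative sketch (not part of the original file): accelerators use this
 * predicate to decide whether a faulting guest physical address needs MMIO
 * emulation or is plain RAM/ROMD that can be accessed directly.  The helper
 * name is hypothetical.
 */
static inline bool needs_mmio_emulation_sketch(hwaddr gpa)
{
    return cpu_physical_memory_is_io(gpa);
}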
#endif