memory: assert that PhysPageEntry's ptr does not overflow
[qemu/ar7.git] / exec.c (blob 8562fcac9c654032651ddefa04e69c46177f331d)
1 /*
2 * Virtual page mapping
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 #include "config.h"
20 #ifdef _WIN32
21 #include <windows.h>
22 #else
23 #include <sys/types.h>
24 #include <sys/mman.h>
25 #endif
27 #include "qemu-common.h"
28 #include "cpu.h"
29 #include "tcg.h"
30 #include "hw/hw.h"
31 #include "hw/qdev.h"
32 #include "qemu/osdep.h"
33 #include "sysemu/kvm.h"
34 #include "hw/xen/xen.h"
35 #include "qemu/timer.h"
36 #include "qemu/config-file.h"
37 #include "exec/memory.h"
38 #include "sysemu/dma.h"
39 #include "exec/address-spaces.h"
40 #if defined(CONFIG_USER_ONLY)
41 #include <qemu.h>
42 #else /* !CONFIG_USER_ONLY */
43 #include "sysemu/xen-mapcache.h"
44 #include "trace.h"
45 #endif
46 #include "exec/cpu-all.h"
48 #include "exec/cputlb.h"
49 #include "translate-all.h"
51 #include "exec/memory-internal.h"
53 //#define DEBUG_UNASSIGNED
54 //#define DEBUG_SUBPAGE
56 #if !defined(CONFIG_USER_ONLY)
57 int phys_ram_fd;
58 static int in_migration;
60 RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };
62 static MemoryRegion *system_memory;
63 static MemoryRegion *system_io;
65 AddressSpace address_space_io;
66 AddressSpace address_space_memory;
67 DMAContext dma_context_memory;
69 MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
70 static MemoryRegion io_mem_subpage_ram;
72 #endif
74 CPUArchState *first_cpu;
75 /* current CPU in the current thread. It is only valid inside
76 cpu_exec() */
77 DEFINE_TLS(CPUArchState *,cpu_single_env);
78 /* 0 = Do not count executed instructions.
79 1 = Precise instruction counting.
80 2 = Adaptive rate instruction counting. */
81 int use_icount;
83 #if !defined(CONFIG_USER_ONLY)
85 static MemoryRegionSection *phys_sections;
86 static unsigned phys_sections_nb, phys_sections_nb_alloc;
87 static uint16_t phys_section_unassigned;
88 static uint16_t phys_section_notdirty;
89 static uint16_t phys_section_rom;
90 static uint16_t phys_section_watch;
92 /* Simple allocator for PhysPageEntry nodes */
93 static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
94 static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;
96 #define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)
98 static void io_mem_init(void);
99 static void memory_map_init(void);
100 static void *qemu_safe_ram_ptr(ram_addr_t addr);
102 static MemoryRegion io_mem_watch;
103 #endif
105 #if !defined(CONFIG_USER_ONLY)
107 static void phys_map_node_reserve(unsigned nodes)
109 if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
110 typedef PhysPageEntry Node[L2_SIZE];
111 phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
112 phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
113 phys_map_nodes_nb + nodes);
114 phys_map_nodes = g_renew(Node, phys_map_nodes,
115 phys_map_nodes_nb_alloc);
119 static uint16_t phys_map_node_alloc(void)
121 unsigned i;
122 uint16_t ret;
124 ret = phys_map_nodes_nb++;
125 assert(ret != PHYS_MAP_NODE_NIL);
126 assert(ret != phys_map_nodes_nb_alloc);
127 for (i = 0; i < L2_SIZE; ++i) {
128 phys_map_nodes[ret][i].is_leaf = 0;
129 phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
131 return ret;
134 static void phys_map_nodes_reset(void)
136 phys_map_nodes_nb = 0;
140 static void phys_page_set_level(PhysPageEntry *lp, hwaddr *index,
141 hwaddr *nb, uint16_t leaf,
142 int level)
144 PhysPageEntry *p;
145 int i;
146 hwaddr step = (hwaddr)1 << (level * L2_BITS);
148 if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
149 lp->ptr = phys_map_node_alloc();
150 p = phys_map_nodes[lp->ptr];
151 if (level == 0) {
152 for (i = 0; i < L2_SIZE; i++) {
153 p[i].is_leaf = 1;
154 p[i].ptr = phys_section_unassigned;
157 } else {
158 p = phys_map_nodes[lp->ptr];
160 lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];
162 while (*nb && lp < &p[L2_SIZE]) {
163 if ((*index & (step - 1)) == 0 && *nb >= step) {
164 lp->is_leaf = true;
165 lp->ptr = leaf;
166 *index += step;
167 *nb -= step;
168 } else {
169 phys_page_set_level(lp, index, nb, leaf, level - 1);
171 ++lp;
175 static void phys_page_set(AddressSpaceDispatch *d,
176 hwaddr index, hwaddr nb,
177 uint16_t leaf)
179 /* Wildly overreserve - it doesn't matter much. */
180 phys_map_node_reserve(3 * P_L2_LEVELS);
182 phys_page_set_level(&d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
185 MemoryRegionSection *phys_page_find(AddressSpaceDispatch *d, hwaddr index)
187 PhysPageEntry lp = d->phys_map;
188 PhysPageEntry *p;
189 int i;
190 uint16_t s_index = phys_section_unassigned;
192 for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
193 if (lp.ptr == PHYS_MAP_NODE_NIL) {
194 goto not_found;
196 p = phys_map_nodes[lp.ptr];
197 lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
200 s_index = lp.ptr;
201 not_found:
202 return &phys_sections[s_index];
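/*
 * Illustrative sketch of the lookup above: the dispatch structure is a small
 * radix tree in which each node covers L2_BITS of the physical page index.
 * Using only the macros already referenced here (L2_BITS, L2_SIZE,
 * P_L2_LEVELS, TARGET_PAGE_BITS), the per-level slot is extracted as:
 *
 *     hwaddr index = addr >> TARGET_PAGE_BITS;
 *     for (i = P_L2_LEVELS - 1; i >= 0; i--) {
 *         unsigned slot = (index >> (i * L2_BITS)) & (L2_SIZE - 1);
 *         // descend via phys_map_nodes[node][slot] until a leaf is reached
 *     }
 */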
205 bool memory_region_is_unassigned(MemoryRegion *mr)
207 return mr != &io_mem_ram && mr != &io_mem_rom
208 && mr != &io_mem_notdirty && !mr->rom_device
209 && mr != &io_mem_watch;
211 #endif
213 void cpu_exec_init_all(void)
215 #if !defined(CONFIG_USER_ONLY)
216 qemu_mutex_init(&ram_list.mutex);
217 memory_map_init();
218 io_mem_init();
219 #endif
222 #if !defined(CONFIG_USER_ONLY)
224 static int cpu_common_post_load(void *opaque, int version_id)
226 CPUState *cpu = opaque;
228 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
229 version_id is increased. */
230 cpu->interrupt_request &= ~0x01;
231 tlb_flush(cpu->env_ptr, 1);
233 return 0;
236 static const VMStateDescription vmstate_cpu_common = {
237 .name = "cpu_common",
238 .version_id = 1,
239 .minimum_version_id = 1,
240 .minimum_version_id_old = 1,
241 .post_load = cpu_common_post_load,
242 .fields = (VMStateField []) {
243 VMSTATE_UINT32(halted, CPUState),
244 VMSTATE_UINT32(interrupt_request, CPUState),
245 VMSTATE_END_OF_LIST()
248 #else
249 #define vmstate_cpu_common vmstate_dummy
250 #endif
252 CPUState *qemu_get_cpu(int index)
254 CPUArchState *env = first_cpu;
255 CPUState *cpu = NULL;
257 while (env) {
258 cpu = ENV_GET_CPU(env);
259 if (cpu->cpu_index == index) {
260 break;
262 env = env->next_cpu;
265 return env ? cpu : NULL;
268 void qemu_for_each_cpu(void (*func)(CPUState *cpu, void *data), void *data)
270 CPUArchState *env = first_cpu;
272 while (env) {
273 func(ENV_GET_CPU(env), data);
274 env = env->next_cpu;
278 void cpu_exec_init(CPUArchState *env)
280 CPUState *cpu = ENV_GET_CPU(env);
281 CPUClass *cc = CPU_GET_CLASS(cpu);
282 CPUArchState **penv;
283 int cpu_index;
285 #if defined(CONFIG_USER_ONLY)
286 cpu_list_lock();
287 #endif
288 env->next_cpu = NULL;
289 penv = &first_cpu;
290 cpu_index = 0;
291 while (*penv != NULL) {
292 penv = &(*penv)->next_cpu;
293 cpu_index++;
295 cpu->cpu_index = cpu_index;
296 cpu->numa_node = 0;
297 QTAILQ_INIT(&env->breakpoints);
298 QTAILQ_INIT(&env->watchpoints);
299 #ifndef CONFIG_USER_ONLY
300 cpu->thread_id = qemu_get_thread_id();
301 #endif
302 *penv = env;
303 #if defined(CONFIG_USER_ONLY)
304 cpu_list_unlock();
305 #endif
306 vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
307 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
308 register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
309 cpu_save, cpu_load, env);
310 assert(cc->vmsd == NULL);
311 #endif
312 if (cc->vmsd != NULL) {
313 vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
317 #if defined(TARGET_HAS_ICE)
318 #if defined(CONFIG_USER_ONLY)
319 static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
321 tb_invalidate_phys_page_range(pc, pc + 1, 0);
323 #else
324 static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
326 tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc) |
327 (pc & ~TARGET_PAGE_MASK));
329 #endif
330 #endif /* TARGET_HAS_ICE */
332 #if defined(CONFIG_USER_ONLY)
333 void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
338 int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
339 int flags, CPUWatchpoint **watchpoint)
341 return -ENOSYS;
343 #else
344 /* Add a watchpoint. */
345 int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
346 int flags, CPUWatchpoint **watchpoint)
348 target_ulong len_mask = ~(len - 1);
349 CPUWatchpoint *wp;
351 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
352 if ((len & (len - 1)) || (addr & ~len_mask) ||
353 len == 0 || len > TARGET_PAGE_SIZE) {
354 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
355 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
356 return -EINVAL;
358 wp = g_malloc(sizeof(*wp));
360 wp->vaddr = addr;
361 wp->len_mask = len_mask;
362 wp->flags = flags;
364 /* keep all GDB-injected watchpoints in front */
365 if (flags & BP_GDB)
366 QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
367 else
368 QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
370 tlb_flush_page(env, addr);
372 if (watchpoint)
373 *watchpoint = wp;
374 return 0;
377 /* Remove a specific watchpoint. */
378 int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
379 int flags)
381 target_ulong len_mask = ~(len - 1);
382 CPUWatchpoint *wp;
384 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
385 if (addr == wp->vaddr && len_mask == wp->len_mask
386 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
387 cpu_watchpoint_remove_by_ref(env, wp);
388 return 0;
391 return -ENOENT;
394 /* Remove a specific watchpoint by reference. */
395 void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
397 QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
399 tlb_flush_page(env, watchpoint->vaddr);
401 g_free(watchpoint);
404 /* Remove all matching watchpoints. */
405 void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
407 CPUWatchpoint *wp, *next;
409 QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
410 if (wp->flags & mask)
411 cpu_watchpoint_remove_by_ref(env, wp);
414 #endif
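/*
 * Illustrative call (hypothetical env/addr values): per the sanity check in
 * cpu_watchpoint_insert() above, len must be a power of two no larger than
 * TARGET_PAGE_SIZE and addr must be aligned to len, e.g.:
 *
 *     CPUWatchpoint *wp;
 *     if (cpu_watchpoint_insert(env, addr & ~(target_ulong)3, 4,
 *                               BP_MEM_WRITE | BP_GDB, &wp) == 0) {
 *         // a 4-byte, 4-byte-aligned GDB write watchpoint is now armed
 *     }
 */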
416 /* Add a breakpoint. */
417 int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
418 CPUBreakpoint **breakpoint)
420 #if defined(TARGET_HAS_ICE)
421 CPUBreakpoint *bp;
423 bp = g_malloc(sizeof(*bp));
425 bp->pc = pc;
426 bp->flags = flags;
428 /* keep all GDB-injected breakpoints in front */
429 if (flags & BP_GDB)
430 QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
431 else
432 QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
434 breakpoint_invalidate(env, pc);
436 if (breakpoint)
437 *breakpoint = bp;
438 return 0;
439 #else
440 return -ENOSYS;
441 #endif
444 /* Remove a specific breakpoint. */
445 int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
447 #if defined(TARGET_HAS_ICE)
448 CPUBreakpoint *bp;
450 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
451 if (bp->pc == pc && bp->flags == flags) {
452 cpu_breakpoint_remove_by_ref(env, bp);
453 return 0;
456 return -ENOENT;
457 #else
458 return -ENOSYS;
459 #endif
462 /* Remove a specific breakpoint by reference. */
463 void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
465 #if defined(TARGET_HAS_ICE)
466 QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
468 breakpoint_invalidate(env, breakpoint->pc);
470 g_free(breakpoint);
471 #endif
474 /* Remove all matching breakpoints. */
475 void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
477 #if defined(TARGET_HAS_ICE)
478 CPUBreakpoint *bp, *next;
480 QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
481 if (bp->flags & mask)
482 cpu_breakpoint_remove_by_ref(env, bp);
484 #endif
487 /* enable or disable single step mode. EXCP_DEBUG is returned by the
488 CPU loop after each instruction */
489 void cpu_single_step(CPUArchState *env, int enabled)
491 #if defined(TARGET_HAS_ICE)
492 if (env->singlestep_enabled != enabled) {
493 env->singlestep_enabled = enabled;
494 if (kvm_enabled())
495 kvm_update_guest_debug(env, 0);
496 else {
497 /* must flush all the translated code to avoid inconsistencies */
498 /* XXX: only flush what is necessary */
499 tb_flush(env);
502 #endif
505 void cpu_exit(CPUArchState *env)
507 CPUState *cpu = ENV_GET_CPU(env);
509 cpu->exit_request = 1;
510 cpu->tcg_exit_req = 1;
513 void cpu_abort(CPUArchState *env, const char *fmt, ...)
515 va_list ap;
516 va_list ap2;
518 va_start(ap, fmt);
519 va_copy(ap2, ap);
520 fprintf(stderr, "qemu: fatal: ");
521 vfprintf(stderr, fmt, ap);
522 fprintf(stderr, "\n");
523 cpu_dump_state(env, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
524 if (qemu_log_enabled()) {
525 qemu_log("qemu: fatal: ");
526 qemu_log_vprintf(fmt, ap2);
527 qemu_log("\n");
528 log_cpu_state(env, CPU_DUMP_FPU | CPU_DUMP_CCOP);
529 qemu_log_flush();
530 qemu_log_close();
532 va_end(ap2);
533 va_end(ap);
534 #if defined(CONFIG_USER_ONLY)
536 struct sigaction act;
537 sigfillset(&act.sa_mask);
538 act.sa_handler = SIG_DFL;
539 sigaction(SIGABRT, &act, NULL);
541 #endif
542 abort();
545 CPUArchState *cpu_copy(CPUArchState *env)
547 CPUArchState *new_env = cpu_init(env->cpu_model_str);
548 CPUArchState *next_cpu = new_env->next_cpu;
549 #if defined(TARGET_HAS_ICE)
550 CPUBreakpoint *bp;
551 CPUWatchpoint *wp;
552 #endif
554 memcpy(new_env, env, sizeof(CPUArchState));
556 /* Preserve chaining. */
557 new_env->next_cpu = next_cpu;
559 /* Clone all break/watchpoints.
560 Note: Once we support ptrace with hw-debug register access, make sure
561 BP_CPU break/watchpoints are handled correctly on clone. */
562 QTAILQ_INIT(&env->breakpoints);
563 QTAILQ_INIT(&env->watchpoints);
564 #if defined(TARGET_HAS_ICE)
565 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
566 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
568 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
569 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
570 wp->flags, NULL);
572 #endif
574 return new_env;
577 #if !defined(CONFIG_USER_ONLY)
578 static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
579 uintptr_t length)
581 uintptr_t start1;
583 /* we modify the TLB cache so that the dirty bit will be set again
584 when accessing the range */
585 start1 = (uintptr_t)qemu_safe_ram_ptr(start);
586 /* Check that we don't span multiple blocks - this breaks the
587 address comparisons below. */
588 if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
589 != (end - 1) - start) {
590 abort();
592 cpu_tlb_reset_dirty_all(start1, length);
596 /* Note: start and end must be within the same ram block. */
597 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
598 int dirty_flags)
600 uintptr_t length;
602 start &= TARGET_PAGE_MASK;
603 end = TARGET_PAGE_ALIGN(end);
605 length = end - start;
606 if (length == 0)
607 return;
608 cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
610 if (tcg_enabled()) {
611 tlb_reset_dirty_range_all(start, end, length);
615 static int cpu_physical_memory_set_dirty_tracking(int enable)
617 int ret = 0;
618 in_migration = enable;
619 return ret;
622 hwaddr memory_region_section_get_iotlb(CPUArchState *env,
623 MemoryRegionSection *section,
624 target_ulong vaddr,
625 hwaddr paddr,
626 int prot,
627 target_ulong *address)
629 hwaddr iotlb;
630 CPUWatchpoint *wp;
632 if (memory_region_is_ram(section->mr)) {
633 /* Normal RAM. */
634 iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
635 + memory_region_section_addr(section, paddr);
636 if (!section->readonly) {
637 iotlb |= phys_section_notdirty;
638 } else {
639 iotlb |= phys_section_rom;
641 } else {
642 iotlb = section - phys_sections;
643 iotlb += memory_region_section_addr(section, paddr);
646 /* Make accesses to pages with watchpoints go via the
647 watchpoint trap routines. */
648 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
649 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
650 /* Avoid trapping reads of pages with a write breakpoint. */
651 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
652 iotlb = phys_section_watch + paddr;
653 *address |= TLB_MMIO;
654 break;
659 return iotlb;
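/*
 * Sketch of the iotlb encoding used above (derived from this function,
 * phys_section_add() and iotlb_to_region() later in this file): for MMIO the
 * section index lives in the sub-page bits and the page-aligned address in
 * the bits above them, so the region can later be recovered with:
 *
 *     MemoryRegionSection *s = &phys_sections[iotlb & ~TARGET_PAGE_MASK];
 *
 * which is why phys_section_add() asserts that the number of sections stays
 * below TARGET_PAGE_SIZE.
 */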
661 #endif /* defined(CONFIG_USER_ONLY) */
663 #if !defined(CONFIG_USER_ONLY)
665 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
666 typedef struct subpage_t {
667 MemoryRegion iomem;
668 hwaddr base;
669 uint16_t sub_section[TARGET_PAGE_SIZE];
670 } subpage_t;
672 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
673 uint16_t section);
674 static subpage_t *subpage_init(hwaddr base);
675 static void destroy_page_desc(uint16_t section_index)
677 MemoryRegionSection *section = &phys_sections[section_index];
678 MemoryRegion *mr = section->mr;
680 if (mr->subpage) {
681 subpage_t *subpage = container_of(mr, subpage_t, iomem);
682 memory_region_destroy(&subpage->iomem);
683 g_free(subpage);
687 static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
689 unsigned i;
690 PhysPageEntry *p;
692 if (lp->ptr == PHYS_MAP_NODE_NIL) {
693 return;
696 p = phys_map_nodes[lp->ptr];
697 for (i = 0; i < L2_SIZE; ++i) {
698 if (!p[i].is_leaf) {
699 destroy_l2_mapping(&p[i], level - 1);
700 } else {
701 destroy_page_desc(p[i].ptr);
704 lp->is_leaf = 0;
705 lp->ptr = PHYS_MAP_NODE_NIL;
708 static void destroy_all_mappings(AddressSpaceDispatch *d)
710 destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
711 phys_map_nodes_reset();
714 static uint16_t phys_section_add(MemoryRegionSection *section)
716 /* The physical section number is ORed with a page-aligned
717 * pointer to produce the iotlb entries. Thus it should
718 * never overflow into the page-aligned value.
720 assert(phys_sections_nb < TARGET_PAGE_SIZE);
722 if (phys_sections_nb == phys_sections_nb_alloc) {
723 phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
724 phys_sections = g_renew(MemoryRegionSection, phys_sections,
725 phys_sections_nb_alloc);
727 phys_sections[phys_sections_nb] = *section;
728 return phys_sections_nb++;
731 static void phys_sections_clear(void)
733 phys_sections_nb = 0;
736 static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
738 subpage_t *subpage;
739 hwaddr base = section->offset_within_address_space
740 & TARGET_PAGE_MASK;
741 MemoryRegionSection *existing = phys_page_find(d, base >> TARGET_PAGE_BITS);
742 MemoryRegionSection subsection = {
743 .offset_within_address_space = base,
744 .size = TARGET_PAGE_SIZE,
746 hwaddr start, end;
748 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
750 if (!(existing->mr->subpage)) {
751 subpage = subpage_init(base);
752 subsection.mr = &subpage->iomem;
753 phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
754 phys_section_add(&subsection));
755 } else {
756 subpage = container_of(existing->mr, subpage_t, iomem);
758 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
759 end = start + section->size - 1;
760 subpage_register(subpage, start, end, phys_section_add(section));
764 static void register_multipage(AddressSpaceDispatch *d, MemoryRegionSection *section)
766 hwaddr start_addr = section->offset_within_address_space;
767 ram_addr_t size = section->size;
768 hwaddr addr;
769 uint16_t section_index = phys_section_add(section);
771 assert(size);
773 addr = start_addr;
774 phys_page_set(d, addr >> TARGET_PAGE_BITS, size >> TARGET_PAGE_BITS,
775 section_index);
778 static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
780 AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
781 MemoryRegionSection now = *section, remain = *section;
783 if ((now.offset_within_address_space & ~TARGET_PAGE_MASK)
784 || (now.size < TARGET_PAGE_SIZE)) {
785 now.size = MIN(TARGET_PAGE_ALIGN(now.offset_within_address_space)
786 - now.offset_within_address_space,
787 now.size);
788 register_subpage(d, &now);
789 remain.size -= now.size;
790 remain.offset_within_address_space += now.size;
791 remain.offset_within_region += now.size;
793 while (remain.size >= TARGET_PAGE_SIZE) {
794 now = remain;
795 if (remain.offset_within_region & ~TARGET_PAGE_MASK) {
796 now.size = TARGET_PAGE_SIZE;
797 register_subpage(d, &now);
798 } else {
799 now.size &= TARGET_PAGE_MASK;
800 register_multipage(d, &now);
802 remain.size -= now.size;
803 remain.offset_within_address_space += now.size;
804 remain.offset_within_region += now.size;
806 now = remain;
807 if (now.size) {
808 register_subpage(d, &now);
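/*
 * Worked example of the splitting above (TARGET_PAGE_SIZE assumed to be
 * 0x1000 purely for illustration): a section with
 * offset_within_address_space = 0x1800 and size = 0x3000 is registered as
 *
 *     head  [0x1800, 0x2000)  -> register_subpage()
 *     body  [0x2000, 0x4000)  -> register_multipage()
 *     tail  [0x4000, 0x4800)  -> register_subpage()
 */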
812 void qemu_flush_coalesced_mmio_buffer(void)
814 if (kvm_enabled())
815 kvm_flush_coalesced_mmio_buffer();
818 void qemu_mutex_lock_ramlist(void)
820 qemu_mutex_lock(&ram_list.mutex);
823 void qemu_mutex_unlock_ramlist(void)
825 qemu_mutex_unlock(&ram_list.mutex);
828 #if defined(__linux__) && !defined(TARGET_S390X)
830 #include <sys/vfs.h>
832 #define HUGETLBFS_MAGIC 0x958458f6
834 static long gethugepagesize(const char *path)
836 struct statfs fs;
837 int ret;
839 do {
840 ret = statfs(path, &fs);
841 } while (ret != 0 && errno == EINTR);
843 if (ret != 0) {
844 perror(path);
845 return 0;
848 if (fs.f_type != HUGETLBFS_MAGIC)
849 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
851 return fs.f_bsize;
854 static void *file_ram_alloc(RAMBlock *block,
855 ram_addr_t memory,
856 const char *path)
858 char *filename;
859 char *sanitized_name;
860 char *c;
861 void *area;
862 int fd;
863 #ifdef MAP_POPULATE
864 int flags;
865 #endif
866 unsigned long hpagesize;
868 hpagesize = gethugepagesize(path);
869 if (!hpagesize) {
870 return NULL;
873 if (memory < hpagesize) {
874 return NULL;
877 if (kvm_enabled() && !kvm_has_sync_mmu()) {
878 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
879 return NULL;
882 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
883 sanitized_name = g_strdup(block->mr->name);
884 for (c = sanitized_name; *c != '\0'; c++) {
885 if (*c == '/')
886 *c = '_';
889 filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
890 sanitized_name);
891 g_free(sanitized_name);
893 fd = mkstemp(filename);
894 if (fd < 0) {
895 perror("unable to create backing store for hugepages");
896 g_free(filename);
897 return NULL;
899 unlink(filename);
900 g_free(filename);
902 memory = (memory+hpagesize-1) & ~(hpagesize-1);
905 * ftruncate is not supported by hugetlbfs in older
906 * hosts, so don't bother bailing out on errors.
907 * If anything goes wrong with it under other filesystems,
908 * mmap will fail.
910 if (ftruncate(fd, memory))
911 perror("ftruncate");
913 #ifdef MAP_POPULATE
914 /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
915 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
916 * to sidestep this quirk.
918 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
919 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
920 #else
921 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
922 #endif
923 if (area == MAP_FAILED) {
924 perror("file_ram_alloc: can't mmap RAM pages");
925 close(fd);
926 return (NULL);
928 block->fd = fd;
929 return area;
931 #endif
933 static ram_addr_t find_ram_offset(ram_addr_t size)
935 RAMBlock *block, *next_block;
936 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
938 assert(size != 0); /* it would hand out same offset multiple times */
940 if (QTAILQ_EMPTY(&ram_list.blocks))
941 return 0;
943 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
944 ram_addr_t end, next = RAM_ADDR_MAX;
946 end = block->offset + block->length;
948 QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
949 if (next_block->offset >= end) {
950 next = MIN(next, next_block->offset);
953 if (next - end >= size && next - end < mingap) {
954 offset = end;
955 mingap = next - end;
959 if (offset == RAM_ADDR_MAX) {
960 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
961 (uint64_t)size);
962 abort();
965 return offset;
968 ram_addr_t last_ram_offset(void)
970 RAMBlock *block;
971 ram_addr_t last = 0;
973 QTAILQ_FOREACH(block, &ram_list.blocks, next)
974 last = MAX(last, block->offset + block->length);
976 return last;
979 static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
981 int ret;
982 QemuOpts *machine_opts;
984 /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
985 machine_opts = qemu_opts_find(qemu_find_opts("machine"), 0);
986 if (machine_opts &&
987 !qemu_opt_get_bool(machine_opts, "dump-guest-core", true)) {
988 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
989 if (ret) {
990 perror("qemu_madvise");
991 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
992 "but dump_guest_core=off specified\n");
997 void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
999 RAMBlock *new_block, *block;
1001 new_block = NULL;
1002 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1003 if (block->offset == addr) {
1004 new_block = block;
1005 break;
1008 assert(new_block);
1009 assert(!new_block->idstr[0]);
1011 if (dev) {
1012 char *id = qdev_get_dev_path(dev);
1013 if (id) {
1014 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
1015 g_free(id);
1018 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1020 /* This assumes the iothread lock is taken here too. */
1021 qemu_mutex_lock_ramlist();
1022 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1023 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
1024 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1025 new_block->idstr);
1026 abort();
1029 qemu_mutex_unlock_ramlist();
1032 static int memory_try_enable_merging(void *addr, size_t len)
1034 QemuOpts *opts;
1036 opts = qemu_opts_find(qemu_find_opts("machine"), 0);
1037 if (opts && !qemu_opt_get_bool(opts, "mem-merge", true)) {
1038 /* disabled by the user */
1039 return 0;
1042 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1045 ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
1046 MemoryRegion *mr)
1048 RAMBlock *block, *new_block;
1050 size = TARGET_PAGE_ALIGN(size);
1051 new_block = g_malloc0(sizeof(*new_block));
1053 /* This assumes the iothread lock is taken here too. */
1054 qemu_mutex_lock_ramlist();
1055 new_block->mr = mr;
1056 new_block->offset = find_ram_offset(size);
1057 if (host) {
1058 new_block->host = host;
1059 new_block->flags |= RAM_PREALLOC_MASK;
1060 } else {
1061 if (mem_path) {
1062 #if defined (__linux__) && !defined(TARGET_S390X)
1063 new_block->host = file_ram_alloc(new_block, size, mem_path);
1064 if (!new_block->host) {
1065 new_block->host = qemu_anon_ram_alloc(size);
1066 memory_try_enable_merging(new_block->host, size);
1068 #else
1069 fprintf(stderr, "-mem-path option unsupported\n");
1070 exit(1);
1071 #endif
1072 } else {
1073 if (xen_enabled()) {
1074 xen_ram_alloc(new_block->offset, size, mr);
1075 } else if (kvm_enabled()) {
1076 /* some s390/kvm configurations have special constraints */
1077 new_block->host = kvm_ram_alloc(size);
1078 } else {
1079 new_block->host = qemu_anon_ram_alloc(size);
1081 memory_try_enable_merging(new_block->host, size);
1084 new_block->length = size;
1086 /* Keep the list sorted from biggest to smallest block. */
1087 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1088 if (block->length < new_block->length) {
1089 break;
1092 if (block) {
1093 QTAILQ_INSERT_BEFORE(block, new_block, next);
1094 } else {
1095 QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
1097 ram_list.mru_block = NULL;
1099 ram_list.version++;
1100 qemu_mutex_unlock_ramlist();
1102 ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
1103 last_ram_offset() >> TARGET_PAGE_BITS);
1104 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
1105 0, size >> TARGET_PAGE_BITS);
1106 cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);
1108 qemu_ram_setup_dump(new_block->host, size);
1109 qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);
1111 if (kvm_enabled())
1112 kvm_setup_guest_memory(new_block->host, size);
1114 return new_block->offset;
1117 ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
1119 return qemu_ram_alloc_from_ptr(size, NULL, mr);
1122 void qemu_ram_free_from_ptr(ram_addr_t addr)
1124 RAMBlock *block;
1126 /* This assumes the iothread lock is taken here too. */
1127 qemu_mutex_lock_ramlist();
1128 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1129 if (addr == block->offset) {
1130 QTAILQ_REMOVE(&ram_list.blocks, block, next);
1131 ram_list.mru_block = NULL;
1132 ram_list.version++;
1133 g_free(block);
1134 break;
1137 qemu_mutex_unlock_ramlist();
1140 void qemu_ram_free(ram_addr_t addr)
1142 RAMBlock *block;
1144 /* This assumes the iothread lock is taken here too. */
1145 qemu_mutex_lock_ramlist();
1146 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1147 if (addr == block->offset) {
1148 QTAILQ_REMOVE(&ram_list.blocks, block, next);
1149 ram_list.mru_block = NULL;
1150 ram_list.version++;
1151 if (block->flags & RAM_PREALLOC_MASK) {
1153 } else if (mem_path) {
1154 #if defined (__linux__) && !defined(TARGET_S390X)
1155 if (block->fd) {
1156 munmap(block->host, block->length);
1157 close(block->fd);
1158 } else {
1159 qemu_anon_ram_free(block->host, block->length);
1161 #else
1162 abort();
1163 #endif
1164 } else {
1165 if (xen_enabled()) {
1166 xen_invalidate_map_cache_entry(block->host);
1167 } else {
1168 qemu_anon_ram_free(block->host, block->length);
1171 g_free(block);
1172 break;
1175 qemu_mutex_unlock_ramlist();
1179 #ifndef _WIN32
1180 void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1182 RAMBlock *block;
1183 ram_addr_t offset;
1184 int flags;
1185 void *area, *vaddr;
1187 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1188 offset = addr - block->offset;
1189 if (offset < block->length) {
1190 vaddr = block->host + offset;
1191 if (block->flags & RAM_PREALLOC_MASK) {
1193 } else {
1194 flags = MAP_FIXED;
1195 munmap(vaddr, length);
1196 if (mem_path) {
1197 #if defined(__linux__) && !defined(TARGET_S390X)
1198 if (block->fd) {
1199 #ifdef MAP_POPULATE
1200 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
1201 MAP_PRIVATE;
1202 #else
1203 flags |= MAP_PRIVATE;
1204 #endif
1205 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1206 flags, block->fd, offset);
1207 } else {
1208 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1209 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1210 flags, -1, 0);
1212 #else
1213 abort();
1214 #endif
1215 } else {
1216 #if defined(TARGET_S390X) && defined(CONFIG_KVM)
1217 flags |= MAP_SHARED | MAP_ANONYMOUS;
1218 area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
1219 flags, -1, 0);
1220 #else
1221 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1222 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1223 flags, -1, 0);
1224 #endif
1226 if (area != vaddr) {
1227 fprintf(stderr, "Could not remap addr: "
1228 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
1229 length, addr);
1230 exit(1);
1232 memory_try_enable_merging(vaddr, length);
1233 qemu_ram_setup_dump(vaddr, length);
1235 return;
1239 #endif /* !_WIN32 */
1241 /* Return a host pointer to ram allocated with qemu_ram_alloc.
1242 With the exception of the softmmu code in this file, this should
1243 only be used for local memory (e.g. video ram) that the device owns,
1244 and knows it isn't going to access beyond the end of the block.
1246 It should not be used for general purpose DMA.
1247 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
1249 void *qemu_get_ram_ptr(ram_addr_t addr)
1251 RAMBlock *block;
1253 /* The list is protected by the iothread lock here. */
1254 block = ram_list.mru_block;
1255 if (block && addr - block->offset < block->length) {
1256 goto found;
1258 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1259 if (addr - block->offset < block->length) {
1260 goto found;
1264 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1265 abort();
1267 found:
1268 ram_list.mru_block = block;
1269 if (xen_enabled()) {
1270 /* We need to check if the requested address is in the RAM
1271 * because we don't want to map the entire memory in QEMU.
1272 * In that case just map until the end of the page.
1274 if (block->offset == 0) {
1275 return xen_map_cache(addr, 0, 0);
1276 } else if (block->host == NULL) {
1277 block->host =
1278 xen_map_cache(block->offset, block->length, 1);
1281 return block->host + (addr - block->offset);
1284 /* Return a host pointer to ram allocated with qemu_ram_alloc. Same as
1285 * qemu_get_ram_ptr but do not touch ram_list.mru_block.
1287 * ??? Is this still necessary?
1289 static void *qemu_safe_ram_ptr(ram_addr_t addr)
1291 RAMBlock *block;
1293 /* The list is protected by the iothread lock here. */
1294 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1295 if (addr - block->offset < block->length) {
1296 if (xen_enabled()) {
1297 /* We need to check if the requested address is in the RAM
1298 * because we don't want to map the entire memory in QEMU.
1299 * In that case just map until the end of the page.
1301 if (block->offset == 0) {
1302 return xen_map_cache(addr, 0, 0);
1303 } else if (block->host == NULL) {
1304 block->host =
1305 xen_map_cache(block->offset, block->length, 1);
1308 return block->host + (addr - block->offset);
1312 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1313 abort();
1315 return NULL;
1318 /* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
1319 * but takes a size argument */
1320 static void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
1322 if (*size == 0) {
1323 return NULL;
1325 if (xen_enabled()) {
1326 return xen_map_cache(addr, *size, 1);
1327 } else {
1328 RAMBlock *block;
1330 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1331 if (addr - block->offset < block->length) {
1332 if (addr - block->offset + *size > block->length)
1333 *size = block->length - addr + block->offset;
1334 return block->host + (addr - block->offset);
1338 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1339 abort();
1343 int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
1345 RAMBlock *block;
1346 uint8_t *host = ptr;
1348 if (xen_enabled()) {
1349 *ram_addr = xen_ram_addr_from_mapcache(ptr);
1350 return 0;
1353 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1354 /* This case appears when the block is not mapped. */
1355 if (block->host == NULL) {
1356 continue;
1358 if (host - block->host < block->length) {
1359 *ram_addr = block->offset + (host - block->host);
1360 return 0;
1364 return -1;
1367 /* Some of the softmmu routines need to translate from a host pointer
1368 (typically a TLB entry) back to a ram offset. */
1369 ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
1371 ram_addr_t ram_addr;
1373 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
1374 fprintf(stderr, "Bad ram pointer %p\n", ptr);
1375 abort();
1377 return ram_addr;
1380 static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
1381 unsigned size)
1383 #ifdef DEBUG_UNASSIGNED
1384 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
1385 #endif
1386 #if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
1387 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
1388 #endif
1389 return 0;
1392 static void unassigned_mem_write(void *opaque, hwaddr addr,
1393 uint64_t val, unsigned size)
1395 #ifdef DEBUG_UNASSIGNED
1396 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
1397 #endif
1398 #if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
1399 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
1400 #endif
1403 static const MemoryRegionOps unassigned_mem_ops = {
1404 .read = unassigned_mem_read,
1405 .write = unassigned_mem_write,
1406 .endianness = DEVICE_NATIVE_ENDIAN,
1409 static uint64_t error_mem_read(void *opaque, hwaddr addr,
1410 unsigned size)
1412 abort();
1415 static void error_mem_write(void *opaque, hwaddr addr,
1416 uint64_t value, unsigned size)
1418 abort();
1421 static const MemoryRegionOps error_mem_ops = {
1422 .read = error_mem_read,
1423 .write = error_mem_write,
1424 .endianness = DEVICE_NATIVE_ENDIAN,
1427 static const MemoryRegionOps rom_mem_ops = {
1428 .read = error_mem_read,
1429 .write = unassigned_mem_write,
1430 .endianness = DEVICE_NATIVE_ENDIAN,
1433 static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
1434 uint64_t val, unsigned size)
1436 int dirty_flags;
1437 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
1438 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
1439 #if !defined(CONFIG_USER_ONLY)
1440 tb_invalidate_phys_page_fast(ram_addr, size);
1441 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
1442 #endif
1444 switch (size) {
1445 case 1:
1446 stb_p(qemu_get_ram_ptr(ram_addr), val);
1447 break;
1448 case 2:
1449 stw_p(qemu_get_ram_ptr(ram_addr), val);
1450 break;
1451 case 4:
1452 stl_p(qemu_get_ram_ptr(ram_addr), val);
1453 break;
1454 default:
1455 abort();
1457 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
1458 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
1459 /* we remove the notdirty callback only if the code has been
1460 flushed */
1461 if (dirty_flags == 0xff)
1462 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
1465 static const MemoryRegionOps notdirty_mem_ops = {
1466 .read = error_mem_read,
1467 .write = notdirty_mem_write,
1468 .endianness = DEVICE_NATIVE_ENDIAN,
1471 /* Generate a debug exception if a watchpoint has been hit. */
1472 static void check_watchpoint(int offset, int len_mask, int flags)
1474 CPUArchState *env = cpu_single_env;
1475 target_ulong pc, cs_base;
1476 target_ulong vaddr;
1477 CPUWatchpoint *wp;
1478 int cpu_flags;
1480 if (env->watchpoint_hit) {
1481 /* We re-entered the check after replacing the TB. Now raise
1482 * the debug interrupt so that it will trigger after the
1483 * current instruction. */
1484 cpu_interrupt(ENV_GET_CPU(env), CPU_INTERRUPT_DEBUG);
1485 return;
1487 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
1488 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1489 if ((vaddr == (wp->vaddr & len_mask) ||
1490 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
1491 wp->flags |= BP_WATCHPOINT_HIT;
1492 if (!env->watchpoint_hit) {
1493 env->watchpoint_hit = wp;
1494 tb_check_watchpoint(env);
1495 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
1496 env->exception_index = EXCP_DEBUG;
1497 cpu_loop_exit(env);
1498 } else {
1499 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
1500 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
1501 cpu_resume_from_signal(env, NULL);
1504 } else {
1505 wp->flags &= ~BP_WATCHPOINT_HIT;
1510 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
1511 so these check for a hit then pass through to the normal out-of-line
1512 phys routines. */
1513 static uint64_t watch_mem_read(void *opaque, hwaddr addr,
1514 unsigned size)
1516 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
1517 switch (size) {
1518 case 1: return ldub_phys(addr);
1519 case 2: return lduw_phys(addr);
1520 case 4: return ldl_phys(addr);
1521 default: abort();
1525 static void watch_mem_write(void *opaque, hwaddr addr,
1526 uint64_t val, unsigned size)
1528 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
1529 switch (size) {
1530 case 1:
1531 stb_phys(addr, val);
1532 break;
1533 case 2:
1534 stw_phys(addr, val);
1535 break;
1536 case 4:
1537 stl_phys(addr, val);
1538 break;
1539 default: abort();
1543 static const MemoryRegionOps watch_mem_ops = {
1544 .read = watch_mem_read,
1545 .write = watch_mem_write,
1546 .endianness = DEVICE_NATIVE_ENDIAN,
1549 static uint64_t subpage_read(void *opaque, hwaddr addr,
1550 unsigned len)
1552 subpage_t *mmio = opaque;
1553 unsigned int idx = SUBPAGE_IDX(addr);
1554 MemoryRegionSection *section;
1555 #if defined(DEBUG_SUBPAGE)
1556 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
1557 mmio, len, addr, idx);
1558 #endif
1560 section = &phys_sections[mmio->sub_section[idx]];
1561 addr += mmio->base;
1562 addr -= section->offset_within_address_space;
1563 addr += section->offset_within_region;
1564 return io_mem_read(section->mr, addr, len);
1567 static void subpage_write(void *opaque, hwaddr addr,
1568 uint64_t value, unsigned len)
1570 subpage_t *mmio = opaque;
1571 unsigned int idx = SUBPAGE_IDX(addr);
1572 MemoryRegionSection *section;
1573 #if defined(DEBUG_SUBPAGE)
1574 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
1575 " idx %d value %"PRIx64"\n",
1576 __func__, mmio, len, addr, idx, value);
1577 #endif
1579 section = &phys_sections[mmio->sub_section[idx]];
1580 addr += mmio->base;
1581 addr -= section->offset_within_address_space;
1582 addr += section->offset_within_region;
1583 io_mem_write(section->mr, addr, value, len);
1586 static const MemoryRegionOps subpage_ops = {
1587 .read = subpage_read,
1588 .write = subpage_write,
1589 .endianness = DEVICE_NATIVE_ENDIAN,
1592 static uint64_t subpage_ram_read(void *opaque, hwaddr addr,
1593 unsigned size)
1595 ram_addr_t raddr = addr;
1596 void *ptr = qemu_get_ram_ptr(raddr);
1597 switch (size) {
1598 case 1: return ldub_p(ptr);
1599 case 2: return lduw_p(ptr);
1600 case 4: return ldl_p(ptr);
1601 default: abort();
1605 static void subpage_ram_write(void *opaque, hwaddr addr,
1606 uint64_t value, unsigned size)
1608 ram_addr_t raddr = addr;
1609 void *ptr = qemu_get_ram_ptr(raddr);
1610 switch (size) {
1611 case 1: return stb_p(ptr, value);
1612 case 2: return stw_p(ptr, value);
1613 case 4: return stl_p(ptr, value);
1614 default: abort();
1618 static const MemoryRegionOps subpage_ram_ops = {
1619 .read = subpage_ram_read,
1620 .write = subpage_ram_write,
1621 .endianness = DEVICE_NATIVE_ENDIAN,
1624 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
1625 uint16_t section)
1627 int idx, eidx;
1629 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
1630 return -1;
1631 idx = SUBPAGE_IDX(start);
1632 eidx = SUBPAGE_IDX(end);
1633 #if defined(DEBUG_SUBPAGE)
634 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
635 __func__, mmio, start, end, idx, eidx, section);
1636 #endif
1637 if (memory_region_is_ram(phys_sections[section].mr)) {
1638 MemoryRegionSection new_section = phys_sections[section];
1639 new_section.mr = &io_mem_subpage_ram;
1640 section = phys_section_add(&new_section);
1642 for (; idx <= eidx; idx++) {
1643 mmio->sub_section[idx] = section;
1646 return 0;
1649 static subpage_t *subpage_init(hwaddr base)
1651 subpage_t *mmio;
1653 mmio = g_malloc0(sizeof(subpage_t));
1655 mmio->base = base;
1656 memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
1657 "subpage", TARGET_PAGE_SIZE);
1658 mmio->iomem.subpage = true;
1659 #if defined(DEBUG_SUBPAGE)
660 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
661 mmio, base, TARGET_PAGE_SIZE);
1662 #endif
1663 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned);
1665 return mmio;
1668 static uint16_t dummy_section(MemoryRegion *mr)
1670 MemoryRegionSection section = {
1671 .mr = mr,
1672 .offset_within_address_space = 0,
1673 .offset_within_region = 0,
1674 .size = UINT64_MAX,
1677 return phys_section_add(&section);
1680 MemoryRegion *iotlb_to_region(hwaddr index)
1682 return phys_sections[index & ~TARGET_PAGE_MASK].mr;
1685 static void io_mem_init(void)
1687 memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
1688 memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
1689 memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
1690 "unassigned", UINT64_MAX);
1691 memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
1692 "notdirty", UINT64_MAX);
1693 memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
1694 "subpage-ram", UINT64_MAX);
1695 memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
1696 "watch", UINT64_MAX);
1699 static void mem_begin(MemoryListener *listener)
1701 AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
1703 destroy_all_mappings(d);
1704 d->phys_map.ptr = PHYS_MAP_NODE_NIL;
1707 static void core_begin(MemoryListener *listener)
1709 phys_sections_clear();
1710 phys_section_unassigned = dummy_section(&io_mem_unassigned);
1711 phys_section_notdirty = dummy_section(&io_mem_notdirty);
1712 phys_section_rom = dummy_section(&io_mem_rom);
1713 phys_section_watch = dummy_section(&io_mem_watch);
1716 static void tcg_commit(MemoryListener *listener)
1718 CPUArchState *env;
1720 /* since each CPU stores ram addresses in its TLB cache, we must
1721 reset the modified entries */
1722 /* XXX: slow ! */
1723 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1724 tlb_flush(env, 1);
1728 static void core_log_global_start(MemoryListener *listener)
1730 cpu_physical_memory_set_dirty_tracking(1);
1733 static void core_log_global_stop(MemoryListener *listener)
1735 cpu_physical_memory_set_dirty_tracking(0);
1738 static void io_region_add(MemoryListener *listener,
1739 MemoryRegionSection *section)
1741 MemoryRegionIORange *mrio = g_new(MemoryRegionIORange, 1);
1743 mrio->mr = section->mr;
1744 mrio->offset = section->offset_within_region;
1745 iorange_init(&mrio->iorange, &memory_region_iorange_ops,
1746 section->offset_within_address_space, section->size);
1747 ioport_register(&mrio->iorange);
1750 static void io_region_del(MemoryListener *listener,
1751 MemoryRegionSection *section)
1753 isa_unassign_ioport(section->offset_within_address_space, section->size);
1756 static MemoryListener core_memory_listener = {
1757 .begin = core_begin,
1758 .log_global_start = core_log_global_start,
1759 .log_global_stop = core_log_global_stop,
1760 .priority = 1,
1763 static MemoryListener io_memory_listener = {
1764 .region_add = io_region_add,
1765 .region_del = io_region_del,
1766 .priority = 0,
1769 static MemoryListener tcg_memory_listener = {
1770 .commit = tcg_commit,
1773 void address_space_init_dispatch(AddressSpace *as)
1775 AddressSpaceDispatch *d = g_new(AddressSpaceDispatch, 1);
1777 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };
1778 d->listener = (MemoryListener) {
1779 .begin = mem_begin,
1780 .region_add = mem_add,
1781 .region_nop = mem_add,
1782 .priority = 0,
1784 as->dispatch = d;
1785 memory_listener_register(&d->listener, as);
1788 void address_space_destroy_dispatch(AddressSpace *as)
1790 AddressSpaceDispatch *d = as->dispatch;
1792 memory_listener_unregister(&d->listener);
1793 destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
1794 g_free(d);
1795 as->dispatch = NULL;
1798 static void memory_map_init(void)
1800 system_memory = g_malloc(sizeof(*system_memory));
1801 memory_region_init(system_memory, "system", INT64_MAX);
1802 address_space_init(&address_space_memory, system_memory);
1803 address_space_memory.name = "memory";
1805 system_io = g_malloc(sizeof(*system_io));
1806 memory_region_init(system_io, "io", 65536);
1807 address_space_init(&address_space_io, system_io);
1808 address_space_io.name = "I/O";
1810 memory_listener_register(&core_memory_listener, &address_space_memory);
1811 memory_listener_register(&io_memory_listener, &address_space_io);
1812 memory_listener_register(&tcg_memory_listener, &address_space_memory);
1814 dma_context_init(&dma_context_memory, &address_space_memory,
1815 NULL, NULL, NULL);
1818 MemoryRegion *get_system_memory(void)
1820 return system_memory;
1823 MemoryRegion *get_system_io(void)
1825 return system_io;
1828 #endif /* !defined(CONFIG_USER_ONLY) */
1830 /* physical memory access (slow version, mainly for debug) */
1831 #if defined(CONFIG_USER_ONLY)
1832 int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
1833 uint8_t *buf, int len, int is_write)
1835 int l, flags;
1836 target_ulong page;
1837 void * p;
1839 while (len > 0) {
1840 page = addr & TARGET_PAGE_MASK;
1841 l = (page + TARGET_PAGE_SIZE) - addr;
1842 if (l > len)
1843 l = len;
1844 flags = page_get_flags(page);
1845 if (!(flags & PAGE_VALID))
1846 return -1;
1847 if (is_write) {
1848 if (!(flags & PAGE_WRITE))
1849 return -1;
1850 /* XXX: this code should not depend on lock_user */
1851 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
1852 return -1;
1853 memcpy(p, buf, l);
1854 unlock_user(p, addr, l);
1855 } else {
1856 if (!(flags & PAGE_READ))
1857 return -1;
1858 /* XXX: this code should not depend on lock_user */
1859 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
1860 return -1;
1861 memcpy(buf, p, l);
1862 unlock_user(p, addr, 0);
1864 len -= l;
1865 buf += l;
1866 addr += l;
1868 return 0;
1871 #else
1873 static void invalidate_and_set_dirty(hwaddr addr,
1874 hwaddr length)
1876 if (!cpu_physical_memory_is_dirty(addr)) {
1877 /* invalidate code */
1878 tb_invalidate_phys_page_range(addr, addr + length, 0);
1879 /* set dirty bit */
1880 cpu_physical_memory_set_dirty_flags(addr, (0xff & ~CODE_DIRTY_FLAG));
1882 xen_modified_memory(addr, length);
1885 void address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
1886 int len, bool is_write)
1888 AddressSpaceDispatch *d = as->dispatch;
1889 int l;
1890 uint8_t *ptr;
1891 uint32_t val;
1892 hwaddr page;
1893 MemoryRegionSection *section;
1895 while (len > 0) {
1896 page = addr & TARGET_PAGE_MASK;
1897 l = (page + TARGET_PAGE_SIZE) - addr;
1898 if (l > len)
1899 l = len;
1900 section = phys_page_find(d, page >> TARGET_PAGE_BITS);
1902 if (is_write) {
1903 if (!memory_region_is_ram(section->mr)) {
1904 hwaddr addr1;
1905 addr1 = memory_region_section_addr(section, addr);
1906 /* XXX: could force cpu_single_env to NULL to avoid
1907 potential bugs */
1908 if (l >= 4 && ((addr1 & 3) == 0)) {
1909 /* 32 bit write access */
1910 val = ldl_p(buf);
1911 io_mem_write(section->mr, addr1, val, 4);
1912 l = 4;
1913 } else if (l >= 2 && ((addr1 & 1) == 0)) {
1914 /* 16 bit write access */
1915 val = lduw_p(buf);
1916 io_mem_write(section->mr, addr1, val, 2);
1917 l = 2;
1918 } else {
1919 /* 8 bit write access */
1920 val = ldub_p(buf);
1921 io_mem_write(section->mr, addr1, val, 1);
1922 l = 1;
1924 } else if (!section->readonly) {
1925 ram_addr_t addr1;
1926 addr1 = memory_region_get_ram_addr(section->mr)
1927 + memory_region_section_addr(section, addr);
1928 /* RAM case */
1929 ptr = qemu_get_ram_ptr(addr1);
1930 memcpy(ptr, buf, l);
1931 invalidate_and_set_dirty(addr1, l);
1933 } else {
1934 if (!(memory_region_is_ram(section->mr) ||
1935 memory_region_is_romd(section->mr))) {
1936 hwaddr addr1;
1937 /* I/O case */
1938 addr1 = memory_region_section_addr(section, addr);
1939 if (l >= 4 && ((addr1 & 3) == 0)) {
1940 /* 32 bit read access */
1941 val = io_mem_read(section->mr, addr1, 4);
1942 stl_p(buf, val);
1943 l = 4;
1944 } else if (l >= 2 && ((addr1 & 1) == 0)) {
1945 /* 16 bit read access */
1946 val = io_mem_read(section->mr, addr1, 2);
1947 stw_p(buf, val);
1948 l = 2;
1949 } else {
1950 /* 8 bit read access */
1951 val = io_mem_read(section->mr, addr1, 1);
1952 stb_p(buf, val);
1953 l = 1;
1955 } else {
1956 /* RAM case */
1957 ptr = qemu_get_ram_ptr(section->mr->ram_addr
1958 + memory_region_section_addr(section,
1959 addr));
1960 memcpy(buf, ptr, l);
1963 len -= l;
1964 buf += l;
1965 addr += l;
1969 void address_space_write(AddressSpace *as, hwaddr addr,
1970 const uint8_t *buf, int len)
1972 address_space_rw(as, addr, (uint8_t *)buf, len, true);
1976 * address_space_read: read from an address space.
1978 * @as: #AddressSpace to be accessed
1979 * @addr: address within that address space
1980 * @buf: buffer with the data transferred
1982 void address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
1984 address_space_rw(as, addr, buf, len, false);
1988 void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
1989 int len, int is_write)
1991 return address_space_rw(&address_space_memory, addr, buf, len, is_write);
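/*
 * Usage sketch (hypothetical caller; guest_addr and buf are placeholder
 * names): device and board code normally reaches guest memory through these
 * wrappers rather than touching RAMBlocks directly, e.g.:
 *
 *     uint8_t buf[4];
 *     cpu_physical_memory_read(guest_addr, buf, sizeof(buf));
 *     buf[0] |= 0x80;
 *     cpu_physical_memory_write(guest_addr, buf, sizeof(buf));
 */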
1994 /* used for ROM loading : can write in RAM and ROM */
1995 void cpu_physical_memory_write_rom(hwaddr addr,
1996 const uint8_t *buf, int len)
1998 AddressSpaceDispatch *d = address_space_memory.dispatch;
1999 int l;
2000 uint8_t *ptr;
2001 hwaddr page;
2002 MemoryRegionSection *section;
2004 while (len > 0) {
2005 page = addr & TARGET_PAGE_MASK;
2006 l = (page + TARGET_PAGE_SIZE) - addr;
2007 if (l > len)
2008 l = len;
2009 section = phys_page_find(d, page >> TARGET_PAGE_BITS);
2011 if (!(memory_region_is_ram(section->mr) ||
2012 memory_region_is_romd(section->mr))) {
2013 /* do nothing */
2014 } else {
2015 unsigned long addr1;
2016 addr1 = memory_region_get_ram_addr(section->mr)
2017 + memory_region_section_addr(section, addr);
2018 /* ROM/RAM case */
2019 ptr = qemu_get_ram_ptr(addr1);
2020 memcpy(ptr, buf, l);
2021 invalidate_and_set_dirty(addr1, l);
2023 len -= l;
2024 buf += l;
2025 addr += l;
2029 typedef struct {
2030 void *buffer;
2031 hwaddr addr;
2032 hwaddr len;
2033 } BounceBuffer;
2035 static BounceBuffer bounce;
2037 typedef struct MapClient {
2038 void *opaque;
2039 void (*callback)(void *opaque);
2040 QLIST_ENTRY(MapClient) link;
2041 } MapClient;
2043 static QLIST_HEAD(map_client_list, MapClient) map_client_list
2044 = QLIST_HEAD_INITIALIZER(map_client_list);
2046 void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
2048 MapClient *client = g_malloc(sizeof(*client));
2050 client->opaque = opaque;
2051 client->callback = callback;
2052 QLIST_INSERT_HEAD(&map_client_list, client, link);
2053 return client;
2056 static void cpu_unregister_map_client(void *_client)
2058 MapClient *client = (MapClient *)_client;
2060 QLIST_REMOVE(client, link);
2061 g_free(client);
2064 static void cpu_notify_map_clients(void)
2066 MapClient *client;
2068 while (!QLIST_EMPTY(&map_client_list)) {
2069 client = QLIST_FIRST(&map_client_list);
2070 client->callback(client->opaque);
2071 cpu_unregister_map_client(client);
2075 /* Map a physical memory region into a host virtual address.
2076 * May map a subset of the requested range, given by and returned in *plen.
2077 * May return NULL if resources needed to perform the mapping are exhausted.
2078 * Use only for reads OR writes - not for read-modify-write operations.
2079 * Use cpu_register_map_client() to know when retrying the map operation is
2080 * likely to succeed.
2082 void *address_space_map(AddressSpace *as,
2083 hwaddr addr,
2084 hwaddr *plen,
2085 bool is_write)
2087 AddressSpaceDispatch *d = as->dispatch;
2088 hwaddr len = *plen;
2089 hwaddr todo = 0;
2090 int l;
2091 hwaddr page;
2092 MemoryRegionSection *section;
2093 ram_addr_t raddr = RAM_ADDR_MAX;
2094 ram_addr_t rlen;
2095 void *ret;
2097 while (len > 0) {
2098 page = addr & TARGET_PAGE_MASK;
2099 l = (page + TARGET_PAGE_SIZE) - addr;
2100 if (l > len)
2101 l = len;
2102 section = phys_page_find(d, page >> TARGET_PAGE_BITS);
2104 if (!(memory_region_is_ram(section->mr) && !section->readonly)) {
2105 if (todo || bounce.buffer) {
2106 break;
2108 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
2109 bounce.addr = addr;
2110 bounce.len = l;
2111 if (!is_write) {
2112 address_space_read(as, addr, bounce.buffer, l);
2115 *plen = l;
2116 return bounce.buffer;
2118 if (!todo) {
2119 raddr = memory_region_get_ram_addr(section->mr)
2120 + memory_region_section_addr(section, addr);
2123 len -= l;
2124 addr += l;
2125 todo += l;
2127 rlen = todo;
2128 ret = qemu_ram_ptr_length(raddr, &rlen);
2129 *plen = rlen;
2130 return ret;
2133 /* Unmaps a memory region previously mapped by address_space_map().
2134 * Will also mark the memory as dirty if is_write == 1. access_len gives
2135 * the amount of memory that was actually read or written by the caller.
2137 void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2138 int is_write, hwaddr access_len)
2140 if (buffer != bounce.buffer) {
2141 if (is_write) {
2142 ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
2143 while (access_len) {
2144 unsigned l;
2145 l = TARGET_PAGE_SIZE;
2146 if (l > access_len)
2147 l = access_len;
2148 invalidate_and_set_dirty(addr1, l);
2149 addr1 += l;
2150 access_len -= l;
2153 if (xen_enabled()) {
2154 xen_invalidate_map_cache_entry(buffer);
2156 return;
2158 if (is_write) {
2159 address_space_write(as, bounce.addr, bounce.buffer, access_len);
2161 qemu_vfree(bounce.buffer);
2162 bounce.buffer = NULL;
2163 cpu_notify_map_clients();
2166 void *cpu_physical_memory_map(hwaddr addr,
2167 hwaddr *plen,
2168 int is_write)
2170 return address_space_map(&address_space_memory, addr, plen, is_write);
2173 void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2174 int is_write, hwaddr access_len)
2176 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2179 /* warning: addr must be aligned */
2180 static inline uint32_t ldl_phys_internal(hwaddr addr,
2181 enum device_endian endian)
2183 uint8_t *ptr;
2184 uint32_t val;
2185 MemoryRegionSection *section;
2187 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
2189 if (!(memory_region_is_ram(section->mr) ||
2190 memory_region_is_romd(section->mr))) {
2191 /* I/O case */
2192 addr = memory_region_section_addr(section, addr);
2193 val = io_mem_read(section->mr, addr, 4);
2194 #if defined(TARGET_WORDS_BIGENDIAN)
2195 if (endian == DEVICE_LITTLE_ENDIAN) {
2196 val = bswap32(val);
2198 #else
2199 if (endian == DEVICE_BIG_ENDIAN) {
2200 val = bswap32(val);
2202 #endif
2203 } else {
2204 /* RAM case */
2205 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
2206 & TARGET_PAGE_MASK)
2207 + memory_region_section_addr(section, addr));
2208 switch (endian) {
2209 case DEVICE_LITTLE_ENDIAN:
2210 val = ldl_le_p(ptr);
2211 break;
2212 case DEVICE_BIG_ENDIAN:
2213 val = ldl_be_p(ptr);
2214 break;
2215 default:
2216 val = ldl_p(ptr);
2217 break;
2220 return val;
2223 uint32_t ldl_phys(hwaddr addr)
2225 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2228 uint32_t ldl_le_phys(hwaddr addr)
2230 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2233 uint32_t ldl_be_phys(hwaddr addr)
2235 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
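/*
 * Usage sketch (hypothetical addresses, not code from this file;
 * desc_gpa is a placeholder for a 4-byte aligned guest-physical
 * address): the _le/_be variants fix the byte order of the value in
 * guest memory regardless of the target's endianness, which is usually
 * what device emulation wants; the plain variant uses the target's
 * native order.
 *
 *     uint32_t magic = ldl_be_phys(desc_gpa);       // field stored BE
 *     uint32_t flags = ldl_le_phys(desc_gpa + 8);   // field stored LE
 *     uint32_t raw   = ldl_phys(desc_gpa + 4);      // target-native order
 */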
2238 /* warning: addr must be aligned */
2239 static inline uint64_t ldq_phys_internal(hwaddr addr,
2240 enum device_endian endian)
2242 uint8_t *ptr;
2243 uint64_t val;
2244 MemoryRegionSection *section;
2246 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
2248 if (!(memory_region_is_ram(section->mr) ||
2249 memory_region_is_romd(section->mr))) {
2250 /* I/O case */
2251 addr = memory_region_section_addr(section, addr);
2253 /* XXX This is broken when the device endianness differs from the CPU
2254 endianness. Fix this by also checking the "endian" argument here. */
2255 #ifdef TARGET_WORDS_BIGENDIAN
2256 val = io_mem_read(section->mr, addr, 4) << 32;
2257 val |= io_mem_read(section->mr, addr + 4, 4);
2258 #else
2259 val = io_mem_read(section->mr, addr, 4);
2260 val |= io_mem_read(section->mr, addr + 4, 4) << 32;
2261 #endif
2262 } else {
2263 /* RAM case */
2264 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
2265 & TARGET_PAGE_MASK)
2266 + memory_region_section_addr(section, addr));
2267 switch (endian) {
2268 case DEVICE_LITTLE_ENDIAN:
2269 val = ldq_le_p(ptr);
2270 break;
2271 case DEVICE_BIG_ENDIAN:
2272 val = ldq_be_p(ptr);
2273 break;
2274 default:
2275 val = ldq_p(ptr);
2276 break;
2279 return val;
2282 uint64_t ldq_phys(hwaddr addr)
2284 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2287 uint64_t ldq_le_phys(hwaddr addr)
2289 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2292 uint64_t ldq_be_phys(hwaddr addr)
2294 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
2297 /* XXX: optimize */
2298 uint32_t ldub_phys(hwaddr addr)
2300 uint8_t val;
2301 cpu_physical_memory_read(addr, &val, 1);
2302 return val;
2305 /* warning: addr must be aligned */
2306 static inline uint32_t lduw_phys_internal(hwaddr addr,
2307 enum device_endian endian)
2309 uint8_t *ptr;
2310 uint64_t val;
2311 MemoryRegionSection *section;
2313 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
2315 if (!(memory_region_is_ram(section->mr) ||
2316 memory_region_is_romd(section->mr))) {
2317 /* I/O case */
2318 addr = memory_region_section_addr(section, addr);
2319 val = io_mem_read(section->mr, addr, 2);
2320 #if defined(TARGET_WORDS_BIGENDIAN)
2321 if (endian == DEVICE_LITTLE_ENDIAN) {
2322 val = bswap16(val);
2324 #else
2325 if (endian == DEVICE_BIG_ENDIAN) {
2326 val = bswap16(val);
2328 #endif
2329 } else {
2330 /* RAM case */
2331 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
2332 & TARGET_PAGE_MASK)
2333 + memory_region_section_addr(section, addr));
2334 switch (endian) {
2335 case DEVICE_LITTLE_ENDIAN:
2336 val = lduw_le_p(ptr);
2337 break;
2338 case DEVICE_BIG_ENDIAN:
2339 val = lduw_be_p(ptr);
2340 break;
2341 default:
2342 val = lduw_p(ptr);
2343 break;
2346 return val;
2349 uint32_t lduw_phys(hwaddr addr)
2351 return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2354 uint32_t lduw_le_phys(hwaddr addr)
2356 return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2359 uint32_t lduw_be_phys(hwaddr addr)
2361 return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
2364 /* warning: addr must be aligned. The RAM page is not marked as dirty
2365 and the code inside it is not invalidated. This is useful if the dirty
2366 bits are used to track modified PTEs */
2367 void stl_phys_notdirty(hwaddr addr, uint32_t val)
2369 uint8_t *ptr;
2370 MemoryRegionSection *section;
2372 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
2374 if (!memory_region_is_ram(section->mr) || section->readonly) {
2375 addr = memory_region_section_addr(section, addr);
2376 if (memory_region_is_ram(section->mr)) {
2377 section = &phys_sections[phys_section_rom];
2379 io_mem_write(section->mr, addr, val, 4);
2380 } else {
2381 unsigned long addr1 = (memory_region_get_ram_addr(section->mr)
2382 & TARGET_PAGE_MASK)
2383 + memory_region_section_addr(section, addr);
2384 ptr = qemu_get_ram_ptr(addr1);
2385 stl_p(ptr, val);
2387 if (unlikely(in_migration)) {
2388 if (!cpu_physical_memory_is_dirty(addr1)) {
2389 /* invalidate code */
2390 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2391 /* set dirty bit */
2392 cpu_physical_memory_set_dirty_flags(
2393 addr1, (0xff & ~CODE_DIRTY_FLAG));
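/*
 * Sketch of the intended use (hypothetical target MMU code, not taken
 * from this file; pte_addr and PTE_ACCESSED_BIT are placeholders): a
 * softmmu page-table walker updating a guest PTE. stl_phys_notdirty()
 * keeps QEMU's own bookkeeping write from dirtying the page, so the
 * dirty bits still reflect only the guest's own modifications of its
 * page tables.
 *
 *     uint32_t pte = ldl_phys(pte_addr);
 *     if (!(pte & PTE_ACCESSED_BIT)) {
 *         stl_phys_notdirty(pte_addr, pte | PTE_ACCESSED_BIT);
 *     }
 */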
2399 /* warning: addr must be aligned */
2400 static inline void stl_phys_internal(hwaddr addr, uint32_t val,
2401 enum device_endian endian)
2403 uint8_t *ptr;
2404 MemoryRegionSection *section;
2406 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
2408 if (!memory_region_is_ram(section->mr) || section->readonly) {
2409 addr = memory_region_section_addr(section, addr);
2410 if (memory_region_is_ram(section->mr)) {
2411 section = &phys_sections[phys_section_rom];
2413 #if defined(TARGET_WORDS_BIGENDIAN)
2414 if (endian == DEVICE_LITTLE_ENDIAN) {
2415 val = bswap32(val);
2417 #else
2418 if (endian == DEVICE_BIG_ENDIAN) {
2419 val = bswap32(val);
2421 #endif
2422 io_mem_write(section->mr, addr, val, 4);
2423 } else {
2424 unsigned long addr1;
2425 addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
2426 + memory_region_section_addr(section, addr);
2427 /* RAM case */
2428 ptr = qemu_get_ram_ptr(addr1);
2429 switch (endian) {
2430 case DEVICE_LITTLE_ENDIAN:
2431 stl_le_p(ptr, val);
2432 break;
2433 case DEVICE_BIG_ENDIAN:
2434 stl_be_p(ptr, val);
2435 break;
2436 default:
2437 stl_p(ptr, val);
2438 break;
2440 invalidate_and_set_dirty(addr1, 4);
2444 void stl_phys(hwaddr addr, uint32_t val)
2446 stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
2449 void stl_le_phys(hwaddr addr, uint32_t val)
2451 stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
2454 void stl_be_phys(hwaddr addr, uint32_t val)
2456 stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
2459 /* XXX: optimize */
2460 void stb_phys(hwaddr addr, uint32_t val)
2462 uint8_t v = val;
2463 cpu_physical_memory_write(addr, &v, 1);
2466 /* warning: addr must be aligned */
2467 static inline void stw_phys_internal(hwaddr addr, uint32_t val,
2468 enum device_endian endian)
2470 uint8_t *ptr;
2471 MemoryRegionSection *section;
2473 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
2475 if (!memory_region_is_ram(section->mr) || section->readonly) {
2476 addr = memory_region_section_addr(section, addr);
2477 if (memory_region_is_ram(section->mr)) {
2478 section = &phys_sections[phys_section_rom];
2480 #if defined(TARGET_WORDS_BIGENDIAN)
2481 if (endian == DEVICE_LITTLE_ENDIAN) {
2482 val = bswap16(val);
2484 #else
2485 if (endian == DEVICE_BIG_ENDIAN) {
2486 val = bswap16(val);
2488 #endif
2489 io_mem_write(section->mr, addr, val, 2);
2490 } else {
2491 unsigned long addr1;
2492 addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
2493 + memory_region_section_addr(section, addr);
2494 /* RAM case */
2495 ptr = qemu_get_ram_ptr(addr1);
2496 switch (endian) {
2497 case DEVICE_LITTLE_ENDIAN:
2498 stw_le_p(ptr, val);
2499 break;
2500 case DEVICE_BIG_ENDIAN:
2501 stw_be_p(ptr, val);
2502 break;
2503 default:
2504 stw_p(ptr, val);
2505 break;
2507 invalidate_and_set_dirty(addr1, 2);
2511 void stw_phys(hwaddr addr, uint32_t val)
2513 stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
2516 void stw_le_phys(hwaddr addr, uint32_t val)
2518 stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
2521 void stw_be_phys(hwaddr addr, uint32_t val)
2523 stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
2526 /* XXX: optimize */
2527 void stq_phys(hwaddr addr, uint64_t val)
2529 val = tswap64(val);
2530 cpu_physical_memory_write(addr, &val, 8);
2533 void stq_le_phys(hwaddr addr, uint64_t val)
2535 val = cpu_to_le64(val);
2536 cpu_physical_memory_write(addr, &val, 8);
2539 void stq_be_phys(hwaddr addr, uint64_t val)
2541 val = cpu_to_be64(val);
2542 cpu_physical_memory_write(addr, &val, 8);
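/*
 * Usage sketch for the store helpers (hypothetical device code, not
 * taken from this file; ring_gpa, buffer_gpa, byte_count and status are
 * placeholders): writing a completion record whose layout is defined as
 * little-endian independently of the target CPU.
 *
 *     stq_le_phys(ring_gpa, buffer_gpa);       // 8-byte LE field
 *     stl_le_phys(ring_gpa + 8, byte_count);   // 4-byte LE field
 *     stw_le_phys(ring_gpa + 12, status);      // 2-byte LE field
 */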
2545 /* virtual memory access for debug (includes writing to ROM) */
2546 int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
2547 uint8_t *buf, int len, int is_write)
2549 int l;
2550 hwaddr phys_addr;
2551 target_ulong page;
2553 while (len > 0) {
2554 page = addr & TARGET_PAGE_MASK;
2555 phys_addr = cpu_get_phys_page_debug(env, page);
2556 /* if no physical page mapped, return an error */
2557 if (phys_addr == -1)
2558 return -1;
2559 l = (page + TARGET_PAGE_SIZE) - addr;
2560 if (l > len)
2561 l = len;
2562 phys_addr += (addr & ~TARGET_PAGE_MASK);
2563 if (is_write)
2564 cpu_physical_memory_write_rom(phys_addr, buf, l);
2565 else
2566 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
2567 len -= l;
2568 buf += l;
2569 addr += l;
2571 return 0;
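/*
 * Usage sketch (hypothetical debugger-style caller, not code from this
 * file; guest_va is a placeholder): read guest-virtual memory through
 * the current MMU mappings, as the gdbstub does.
 *
 *     uint8_t buf[16];
 *     if (cpu_memory_rw_debug(env, guest_va, buf, sizeof(buf), 0) < 0) {
 *         // no physical page is mapped at guest_va; report an error
 *     }
 */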
2573 #endif
2575 #if !defined(CONFIG_USER_ONLY)
2578 * A helper function for the _utterly broken_ virtio device model to find out if
2579 * it's running on a big endian machine. Don't do this at home kids!
2581 bool virtio_is_big_endian(void);
2582 bool virtio_is_big_endian(void)
2584 #if defined(TARGET_WORDS_BIGENDIAN)
2585 return true;
2586 #else
2587 return false;
2588 #endif
2591 #endif
2593 #ifndef CONFIG_USER_ONLY
2594 bool cpu_physical_memory_is_io(hwaddr phys_addr)
2596 MemoryRegionSection *section;
2598 section = phys_page_find(address_space_memory.dispatch,
2599 phys_addr >> TARGET_PAGE_BITS);
2601 return !(memory_region_is_ram(section->mr) ||
2602 memory_region_is_romd(section->mr));
2604 #endif