exec: make io_mem_unassigned private
[qemu-kvm.git] / exec.c
1 /*
2 * Virtual page mapping
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 #include "config.h"
20 #ifdef _WIN32
21 #include <windows.h>
22 #else
23 #include <sys/types.h>
24 #include <sys/mman.h>
25 #endif
27 #include "qemu-common.h"
28 #include "cpu.h"
29 #include "tcg.h"
30 #include "hw/hw.h"
31 #include "hw/qdev.h"
32 #include "qemu/osdep.h"
33 #include "sysemu/kvm.h"
34 #include "hw/xen/xen.h"
35 #include "qemu/timer.h"
36 #include "qemu/config-file.h"
37 #include "exec/memory.h"
38 #include "sysemu/dma.h"
39 #include "exec/address-spaces.h"
40 #if defined(CONFIG_USER_ONLY)
41 #include <qemu.h>
42 #else /* !CONFIG_USER_ONLY */
43 #include "sysemu/xen-mapcache.h"
44 #include "trace.h"
45 #endif
46 #include "exec/cpu-all.h"
48 #include "exec/cputlb.h"
49 #include "translate-all.h"
51 #include "exec/memory-internal.h"
53 //#define DEBUG_UNASSIGNED
54 //#define DEBUG_SUBPAGE
56 #if !defined(CONFIG_USER_ONLY)
57 int phys_ram_fd;
58 static int in_migration;
60 RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };
62 static MemoryRegion *system_memory;
63 static MemoryRegion *system_io;
65 AddressSpace address_space_io;
66 AddressSpace address_space_memory;
67 DMAContext dma_context_memory;
69 MemoryRegion io_mem_rom, io_mem_notdirty;
70 static MemoryRegion io_mem_unassigned, io_mem_subpage_ram;
72 #endif
74 CPUArchState *first_cpu;
75 /* current CPU in the current thread. It is only valid inside
76 cpu_exec() */
77 DEFINE_TLS(CPUArchState *,cpu_single_env);
78 /* 0 = Do not count executed instructions.
79 1 = Precise instruction counting.
80 2 = Adaptive rate instruction counting. */
81 int use_icount;
83 #if !defined(CONFIG_USER_ONLY)
85 static MemoryRegionSection *phys_sections;
86 static unsigned phys_sections_nb, phys_sections_nb_alloc;
87 static uint16_t phys_section_unassigned;
88 static uint16_t phys_section_notdirty;
89 static uint16_t phys_section_rom;
90 static uint16_t phys_section_watch;
92 /* Simple allocator for PhysPageEntry nodes */
93 static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
94 static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;
96 #define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)
98 static void io_mem_init(void);
99 static void memory_map_init(void);
100 static void *qemu_safe_ram_ptr(ram_addr_t addr);
102 static MemoryRegion io_mem_watch;
103 #endif
105 #if !defined(CONFIG_USER_ONLY)
107 static void phys_map_node_reserve(unsigned nodes)
109 if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
110 typedef PhysPageEntry Node[L2_SIZE];
111 phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
112 phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
113 phys_map_nodes_nb + nodes);
114 phys_map_nodes = g_renew(Node, phys_map_nodes,
115 phys_map_nodes_nb_alloc);
119 static uint16_t phys_map_node_alloc(void)
121 unsigned i;
122 uint16_t ret;
124 ret = phys_map_nodes_nb++;
125 assert(ret != PHYS_MAP_NODE_NIL);
126 assert(ret != phys_map_nodes_nb_alloc);
127 for (i = 0; i < L2_SIZE; ++i) {
128 phys_map_nodes[ret][i].is_leaf = 0;
129 phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
131 return ret;
134 static void phys_map_nodes_reset(void)
136 phys_map_nodes_nb = 0;
140 static void phys_page_set_level(PhysPageEntry *lp, hwaddr *index,
141 hwaddr *nb, uint16_t leaf,
142 int level)
144 PhysPageEntry *p;
145 int i;
146 hwaddr step = (hwaddr)1 << (level * L2_BITS);
148 if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
149 lp->ptr = phys_map_node_alloc();
150 p = phys_map_nodes[lp->ptr];
151 if (level == 0) {
152 for (i = 0; i < L2_SIZE; i++) {
153 p[i].is_leaf = 1;
154 p[i].ptr = phys_section_unassigned;
157 } else {
158 p = phys_map_nodes[lp->ptr];
160 lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];
162 while (*nb && lp < &p[L2_SIZE]) {
163 if ((*index & (step - 1)) == 0 && *nb >= step) {
164 lp->is_leaf = true;
165 lp->ptr = leaf;
166 *index += step;
167 *nb -= step;
168 } else {
169 phys_page_set_level(lp, index, nb, leaf, level - 1);
171 ++lp;
175 static void phys_page_set(AddressSpaceDispatch *d,
176 hwaddr index, hwaddr nb,
177 uint16_t leaf)
179 /* Wildly overreserve - it doesn't matter much. */
180 phys_map_node_reserve(3 * P_L2_LEVELS);
182 phys_page_set_level(&d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
185 MemoryRegionSection *phys_page_find(AddressSpaceDispatch *d, hwaddr index)
187 PhysPageEntry lp = d->phys_map;
188 PhysPageEntry *p;
189 int i;
191 for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
192 if (lp.ptr == PHYS_MAP_NODE_NIL) {
193 return &phys_sections[phys_section_unassigned];
195 p = phys_map_nodes[lp.ptr];
196 lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
198 return &phys_sections[lp.ptr];
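/* Note on the walk above (illustrative; L2_BITS/L2_SIZE and P_L2_LEVELS are
 * target-dependent): the page index is consumed L2_BITS bits at a time,
 * most-significant chunk first.  A non-leaf entry's .ptr selects the next
 * node in phys_map_nodes[]; a leaf entry's .ptr is an index into
 * phys_sections[].  Any missing node (PHYS_MAP_NODE_NIL) resolves to
 * phys_section_unassigned, so unmapped addresses fall through to
 * io_mem_unassigned.
 */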
201 bool memory_region_is_unassigned(MemoryRegion *mr)
203 return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
204 && mr != &io_mem_watch;
206 #endif
208 void cpu_exec_init_all(void)
210 #if !defined(CONFIG_USER_ONLY)
211 qemu_mutex_init(&ram_list.mutex);
212 memory_map_init();
213 io_mem_init();
214 #endif
217 #if !defined(CONFIG_USER_ONLY)
219 static int cpu_common_post_load(void *opaque, int version_id)
221 CPUState *cpu = opaque;
223 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
224 version_id is increased. */
225 cpu->interrupt_request &= ~0x01;
226 tlb_flush(cpu->env_ptr, 1);
228 return 0;
231 static const VMStateDescription vmstate_cpu_common = {
232 .name = "cpu_common",
233 .version_id = 1,
234 .minimum_version_id = 1,
235 .minimum_version_id_old = 1,
236 .post_load = cpu_common_post_load,
237 .fields = (VMStateField []) {
238 VMSTATE_UINT32(halted, CPUState),
239 VMSTATE_UINT32(interrupt_request, CPUState),
240 VMSTATE_END_OF_LIST()
243 #else
244 #define vmstate_cpu_common vmstate_dummy
245 #endif
247 CPUState *qemu_get_cpu(int index)
249 CPUArchState *env = first_cpu;
250 CPUState *cpu = NULL;
252 while (env) {
253 cpu = ENV_GET_CPU(env);
254 if (cpu->cpu_index == index) {
255 break;
257 env = env->next_cpu;
260 return env ? cpu : NULL;
263 void qemu_for_each_cpu(void (*func)(CPUState *cpu, void *data), void *data)
265 CPUArchState *env = first_cpu;
267 while (env) {
268 func(ENV_GET_CPU(env), data);
269 env = env->next_cpu;
273 void cpu_exec_init(CPUArchState *env)
275 CPUState *cpu = ENV_GET_CPU(env);
276 CPUClass *cc = CPU_GET_CLASS(cpu);
277 CPUArchState **penv;
278 int cpu_index;
280 #if defined(CONFIG_USER_ONLY)
281 cpu_list_lock();
282 #endif
283 env->next_cpu = NULL;
284 penv = &first_cpu;
285 cpu_index = 0;
286 while (*penv != NULL) {
287 penv = &(*penv)->next_cpu;
288 cpu_index++;
290 cpu->cpu_index = cpu_index;
291 cpu->numa_node = 0;
292 QTAILQ_INIT(&env->breakpoints);
293 QTAILQ_INIT(&env->watchpoints);
294 #ifndef CONFIG_USER_ONLY
295 cpu->thread_id = qemu_get_thread_id();
296 #endif
297 *penv = env;
298 #if defined(CONFIG_USER_ONLY)
299 cpu_list_unlock();
300 #endif
301 vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
302 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
303 register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
304 cpu_save, cpu_load, env);
305 assert(cc->vmsd == NULL);
306 #endif
307 if (cc->vmsd != NULL) {
308 vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
312 #if defined(TARGET_HAS_ICE)
313 #if defined(CONFIG_USER_ONLY)
314 static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
316 tb_invalidate_phys_page_range(pc, pc + 1, 0);
318 #else
319 static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
321 tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc) |
322 (pc & ~TARGET_PAGE_MASK));
324 #endif
325 #endif /* TARGET_HAS_ICE */
327 #if defined(CONFIG_USER_ONLY)
328 void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
333 int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
334 int flags, CPUWatchpoint **watchpoint)
336 return -ENOSYS;
338 #else
339 /* Add a watchpoint. */
340 int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
341 int flags, CPUWatchpoint **watchpoint)
343 target_ulong len_mask = ~(len - 1);
344 CPUWatchpoint *wp;
346 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
347 if ((len & (len - 1)) || (addr & ~len_mask) ||
348 len == 0 || len > TARGET_PAGE_SIZE) {
349 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
350 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
351 return -EINVAL;
353 wp = g_malloc(sizeof(*wp));
355 wp->vaddr = addr;
356 wp->len_mask = len_mask;
357 wp->flags = flags;
359 /* keep all GDB-injected watchpoints in front */
360 if (flags & BP_GDB)
361 QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
362 else
363 QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
365 tlb_flush_page(env, addr);
367 if (watchpoint)
368 *watchpoint = wp;
369 return 0;
372 /* Remove a specific watchpoint. */
373 int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
374 int flags)
376 target_ulong len_mask = ~(len - 1);
377 CPUWatchpoint *wp;
379 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
380 if (addr == wp->vaddr && len_mask == wp->len_mask
381 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
382 cpu_watchpoint_remove_by_ref(env, wp);
383 return 0;
386 return -ENOENT;
389 /* Remove a specific watchpoint by reference. */
390 void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
392 QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
394 tlb_flush_page(env, watchpoint->vaddr);
396 g_free(watchpoint);
399 /* Remove all matching watchpoints. */
400 void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
402 CPUWatchpoint *wp, *next;
404 QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
405 if (wp->flags & mask)
406 cpu_watchpoint_remove_by_ref(env, wp);
409 #endif
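/* Usage sketch (illustrative, not from the original file): a debug front end
 * such as the gdbstub arms a 4-byte write watchpoint.  As checked in
 * cpu_watchpoint_insert() above, the length must be a power of two no larger
 * than TARGET_PAGE_SIZE and the address must be aligned to it.
 */
#if 0
static int example_set_write_watch(CPUArchState *env, target_ulong addr)
{
    CPUWatchpoint *wp;
    int ret;

    ret = cpu_watchpoint_insert(env, addr, 4, BP_MEM_WRITE | BP_GDB, &wp);
    if (ret < 0) {
        return ret;     /* e.g. -EINVAL for a bad length or alignment */
    }
    /* ... run the guest; a hit is reported through check_watchpoint() ... */
    cpu_watchpoint_remove_by_ref(env, wp);
    return 0;
}
#endif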
411 /* Add a breakpoint. */
412 int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
413 CPUBreakpoint **breakpoint)
415 #if defined(TARGET_HAS_ICE)
416 CPUBreakpoint *bp;
418 bp = g_malloc(sizeof(*bp));
420 bp->pc = pc;
421 bp->flags = flags;
423 /* keep all GDB-injected breakpoints in front */
424 if (flags & BP_GDB)
425 QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
426 else
427 QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
429 breakpoint_invalidate(env, pc);
431 if (breakpoint)
432 *breakpoint = bp;
433 return 0;
434 #else
435 return -ENOSYS;
436 #endif
439 /* Remove a specific breakpoint. */
440 int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
442 #if defined(TARGET_HAS_ICE)
443 CPUBreakpoint *bp;
445 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
446 if (bp->pc == pc && bp->flags == flags) {
447 cpu_breakpoint_remove_by_ref(env, bp);
448 return 0;
451 return -ENOENT;
452 #else
453 return -ENOSYS;
454 #endif
457 /* Remove a specific breakpoint by reference. */
458 void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
460 #if defined(TARGET_HAS_ICE)
461 QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
463 breakpoint_invalidate(env, breakpoint->pc);
465 g_free(breakpoint);
466 #endif
469 /* Remove all matching breakpoints. */
470 void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
472 #if defined(TARGET_HAS_ICE)
473 CPUBreakpoint *bp, *next;
475 QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
476 if (bp->flags & mask)
477 cpu_breakpoint_remove_by_ref(env, bp);
479 #endif
482 /* enable or disable single step mode. EXCP_DEBUG is returned by the
483 CPU loop after each instruction */
484 void cpu_single_step(CPUArchState *env, int enabled)
486 #if defined(TARGET_HAS_ICE)
487 if (env->singlestep_enabled != enabled) {
488 env->singlestep_enabled = enabled;
489 if (kvm_enabled())
490 kvm_update_guest_debug(env, 0);
491 else {
492 /* must flush all the translated code to avoid inconsistencies */
493 /* XXX: only flush what is necessary */
494 tb_flush(env);
497 #endif
500 void cpu_exit(CPUArchState *env)
502 CPUState *cpu = ENV_GET_CPU(env);
504 cpu->exit_request = 1;
505 cpu->tcg_exit_req = 1;
508 void cpu_abort(CPUArchState *env, const char *fmt, ...)
510 va_list ap;
511 va_list ap2;
513 va_start(ap, fmt);
514 va_copy(ap2, ap);
515 fprintf(stderr, "qemu: fatal: ");
516 vfprintf(stderr, fmt, ap);
517 fprintf(stderr, "\n");
518 cpu_dump_state(env, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
519 if (qemu_log_enabled()) {
520 qemu_log("qemu: fatal: ");
521 qemu_log_vprintf(fmt, ap2);
522 qemu_log("\n");
523 log_cpu_state(env, CPU_DUMP_FPU | CPU_DUMP_CCOP);
524 qemu_log_flush();
525 qemu_log_close();
527 va_end(ap2);
528 va_end(ap);
529 #if defined(CONFIG_USER_ONLY)
531 struct sigaction act;
532 sigfillset(&act.sa_mask);
533 act.sa_handler = SIG_DFL;
534 sigaction(SIGABRT, &act, NULL);
536 #endif
537 abort();
540 CPUArchState *cpu_copy(CPUArchState *env)
542 CPUArchState *new_env = cpu_init(env->cpu_model_str);
543 CPUArchState *next_cpu = new_env->next_cpu;
544 #if defined(TARGET_HAS_ICE)
545 CPUBreakpoint *bp;
546 CPUWatchpoint *wp;
547 #endif
549 memcpy(new_env, env, sizeof(CPUArchState));
551 /* Preserve chaining. */
552 new_env->next_cpu = next_cpu;
554 /* Clone all break/watchpoints.
555 Note: Once we support ptrace with hw-debug register access, make sure
556 BP_CPU break/watchpoints are handled correctly on clone. */
557 QTAILQ_INIT(&env->breakpoints);
558 QTAILQ_INIT(&env->watchpoints);
559 #if defined(TARGET_HAS_ICE)
560 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
561 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
563 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
564 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
565 wp->flags, NULL);
567 #endif
569 return new_env;
572 #if !defined(CONFIG_USER_ONLY)
573 static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
574 uintptr_t length)
576 uintptr_t start1;
578 /* we modify the TLB cache so that the dirty bit will be set again
579 when accessing the range */
580 start1 = (uintptr_t)qemu_safe_ram_ptr(start);
581 /* Check that we don't span multiple blocks - this breaks the
582 address comparisons below. */
583 if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
584 != (end - 1) - start) {
585 abort();
587 cpu_tlb_reset_dirty_all(start1, length);
591 /* Note: start and end must be within the same ram block. */
592 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
593 int dirty_flags)
595 uintptr_t length;
597 start &= TARGET_PAGE_MASK;
598 end = TARGET_PAGE_ALIGN(end);
600 length = end - start;
601 if (length == 0)
602 return;
603 cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
605 if (tcg_enabled()) {
606 tlb_reset_dirty_range_all(start, end, length);
610 static int cpu_physical_memory_set_dirty_tracking(int enable)
612 int ret = 0;
613 in_migration = enable;
614 return ret;
617 hwaddr memory_region_section_get_iotlb(CPUArchState *env,
618 MemoryRegionSection *section,
619 target_ulong vaddr,
620 hwaddr paddr,
621 int prot,
622 target_ulong *address)
624 hwaddr iotlb;
625 CPUWatchpoint *wp;
627 if (memory_region_is_ram(section->mr)) {
628 /* Normal RAM. */
629 iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
630 + memory_region_section_addr(section, paddr);
631 if (!section->readonly) {
632 iotlb |= phys_section_notdirty;
633 } else {
634 iotlb |= phys_section_rom;
636 } else {
637 iotlb = section - phys_sections;
638 iotlb += memory_region_section_addr(section, paddr);
641 /* Make accesses to pages with watchpoints go via the
642 watchpoint trap routines. */
643 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
644 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
645 /* Avoid trapping reads of pages with a write breakpoint. */
646 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
647 iotlb = phys_section_watch + paddr;
648 *address |= TLB_MMIO;
649 break;
654 return iotlb;
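/* Note on the iotlb encoding above: for RAM the value is the page-aligned
 * ram address ORed with a small section index (phys_section_notdirty or
 * phys_section_rom); for MMIO it is the section's index in phys_sections[]
 * plus the offset of the page within the region.  phys_section_add() keeps
 * phys_sections_nb below TARGET_PAGE_SIZE, so the section index always fits
 * in the sub-page bits and iotlb_to_region() can recover the MemoryRegion
 * from "iotlb & ~TARGET_PAGE_MASK".
 */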
656 #endif /* defined(CONFIG_USER_ONLY) */
658 #if !defined(CONFIG_USER_ONLY)
660 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
661 typedef struct subpage_t {
662 MemoryRegion iomem;
663 hwaddr base;
664 uint16_t sub_section[TARGET_PAGE_SIZE];
665 } subpage_t;
667 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
668 uint16_t section);
669 static subpage_t *subpage_init(hwaddr base);
670 static void destroy_page_desc(uint16_t section_index)
672 MemoryRegionSection *section = &phys_sections[section_index];
673 MemoryRegion *mr = section->mr;
675 if (mr->subpage) {
676 subpage_t *subpage = container_of(mr, subpage_t, iomem);
677 memory_region_destroy(&subpage->iomem);
678 g_free(subpage);
682 static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
684 unsigned i;
685 PhysPageEntry *p;
687 if (lp->ptr == PHYS_MAP_NODE_NIL) {
688 return;
691 p = phys_map_nodes[lp->ptr];
692 for (i = 0; i < L2_SIZE; ++i) {
693 if (!p[i].is_leaf) {
694 destroy_l2_mapping(&p[i], level - 1);
695 } else {
696 destroy_page_desc(p[i].ptr);
699 lp->is_leaf = 0;
700 lp->ptr = PHYS_MAP_NODE_NIL;
703 static void destroy_all_mappings(AddressSpaceDispatch *d)
705 destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
706 phys_map_nodes_reset();
709 static uint16_t phys_section_add(MemoryRegionSection *section)
711 /* The physical section number is ORed with a page-aligned
712 * pointer to produce the iotlb entries. Thus it should
713 * never overflow into the page-aligned value.
715 assert(phys_sections_nb < TARGET_PAGE_SIZE);
717 if (phys_sections_nb == phys_sections_nb_alloc) {
718 phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
719 phys_sections = g_renew(MemoryRegionSection, phys_sections,
720 phys_sections_nb_alloc);
722 phys_sections[phys_sections_nb] = *section;
723 return phys_sections_nb++;
726 static void phys_sections_clear(void)
728 phys_sections_nb = 0;
731 static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
733 subpage_t *subpage;
734 hwaddr base = section->offset_within_address_space
735 & TARGET_PAGE_MASK;
736 MemoryRegionSection *existing = phys_page_find(d, base >> TARGET_PAGE_BITS);
737 MemoryRegionSection subsection = {
738 .offset_within_address_space = base,
739 .size = TARGET_PAGE_SIZE,
741 hwaddr start, end;
743 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
745 if (!(existing->mr->subpage)) {
746 subpage = subpage_init(base);
747 subsection.mr = &subpage->iomem;
748 phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
749 phys_section_add(&subsection));
750 } else {
751 subpage = container_of(existing->mr, subpage_t, iomem);
753 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
754 end = start + section->size - 1;
755 subpage_register(subpage, start, end, phys_section_add(section));
759 static void register_multipage(AddressSpaceDispatch *d, MemoryRegionSection *section)
761 hwaddr start_addr = section->offset_within_address_space;
762 ram_addr_t size = section->size;
763 hwaddr addr;
764 uint16_t section_index = phys_section_add(section);
766 assert(size);
768 addr = start_addr;
769 phys_page_set(d, addr >> TARGET_PAGE_BITS, size >> TARGET_PAGE_BITS,
770 section_index);
773 QEMU_BUILD_BUG_ON(TARGET_PHYS_ADDR_SPACE_BITS > MAX_PHYS_ADDR_SPACE_BITS)
775 static MemoryRegionSection limit(MemoryRegionSection section)
777 section.size = MIN(section.offset_within_address_space + section.size,
778 MAX_PHYS_ADDR + 1)
779 - section.offset_within_address_space;
781 return section;
784 static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
786 AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
787 MemoryRegionSection now = limit(*section), remain = limit(*section);
789 if ((now.offset_within_address_space & ~TARGET_PAGE_MASK)
790 || (now.size < TARGET_PAGE_SIZE)) {
791 now.size = MIN(TARGET_PAGE_ALIGN(now.offset_within_address_space)
792 - now.offset_within_address_space,
793 now.size);
794 register_subpage(d, &now);
795 remain.size -= now.size;
796 remain.offset_within_address_space += now.size;
797 remain.offset_within_region += now.size;
799 while (remain.size >= TARGET_PAGE_SIZE) {
800 now = remain;
801 if (remain.offset_within_region & ~TARGET_PAGE_MASK) {
802 now.size = TARGET_PAGE_SIZE;
803 register_subpage(d, &now);
804 } else {
805 now.size &= TARGET_PAGE_MASK;
806 register_multipage(d, &now);
808 remain.size -= now.size;
809 remain.offset_within_address_space += now.size;
810 remain.offset_within_region += now.size;
812 now = remain;
813 if (now.size) {
814 register_subpage(d, &now);
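/* Worked example for mem_add() above (illustrative, assuming a 4 KiB
 * TARGET_PAGE_SIZE and a page-aligned offset_within_region): a section
 * covering [0x1800, 0x5400) is split into a head subpage [0x1800, 0x2000),
 * three whole pages [0x2000, 0x5000) handled by register_multipage(), and a
 * tail subpage [0x5000, 0x5400).  Sub-page pieces go through
 * register_subpage() so that several regions can share one page of the
 * dispatch tree; if offset_within_region is not page-aligned, every page is
 * registered as a subpage instead.
 */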
818 void qemu_flush_coalesced_mmio_buffer(void)
820 if (kvm_enabled())
821 kvm_flush_coalesced_mmio_buffer();
824 void qemu_mutex_lock_ramlist(void)
826 qemu_mutex_lock(&ram_list.mutex);
829 void qemu_mutex_unlock_ramlist(void)
831 qemu_mutex_unlock(&ram_list.mutex);
834 #if defined(__linux__) && !defined(TARGET_S390X)
836 #include <sys/vfs.h>
838 #define HUGETLBFS_MAGIC 0x958458f6
840 static long gethugepagesize(const char *path)
842 struct statfs fs;
843 int ret;
845 do {
846 ret = statfs(path, &fs);
847 } while (ret != 0 && errno == EINTR);
849 if (ret != 0) {
850 perror(path);
851 return 0;
854 if (fs.f_type != HUGETLBFS_MAGIC)
855 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
857 return fs.f_bsize;
860 static void *file_ram_alloc(RAMBlock *block,
861 ram_addr_t memory,
862 const char *path)
864 char *filename;
865 char *sanitized_name;
866 char *c;
867 void *area;
868 int fd;
869 #ifdef MAP_POPULATE
870 int flags;
871 #endif
872 unsigned long hpagesize;
874 hpagesize = gethugepagesize(path);
875 if (!hpagesize) {
876 return NULL;
879 if (memory < hpagesize) {
880 return NULL;
883 if (kvm_enabled() && !kvm_has_sync_mmu()) {
884 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
885 return NULL;
888 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
889 sanitized_name = g_strdup(block->mr->name);
890 for (c = sanitized_name; *c != '\0'; c++) {
891 if (*c == '/')
892 *c = '_';
895 filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
896 sanitized_name);
897 g_free(sanitized_name);
899 fd = mkstemp(filename);
900 if (fd < 0) {
901 perror("unable to create backing store for hugepages");
902 g_free(filename);
903 return NULL;
905 unlink(filename);
906 g_free(filename);
908 memory = (memory+hpagesize-1) & ~(hpagesize-1);
911 * ftruncate is not supported by hugetlbfs in older
912 * hosts, so don't bother bailing out on errors.
913 * If anything goes wrong with it under other filesystems,
914 * mmap will fail.
916 if (ftruncate(fd, memory))
917 perror("ftruncate");
919 #ifdef MAP_POPULATE
920 /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
921 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
922 * to sidestep this quirk.
924 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
925 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
926 #else
927 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
928 #endif
929 if (area == MAP_FAILED) {
930 perror("file_ram_alloc: can't mmap RAM pages");
931 close(fd);
932 return (NULL);
934 block->fd = fd;
935 return area;
937 #endif
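/* Command-line context (illustrative): this path is taken when guest RAM is
 * backed by hugetlbfs, e.g. "-mem-path /dev/hugepages" (optionally with
 * "-mem-prealloc", which selects the MAP_POPULATE|MAP_SHARED mapping above).
 * gethugepagesize() then reports the page size of that mount.
 */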
939 static ram_addr_t find_ram_offset(ram_addr_t size)
941 RAMBlock *block, *next_block;
942 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
944 assert(size != 0); /* it would hand out same offset multiple times */
946 if (QTAILQ_EMPTY(&ram_list.blocks))
947 return 0;
949 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
950 ram_addr_t end, next = RAM_ADDR_MAX;
952 end = block->offset + block->length;
954 QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
955 if (next_block->offset >= end) {
956 next = MIN(next, next_block->offset);
959 if (next - end >= size && next - end < mingap) {
960 offset = end;
961 mingap = next - end;
965 if (offset == RAM_ADDR_MAX) {
966 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
967 (uint64_t)size);
968 abort();
971 return offset;
974 ram_addr_t last_ram_offset(void)
976 RAMBlock *block;
977 ram_addr_t last = 0;
979 QTAILQ_FOREACH(block, &ram_list.blocks, next)
980 last = MAX(last, block->offset + block->length);
982 return last;
985 static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
987 int ret;
988 QemuOpts *machine_opts;
990 /* Use MADV_DONTDUMP if the user doesn't want the guest memory in the core dump */
991 machine_opts = qemu_opts_find(qemu_find_opts("machine"), 0);
992 if (machine_opts &&
993 !qemu_opt_get_bool(machine_opts, "dump-guest-core", true)) {
994 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
995 if (ret) {
996 perror("qemu_madvise");
997 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
998 "but dump_guest_core=off specified\n");
1003 void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
1005 RAMBlock *new_block, *block;
1007 new_block = NULL;
1008 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1009 if (block->offset == addr) {
1010 new_block = block;
1011 break;
1014 assert(new_block);
1015 assert(!new_block->idstr[0]);
1017 if (dev) {
1018 char *id = qdev_get_dev_path(dev);
1019 if (id) {
1020 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
1021 g_free(id);
1024 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1026 /* This assumes the iothread lock is taken here too. */
1027 qemu_mutex_lock_ramlist();
1028 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1029 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
1030 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1031 new_block->idstr);
1032 abort();
1035 qemu_mutex_unlock_ramlist();
1038 static int memory_try_enable_merging(void *addr, size_t len)
1040 QemuOpts *opts;
1042 opts = qemu_opts_find(qemu_find_opts("machine"), 0);
1043 if (opts && !qemu_opt_get_bool(opts, "mem-merge", true)) {
1044 /* disabled by the user */
1045 return 0;
1048 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1051 ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
1052 MemoryRegion *mr)
1054 RAMBlock *block, *new_block;
1056 size = TARGET_PAGE_ALIGN(size);
1057 new_block = g_malloc0(sizeof(*new_block));
1059 /* This assumes the iothread lock is taken here too. */
1060 qemu_mutex_lock_ramlist();
1061 new_block->mr = mr;
1062 new_block->offset = find_ram_offset(size);
1063 if (host) {
1064 new_block->host = host;
1065 new_block->flags |= RAM_PREALLOC_MASK;
1066 } else {
1067 if (mem_path) {
1068 #if defined (__linux__) && !defined(TARGET_S390X)
1069 new_block->host = file_ram_alloc(new_block, size, mem_path);
1070 if (!new_block->host) {
1071 new_block->host = qemu_anon_ram_alloc(size);
1072 memory_try_enable_merging(new_block->host, size);
1074 #else
1075 fprintf(stderr, "-mem-path option unsupported\n");
1076 exit(1);
1077 #endif
1078 } else {
1079 if (xen_enabled()) {
1080 xen_ram_alloc(new_block->offset, size, mr);
1081 } else if (kvm_enabled()) {
1082 /* some s390/kvm configurations have special constraints */
1083 new_block->host = kvm_ram_alloc(size);
1084 } else {
1085 new_block->host = qemu_anon_ram_alloc(size);
1087 memory_try_enable_merging(new_block->host, size);
1090 new_block->length = size;
1092 /* Keep the list sorted from biggest to smallest block. */
1093 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1094 if (block->length < new_block->length) {
1095 break;
1098 if (block) {
1099 QTAILQ_INSERT_BEFORE(block, new_block, next);
1100 } else {
1101 QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
1103 ram_list.mru_block = NULL;
1105 ram_list.version++;
1106 qemu_mutex_unlock_ramlist();
1108 ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
1109 last_ram_offset() >> TARGET_PAGE_BITS);
1110 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
1111 0, size >> TARGET_PAGE_BITS);
1112 cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);
1114 qemu_ram_setup_dump(new_block->host, size);
1115 qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);
1117 if (kvm_enabled())
1118 kvm_setup_guest_memory(new_block->host, size);
1120 return new_block->offset;
1123 ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
1125 return qemu_ram_alloc_from_ptr(size, NULL, mr);
1128 void qemu_ram_free_from_ptr(ram_addr_t addr)
1130 RAMBlock *block;
1132 /* This assumes the iothread lock is taken here too. */
1133 qemu_mutex_lock_ramlist();
1134 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1135 if (addr == block->offset) {
1136 QTAILQ_REMOVE(&ram_list.blocks, block, next);
1137 ram_list.mru_block = NULL;
1138 ram_list.version++;
1139 g_free(block);
1140 break;
1143 qemu_mutex_unlock_ramlist();
1146 void qemu_ram_free(ram_addr_t addr)
1148 RAMBlock *block;
1150 /* This assumes the iothread lock is taken here too. */
1151 qemu_mutex_lock_ramlist();
1152 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1153 if (addr == block->offset) {
1154 QTAILQ_REMOVE(&ram_list.blocks, block, next);
1155 ram_list.mru_block = NULL;
1156 ram_list.version++;
1157 if (block->flags & RAM_PREALLOC_MASK) {
1159 } else if (mem_path) {
1160 #if defined (__linux__) && !defined(TARGET_S390X)
1161 if (block->fd) {
1162 munmap(block->host, block->length);
1163 close(block->fd);
1164 } else {
1165 qemu_anon_ram_free(block->host, block->length);
1167 #else
1168 abort();
1169 #endif
1170 } else {
1171 if (xen_enabled()) {
1172 xen_invalidate_map_cache_entry(block->host);
1173 } else {
1174 qemu_anon_ram_free(block->host, block->length);
1177 g_free(block);
1178 break;
1181 qemu_mutex_unlock_ramlist();
1185 #ifndef _WIN32
1186 void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1188 RAMBlock *block;
1189 ram_addr_t offset;
1190 int flags;
1191 void *area, *vaddr;
1193 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1194 offset = addr - block->offset;
1195 if (offset < block->length) {
1196 vaddr = block->host + offset;
1197 if (block->flags & RAM_PREALLOC_MASK) {
1199 } else {
1200 flags = MAP_FIXED;
1201 munmap(vaddr, length);
1202 if (mem_path) {
1203 #if defined(__linux__) && !defined(TARGET_S390X)
1204 if (block->fd) {
1205 #ifdef MAP_POPULATE
1206 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
1207 MAP_PRIVATE;
1208 #else
1209 flags |= MAP_PRIVATE;
1210 #endif
1211 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1212 flags, block->fd, offset);
1213 } else {
1214 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1215 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1216 flags, -1, 0);
1218 #else
1219 abort();
1220 #endif
1221 } else {
1222 #if defined(TARGET_S390X) && defined(CONFIG_KVM)
1223 flags |= MAP_SHARED | MAP_ANONYMOUS;
1224 area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
1225 flags, -1, 0);
1226 #else
1227 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1228 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1229 flags, -1, 0);
1230 #endif
1232 if (area != vaddr) {
1233 fprintf(stderr, "Could not remap addr: "
1234 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
1235 length, addr);
1236 exit(1);
1238 memory_try_enable_merging(vaddr, length);
1239 qemu_ram_setup_dump(vaddr, length);
1241 return;
1245 #endif /* !_WIN32 */
1247 /* Return a host pointer to ram allocated with qemu_ram_alloc.
1248 With the exception of the softmmu code in this file, this should
1249 only be used for local memory (e.g. video ram) that the device owns,
1250 and knows it isn't going to access beyond the end of the block.
1252 It should not be used for general purpose DMA.
1253 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
1255 void *qemu_get_ram_ptr(ram_addr_t addr)
1257 RAMBlock *block;
1259 /* The list is protected by the iothread lock here. */
1260 block = ram_list.mru_block;
1261 if (block && addr - block->offset < block->length) {
1262 goto found;
1264 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1265 if (addr - block->offset < block->length) {
1266 goto found;
1270 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1271 abort();
1273 found:
1274 ram_list.mru_block = block;
1275 if (xen_enabled()) {
1276 /* We need to check if the requested address is in the RAM
1277 * because we don't want to map the entire memory in QEMU.
1278 * In that case just map until the end of the page.
1280 if (block->offset == 0) {
1281 return xen_map_cache(addr, 0, 0);
1282 } else if (block->host == NULL) {
1283 block->host =
1284 xen_map_cache(block->offset, block->length, 1);
1287 return block->host + (addr - block->offset);
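/* Usage sketch (illustrative): a device model that allocated its own block,
 * e.g. video RAM, may cache the host pointer for that block, as the comment
 * above allows.  Real devices normally go through the memory_region_*
 * wrappers in memory.c, which end up in qemu_ram_alloc()/qemu_get_ram_ptr().
 * For arbitrary guest addresses use cpu_physical_memory_rw() or
 * cpu_physical_memory_map() instead.
 */
#if 0
static uint8_t *example_vram;           /* hypothetical device state */

static void example_vram_init(MemoryRegion *mr, ram_addr_t size)
{
    ram_addr_t offset = qemu_ram_alloc(size, mr);

    /* Valid because the device owns this block and stays within it. */
    example_vram = qemu_get_ram_ptr(offset);
}
#endif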
1290 /* Return a host pointer to ram allocated with qemu_ram_alloc. Same as
1291 * qemu_get_ram_ptr but do not touch ram_list.mru_block.
1293 * ??? Is this still necessary?
1295 static void *qemu_safe_ram_ptr(ram_addr_t addr)
1297 RAMBlock *block;
1299 /* The list is protected by the iothread lock here. */
1300 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1301 if (addr - block->offset < block->length) {
1302 if (xen_enabled()) {
1303 /* We need to check if the requested address is in the RAM
1304 * because we don't want to map the entire memory in QEMU.
1305 * In that case just map until the end of the page.
1307 if (block->offset == 0) {
1308 return xen_map_cache(addr, 0, 0);
1309 } else if (block->host == NULL) {
1310 block->host =
1311 xen_map_cache(block->offset, block->length, 1);
1314 return block->host + (addr - block->offset);
1318 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1319 abort();
1321 return NULL;
1324 /* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
1325 * but takes a size argument */
1326 static void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
1328 if (*size == 0) {
1329 return NULL;
1331 if (xen_enabled()) {
1332 return xen_map_cache(addr, *size, 1);
1333 } else {
1334 RAMBlock *block;
1336 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1337 if (addr - block->offset < block->length) {
1338 if (addr - block->offset + *size > block->length)
1339 *size = block->length - addr + block->offset;
1340 return block->host + (addr - block->offset);
1344 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1345 abort();
1349 int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
1351 RAMBlock *block;
1352 uint8_t *host = ptr;
1354 if (xen_enabled()) {
1355 *ram_addr = xen_ram_addr_from_mapcache(ptr);
1356 return 0;
1359 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1360 /* This can happen when the block is not mapped. */
1361 if (block->host == NULL) {
1362 continue;
1364 if (host - block->host < block->length) {
1365 *ram_addr = block->offset + (host - block->host);
1366 return 0;
1370 return -1;
1373 /* Some of the softmmu routines need to translate from a host pointer
1374 (typically a TLB entry) back to a ram offset. */
1375 ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
1377 ram_addr_t ram_addr;
1379 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
1380 fprintf(stderr, "Bad ram pointer %p\n", ptr);
1381 abort();
1383 return ram_addr;
1386 static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
1387 unsigned size)
1389 #ifdef DEBUG_UNASSIGNED
1390 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
1391 #endif
1392 #if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
1393 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
1394 #endif
1395 return 0;
1398 static void unassigned_mem_write(void *opaque, hwaddr addr,
1399 uint64_t val, unsigned size)
1401 #ifdef DEBUG_UNASSIGNED
1402 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
1403 #endif
1404 #if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
1405 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
1406 #endif
1409 static const MemoryRegionOps unassigned_mem_ops = {
1410 .read = unassigned_mem_read,
1411 .write = unassigned_mem_write,
1412 .endianness = DEVICE_NATIVE_ENDIAN,
1415 static uint64_t error_mem_read(void *opaque, hwaddr addr,
1416 unsigned size)
1418 abort();
1421 static const MemoryRegionOps rom_mem_ops = {
1422 .read = error_mem_read,
1423 .write = unassigned_mem_write,
1424 .endianness = DEVICE_NATIVE_ENDIAN,
1427 static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
1428 uint64_t val, unsigned size)
1430 int dirty_flags;
1431 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
1432 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
1433 tb_invalidate_phys_page_fast(ram_addr, size);
1434 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
1436 switch (size) {
1437 case 1:
1438 stb_p(qemu_get_ram_ptr(ram_addr), val);
1439 break;
1440 case 2:
1441 stw_p(qemu_get_ram_ptr(ram_addr), val);
1442 break;
1443 case 4:
1444 stl_p(qemu_get_ram_ptr(ram_addr), val);
1445 break;
1446 default:
1447 abort();
1449 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
1450 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
1451 /* we remove the notdirty callback only if the code has been
1452 flushed */
1453 if (dirty_flags == 0xff)
1454 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
1457 static const MemoryRegionOps notdirty_mem_ops = {
1458 .read = error_mem_read,
1459 .write = notdirty_mem_write,
1460 .endianness = DEVICE_NATIVE_ENDIAN,
1463 /* Generate a debug exception if a watchpoint has been hit. */
1464 static void check_watchpoint(int offset, int len_mask, int flags)
1466 CPUArchState *env = cpu_single_env;
1467 target_ulong pc, cs_base;
1468 target_ulong vaddr;
1469 CPUWatchpoint *wp;
1470 int cpu_flags;
1472 if (env->watchpoint_hit) {
1473 /* We re-entered the check after replacing the TB. Now raise
1474 * the debug interrupt so that it will trigger after the
1475 * current instruction. */
1476 cpu_interrupt(ENV_GET_CPU(env), CPU_INTERRUPT_DEBUG);
1477 return;
1479 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
1480 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1481 if ((vaddr == (wp->vaddr & len_mask) ||
1482 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
1483 wp->flags |= BP_WATCHPOINT_HIT;
1484 if (!env->watchpoint_hit) {
1485 env->watchpoint_hit = wp;
1486 tb_check_watchpoint(env);
1487 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
1488 env->exception_index = EXCP_DEBUG;
1489 cpu_loop_exit(env);
1490 } else {
1491 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
1492 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
1493 cpu_resume_from_signal(env, NULL);
1496 } else {
1497 wp->flags &= ~BP_WATCHPOINT_HIT;
1502 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
1503 so these check for a hit then pass through to the normal out-of-line
1504 phys routines. */
1505 static uint64_t watch_mem_read(void *opaque, hwaddr addr,
1506 unsigned size)
1508 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
1509 switch (size) {
1510 case 1: return ldub_phys(addr);
1511 case 2: return lduw_phys(addr);
1512 case 4: return ldl_phys(addr);
1513 default: abort();
1517 static void watch_mem_write(void *opaque, hwaddr addr,
1518 uint64_t val, unsigned size)
1520 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
1521 switch (size) {
1522 case 1:
1523 stb_phys(addr, val);
1524 break;
1525 case 2:
1526 stw_phys(addr, val);
1527 break;
1528 case 4:
1529 stl_phys(addr, val);
1530 break;
1531 default: abort();
1535 static const MemoryRegionOps watch_mem_ops = {
1536 .read = watch_mem_read,
1537 .write = watch_mem_write,
1538 .endianness = DEVICE_NATIVE_ENDIAN,
1541 static uint64_t subpage_read(void *opaque, hwaddr addr,
1542 unsigned len)
1544 subpage_t *mmio = opaque;
1545 unsigned int idx = SUBPAGE_IDX(addr);
1546 MemoryRegionSection *section;
1547 #if defined(DEBUG_SUBPAGE)
1548 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
1549 mmio, len, addr, idx);
1550 #endif
1552 section = &phys_sections[mmio->sub_section[idx]];
1553 addr += mmio->base;
1554 addr -= section->offset_within_address_space;
1555 addr += section->offset_within_region;
1556 return io_mem_read(section->mr, addr, len);
1559 static void subpage_write(void *opaque, hwaddr addr,
1560 uint64_t value, unsigned len)
1562 subpage_t *mmio = opaque;
1563 unsigned int idx = SUBPAGE_IDX(addr);
1564 MemoryRegionSection *section;
1565 #if defined(DEBUG_SUBPAGE)
1566 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
1567 " idx %d value %"PRIx64"\n",
1568 __func__, mmio, len, addr, idx, value);
1569 #endif
1571 section = &phys_sections[mmio->sub_section[idx]];
1572 addr += mmio->base;
1573 addr -= section->offset_within_address_space;
1574 addr += section->offset_within_region;
1575 io_mem_write(section->mr, addr, value, len);
1578 static const MemoryRegionOps subpage_ops = {
1579 .read = subpage_read,
1580 .write = subpage_write,
1581 .endianness = DEVICE_NATIVE_ENDIAN,
1584 static uint64_t subpage_ram_read(void *opaque, hwaddr addr,
1585 unsigned size)
1587 ram_addr_t raddr = addr;
1588 void *ptr = qemu_get_ram_ptr(raddr);
1589 switch (size) {
1590 case 1: return ldub_p(ptr);
1591 case 2: return lduw_p(ptr);
1592 case 4: return ldl_p(ptr);
1593 default: abort();
1597 static void subpage_ram_write(void *opaque, hwaddr addr,
1598 uint64_t value, unsigned size)
1600 ram_addr_t raddr = addr;
1601 void *ptr = qemu_get_ram_ptr(raddr);
1602 switch (size) {
1603 case 1: return stb_p(ptr, value);
1604 case 2: return stw_p(ptr, value);
1605 case 4: return stl_p(ptr, value);
1606 default: abort();
1610 static const MemoryRegionOps subpage_ram_ops = {
1611 .read = subpage_ram_read,
1612 .write = subpage_ram_write,
1613 .endianness = DEVICE_NATIVE_ENDIAN,
1616 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
1617 uint16_t section)
1619 int idx, eidx;
1621 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
1622 return -1;
1623 idx = SUBPAGE_IDX(start);
1624 eidx = SUBPAGE_IDX(end);
1625 #if defined(DEBUG_SUBPAGE)
1626 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
1627 mmio, start, end, idx, eidx, memory);
1628 #endif
1629 if (memory_region_is_ram(phys_sections[section].mr)) {
1630 MemoryRegionSection new_section = phys_sections[section];
1631 new_section.mr = &io_mem_subpage_ram;
1632 section = phys_section_add(&new_section);
1634 for (; idx <= eidx; idx++) {
1635 mmio->sub_section[idx] = section;
1638 return 0;
1641 static subpage_t *subpage_init(hwaddr base)
1643 subpage_t *mmio;
1645 mmio = g_malloc0(sizeof(subpage_t));
1647 mmio->base = base;
1648 memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
1649 "subpage", TARGET_PAGE_SIZE);
1650 mmio->iomem.subpage = true;
1651 #if defined(DEBUG_SUBPAGE)
1652 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
1653 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
1654 #endif
1655 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned);
1657 return mmio;
1660 static uint16_t dummy_section(MemoryRegion *mr)
1662 MemoryRegionSection section = {
1663 .mr = mr,
1664 .offset_within_address_space = 0,
1665 .offset_within_region = 0,
1666 .size = UINT64_MAX,
1669 return phys_section_add(&section);
1672 MemoryRegion *iotlb_to_region(hwaddr index)
1674 return phys_sections[index & ~TARGET_PAGE_MASK].mr;
1677 static void io_mem_init(void)
1679 memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
1680 memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
1681 "unassigned", UINT64_MAX);
1682 memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
1683 "notdirty", UINT64_MAX);
1684 memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
1685 "subpage-ram", UINT64_MAX);
1686 memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
1687 "watch", UINT64_MAX);
1690 static void mem_begin(MemoryListener *listener)
1692 AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
1694 destroy_all_mappings(d);
1695 d->phys_map.ptr = PHYS_MAP_NODE_NIL;
1698 static void core_begin(MemoryListener *listener)
1700 phys_sections_clear();
1701 phys_section_unassigned = dummy_section(&io_mem_unassigned);
1702 phys_section_notdirty = dummy_section(&io_mem_notdirty);
1703 phys_section_rom = dummy_section(&io_mem_rom);
1704 phys_section_watch = dummy_section(&io_mem_watch);
1707 static void tcg_commit(MemoryListener *listener)
1709 CPUArchState *env;
1711 /* since each CPU stores ram addresses in its TLB cache, we must
1712 reset the modified entries */
1713 /* XXX: slow ! */
1714 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1715 tlb_flush(env, 1);
1719 static void core_log_global_start(MemoryListener *listener)
1721 cpu_physical_memory_set_dirty_tracking(1);
1724 static void core_log_global_stop(MemoryListener *listener)
1726 cpu_physical_memory_set_dirty_tracking(0);
1729 static void io_region_add(MemoryListener *listener,
1730 MemoryRegionSection *section)
1732 MemoryRegionIORange *mrio = g_new(MemoryRegionIORange, 1);
1734 mrio->mr = section->mr;
1735 mrio->offset = section->offset_within_region;
1736 iorange_init(&mrio->iorange, &memory_region_iorange_ops,
1737 section->offset_within_address_space, section->size);
1738 ioport_register(&mrio->iorange);
1741 static void io_region_del(MemoryListener *listener,
1742 MemoryRegionSection *section)
1744 isa_unassign_ioport(section->offset_within_address_space, section->size);
1747 static MemoryListener core_memory_listener = {
1748 .begin = core_begin,
1749 .log_global_start = core_log_global_start,
1750 .log_global_stop = core_log_global_stop,
1751 .priority = 1,
1754 static MemoryListener io_memory_listener = {
1755 .region_add = io_region_add,
1756 .region_del = io_region_del,
1757 .priority = 0,
1760 static MemoryListener tcg_memory_listener = {
1761 .commit = tcg_commit,
1764 void address_space_init_dispatch(AddressSpace *as)
1766 AddressSpaceDispatch *d = g_new(AddressSpaceDispatch, 1);
1768 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };
1769 d->listener = (MemoryListener) {
1770 .begin = mem_begin,
1771 .region_add = mem_add,
1772 .region_nop = mem_add,
1773 .priority = 0,
1775 as->dispatch = d;
1776 memory_listener_register(&d->listener, as);
1779 void address_space_destroy_dispatch(AddressSpace *as)
1781 AddressSpaceDispatch *d = as->dispatch;
1783 memory_listener_unregister(&d->listener);
1784 destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
1785 g_free(d);
1786 as->dispatch = NULL;
1789 static void memory_map_init(void)
1791 system_memory = g_malloc(sizeof(*system_memory));
1792 memory_region_init(system_memory, "system", INT64_MAX);
1793 address_space_init(&address_space_memory, system_memory);
1794 address_space_memory.name = "memory";
1796 system_io = g_malloc(sizeof(*system_io));
1797 memory_region_init(system_io, "io", 65536);
1798 address_space_init(&address_space_io, system_io);
1799 address_space_io.name = "I/O";
1801 memory_listener_register(&core_memory_listener, &address_space_memory);
1802 memory_listener_register(&io_memory_listener, &address_space_io);
1803 memory_listener_register(&tcg_memory_listener, &address_space_memory);
1805 dma_context_init(&dma_context_memory, &address_space_memory,
1806 NULL, NULL, NULL);
1809 MemoryRegion *get_system_memory(void)
1811 return system_memory;
1814 MemoryRegion *get_system_io(void)
1816 return system_io;
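/* Usage sketch (illustrative; memory_region_init_ram() and
 * memory_region_add_subregion() live in memory.c, with the signatures of this
 * QEMU version assumed): a board model maps its RAM into the system address
 * space created by memory_map_init() above.
 */
#if 0
static void example_board_init(ram_addr_t ram_size)
{
    MemoryRegion *ram = g_new(MemoryRegion, 1);

    memory_region_init_ram(ram, "example.ram", ram_size);
    memory_region_add_subregion(get_system_memory(), 0, ram);
}
#endif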
1819 #endif /* !defined(CONFIG_USER_ONLY) */
1821 /* physical memory access (slow version, mainly for debug) */
1822 #if defined(CONFIG_USER_ONLY)
1823 int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
1824 uint8_t *buf, int len, int is_write)
1826 int l, flags;
1827 target_ulong page;
1828 void * p;
1830 while (len > 0) {
1831 page = addr & TARGET_PAGE_MASK;
1832 l = (page + TARGET_PAGE_SIZE) - addr;
1833 if (l > len)
1834 l = len;
1835 flags = page_get_flags(page);
1836 if (!(flags & PAGE_VALID))
1837 return -1;
1838 if (is_write) {
1839 if (!(flags & PAGE_WRITE))
1840 return -1;
1841 /* XXX: this code should not depend on lock_user */
1842 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
1843 return -1;
1844 memcpy(p, buf, l);
1845 unlock_user(p, addr, l);
1846 } else {
1847 if (!(flags & PAGE_READ))
1848 return -1;
1849 /* XXX: this code should not depend on lock_user */
1850 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
1851 return -1;
1852 memcpy(buf, p, l);
1853 unlock_user(p, addr, 0);
1855 len -= l;
1856 buf += l;
1857 addr += l;
1859 return 0;
1862 #else
1864 static void invalidate_and_set_dirty(hwaddr addr,
1865 hwaddr length)
1867 if (!cpu_physical_memory_is_dirty(addr)) {
1868 /* invalidate code */
1869 tb_invalidate_phys_page_range(addr, addr + length, 0);
1870 /* set dirty bit */
1871 cpu_physical_memory_set_dirty_flags(addr, (0xff & ~CODE_DIRTY_FLAG));
1873 xen_modified_memory(addr, length);
1876 void address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
1877 int len, bool is_write)
1879 AddressSpaceDispatch *d = as->dispatch;
1880 int l;
1881 uint8_t *ptr;
1882 uint32_t val;
1883 hwaddr page;
1884 MemoryRegionSection *section;
1886 while (len > 0) {
1887 page = addr & TARGET_PAGE_MASK;
1888 l = (page + TARGET_PAGE_SIZE) - addr;
1889 if (l > len)
1890 l = len;
1891 section = phys_page_find(d, page >> TARGET_PAGE_BITS);
1893 if (is_write) {
1894 if (!memory_region_is_ram(section->mr)) {
1895 hwaddr addr1;
1896 addr1 = memory_region_section_addr(section, addr);
1897 /* XXX: could force cpu_single_env to NULL to avoid
1898 potential bugs */
1899 if (l >= 4 && ((addr1 & 3) == 0)) {
1900 /* 32 bit write access */
1901 val = ldl_p(buf);
1902 io_mem_write(section->mr, addr1, val, 4);
1903 l = 4;
1904 } else if (l >= 2 && ((addr1 & 1) == 0)) {
1905 /* 16 bit write access */
1906 val = lduw_p(buf);
1907 io_mem_write(section->mr, addr1, val, 2);
1908 l = 2;
1909 } else {
1910 /* 8 bit write access */
1911 val = ldub_p(buf);
1912 io_mem_write(section->mr, addr1, val, 1);
1913 l = 1;
1915 } else if (!section->readonly) {
1916 ram_addr_t addr1;
1917 addr1 = memory_region_get_ram_addr(section->mr)
1918 + memory_region_section_addr(section, addr);
1919 /* RAM case */
1920 ptr = qemu_get_ram_ptr(addr1);
1921 memcpy(ptr, buf, l);
1922 invalidate_and_set_dirty(addr1, l);
1924 } else {
1925 if (!(memory_region_is_ram(section->mr) ||
1926 memory_region_is_romd(section->mr))) {
1927 hwaddr addr1;
1928 /* I/O case */
1929 addr1 = memory_region_section_addr(section, addr);
1930 if (l >= 4 && ((addr1 & 3) == 0)) {
1931 /* 32 bit read access */
1932 val = io_mem_read(section->mr, addr1, 4);
1933 stl_p(buf, val);
1934 l = 4;
1935 } else if (l >= 2 && ((addr1 & 1) == 0)) {
1936 /* 16 bit read access */
1937 val = io_mem_read(section->mr, addr1, 2);
1938 stw_p(buf, val);
1939 l = 2;
1940 } else {
1941 /* 8 bit read access */
1942 val = io_mem_read(section->mr, addr1, 1);
1943 stb_p(buf, val);
1944 l = 1;
1946 } else {
1947 /* RAM case */
1948 ptr = qemu_get_ram_ptr(section->mr->ram_addr
1949 + memory_region_section_addr(section,
1950 addr));
1951 memcpy(buf, ptr, l);
1954 len -= l;
1955 buf += l;
1956 addr += l;
1960 void address_space_write(AddressSpace *as, hwaddr addr,
1961 const uint8_t *buf, int len)
1963 address_space_rw(as, addr, (uint8_t *)buf, len, true);
1967 * address_space_read: read from an address space.
1969 * @as: #AddressSpace to be accessed
1970 * @addr: address within that address space
1971 * @buf: buffer with the data transferred
1973 void address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
1975 address_space_rw(as, addr, buf, len, false);
1979 void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
1980 int len, int is_write)
1982 return address_space_rw(&address_space_memory, addr, buf, len, is_write);
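/* Usage sketch (illustrative): copying data to and from guest-physical
 * memory with the slow-path helper above; cpu_physical_memory_rw() acts on
 * address_space_memory, while address_space_rw() takes an explicit
 * AddressSpace.
 */
#if 0
static void example_patch_descriptor(hwaddr desc_paddr)
{
    uint8_t desc[16];

    cpu_physical_memory_rw(desc_paddr, desc, sizeof(desc), 0);  /* read */
    desc[0] |= 1;                                               /* modify */
    cpu_physical_memory_rw(desc_paddr, desc, sizeof(desc), 1);  /* write back */
}
#endif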
1985 /* used for ROM loading : can write in RAM and ROM */
1986 void cpu_physical_memory_write_rom(hwaddr addr,
1987 const uint8_t *buf, int len)
1989 AddressSpaceDispatch *d = address_space_memory.dispatch;
1990 int l;
1991 uint8_t *ptr;
1992 hwaddr page;
1993 MemoryRegionSection *section;
1995 while (len > 0) {
1996 page = addr & TARGET_PAGE_MASK;
1997 l = (page + TARGET_PAGE_SIZE) - addr;
1998 if (l > len)
1999 l = len;
2000 section = phys_page_find(d, page >> TARGET_PAGE_BITS);
2002 if (!(memory_region_is_ram(section->mr) ||
2003 memory_region_is_romd(section->mr))) {
2004 /* do nothing */
2005 } else {
2006 unsigned long addr1;
2007 addr1 = memory_region_get_ram_addr(section->mr)
2008 + memory_region_section_addr(section, addr);
2009 /* ROM/RAM case */
2010 ptr = qemu_get_ram_ptr(addr1);
2011 memcpy(ptr, buf, l);
2012 invalidate_and_set_dirty(addr1, l);
2014 len -= l;
2015 buf += l;
2016 addr += l;
2020 typedef struct {
2021 void *buffer;
2022 hwaddr addr;
2023 hwaddr len;
2024 } BounceBuffer;
2026 static BounceBuffer bounce;
2028 typedef struct MapClient {
2029 void *opaque;
2030 void (*callback)(void *opaque);
2031 QLIST_ENTRY(MapClient) link;
2032 } MapClient;
2034 static QLIST_HEAD(map_client_list, MapClient) map_client_list
2035 = QLIST_HEAD_INITIALIZER(map_client_list);
2037 void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
2039 MapClient *client = g_malloc(sizeof(*client));
2041 client->opaque = opaque;
2042 client->callback = callback;
2043 QLIST_INSERT_HEAD(&map_client_list, client, link);
2044 return client;
2047 static void cpu_unregister_map_client(void *_client)
2049 MapClient *client = (MapClient *)_client;
2051 QLIST_REMOVE(client, link);
2052 g_free(client);
2055 static void cpu_notify_map_clients(void)
2057 MapClient *client;
2059 while (!QLIST_EMPTY(&map_client_list)) {
2060 client = QLIST_FIRST(&map_client_list);
2061 client->callback(client->opaque);
2062 cpu_unregister_map_client(client);
2066 /* Map a physical memory region into a host virtual address.
2067 * May map a subset of the requested range, given by and returned in *plen.
2068 * May return NULL if resources needed to perform the mapping are exhausted.
2069 * Use only for reads OR writes - not for read-modify-write operations.
2070 * Use cpu_register_map_client() to know when retrying the map operation is
2071 * likely to succeed.
2073 void *address_space_map(AddressSpace *as,
2074 hwaddr addr,
2075 hwaddr *plen,
2076 bool is_write)
2078 AddressSpaceDispatch *d = as->dispatch;
2079 hwaddr len = *plen;
2080 hwaddr todo = 0;
2081 int l;
2082 hwaddr page;
2083 MemoryRegionSection *section;
2084 ram_addr_t raddr = RAM_ADDR_MAX;
2085 ram_addr_t rlen;
2086 void *ret;
2088 while (len > 0) {
2089 page = addr & TARGET_PAGE_MASK;
2090 l = (page + TARGET_PAGE_SIZE) - addr;
2091 if (l > len)
2092 l = len;
2093 section = phys_page_find(d, page >> TARGET_PAGE_BITS);
2095 if (!(memory_region_is_ram(section->mr) && !section->readonly)) {
2096 if (todo || bounce.buffer) {
2097 break;
2099 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
2100 bounce.addr = addr;
2101 bounce.len = l;
2102 if (!is_write) {
2103 address_space_read(as, addr, bounce.buffer, l);
2106 *plen = l;
2107 return bounce.buffer;
2109 if (!todo) {
2110 raddr = memory_region_get_ram_addr(section->mr)
2111 + memory_region_section_addr(section, addr);
2114 len -= l;
2115 addr += l;
2116 todo += l;
2118 rlen = todo;
2119 ret = qemu_ram_ptr_length(raddr, &rlen);
2120 *plen = rlen;
2121 return ret;
2124 /* Unmaps a memory region previously mapped by address_space_map().
2125 * Will also mark the memory as dirty if is_write == 1. access_len gives
2126 * the amount of memory that was actually read or written by the caller.
2128 void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2129 int is_write, hwaddr access_len)
2131 if (buffer != bounce.buffer) {
2132 if (is_write) {
2133 ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
2134 while (access_len) {
2135 unsigned l;
2136 l = TARGET_PAGE_SIZE;
2137 if (l > access_len)
2138 l = access_len;
2139 invalidate_and_set_dirty(addr1, l);
2140 addr1 += l;
2141 access_len -= l;
2144 if (xen_enabled()) {
2145 xen_invalidate_map_cache_entry(buffer);
2147 return;
2149 if (is_write) {
2150 address_space_write(as, bounce.addr, bounce.buffer, access_len);
2152 qemu_vfree(bounce.buffer);
2153 bounce.buffer = NULL;
2154 cpu_notify_map_clients();
2157 void *cpu_physical_memory_map(hwaddr addr,
2158 hwaddr *plen,
2159 int is_write)
2161 return address_space_map(&address_space_memory, addr, plen, is_write);
2164 void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2165 int is_write, hwaddr access_len)
2167 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
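/* Usage sketch (illustrative): zero-copy access to a guest buffer with the
 * map/unmap API above.  The mapping may come back shorter than requested or
 * fail (NULL) while the single bounce buffer is busy; cpu_register_map_client()
 * lets the caller retry once it is released.
 */
#if 0
static void example_zero_guest_buffer(hwaddr addr, hwaddr len)
{
    hwaddr plen = len;
    void *p = cpu_physical_memory_map(addr, &plen, 1 /* is_write */);

    if (!p) {
        return;         /* retry later via cpu_register_map_client() */
    }
    memset(p, 0, plen);                      /* plen may be < len */
    cpu_physical_memory_unmap(p, plen, 1, plen);
}
#endif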
/* warning: addr must be aligned */
static inline uint32_t ldl_phys_internal(hwaddr addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint32_t val;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);

    if (!(memory_region_is_ram(section->mr) ||
          memory_region_is_romd(section->mr))) {
        /* I/O case */
        addr = memory_region_section_addr(section, addr);
        val = io_mem_read(section->mr, addr, 4);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t ldl_phys(hwaddr addr)
{
    return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t ldl_le_phys(hwaddr addr)
{
    return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t ldl_be_phys(hwaddr addr)
{
    return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
}
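/*
 * Editorial note (not upstream): the _le/_be variants above read a value
 * that is stored little- or big-endian in guest memory and return it in host
 * byte order, independent of TARGET_WORDS_BIGENDIAN; plain ldl_phys() uses
 * the target's native byte order.  For example, a device model reading a
 * little-endian descriptor field at a guest physical address "desc_pa"
 * (hypothetical variable) would use:
 *
 *     uint32_t next = ldl_le_phys(desc_pa + 4);
 */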
/* warning: addr must be aligned */
static inline uint64_t ldq_phys_internal(hwaddr addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);

    if (!(memory_region_is_ram(section->mr) ||
          memory_region_is_romd(section->mr))) {
        /* I/O case */
        addr = memory_region_section_addr(section, addr);

        /* XXX This is broken when device endian != cpu endian.
           Fix and add "endian" variable check */
#ifdef TARGET_WORDS_BIGENDIAN
        val = io_mem_read(section->mr, addr, 4) << 32;
        val |= io_mem_read(section->mr, addr + 4, 4);
#else
        val = io_mem_read(section->mr, addr, 4);
        val |= io_mem_read(section->mr, addr + 4, 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
    }
    return val;
}
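/*
 * Editorial note on the XXX above: in the I/O branch the 64-bit access is
 * split into two 4-byte io_mem_read() calls combined according to
 * TARGET_WORDS_BIGENDIAN only; the "endian" argument is not consulted there,
 * so ldq_le_phys()/ldq_be_phys() only honour the requested byte order on the
 * RAM path.
 */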
uint64_t ldq_phys(hwaddr addr)
{
    return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint64_t ldq_le_phys(hwaddr addr)
{
    return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint64_t ldq_be_phys(hwaddr addr)
{
    return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
}
/* XXX: optimize */
uint32_t ldub_phys(hwaddr addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}
/* warning: addr must be aligned */
static inline uint32_t lduw_phys_internal(hwaddr addr,
                                          enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);

    if (!(memory_region_is_ram(section->mr) ||
          memory_region_is_romd(section->mr))) {
        /* I/O case */
        addr = memory_region_section_addr(section, addr);
        val = io_mem_read(section->mr, addr, 2);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t lduw_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t lduw_le_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t lduw_be_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
}
/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(hwaddr addr, uint32_t val)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
        io_mem_write(section->mr, addr, val, 4);
    } else {
        unsigned long addr1 = (memory_region_get_ram_addr(section->mr)
                               & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, addr);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}
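/*
 * Illustrative sketch (editorial): the typical caller is a target's software
 * TLB-fill / page-table-walk code that sets accessed/dirty bits in a guest
 * PTE.  Going through stl_phys_notdirty() keeps the page holding the PTE
 * from being flagged dirty or having its TBs invalidated just because of a
 * hardware-style PTE update.  "pte_addr", "PTE_ACCESSED" and "PTE_DIRTY" are
 * hypothetical stand-ins for a target's real definitions.
 *
 *     uint32_t pte = ldl_phys(pte_addr);
 *     if (!(pte & PTE_ACCESSED)) {
 *         pte |= PTE_ACCESSED;
 *         if (is_write) {
 *             pte |= PTE_DIRTY;
 *         }
 *         stl_phys_notdirty(pte_addr, pte);
 *     }
 */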
/* warning: addr must be aligned */
static inline void stl_phys_internal(hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        io_mem_write(section->mr, addr, val, 4);
    } else {
        unsigned long addr1;
        addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, addr);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 4);
    }
}

void stl_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stl_le_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stl_be_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}
/* XXX: optimize */
void stb_phys(hwaddr addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}
/* warning: addr must be aligned */
static inline void stw_phys_internal(hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        io_mem_write(section->mr, addr, val, 2);
    } else {
        unsigned long addr1;
        addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, addr);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 2);
    }
}

void stw_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stw_le_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stw_be_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}
/* XXX: optimize */
void stq_phys(hwaddr addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_le_phys(hwaddr addr, uint64_t val)
{
    val = cpu_to_le64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_be_phys(hwaddr addr, uint64_t val)
{
    val = cpu_to_be64(val);
    cpu_physical_memory_write(addr, &val, 8);
}
/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
#endif
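/*
 * Illustrative sketch (editorial, not part of the upstream file): a debug
 * consumer such as a gdb stub or monitor command reads guest *virtual*
 * memory through cpu_memory_rw_debug().  The wrapper below is hypothetical;
 * only cpu_memory_rw_debug() and ldl_p() are existing interfaces.
 *
 *     static bool debug_read_u32(CPUArchState *env, target_ulong vaddr,
 *                                uint32_t *out)
 *     {
 *         uint8_t buf[4];
 *
 *         if (cpu_memory_rw_debug(env, vaddr, buf, sizeof(buf), 0) < 0) {
 *             return false;   // no physical page mapped at vaddr
 *         }
 *         *out = ldl_p(buf);  // interpret in target-native byte order
 *         return true;
 *     }
 */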
#if !defined(CONFIG_USER_ONLY)

/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool virtio_is_big_endian(void);
bool virtio_is_big_endian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}

#endif
#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch,
                             phys_addr >> TARGET_PAGE_BITS);

    return !(memory_region_is_ram(section->mr) ||
             memory_region_is_romd(section->mr));
}
#endif