exec: eliminate stq_phys_notdirty
[qemu/ar7.git] / exec.c
blob 1355661963f657f9c5d392214314cfa0d557f880
1 /*
2 * Virtual page mapping
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 #include "config.h"
20 #ifdef _WIN32
21 #include <windows.h>
22 #else
23 #include <sys/types.h>
24 #include <sys/mman.h>
25 #endif
27 #include "qemu-common.h"
28 #include "cpu.h"
29 #include "tcg.h"
30 #include "hw/hw.h"
31 #include "hw/qdev.h"
32 #include "qemu/osdep.h"
33 #include "sysemu/kvm.h"
34 #include "hw/xen/xen.h"
35 #include "qemu/timer.h"
36 #include "qemu/config-file.h"
37 #include "exec/memory.h"
38 #include "sysemu/dma.h"
39 #include "exec/address-spaces.h"
40 #if defined(CONFIG_USER_ONLY)
41 #include <qemu.h>
42 #else /* !CONFIG_USER_ONLY */
43 #include "sysemu/xen-mapcache.h"
44 #include "trace.h"
45 #endif
46 #include "exec/cpu-all.h"
48 #include "exec/cputlb.h"
49 #include "translate-all.h"
51 #include "exec/memory-internal.h"
53 //#define DEBUG_UNASSIGNED
54 //#define DEBUG_SUBPAGE
56 #if !defined(CONFIG_USER_ONLY)
57 int phys_ram_fd;
58 static int in_migration;
60 RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };
62 static MemoryRegion *system_memory;
63 static MemoryRegion *system_io;
65 AddressSpace address_space_io;
66 AddressSpace address_space_memory;
67 DMAContext dma_context_memory;
69 MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
70 static MemoryRegion io_mem_subpage_ram;
72 #endif
74 CPUArchState *first_cpu;
75 /* current CPU in the current thread. It is only valid inside
76 cpu_exec() */
77 DEFINE_TLS(CPUArchState *,cpu_single_env);
78 /* 0 = Do not count executed instructions.
79 1 = Precise instruction counting.
80 2 = Adaptive rate instruction counting. */
81 int use_icount;
83 #if !defined(CONFIG_USER_ONLY)
85 static MemoryRegionSection *phys_sections;
86 static unsigned phys_sections_nb, phys_sections_nb_alloc;
87 static uint16_t phys_section_unassigned;
88 static uint16_t phys_section_notdirty;
89 static uint16_t phys_section_rom;
90 static uint16_t phys_section_watch;
92 /* Simple allocator for PhysPageEntry nodes */
93 static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
94 static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;
96 #define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)
98 static void io_mem_init(void);
99 static void memory_map_init(void);
100 static void *qemu_safe_ram_ptr(ram_addr_t addr);
102 static MemoryRegion io_mem_watch;
103 #endif
105 #if !defined(CONFIG_USER_ONLY)
107 static void phys_map_node_reserve(unsigned nodes)
109 if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
110 typedef PhysPageEntry Node[L2_SIZE];
111 phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
112 phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
113 phys_map_nodes_nb + nodes);
114 phys_map_nodes = g_renew(Node, phys_map_nodes,
115 phys_map_nodes_nb_alloc);
119 static uint16_t phys_map_node_alloc(void)
121 unsigned i;
122 uint16_t ret;
124 ret = phys_map_nodes_nb++;
125 assert(ret != PHYS_MAP_NODE_NIL);
126 assert(ret != phys_map_nodes_nb_alloc);
127 for (i = 0; i < L2_SIZE; ++i) {
128 phys_map_nodes[ret][i].is_leaf = 0;
129 phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
131 return ret;
134 static void phys_map_nodes_reset(void)
136 phys_map_nodes_nb = 0;
140 static void phys_page_set_level(PhysPageEntry *lp, hwaddr *index,
141 hwaddr *nb, uint16_t leaf,
142 int level)
144 PhysPageEntry *p;
145 int i;
146 hwaddr step = (hwaddr)1 << (level * L2_BITS);
148 if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
149 lp->ptr = phys_map_node_alloc();
150 p = phys_map_nodes[lp->ptr];
151 if (level == 0) {
152 for (i = 0; i < L2_SIZE; i++) {
153 p[i].is_leaf = 1;
154 p[i].ptr = phys_section_unassigned;
157 } else {
158 p = phys_map_nodes[lp->ptr];
160 lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];
162 while (*nb && lp < &p[L2_SIZE]) {
163 if ((*index & (step - 1)) == 0 && *nb >= step) {
164 lp->is_leaf = true;
165 lp->ptr = leaf;
166 *index += step;
167 *nb -= step;
168 } else {
169 phys_page_set_level(lp, index, nb, leaf, level - 1);
171 ++lp;
175 static void phys_page_set(AddressSpaceDispatch *d,
176 hwaddr index, hwaddr nb,
177 uint16_t leaf)
179 /* Wildly overreserve - it doesn't matter much. */
180 phys_map_node_reserve(3 * P_L2_LEVELS);
182 phys_page_set_level(&d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
185 MemoryRegionSection *phys_page_find(AddressSpaceDispatch *d, hwaddr index)
187 PhysPageEntry lp = d->phys_map;
188 PhysPageEntry *p;
189 int i;
190 uint16_t s_index = phys_section_unassigned;
192 for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
193 if (lp.ptr == PHYS_MAP_NODE_NIL) {
194 goto not_found;
196 p = phys_map_nodes[lp.ptr];
197 lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
200 s_index = lp.ptr;
201 not_found:
202 return &phys_sections[s_index];
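/* Editor's note (descriptive, not part of the original source): phys_page_find()
 * walks the radix tree top-down.  At level i the lookup index is
 *
 *     (index >> (i * L2_BITS)) & (L2_SIZE - 1)
 *
 * so each level consumes L2_BITS bits of the page index, from most to least
 * significant, until a leaf entry is reached; a NIL pointer on the way down
 * falls back to phys_section_unassigned.
 */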
205 bool memory_region_is_unassigned(MemoryRegion *mr)
207 return mr != &io_mem_ram && mr != &io_mem_rom
208 && mr != &io_mem_notdirty && !mr->rom_device
209 && mr != &io_mem_watch;
211 #endif
213 void cpu_exec_init_all(void)
215 #if !defined(CONFIG_USER_ONLY)
216 qemu_mutex_init(&ram_list.mutex);
217 memory_map_init();
218 io_mem_init();
219 #endif
222 #if !defined(CONFIG_USER_ONLY)
224 static int cpu_common_post_load(void *opaque, int version_id)
226 CPUState *cpu = opaque;
228 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
229 version_id is increased. */
230 cpu->interrupt_request &= ~0x01;
231 tlb_flush(cpu->env_ptr, 1);
233 return 0;
236 static const VMStateDescription vmstate_cpu_common = {
237 .name = "cpu_common",
238 .version_id = 1,
239 .minimum_version_id = 1,
240 .minimum_version_id_old = 1,
241 .post_load = cpu_common_post_load,
242 .fields = (VMStateField []) {
243 VMSTATE_UINT32(halted, CPUState),
244 VMSTATE_UINT32(interrupt_request, CPUState),
245 VMSTATE_END_OF_LIST()
248 #else
249 #define vmstate_cpu_common vmstate_dummy
250 #endif
252 CPUState *qemu_get_cpu(int index)
254 CPUArchState *env = first_cpu;
255 CPUState *cpu = NULL;
257 while (env) {
258 cpu = ENV_GET_CPU(env);
259 if (cpu->cpu_index == index) {
260 break;
262 env = env->next_cpu;
265 return env ? cpu : NULL;
268 void qemu_for_each_cpu(void (*func)(CPUState *cpu, void *data), void *data)
270 CPUArchState *env = first_cpu;
272 while (env) {
273 func(ENV_GET_CPU(env), data);
274 env = env->next_cpu;
278 void cpu_exec_init(CPUArchState *env)
280 CPUState *cpu = ENV_GET_CPU(env);
281 CPUClass *cc = CPU_GET_CLASS(cpu);
282 CPUArchState **penv;
283 int cpu_index;
285 #if defined(CONFIG_USER_ONLY)
286 cpu_list_lock();
287 #endif
288 env->next_cpu = NULL;
289 penv = &first_cpu;
290 cpu_index = 0;
291 while (*penv != NULL) {
292 penv = &(*penv)->next_cpu;
293 cpu_index++;
295 cpu->cpu_index = cpu_index;
296 cpu->numa_node = 0;
297 QTAILQ_INIT(&env->breakpoints);
298 QTAILQ_INIT(&env->watchpoints);
299 #ifndef CONFIG_USER_ONLY
300 cpu->thread_id = qemu_get_thread_id();
301 #endif
302 *penv = env;
303 #if defined(CONFIG_USER_ONLY)
304 cpu_list_unlock();
305 #endif
306 vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
307 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
308 register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
309 cpu_save, cpu_load, env);
310 assert(cc->vmsd == NULL);
311 #endif
312 if (cc->vmsd != NULL) {
313 vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
317 #if defined(TARGET_HAS_ICE)
318 #if defined(CONFIG_USER_ONLY)
319 static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
321 tb_invalidate_phys_page_range(pc, pc + 1, 0);
323 #else
324 static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
326 tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc) |
327 (pc & ~TARGET_PAGE_MASK));
329 #endif
330 #endif /* TARGET_HAS_ICE */
332 #if defined(CONFIG_USER_ONLY)
333 void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
338 int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
339 int flags, CPUWatchpoint **watchpoint)
341 return -ENOSYS;
343 #else
344 /* Add a watchpoint. */
345 int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
346 int flags, CPUWatchpoint **watchpoint)
348 target_ulong len_mask = ~(len - 1);
349 CPUWatchpoint *wp;
351 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
352 if ((len & (len - 1)) || (addr & ~len_mask) ||
353 len == 0 || len > TARGET_PAGE_SIZE) {
354 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
355 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
356 return -EINVAL;
358 wp = g_malloc(sizeof(*wp));
360 wp->vaddr = addr;
361 wp->len_mask = len_mask;
362 wp->flags = flags;
364 /* keep all GDB-injected watchpoints in front */
365 if (flags & BP_GDB)
366 QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
367 else
368 QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
370 tlb_flush_page(env, addr);
372 if (watchpoint)
373 *watchpoint = wp;
374 return 0;
377 /* Remove a specific watchpoint. */
378 int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
379 int flags)
381 target_ulong len_mask = ~(len - 1);
382 CPUWatchpoint *wp;
384 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
385 if (addr == wp->vaddr && len_mask == wp->len_mask
386 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
387 cpu_watchpoint_remove_by_ref(env, wp);
388 return 0;
391 return -ENOENT;
394 /* Remove a specific watchpoint by reference. */
395 void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
397 QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
399 tlb_flush_page(env, watchpoint->vaddr);
401 g_free(watchpoint);
404 /* Remove all matching watchpoints. */
405 void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
407 CPUWatchpoint *wp, *next;
409 QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
410 if (wp->flags & mask)
411 cpu_watchpoint_remove_by_ref(env, wp);
414 #endif
416 /* Add a breakpoint. */
417 int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
418 CPUBreakpoint **breakpoint)
420 #if defined(TARGET_HAS_ICE)
421 CPUBreakpoint *bp;
423 bp = g_malloc(sizeof(*bp));
425 bp->pc = pc;
426 bp->flags = flags;
428 /* keep all GDB-injected breakpoints in front */
429 if (flags & BP_GDB)
430 QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
431 else
432 QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
434 breakpoint_invalidate(env, pc);
436 if (breakpoint)
437 *breakpoint = bp;
438 return 0;
439 #else
440 return -ENOSYS;
441 #endif
444 /* Remove a specific breakpoint. */
445 int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
447 #if defined(TARGET_HAS_ICE)
448 CPUBreakpoint *bp;
450 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
451 if (bp->pc == pc && bp->flags == flags) {
452 cpu_breakpoint_remove_by_ref(env, bp);
453 return 0;
456 return -ENOENT;
457 #else
458 return -ENOSYS;
459 #endif
462 /* Remove a specific breakpoint by reference. */
463 void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
465 #if defined(TARGET_HAS_ICE)
466 QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
468 breakpoint_invalidate(env, breakpoint->pc);
470 g_free(breakpoint);
471 #endif
474 /* Remove all matching breakpoints. */
475 void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
477 #if defined(TARGET_HAS_ICE)
478 CPUBreakpoint *bp, *next;
480 QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
481 if (bp->flags & mask)
482 cpu_breakpoint_remove_by_ref(env, bp);
484 #endif
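/* Illustrative usage sketch (editor's addition, not part of the original file):
 * how a debugger stub might plant and later clear a breakpoint using the API
 * above.  `env` is assumed to be a valid CPUArchState obtained elsewhere.
 */
#if 0
static void example_breakpoint_cycle(CPUArchState *env, target_ulong pc)
{
    CPUBreakpoint *bp = NULL;

    if (cpu_breakpoint_insert(env, pc, BP_GDB, &bp) == 0) {
        /* ... run the guest until the CPU loop reports EXCP_DEBUG ... */
        cpu_breakpoint_remove_by_ref(env, bp);
    }
}
#endif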
487 /* enable or disable single step mode. EXCP_DEBUG is returned by the
488 CPU loop after each instruction */
489 void cpu_single_step(CPUArchState *env, int enabled)
491 #if defined(TARGET_HAS_ICE)
492 if (env->singlestep_enabled != enabled) {
493 env->singlestep_enabled = enabled;
494 if (kvm_enabled())
495 kvm_update_guest_debug(env, 0);
496 else {
497 /* must flush all the translated code to avoid inconsistencies */
498 /* XXX: only flush what is necessary */
499 tb_flush(env);
502 #endif
505 void cpu_exit(CPUArchState *env)
507 CPUState *cpu = ENV_GET_CPU(env);
509 cpu->exit_request = 1;
510 cpu->tcg_exit_req = 1;
513 void cpu_abort(CPUArchState *env, const char *fmt, ...)
515 va_list ap;
516 va_list ap2;
518 va_start(ap, fmt);
519 va_copy(ap2, ap);
520 fprintf(stderr, "qemu: fatal: ");
521 vfprintf(stderr, fmt, ap);
522 fprintf(stderr, "\n");
523 cpu_dump_state(env, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
524 if (qemu_log_enabled()) {
525 qemu_log("qemu: fatal: ");
526 qemu_log_vprintf(fmt, ap2);
527 qemu_log("\n");
528 log_cpu_state(env, CPU_DUMP_FPU | CPU_DUMP_CCOP);
529 qemu_log_flush();
530 qemu_log_close();
532 va_end(ap2);
533 va_end(ap);
534 #if defined(CONFIG_USER_ONLY)
536 struct sigaction act;
537 sigfillset(&act.sa_mask);
538 act.sa_handler = SIG_DFL;
539 sigaction(SIGABRT, &act, NULL);
541 #endif
542 abort();
545 CPUArchState *cpu_copy(CPUArchState *env)
547 CPUArchState *new_env = cpu_init(env->cpu_model_str);
548 CPUArchState *next_cpu = new_env->next_cpu;
549 #if defined(TARGET_HAS_ICE)
550 CPUBreakpoint *bp;
551 CPUWatchpoint *wp;
552 #endif
554 memcpy(new_env, env, sizeof(CPUArchState));
556 /* Preserve chaining. */
557 new_env->next_cpu = next_cpu;
559 /* Clone all break/watchpoints.
560 Note: Once we support ptrace with hw-debug register access, make sure
561 BP_CPU break/watchpoints are handled correctly on clone. */
562 QTAILQ_INIT(&env->breakpoints);
563 QTAILQ_INIT(&env->watchpoints);
564 #if defined(TARGET_HAS_ICE)
565 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
566 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
568 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
569 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
570 wp->flags, NULL);
572 #endif
574 return new_env;
577 #if !defined(CONFIG_USER_ONLY)
578 static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
579 uintptr_t length)
581 uintptr_t start1;
583 /* we modify the TLB cache so that the dirty bit will be set again
584 when accessing the range */
585 start1 = (uintptr_t)qemu_safe_ram_ptr(start);
586 /* Check that we don't span multiple blocks - this breaks the
587 address comparisons below. */
588 if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
589 != (end - 1) - start) {
590 abort();
592 cpu_tlb_reset_dirty_all(start1, length);
596 /* Note: start and end must be within the same ram block. */
597 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
598 int dirty_flags)
600 uintptr_t length;
602 start &= TARGET_PAGE_MASK;
603 end = TARGET_PAGE_ALIGN(end);
605 length = end - start;
606 if (length == 0)
607 return;
608 cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
610 if (tcg_enabled()) {
611 tlb_reset_dirty_range_all(start, end, length);
615 static int cpu_physical_memory_set_dirty_tracking(int enable)
617 int ret = 0;
618 in_migration = enable;
619 return ret;
622 hwaddr memory_region_section_get_iotlb(CPUArchState *env,
623 MemoryRegionSection *section,
624 target_ulong vaddr,
625 hwaddr paddr,
626 int prot,
627 target_ulong *address)
629 hwaddr iotlb;
630 CPUWatchpoint *wp;
632 if (memory_region_is_ram(section->mr)) {
633 /* Normal RAM. */
634 iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
635 + memory_region_section_addr(section, paddr);
636 if (!section->readonly) {
637 iotlb |= phys_section_notdirty;
638 } else {
639 iotlb |= phys_section_rom;
641 } else {
642 iotlb = section - phys_sections;
643 iotlb += memory_region_section_addr(section, paddr);
646 /* Make accesses to pages with watchpoints go via the
647 watchpoint trap routines. */
648 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
649 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
650 /* Avoid trapping reads of pages with a write breakpoint. */
651 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
652 iotlb = phys_section_watch + paddr;
653 *address |= TLB_MMIO;
654 break;
659 return iotlb;
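/* Editor's note (descriptive sketch of the encoding, not in the original):
 * for RAM the returned iotlb carries the page-aligned ram address in its
 * upper bits with a small phys_sections[] index (phys_section_notdirty or
 * phys_section_rom) OR'ed into the low bits; iotlb_to_region() later
 * recovers the MemoryRegion by masking with ~TARGET_PAGE_MASK.  Pages with
 * a matching watchpoint get phys_section_watch instead, so every access is
 * funnelled through watch_mem_ops.
 */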
661 #endif /* defined(CONFIG_USER_ONLY) */
663 #if !defined(CONFIG_USER_ONLY)
665 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
666 typedef struct subpage_t {
667 MemoryRegion iomem;
668 hwaddr base;
669 uint16_t sub_section[TARGET_PAGE_SIZE];
670 } subpage_t;
672 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
673 uint16_t section);
674 static subpage_t *subpage_init(hwaddr base);
675 static void destroy_page_desc(uint16_t section_index)
677 MemoryRegionSection *section = &phys_sections[section_index];
678 MemoryRegion *mr = section->mr;
680 if (mr->subpage) {
681 subpage_t *subpage = container_of(mr, subpage_t, iomem);
682 memory_region_destroy(&subpage->iomem);
683 g_free(subpage);
687 static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
689 unsigned i;
690 PhysPageEntry *p;
692 if (lp->ptr == PHYS_MAP_NODE_NIL) {
693 return;
696 p = phys_map_nodes[lp->ptr];
697 for (i = 0; i < L2_SIZE; ++i) {
698 if (!p[i].is_leaf) {
699 destroy_l2_mapping(&p[i], level - 1);
700 } else {
701 destroy_page_desc(p[i].ptr);
704 lp->is_leaf = 0;
705 lp->ptr = PHYS_MAP_NODE_NIL;
708 static void destroy_all_mappings(AddressSpaceDispatch *d)
710 destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
711 phys_map_nodes_reset();
714 static uint16_t phys_section_add(MemoryRegionSection *section)
716 if (phys_sections_nb == phys_sections_nb_alloc) {
717 phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
718 phys_sections = g_renew(MemoryRegionSection, phys_sections,
719 phys_sections_nb_alloc);
721 phys_sections[phys_sections_nb] = *section;
722 return phys_sections_nb++;
725 static void phys_sections_clear(void)
727 phys_sections_nb = 0;
730 static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
732 subpage_t *subpage;
733 hwaddr base = section->offset_within_address_space
734 & TARGET_PAGE_MASK;
735 MemoryRegionSection *existing = phys_page_find(d, base >> TARGET_PAGE_BITS);
736 MemoryRegionSection subsection = {
737 .offset_within_address_space = base,
738 .size = TARGET_PAGE_SIZE,
740 hwaddr start, end;
742 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
744 if (!(existing->mr->subpage)) {
745 subpage = subpage_init(base);
746 subsection.mr = &subpage->iomem;
747 phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
748 phys_section_add(&subsection));
749 } else {
750 subpage = container_of(existing->mr, subpage_t, iomem);
752 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
753 end = start + section->size - 1;
754 subpage_register(subpage, start, end, phys_section_add(section));
758 static void register_multipage(AddressSpaceDispatch *d, MemoryRegionSection *section)
760 hwaddr start_addr = section->offset_within_address_space;
761 ram_addr_t size = section->size;
762 hwaddr addr;
763 uint16_t section_index = phys_section_add(section);
765 assert(size);
767 addr = start_addr;
768 phys_page_set(d, addr >> TARGET_PAGE_BITS, size >> TARGET_PAGE_BITS,
769 section_index);
772 static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
774 AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
775 MemoryRegionSection now = *section, remain = *section;
777 if ((now.offset_within_address_space & ~TARGET_PAGE_MASK)
778 || (now.size < TARGET_PAGE_SIZE)) {
779 now.size = MIN(TARGET_PAGE_ALIGN(now.offset_within_address_space)
780 - now.offset_within_address_space,
781 now.size);
782 register_subpage(d, &now);
783 remain.size -= now.size;
784 remain.offset_within_address_space += now.size;
785 remain.offset_within_region += now.size;
787 while (remain.size >= TARGET_PAGE_SIZE) {
788 now = remain;
789 if (remain.offset_within_region & ~TARGET_PAGE_MASK) {
790 now.size = TARGET_PAGE_SIZE;
791 register_subpage(d, &now);
792 } else {
793 now.size &= TARGET_PAGE_MASK;
794 register_multipage(d, &now);
796 remain.size -= now.size;
797 remain.offset_within_address_space += now.size;
798 remain.offset_within_region += now.size;
800 now = remain;
801 if (now.size) {
802 register_subpage(d, &now);
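/* Worked example (editor's addition, assuming 4 KiB target pages and that
 * offset_within_region has the same sub-page misalignment as
 * offset_within_address_space): a section covering 0x1800..0x53ff is split by
 * mem_add() into
 *   - a subpage for 0x1800..0x1fff (unaligned head),
 *   - one register_multipage() run for 0x2000..0x4fff,
 *   - a subpage for 0x5000..0x53ff (partial tail).
 * If offset_within_region ended up misaligned after the head, the middle
 * pages would instead be registered one at a time through register_subpage().
 */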
806 void qemu_flush_coalesced_mmio_buffer(void)
808 if (kvm_enabled())
809 kvm_flush_coalesced_mmio_buffer();
812 void qemu_mutex_lock_ramlist(void)
814 qemu_mutex_lock(&ram_list.mutex);
817 void qemu_mutex_unlock_ramlist(void)
819 qemu_mutex_unlock(&ram_list.mutex);
822 #if defined(__linux__) && !defined(TARGET_S390X)
824 #include <sys/vfs.h>
826 #define HUGETLBFS_MAGIC 0x958458f6
828 static long gethugepagesize(const char *path)
830 struct statfs fs;
831 int ret;
833 do {
834 ret = statfs(path, &fs);
835 } while (ret != 0 && errno == EINTR);
837 if (ret != 0) {
838 perror(path);
839 return 0;
842 if (fs.f_type != HUGETLBFS_MAGIC)
843 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
845 return fs.f_bsize;
848 static void *file_ram_alloc(RAMBlock *block,
849 ram_addr_t memory,
850 const char *path)
852 char *filename;
853 char *sanitized_name;
854 char *c;
855 void *area;
856 int fd;
857 #ifdef MAP_POPULATE
858 int flags;
859 #endif
860 unsigned long hpagesize;
862 hpagesize = gethugepagesize(path);
863 if (!hpagesize) {
864 return NULL;
867 if (memory < hpagesize) {
868 return NULL;
871 if (kvm_enabled() && !kvm_has_sync_mmu()) {
872 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
873 return NULL;
876 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
877 sanitized_name = g_strdup(block->mr->name);
878 for (c = sanitized_name; *c != '\0'; c++) {
879 if (*c == '/')
880 *c = '_';
883 filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
884 sanitized_name);
885 g_free(sanitized_name);
887 fd = mkstemp(filename);
888 if (fd < 0) {
889 perror("unable to create backing store for hugepages");
890 g_free(filename);
891 return NULL;
893 unlink(filename);
894 g_free(filename);
896 memory = (memory+hpagesize-1) & ~(hpagesize-1);
899 * ftruncate is not supported by hugetlbfs in older
900 * hosts, so don't bother bailing out on errors.
901 * If anything goes wrong with it under other filesystems,
902 * mmap will fail.
904 if (ftruncate(fd, memory))
905 perror("ftruncate");
907 #ifdef MAP_POPULATE
908 /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
909 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
910 * to sidestep this quirk.
912 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
913 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
914 #else
915 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
916 #endif
917 if (area == MAP_FAILED) {
918 perror("file_ram_alloc: can't mmap RAM pages");
919 close(fd);
920 return (NULL);
922 block->fd = fd;
923 return area;
925 #endif
927 static ram_addr_t find_ram_offset(ram_addr_t size)
929 RAMBlock *block, *next_block;
930 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
932 assert(size != 0); /* it would hand out same offset multiple times */
934 if (QTAILQ_EMPTY(&ram_list.blocks))
935 return 0;
937 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
938 ram_addr_t end, next = RAM_ADDR_MAX;
940 end = block->offset + block->length;
942 QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
943 if (next_block->offset >= end) {
944 next = MIN(next, next_block->offset);
947 if (next - end >= size && next - end < mingap) {
948 offset = end;
949 mingap = next - end;
953 if (offset == RAM_ADDR_MAX) {
954 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
955 (uint64_t)size);
956 abort();
959 return offset;
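/* Editor's note (descriptive only): the loops above implement a best-fit
 * search.  For each existing block they compute the gap between that block's
 * end and the start of the closest following block, and keep the smallest
 * gap that still fits the requested size, so new blocks tend to fill holes
 * left by qemu_ram_free() rather than extending the ram_addr_t range.
 */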
962 ram_addr_t last_ram_offset(void)
964 RAMBlock *block;
965 ram_addr_t last = 0;
967 QTAILQ_FOREACH(block, &ram_list.blocks, next)
968 last = MAX(last, block->offset + block->length);
970 return last;
973 static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
975 int ret;
976 QemuOpts *machine_opts;
978 /* Use MADV_DONTDUMP if the user doesn't want the guest memory in the core dump */
979 machine_opts = qemu_opts_find(qemu_find_opts("machine"), 0);
980 if (machine_opts &&
981 !qemu_opt_get_bool(machine_opts, "dump-guest-core", true)) {
982 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
983 if (ret) {
984 perror("qemu_madvise");
985 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
986 "but dump_guest_core=off specified\n");
991 void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
993 RAMBlock *new_block, *block;
995 new_block = NULL;
996 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
997 if (block->offset == addr) {
998 new_block = block;
999 break;
1002 assert(new_block);
1003 assert(!new_block->idstr[0]);
1005 if (dev) {
1006 char *id = qdev_get_dev_path(dev);
1007 if (id) {
1008 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
1009 g_free(id);
1012 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1014 /* This assumes the iothread lock is taken here too. */
1015 qemu_mutex_lock_ramlist();
1016 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1017 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
1018 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1019 new_block->idstr);
1020 abort();
1023 qemu_mutex_unlock_ramlist();
1026 static int memory_try_enable_merging(void *addr, size_t len)
1028 QemuOpts *opts;
1030 opts = qemu_opts_find(qemu_find_opts("machine"), 0);
1031 if (opts && !qemu_opt_get_bool(opts, "mem-merge", true)) {
1032 /* disabled by the user */
1033 return 0;
1036 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1039 ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
1040 MemoryRegion *mr)
1042 RAMBlock *block, *new_block;
1044 size = TARGET_PAGE_ALIGN(size);
1045 new_block = g_malloc0(sizeof(*new_block));
1047 /* This assumes the iothread lock is taken here too. */
1048 qemu_mutex_lock_ramlist();
1049 new_block->mr = mr;
1050 new_block->offset = find_ram_offset(size);
1051 if (host) {
1052 new_block->host = host;
1053 new_block->flags |= RAM_PREALLOC_MASK;
1054 } else {
1055 if (mem_path) {
1056 #if defined (__linux__) && !defined(TARGET_S390X)
1057 new_block->host = file_ram_alloc(new_block, size, mem_path);
1058 if (!new_block->host) {
1059 new_block->host = qemu_anon_ram_alloc(size);
1060 memory_try_enable_merging(new_block->host, size);
1062 #else
1063 fprintf(stderr, "-mem-path option unsupported\n");
1064 exit(1);
1065 #endif
1066 } else {
1067 if (xen_enabled()) {
1068 xen_ram_alloc(new_block->offset, size, mr);
1069 } else if (kvm_enabled()) {
1070 /* some s390/kvm configurations have special constraints */
1071 new_block->host = kvm_ram_alloc(size);
1072 } else {
1073 new_block->host = qemu_anon_ram_alloc(size);
1075 memory_try_enable_merging(new_block->host, size);
1078 new_block->length = size;
1080 /* Keep the list sorted from biggest to smallest block. */
1081 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1082 if (block->length < new_block->length) {
1083 break;
1086 if (block) {
1087 QTAILQ_INSERT_BEFORE(block, new_block, next);
1088 } else {
1089 QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
1091 ram_list.mru_block = NULL;
1093 ram_list.version++;
1094 qemu_mutex_unlock_ramlist();
1096 ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
1097 last_ram_offset() >> TARGET_PAGE_BITS);
1098 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
1099 0, size >> TARGET_PAGE_BITS);
1100 cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);
1102 qemu_ram_setup_dump(new_block->host, size);
1103 qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);
1105 if (kvm_enabled())
1106 kvm_setup_guest_memory(new_block->host, size);
1108 return new_block->offset;
1111 ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
1113 return qemu_ram_alloc_from_ptr(size, NULL, mr);
1116 void qemu_ram_free_from_ptr(ram_addr_t addr)
1118 RAMBlock *block;
1120 /* This assumes the iothread lock is taken here too. */
1121 qemu_mutex_lock_ramlist();
1122 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1123 if (addr == block->offset) {
1124 QTAILQ_REMOVE(&ram_list.blocks, block, next);
1125 ram_list.mru_block = NULL;
1126 ram_list.version++;
1127 g_free(block);
1128 break;
1131 qemu_mutex_unlock_ramlist();
1134 void qemu_ram_free(ram_addr_t addr)
1136 RAMBlock *block;
1138 /* This assumes the iothread lock is taken here too. */
1139 qemu_mutex_lock_ramlist();
1140 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1141 if (addr == block->offset) {
1142 QTAILQ_REMOVE(&ram_list.blocks, block, next);
1143 ram_list.mru_block = NULL;
1144 ram_list.version++;
1145 if (block->flags & RAM_PREALLOC_MASK) {
1147 } else if (mem_path) {
1148 #if defined (__linux__) && !defined(TARGET_S390X)
1149 if (block->fd) {
1150 munmap(block->host, block->length);
1151 close(block->fd);
1152 } else {
1153 qemu_anon_ram_free(block->host, block->length);
1155 #else
1156 abort();
1157 #endif
1158 } else {
1159 if (xen_enabled()) {
1160 xen_invalidate_map_cache_entry(block->host);
1161 } else {
1162 qemu_anon_ram_free(block->host, block->length);
1165 g_free(block);
1166 break;
1169 qemu_mutex_unlock_ramlist();
1173 #ifndef _WIN32
1174 void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1176 RAMBlock *block;
1177 ram_addr_t offset;
1178 int flags;
1179 void *area, *vaddr;
1181 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1182 offset = addr - block->offset;
1183 if (offset < block->length) {
1184 vaddr = block->host + offset;
1185 if (block->flags & RAM_PREALLOC_MASK) {
1187 } else {
1188 flags = MAP_FIXED;
1189 munmap(vaddr, length);
1190 if (mem_path) {
1191 #if defined(__linux__) && !defined(TARGET_S390X)
1192 if (block->fd) {
1193 #ifdef MAP_POPULATE
1194 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
1195 MAP_PRIVATE;
1196 #else
1197 flags |= MAP_PRIVATE;
1198 #endif
1199 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1200 flags, block->fd, offset);
1201 } else {
1202 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1203 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1204 flags, -1, 0);
1206 #else
1207 abort();
1208 #endif
1209 } else {
1210 #if defined(TARGET_S390X) && defined(CONFIG_KVM)
1211 flags |= MAP_SHARED | MAP_ANONYMOUS;
1212 area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
1213 flags, -1, 0);
1214 #else
1215 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1216 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1217 flags, -1, 0);
1218 #endif
1220 if (area != vaddr) {
1221 fprintf(stderr, "Could not remap addr: "
1222 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
1223 length, addr);
1224 exit(1);
1226 memory_try_enable_merging(vaddr, length);
1227 qemu_ram_setup_dump(vaddr, length);
1229 return;
1233 #endif /* !_WIN32 */
1235 /* Return a host pointer to ram allocated with qemu_ram_alloc.
1236 With the exception of the softmmu code in this file, this should
1237 only be used for local memory (e.g. video ram) that the device owns,
1238 and knows it isn't going to access beyond the end of the block.
1240 It should not be used for general purpose DMA.
1241 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
1243 void *qemu_get_ram_ptr(ram_addr_t addr)
1245 RAMBlock *block;
1247 /* The list is protected by the iothread lock here. */
1248 block = ram_list.mru_block;
1249 if (block && addr - block->offset < block->length) {
1250 goto found;
1252 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1253 if (addr - block->offset < block->length) {
1254 goto found;
1258 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1259 abort();
1261 found:
1262 ram_list.mru_block = block;
1263 if (xen_enabled()) {
1264 /* We need to check whether the requested address is in RAM
1265 * because we don't want to map all of the guest memory in QEMU.
1266 * In that case, just map up to the end of the page.
1268 if (block->offset == 0) {
1269 return xen_map_cache(addr, 0, 0);
1270 } else if (block->host == NULL) {
1271 block->host =
1272 xen_map_cache(block->offset, block->length, 1);
1275 return block->host + (addr - block->offset);
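/* Illustrative sketch (editor's addition): per the comment above, a device
 * that owns the block (e.g. video RAM it allocated itself with
 * qemu_ram_alloc()) may use the returned host pointer directly, whereas
 * guest-driven DMA should go through cpu_physical_memory_rw() or the
 * map/unmap API.  `vram_offset`/`vram_size` are hypothetical.
 */
#if 0
static void example_vram_fill(ram_addr_t vram_offset, size_t vram_size)
{
    uint8_t *vram = qemu_get_ram_ptr(vram_offset);

    memset(vram, 0, vram_size);   /* device-local access, bounds known */
}
#endif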
1278 /* Return a host pointer to ram allocated with qemu_ram_alloc. Same as
1279 * qemu_get_ram_ptr but do not touch ram_list.mru_block.
1281 * ??? Is this still necessary?
1283 static void *qemu_safe_ram_ptr(ram_addr_t addr)
1285 RAMBlock *block;
1287 /* The list is protected by the iothread lock here. */
1288 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1289 if (addr - block->offset < block->length) {
1290 if (xen_enabled()) {
1291 /* We need to check whether the requested address is in RAM
1292 * because we don't want to map all of the guest memory in QEMU.
1293 * In that case, just map up to the end of the page.
1295 if (block->offset == 0) {
1296 return xen_map_cache(addr, 0, 0);
1297 } else if (block->host == NULL) {
1298 block->host =
1299 xen_map_cache(block->offset, block->length, 1);
1302 return block->host + (addr - block->offset);
1306 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1307 abort();
1309 return NULL;
1312 /* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
1313 * but takes a size argument */
1314 static void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
1316 if (*size == 0) {
1317 return NULL;
1319 if (xen_enabled()) {
1320 return xen_map_cache(addr, *size, 1);
1321 } else {
1322 RAMBlock *block;
1324 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1325 if (addr - block->offset < block->length) {
1326 if (addr - block->offset + *size > block->length)
1327 *size = block->length - addr + block->offset;
1328 return block->host + (addr - block->offset);
1332 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1333 abort();
1337 int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
1339 RAMBlock *block;
1340 uint8_t *host = ptr;
1342 if (xen_enabled()) {
1343 *ram_addr = xen_ram_addr_from_mapcache(ptr);
1344 return 0;
1347 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1348 /* This case happens when the block is not mapped. */
1349 if (block->host == NULL) {
1350 continue;
1352 if (host - block->host < block->length) {
1353 *ram_addr = block->offset + (host - block->host);
1354 return 0;
1358 return -1;
1361 /* Some of the softmmu routines need to translate from a host pointer
1362 (typically a TLB entry) back to a ram offset. */
1363 ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
1365 ram_addr_t ram_addr;
1367 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
1368 fprintf(stderr, "Bad ram pointer %p\n", ptr);
1369 abort();
1371 return ram_addr;
1374 static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
1375 unsigned size)
1377 #ifdef DEBUG_UNASSIGNED
1378 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
1379 #endif
1380 #if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
1381 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
1382 #endif
1383 return 0;
1386 static void unassigned_mem_write(void *opaque, hwaddr addr,
1387 uint64_t val, unsigned size)
1389 #ifdef DEBUG_UNASSIGNED
1390 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
1391 #endif
1392 #if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
1393 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
1394 #endif
1397 static const MemoryRegionOps unassigned_mem_ops = {
1398 .read = unassigned_mem_read,
1399 .write = unassigned_mem_write,
1400 .endianness = DEVICE_NATIVE_ENDIAN,
1403 static uint64_t error_mem_read(void *opaque, hwaddr addr,
1404 unsigned size)
1406 abort();
1409 static void error_mem_write(void *opaque, hwaddr addr,
1410 uint64_t value, unsigned size)
1412 abort();
1415 static const MemoryRegionOps error_mem_ops = {
1416 .read = error_mem_read,
1417 .write = error_mem_write,
1418 .endianness = DEVICE_NATIVE_ENDIAN,
1421 static const MemoryRegionOps rom_mem_ops = {
1422 .read = error_mem_read,
1423 .write = unassigned_mem_write,
1424 .endianness = DEVICE_NATIVE_ENDIAN,
1427 static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
1428 uint64_t val, unsigned size)
1430 int dirty_flags;
1431 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
1432 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
1433 #if !defined(CONFIG_USER_ONLY)
1434 tb_invalidate_phys_page_fast(ram_addr, size);
1435 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
1436 #endif
1438 switch (size) {
1439 case 1:
1440 stb_p(qemu_get_ram_ptr(ram_addr), val);
1441 break;
1442 case 2:
1443 stw_p(qemu_get_ram_ptr(ram_addr), val);
1444 break;
1445 case 4:
1446 stl_p(qemu_get_ram_ptr(ram_addr), val);
1447 break;
1448 default:
1449 abort();
1451 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
1452 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
1453 /* we remove the notdirty callback only if the code has been
1454 flushed */
1455 if (dirty_flags == 0xff)
1456 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
1459 static const MemoryRegionOps notdirty_mem_ops = {
1460 .read = error_mem_read,
1461 .write = notdirty_mem_write,
1462 .endianness = DEVICE_NATIVE_ENDIAN,
1465 /* Generate a debug exception if a watchpoint has been hit. */
1466 static void check_watchpoint(int offset, int len_mask, int flags)
1468 CPUArchState *env = cpu_single_env;
1469 target_ulong pc, cs_base;
1470 target_ulong vaddr;
1471 CPUWatchpoint *wp;
1472 int cpu_flags;
1474 if (env->watchpoint_hit) {
1475 /* We re-entered the check after replacing the TB. Now raise
1476 * the debug interrupt so that it will trigger after the
1477 * current instruction. */
1478 cpu_interrupt(ENV_GET_CPU(env), CPU_INTERRUPT_DEBUG);
1479 return;
1481 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
1482 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1483 if ((vaddr == (wp->vaddr & len_mask) ||
1484 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
1485 wp->flags |= BP_WATCHPOINT_HIT;
1486 if (!env->watchpoint_hit) {
1487 env->watchpoint_hit = wp;
1488 tb_check_watchpoint(env);
1489 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
1490 env->exception_index = EXCP_DEBUG;
1491 cpu_loop_exit(env);
1492 } else {
1493 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
1494 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
1495 cpu_resume_from_signal(env, NULL);
1498 } else {
1499 wp->flags &= ~BP_WATCHPOINT_HIT;
1504 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
1505 so these check for a hit then pass through to the normal out-of-line
1506 phys routines. */
1507 static uint64_t watch_mem_read(void *opaque, hwaddr addr,
1508 unsigned size)
1510 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
1511 switch (size) {
1512 case 1: return ldub_phys(addr);
1513 case 2: return lduw_phys(addr);
1514 case 4: return ldl_phys(addr);
1515 default: abort();
1519 static void watch_mem_write(void *opaque, hwaddr addr,
1520 uint64_t val, unsigned size)
1522 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
1523 switch (size) {
1524 case 1:
1525 stb_phys(addr, val);
1526 break;
1527 case 2:
1528 stw_phys(addr, val);
1529 break;
1530 case 4:
1531 stl_phys(addr, val);
1532 break;
1533 default: abort();
1537 static const MemoryRegionOps watch_mem_ops = {
1538 .read = watch_mem_read,
1539 .write = watch_mem_write,
1540 .endianness = DEVICE_NATIVE_ENDIAN,
1543 static uint64_t subpage_read(void *opaque, hwaddr addr,
1544 unsigned len)
1546 subpage_t *mmio = opaque;
1547 unsigned int idx = SUBPAGE_IDX(addr);
1548 MemoryRegionSection *section;
1549 #if defined(DEBUG_SUBPAGE)
1550 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
1551 mmio, len, addr, idx);
1552 #endif
1554 section = &phys_sections[mmio->sub_section[idx]];
1555 addr += mmio->base;
1556 addr -= section->offset_within_address_space;
1557 addr += section->offset_within_region;
1558 return io_mem_read(section->mr, addr, len);
1561 static void subpage_write(void *opaque, hwaddr addr,
1562 uint64_t value, unsigned len)
1564 subpage_t *mmio = opaque;
1565 unsigned int idx = SUBPAGE_IDX(addr);
1566 MemoryRegionSection *section;
1567 #if defined(DEBUG_SUBPAGE)
1568 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
1569 " idx %d value %"PRIx64"\n",
1570 __func__, mmio, len, addr, idx, value);
1571 #endif
1573 section = &phys_sections[mmio->sub_section[idx]];
1574 addr += mmio->base;
1575 addr -= section->offset_within_address_space;
1576 addr += section->offset_within_region;
1577 io_mem_write(section->mr, addr, value, len);
1580 static const MemoryRegionOps subpage_ops = {
1581 .read = subpage_read,
1582 .write = subpage_write,
1583 .endianness = DEVICE_NATIVE_ENDIAN,
1586 static uint64_t subpage_ram_read(void *opaque, hwaddr addr,
1587 unsigned size)
1589 ram_addr_t raddr = addr;
1590 void *ptr = qemu_get_ram_ptr(raddr);
1591 switch (size) {
1592 case 1: return ldub_p(ptr);
1593 case 2: return lduw_p(ptr);
1594 case 4: return ldl_p(ptr);
1595 default: abort();
1599 static void subpage_ram_write(void *opaque, hwaddr addr,
1600 uint64_t value, unsigned size)
1602 ram_addr_t raddr = addr;
1603 void *ptr = qemu_get_ram_ptr(raddr);
1604 switch (size) {
1605 case 1: return stb_p(ptr, value);
1606 case 2: return stw_p(ptr, value);
1607 case 4: return stl_p(ptr, value);
1608 default: abort();
1612 static const MemoryRegionOps subpage_ram_ops = {
1613 .read = subpage_ram_read,
1614 .write = subpage_ram_write,
1615 .endianness = DEVICE_NATIVE_ENDIAN,
1618 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
1619 uint16_t section)
1621 int idx, eidx;
1623 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
1624 return -1;
1625 idx = SUBPAGE_IDX(start);
1626 eidx = SUBPAGE_IDX(end);
1627 #if defined(DEBUG_SUBPAGE)
1628 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
1629 mmio, start, end, idx, eidx, memory);
1630 #endif
1631 if (memory_region_is_ram(phys_sections[section].mr)) {
1632 MemoryRegionSection new_section = phys_sections[section];
1633 new_section.mr = &io_mem_subpage_ram;
1634 section = phys_section_add(&new_section);
1636 for (; idx <= eidx; idx++) {
1637 mmio->sub_section[idx] = section;
1640 return 0;
1643 static subpage_t *subpage_init(hwaddr base)
1645 subpage_t *mmio;
1647 mmio = g_malloc0(sizeof(subpage_t));
1649 mmio->base = base;
1650 memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
1651 "subpage", TARGET_PAGE_SIZE);
1652 mmio->iomem.subpage = true;
1653 #if defined(DEBUG_SUBPAGE)
1654 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
1655 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
1656 #endif
1657 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned);
1659 return mmio;
1662 static uint16_t dummy_section(MemoryRegion *mr)
1664 MemoryRegionSection section = {
1665 .mr = mr,
1666 .offset_within_address_space = 0,
1667 .offset_within_region = 0,
1668 .size = UINT64_MAX,
1671 return phys_section_add(&section);
1674 MemoryRegion *iotlb_to_region(hwaddr index)
1676 return phys_sections[index & ~TARGET_PAGE_MASK].mr;
1679 static void io_mem_init(void)
1681 memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
1682 memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
1683 memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
1684 "unassigned", UINT64_MAX);
1685 memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
1686 "notdirty", UINT64_MAX);
1687 memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
1688 "subpage-ram", UINT64_MAX);
1689 memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
1690 "watch", UINT64_MAX);
1693 static void mem_begin(MemoryListener *listener)
1695 AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
1697 destroy_all_mappings(d);
1698 d->phys_map.ptr = PHYS_MAP_NODE_NIL;
1701 static void core_begin(MemoryListener *listener)
1703 phys_sections_clear();
1704 phys_section_unassigned = dummy_section(&io_mem_unassigned);
1705 phys_section_notdirty = dummy_section(&io_mem_notdirty);
1706 phys_section_rom = dummy_section(&io_mem_rom);
1707 phys_section_watch = dummy_section(&io_mem_watch);
1710 static void tcg_commit(MemoryListener *listener)
1712 CPUArchState *env;
1714 /* since each CPU stores ram addresses in its TLB cache, we must
1715 reset the modified entries */
1716 /* XXX: slow ! */
1717 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1718 tlb_flush(env, 1);
1722 static void core_log_global_start(MemoryListener *listener)
1724 cpu_physical_memory_set_dirty_tracking(1);
1727 static void core_log_global_stop(MemoryListener *listener)
1729 cpu_physical_memory_set_dirty_tracking(0);
1732 static void io_region_add(MemoryListener *listener,
1733 MemoryRegionSection *section)
1735 MemoryRegionIORange *mrio = g_new(MemoryRegionIORange, 1);
1737 mrio->mr = section->mr;
1738 mrio->offset = section->offset_within_region;
1739 iorange_init(&mrio->iorange, &memory_region_iorange_ops,
1740 section->offset_within_address_space, section->size);
1741 ioport_register(&mrio->iorange);
1744 static void io_region_del(MemoryListener *listener,
1745 MemoryRegionSection *section)
1747 isa_unassign_ioport(section->offset_within_address_space, section->size);
1750 static MemoryListener core_memory_listener = {
1751 .begin = core_begin,
1752 .log_global_start = core_log_global_start,
1753 .log_global_stop = core_log_global_stop,
1754 .priority = 1,
1757 static MemoryListener io_memory_listener = {
1758 .region_add = io_region_add,
1759 .region_del = io_region_del,
1760 .priority = 0,
1763 static MemoryListener tcg_memory_listener = {
1764 .commit = tcg_commit,
1767 void address_space_init_dispatch(AddressSpace *as)
1769 AddressSpaceDispatch *d = g_new(AddressSpaceDispatch, 1);
1771 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };
1772 d->listener = (MemoryListener) {
1773 .begin = mem_begin,
1774 .region_add = mem_add,
1775 .region_nop = mem_add,
1776 .priority = 0,
1778 as->dispatch = d;
1779 memory_listener_register(&d->listener, as);
1782 void address_space_destroy_dispatch(AddressSpace *as)
1784 AddressSpaceDispatch *d = as->dispatch;
1786 memory_listener_unregister(&d->listener);
1787 destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
1788 g_free(d);
1789 as->dispatch = NULL;
1792 static void memory_map_init(void)
1794 system_memory = g_malloc(sizeof(*system_memory));
1795 memory_region_init(system_memory, "system", INT64_MAX);
1796 address_space_init(&address_space_memory, system_memory);
1797 address_space_memory.name = "memory";
1799 system_io = g_malloc(sizeof(*system_io));
1800 memory_region_init(system_io, "io", 65536);
1801 address_space_init(&address_space_io, system_io);
1802 address_space_io.name = "I/O";
1804 memory_listener_register(&core_memory_listener, &address_space_memory);
1805 memory_listener_register(&io_memory_listener, &address_space_io);
1806 memory_listener_register(&tcg_memory_listener, &address_space_memory);
1808 dma_context_init(&dma_context_memory, &address_space_memory,
1809 NULL, NULL, NULL);
1812 MemoryRegion *get_system_memory(void)
1814 return system_memory;
1817 MemoryRegion *get_system_io(void)
1819 return system_io;
1822 #endif /* !defined(CONFIG_USER_ONLY) */
1824 /* physical memory access (slow version, mainly for debug) */
1825 #if defined(CONFIG_USER_ONLY)
1826 int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
1827 uint8_t *buf, int len, int is_write)
1829 int l, flags;
1830 target_ulong page;
1831 void * p;
1833 while (len > 0) {
1834 page = addr & TARGET_PAGE_MASK;
1835 l = (page + TARGET_PAGE_SIZE) - addr;
1836 if (l > len)
1837 l = len;
1838 flags = page_get_flags(page);
1839 if (!(flags & PAGE_VALID))
1840 return -1;
1841 if (is_write) {
1842 if (!(flags & PAGE_WRITE))
1843 return -1;
1844 /* XXX: this code should not depend on lock_user */
1845 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
1846 return -1;
1847 memcpy(p, buf, l);
1848 unlock_user(p, addr, l);
1849 } else {
1850 if (!(flags & PAGE_READ))
1851 return -1;
1852 /* XXX: this code should not depend on lock_user */
1853 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
1854 return -1;
1855 memcpy(buf, p, l);
1856 unlock_user(p, addr, 0);
1858 len -= l;
1859 buf += l;
1860 addr += l;
1862 return 0;
1865 #else
1867 static void invalidate_and_set_dirty(hwaddr addr,
1868 hwaddr length)
1870 if (!cpu_physical_memory_is_dirty(addr)) {
1871 /* invalidate code */
1872 tb_invalidate_phys_page_range(addr, addr + length, 0);
1873 /* set dirty bit */
1874 cpu_physical_memory_set_dirty_flags(addr, (0xff & ~CODE_DIRTY_FLAG));
1876 xen_modified_memory(addr, length);
1879 void address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
1880 int len, bool is_write)
1882 AddressSpaceDispatch *d = as->dispatch;
1883 int l;
1884 uint8_t *ptr;
1885 uint32_t val;
1886 hwaddr page;
1887 MemoryRegionSection *section;
1889 while (len > 0) {
1890 page = addr & TARGET_PAGE_MASK;
1891 l = (page + TARGET_PAGE_SIZE) - addr;
1892 if (l > len)
1893 l = len;
1894 section = phys_page_find(d, page >> TARGET_PAGE_BITS);
1896 if (is_write) {
1897 if (!memory_region_is_ram(section->mr)) {
1898 hwaddr addr1;
1899 addr1 = memory_region_section_addr(section, addr);
1900 /* XXX: could force cpu_single_env to NULL to avoid
1901 potential bugs */
1902 if (l >= 4 && ((addr1 & 3) == 0)) {
1903 /* 32 bit write access */
1904 val = ldl_p(buf);
1905 io_mem_write(section->mr, addr1, val, 4);
1906 l = 4;
1907 } else if (l >= 2 && ((addr1 & 1) == 0)) {
1908 /* 16 bit write access */
1909 val = lduw_p(buf);
1910 io_mem_write(section->mr, addr1, val, 2);
1911 l = 2;
1912 } else {
1913 /* 8 bit write access */
1914 val = ldub_p(buf);
1915 io_mem_write(section->mr, addr1, val, 1);
1916 l = 1;
1918 } else if (!section->readonly) {
1919 ram_addr_t addr1;
1920 addr1 = memory_region_get_ram_addr(section->mr)
1921 + memory_region_section_addr(section, addr);
1922 /* RAM case */
1923 ptr = qemu_get_ram_ptr(addr1);
1924 memcpy(ptr, buf, l);
1925 invalidate_and_set_dirty(addr1, l);
1927 } else {
1928 if (!(memory_region_is_ram(section->mr) ||
1929 memory_region_is_romd(section->mr))) {
1930 hwaddr addr1;
1931 /* I/O case */
1932 addr1 = memory_region_section_addr(section, addr);
1933 if (l >= 4 && ((addr1 & 3) == 0)) {
1934 /* 32 bit read access */
1935 val = io_mem_read(section->mr, addr1, 4);
1936 stl_p(buf, val);
1937 l = 4;
1938 } else if (l >= 2 && ((addr1 & 1) == 0)) {
1939 /* 16 bit read access */
1940 val = io_mem_read(section->mr, addr1, 2);
1941 stw_p(buf, val);
1942 l = 2;
1943 } else {
1944 /* 8 bit read access */
1945 val = io_mem_read(section->mr, addr1, 1);
1946 stb_p(buf, val);
1947 l = 1;
1949 } else {
1950 /* RAM case */
1951 ptr = qemu_get_ram_ptr(section->mr->ram_addr
1952 + memory_region_section_addr(section,
1953 addr));
1954 memcpy(buf, ptr, l);
1957 len -= l;
1958 buf += l;
1959 addr += l;
1963 void address_space_write(AddressSpace *as, hwaddr addr,
1964 const uint8_t *buf, int len)
1966 address_space_rw(as, addr, (uint8_t *)buf, len, true);
1970 * address_space_read: read from an address space.
1972 * @as: #AddressSpace to be accessed
1973 * @addr: address within that address space
1974 * @buf: buffer with the data transferred
1976 void address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
1978 address_space_rw(as, addr, buf, len, false);
1982 void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
1983 int len, int is_write)
1985 return address_space_rw(&address_space_memory, addr, buf, len, is_write);
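/* Illustrative usage sketch (editor's addition, not part of the original
 * file): reading a 32-bit little-endian value from guest physical memory
 * through the slow path above.  `gpa` is a hypothetical guest physical
 * address supplied by the caller.
 */
#if 0
static uint32_t example_read_u32(hwaddr gpa)
{
    uint8_t buf[4];

    cpu_physical_memory_rw(gpa, buf, sizeof(buf), 0 /* read */);
    return ldl_le_p(buf);
}
#endif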
1988 /* used for ROM loading : can write in RAM and ROM */
1989 void cpu_physical_memory_write_rom(hwaddr addr,
1990 const uint8_t *buf, int len)
1992 AddressSpaceDispatch *d = address_space_memory.dispatch;
1993 int l;
1994 uint8_t *ptr;
1995 hwaddr page;
1996 MemoryRegionSection *section;
1998 while (len > 0) {
1999 page = addr & TARGET_PAGE_MASK;
2000 l = (page + TARGET_PAGE_SIZE) - addr;
2001 if (l > len)
2002 l = len;
2003 section = phys_page_find(d, page >> TARGET_PAGE_BITS);
2005 if (!(memory_region_is_ram(section->mr) ||
2006 memory_region_is_romd(section->mr))) {
2007 /* do nothing */
2008 } else {
2009 unsigned long addr1;
2010 addr1 = memory_region_get_ram_addr(section->mr)
2011 + memory_region_section_addr(section, addr);
2012 /* ROM/RAM case */
2013 ptr = qemu_get_ram_ptr(addr1);
2014 memcpy(ptr, buf, l);
2015 invalidate_and_set_dirty(addr1, l);
2017 len -= l;
2018 buf += l;
2019 addr += l;
2023 typedef struct {
2024 void *buffer;
2025 hwaddr addr;
2026 hwaddr len;
2027 } BounceBuffer;
2029 static BounceBuffer bounce;
2031 typedef struct MapClient {
2032 void *opaque;
2033 void (*callback)(void *opaque);
2034 QLIST_ENTRY(MapClient) link;
2035 } MapClient;
2037 static QLIST_HEAD(map_client_list, MapClient) map_client_list
2038 = QLIST_HEAD_INITIALIZER(map_client_list);
2040 void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
2042 MapClient *client = g_malloc(sizeof(*client));
2044 client->opaque = opaque;
2045 client->callback = callback;
2046 QLIST_INSERT_HEAD(&map_client_list, client, link);
2047 return client;
2050 static void cpu_unregister_map_client(void *_client)
2052 MapClient *client = (MapClient *)_client;
2054 QLIST_REMOVE(client, link);
2055 g_free(client);
2058 static void cpu_notify_map_clients(void)
2060 MapClient *client;
2062 while (!QLIST_EMPTY(&map_client_list)) {
2063 client = QLIST_FIRST(&map_client_list);
2064 client->callback(client->opaque);
2065 cpu_unregister_map_client(client);
2069 /* Map a physical memory region into a host virtual address.
2070 * May map a subset of the requested range, given by and returned in *plen.
2071 * May return NULL if resources needed to perform the mapping are exhausted.
2072 * Use only for reads OR writes - not for read-modify-write operations.
2073 * Use cpu_register_map_client() to know when retrying the map operation is
2074 * likely to succeed.
2076 void *address_space_map(AddressSpace *as,
2077 hwaddr addr,
2078 hwaddr *plen,
2079 bool is_write)
2081 AddressSpaceDispatch *d = as->dispatch;
2082 hwaddr len = *plen;
2083 hwaddr todo = 0;
2084 int l;
2085 hwaddr page;
2086 MemoryRegionSection *section;
2087 ram_addr_t raddr = RAM_ADDR_MAX;
2088 ram_addr_t rlen;
2089 void *ret;
2091 while (len > 0) {
2092 page = addr & TARGET_PAGE_MASK;
2093 l = (page + TARGET_PAGE_SIZE) - addr;
2094 if (l > len)
2095 l = len;
2096 section = phys_page_find(d, page >> TARGET_PAGE_BITS);
2098 if (!(memory_region_is_ram(section->mr) && !section->readonly)) {
2099 if (todo || bounce.buffer) {
2100 break;
2102 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
2103 bounce.addr = addr;
2104 bounce.len = l;
2105 if (!is_write) {
2106 address_space_read(as, addr, bounce.buffer, l);
2109 *plen = l;
2110 return bounce.buffer;
2112 if (!todo) {
2113 raddr = memory_region_get_ram_addr(section->mr)
2114 + memory_region_section_addr(section, addr);
2117 len -= l;
2118 addr += l;
2119 todo += l;
2121 rlen = todo;
2122 ret = qemu_ram_ptr_length(raddr, &rlen);
2123 *plen = rlen;
2124 return ret;
2127 /* Unmaps a memory region previously mapped by address_space_map().
2128 * Will also mark the memory as dirty if is_write == 1. access_len gives
2129 * the amount of memory that was actually read or written by the caller.
2131 void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2132 int is_write, hwaddr access_len)
2134 if (buffer != bounce.buffer) {
2135 if (is_write) {
2136 ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
2137 while (access_len) {
2138 unsigned l;
2139 l = TARGET_PAGE_SIZE;
2140 if (l > access_len)
2141 l = access_len;
2142 invalidate_and_set_dirty(addr1, l);
2143 addr1 += l;
2144 access_len -= l;
2147 if (xen_enabled()) {
2148 xen_invalidate_map_cache_entry(buffer);
2150 return;
2152 if (is_write) {
2153 address_space_write(as, bounce.addr, bounce.buffer, access_len);
2155 qemu_vfree(bounce.buffer);
2156 bounce.buffer = NULL;
2157 cpu_notify_map_clients();
2160 void *cpu_physical_memory_map(hwaddr addr,
2161 hwaddr *plen,
2162 int is_write)
2164 return address_space_map(&address_space_memory, addr, plen, is_write);
2167 void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2168 int is_write, hwaddr access_len)
2170 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
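/* Illustrative usage sketch (editor's addition): a zero-copy read of guest
 * memory with the map/unmap API wrapped above.  A NULL return (for example
 * when the single bounce buffer is busy) must be handled by the caller,
 * typically by retrying via cpu_register_map_client().
 */
#if 0
static void example_map_read(hwaddr gpa, hwaddr len)
{
    hwaddr plen = len;
    void *host = cpu_physical_memory_map(gpa, &plen, 0 /* read */);

    if (host) {
        /* ... consume up to plen bytes at host ... */
        cpu_physical_memory_unmap(host, plen, 0 /* read */, plen);
    }
}
#endif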

/* warning: addr must be aligned */
static inline uint32_t ldl_phys_internal(hwaddr addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint32_t val;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch,
                             addr >> TARGET_PAGE_BITS);

    if (!(memory_region_is_ram(section->mr) ||
          memory_region_is_romd(section->mr))) {
        /* I/O case */
        addr = memory_region_section_addr(section, addr);
        val = io_mem_read(section->mr, addr, 4);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t ldl_phys(hwaddr addr)
{
    return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t ldl_le_phys(hwaddr addr)
{
    return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t ldl_be_phys(hwaddr addr)
{
    return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
}
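
/*
 * Sketch illustrating the alignment requirement stated above: ldl_phys() and
 * its _le/_be variants expect a 4-byte-aligned address, which also keeps the
 * access within a single page.  A hypothetical caller can make that
 * assumption explicit with an assertion before handing the address over.
 */
static uint32_t ldl_phys_checked_sketch(hwaddr addr)
{
    assert((addr & 3) == 0);    /* the accessors do not handle misalignment */
    return ldl_phys(addr);
}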

/* warning: addr must be aligned */
static inline uint64_t ldq_phys_internal(hwaddr addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch,
                             addr >> TARGET_PAGE_BITS);

    if (!(memory_region_is_ram(section->mr) ||
          memory_region_is_romd(section->mr))) {
        /* I/O case */
        addr = memory_region_section_addr(section, addr);

        /* XXX This is broken when device endian != cpu endian.
           Fix and add "endian" variable check */
#ifdef TARGET_WORDS_BIGENDIAN
        val = io_mem_read(section->mr, addr, 4) << 32;
        val |= io_mem_read(section->mr, addr + 4, 4);
#else
        val = io_mem_read(section->mr, addr, 4);
        val |= io_mem_read(section->mr, addr + 4, 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
    }
    return val;
}

uint64_t ldq_phys(hwaddr addr)
{
    return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint64_t ldq_le_phys(hwaddr addr)
{
    return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint64_t ldq_be_phys(hwaddr addr)
{
    return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
uint32_t ldub_phys(hwaddr addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* warning: addr must be aligned */
static inline uint32_t lduw_phys_internal(hwaddr addr,
                                          enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch,
                             addr >> TARGET_PAGE_BITS);

    if (!(memory_region_is_ram(section->mr) ||
          memory_region_is_romd(section->mr))) {
        /* I/O case */
        addr = memory_region_section_addr(section, addr);
        val = io_mem_read(section->mr, addr, 2);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t lduw_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t lduw_le_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t lduw_be_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* warning: addr must be aligned. The RAM page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(hwaddr addr, uint32_t val)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch,
                             addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
        io_mem_write(section->mr, addr, val, 4);
    } else {
        unsigned long addr1 = (memory_region_get_ram_addr(section->mr)
                               & TARGET_PAGE_MASK)
                              + memory_region_section_addr(section, addr);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}
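
/*
 * Sketch of the use case mentioned above: a target MMU helper that sets
 * accessed/dirty bits in a 32-bit page table entry should not trip the dirty
 * tracking that the target uses to detect guest modifications of its page
 * tables, so it writes the PTE back with stl_phys_notdirty().  The PTE bit
 * layout and the helper name below are hypothetical.
 */
#define PTE_ACCESSED_SKETCH (1u << 5)
#define PTE_DIRTY_SKETCH    (1u << 6)

static void pte_update_flags_sketch(hwaddr pte_addr, bool is_write)
{
    uint32_t pte = ldl_phys(pte_addr);

    pte |= PTE_ACCESSED_SKETCH;
    if (is_write) {
        pte |= PTE_DIRTY_SKETCH;
    }
    /* Write back without dirtying the page or invalidating TBs. */
    stl_phys_notdirty(pte_addr, pte);
}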

/* warning: addr must be aligned */
static inline void stl_phys_internal(hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch,
                             addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        io_mem_write(section->mr, addr, val, 4);
    } else {
        unsigned long addr1;
        addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, addr);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 4);
    }
}

void stl_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stl_le_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stl_be_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stb_phys(hwaddr addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* warning: addr must be aligned */
static inline void stw_phys_internal(hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch,
                             addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        io_mem_write(section->mr, addr, val, 2);
    } else {
        unsigned long addr1;
        addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, addr);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 2);
    }
}

void stw_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stw_le_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stw_be_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stq_phys(hwaddr addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_le_phys(hwaddr addr, uint64_t val)
{
    val = cpu_to_le64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_be_phys(hwaddr addr, uint64_t val)
{
    val = cpu_to_be64(val);
    cpu_physical_memory_write(addr, &val, 8);
}
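
/*
 * Sketch (hypothetical ring descriptor): the _le/_be store and load variants
 * fix the byte order of the guest-visible data independently of the target's
 * native endianness, which is what device models usually need.  For a
 * descriptor kept in guest RAM, writing with stq_le_phys() and reading back
 * with ldq_le_phys() round-trips the host-order value on every target.
 */
static void write_le_desc_sketch(hwaddr desc_addr, uint64_t buf_addr)
{
    stq_le_phys(desc_addr, buf_addr);           /* stored little-endian */
    assert(ldq_le_phys(desc_addr) == buf_addr); /* host-order value back */
}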

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1) {
            return -1;
        }
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len) {
            l = len;
        }
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write) {
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        } else {
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
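
/*
 * Sketch (hypothetical debugger hook): cpu_memory_rw_debug() resolves guest
 * virtual addresses through cpu_get_phys_page_debug(), so it works on
 * virtual addresses and can also patch ROM (e.g. for software breakpoints).
 * The wrapper below only covers the read direction and reports failure when
 * part of the range has no physical mapping.
 */
static bool debug_read_guest_sketch(CPUArchState *env, target_ulong vaddr,
                                    uint8_t *buf, int len)
{
    return cpu_memory_rw_debug(env, vaddr, buf, len, 0) == 0;
}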
#endif

#if !defined(CONFIG_USER_ONLY)

/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool virtio_is_big_endian(void);
bool virtio_is_big_endian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}

#endif

#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch,
                             phys_addr >> TARGET_PAGE_BITS);

    return !(memory_region_is_ram(section->mr) ||
             memory_region_is_romd(section->mr));
}
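
/*
 * Sketch (hypothetical caller): a dump or introspection path can consult
 * cpu_physical_memory_is_io() to avoid reading through device regions,
 * where a load could trigger side effects.  The helper name and the
 * io_default fallback are illustrative only.
 */
static uint32_t ldl_phys_ram_only_sketch(hwaddr addr, uint32_t io_default)
{
    if (cpu_physical_memory_is_io(addr)) {
        return io_default;      /* don't touch device registers */
    }
    return ldl_phys(addr);
}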
#endif